diff --git a/.gitignore b/.gitignore index 9389d8f2c..7be24ad1d 100644 --- a/.gitignore +++ b/.gitignore @@ -30,3 +30,4 @@ r2/srcount.pickle r2/myproduction.ini .DS_Store r2/r2.egg-info/** +r2/r2/public/static/sprite.png diff --git a/r2/example.ini b/r2/example.ini index f7ad061a1..8771d189f 100644 --- a/r2/example.ini +++ b/r2/example.ini @@ -9,7 +9,7 @@ template_debug = true uncompressedJS = true translator = true sqlprinting = false - +exception_logging = false log_start = true proxy_addr = @@ -30,6 +30,9 @@ adframetracker_url = clicktracker_url = traffic_url = +# Just a list of words. Used by errlog.py to make up names for new errors. +words_file = /usr/dict/words + # for sponsored links: payment_domain = https://pay.localhost/ authorizenetname = @@ -93,6 +96,9 @@ db_table_report_account_subreddit = relation, account, subreddit, main db_table_award = thing, award db_table_trophy = relation, account, award, award +db_table_ad = thing, main +db_table_adsr = relation, ad, subreddit, main + disallow_db_writes = False ### @@ -103,6 +109,8 @@ timezone = UTC lang = en monitored_servers = localhost +enable_usage_stats = false + #query cache settings num_query_queue_workers = 0 query_queue_worker = @@ -174,6 +182,10 @@ sr_dropdown_threshold = 15 smtp_server = localhost new_link_share_delay = 5 minutes + +# email address of the person / people running your site +nerds_email = root@localhost + share_reply = noreply@yourdomain.com #user-agents to limit diff --git a/r2/r2/config/middleware.py b/r2/r2/config/middleware.py index a559b2c6d..fb72d0eaf 100644 --- a/r2/r2/config/middleware.py +++ b/r2/r2/config/middleware.py @@ -33,7 +33,7 @@ from pylons.wsgiapp import PylonsApp, PylonsBaseWSGIApp from r2.config.environment import load_environment from r2.config.rewrites import rewrites -from r2.lib.utils import rstrips +from r2.lib.utils import rstrips, is_authorized_cname from r2.lib.jsontemplates import api_type #middleware stuff @@ -245,11 +245,10 @@ class DomainMiddleware(object): auth_cnames = [x.strip() for x in auth_cnames.split(',')] # we are going to be matching with endswith, so make sure there # are no empty strings that have snuck in - self.auth_cnames = [x for x in auth_cnames if x] + self.auth_cnames = filter(None, auth_cnames) def is_auth_cname(self, domain): - return any((domain == cname or domain.endswith('.' 
+ cname)) - for cname in self.auth_cnames) + return is_authorized_cname(domain, self.auth_cnames) def __call__(self, environ, start_response): # get base domain as defined in INI file diff --git a/r2/r2/config/routing.py b/r2/r2/config/routing.py index 3442aa5ca..d7763f809 100644 --- a/r2/r2/config/routing.py +++ b/r2/r2/config/routing.py @@ -31,7 +31,7 @@ def make_map(global_conf={}, app_conf={}): mc = map.connect admin_routes.add(mc) - + mc('/login', controller='front', action='login') mc('/logout', controller='front', action='logout') mc('/verify', controller='front', action='verify') @@ -41,15 +41,16 @@ def make_map(global_conf={}, app_conf={}): mc('/validuser', controller='front', action='validuser') mc('/over18', controller='post', action='over18') - + mc('/search', controller='front', action='search') mc('/sup', controller='front', action='sup') mc('/traffic', controller='front', action='site_traffic') - + + mc('/about/message/:where', controller='message', action='listing') mc('/about/:location', controller='front', action='editreddit', location = 'about') - + mc('/reddits/create', controller='front', action='newreddit') mc('/reddits/search', controller='front', action='search_reddits') mc('/reddits/login', controller='front', action='login') @@ -60,42 +61,51 @@ def make_map(global_conf={}, app_conf={}): mc('/reddits/mine/:where', controller='myreddits', action='listing', where='subscriber', requirements=dict(where='subscriber|contributor|moderator')) - + mc('/buttons', controller='buttons', action='button_demo_page') #the frame mc('/button_content', controller='buttons', action='button_content') #/button.js and buttonlite.js - the embeds - mc('/button', controller='buttons', action='button_embed') + mc('/button', controller='buttonjs', action='button_embed') mc('/buttonlite', controller='buttons', action='button_lite') - + mc('/widget', controller='buttons', action='widget_demo_page') mc('/bookmarklets', controller='buttons', action='bookmarklets') - + mc('/awards', controller='front', action='awards') - + mc('/i18n', controller='feedback', action='i18n') mc('/feedback', controller='feedback', action='feedback') mc('/ad_inq', controller='feedback', action='ad_inq') - + mc('/admin/i18n', controller='i18n', action='list') mc('/admin/i18n/:action', controller='i18n') mc('/admin/i18n/:action/:lang', controller='i18n') + mc('/admin/usage', controller='usage') + + # Used for editing ads + mc('/admin/ads', controller='ads') + mc('/admin/ads/:adcn/:action', controller='ads', + requirements=dict(action="assign|srs")) + mc('/admin/awards', controller='awards') mc('/admin/awards/:awardcn/:action', controller='awards', requirements=dict(action="give|winners")) + mc('/admin/errors', controller='errorlog') + mc('/admin/:action', controller='admin') - + mc('/user/:username/about', controller='user', action='about', where='overview') mc('/user/:username/:where', controller='user', action='listing', where='overview') - + mc('/prefs/:location', controller='front', action='prefs', location='options') - + mc('/info/0:article/*rest', controller = 'front', action='oldinfo', dest='comments', type='ancient') mc('/info/:article/:dest/:comment', controller='front', @@ -113,7 +123,7 @@ def make_map(global_conf={}, app_conf={}): action = 'comments', title=None, comment = None) mc('/duplicates/:article/:title', controller = 'front', action = 'duplicates', title=None) - + mc('/mail/optout', controller='front', action = 'optout') mc('/mail/optin', controller='front', action = 'optin') mc('/stylesheet', 
controller = 'front', action = 'stylesheet') @@ -138,8 +148,8 @@ def make_map(global_conf={}, app_conf={}): mc('/shutdown', controller='health', action='shutdown') mc('/', controller='hot', action='listing') - - listing_controllers = "hot|saved|toplinks|new|recommended|randomrising|comments" + + listing_controllers = "hot|saved|new|recommended|randomrising|comments" mc('/:controller', action='listing', requirements=dict(controller=listing_controllers)) @@ -148,18 +158,20 @@ def make_map(global_conf={}, app_conf={}): mc('/:sort', controller='browse', sort='top', action = 'listing', requirements = dict(sort = 'top|controversial')) - + mc('/message/compose', controller='message', action='compose') mc('/message/messages/:mid', controller='message', action='listing', where = "messages") mc('/message/:where', controller='message', action='listing') - + mc('/message/moderator/:subwhere', controller='message', action='listing', + where = 'moderator') + mc('/:action', controller='front', requirements=dict(action="password|random|framebuster")) mc('/:action', controller='embed', requirements=dict(action="help|blog")) mc('/help/*anything', controller='embed', action='help') - + mc('/goto', controller='toolbar', action='goto') mc('/tb/:id', controller='toolbar', action='tb') mc('/toolbar/:action', controller='toolbar', @@ -172,7 +184,7 @@ def make_map(global_conf={}, app_conf={}): # additional toolbar-related rules just above the catchall mc('/d/:what', controller='api', action='bookmarklet') - + mc('/resetpassword/:key', controller='front', action='resetpassword') mc('/verification/:key', controller='front', @@ -184,7 +196,7 @@ def make_map(global_conf={}, app_conf={}): requirements=dict(action="login|reg")) mc('/post/:action', controller='post', requirements=dict(action="options|over18|unlogged_options|optout|optin|login|reg")) - + mc('/api/distinguish/:how', controller='api', action="distinguish") mc('/api/:action/:url_user', controller='api', requirements=dict(action="login|register")) @@ -193,7 +205,7 @@ def make_map(global_conf={}, app_conf={}): mc('/api/:action', controller='promote', requirements=dict(action="promote|unpromote|new_promo|link_thumb|freebie|promote_note|update_pay|refund|traffic_viewer|rm_traffic_viewer")) mc('/api/:action', controller='api') - + mc('/captcha/:iden', controller='captcha', action='captchaimg') mc('/mediaembed/:link', controller="mediaembed", action="mediaembed") @@ -202,17 +214,23 @@ def make_map(global_conf={}, app_conf={}): mc('/store', controller='redirect', action='redirect', dest='http://store.reddit.com/index.html') - + mc('/code', controller='redirect', action='redirect', dest='http://code.reddit.com/') - + mc('/mobile', controller='redirect', action='redirect', dest='http://m.reddit.com/') mc('/authorize_embed', controller = 'front', action = 'authorize_embed') - - mc("/ads/", controller = "front", action = "ad") - mc("/ads/:reddit", controller = "front", action = "ad") + + # Used for showing ads + mc("/ads/", controller = "mediaembed", action = "ad") + mc("/ads/r/:reddit_name", controller = "mediaembed", action = "ad") + mc("/ads/:codename", controller = "mediaembed", action = "ad_by_codename") + + mc('/comscore-iframe/', controller='mediaembed', action='comscore') + mc('/comscore-iframe/*url', controller='mediaembed', action='comscore') + # This route handles displaying the error page and # graphics used in the 404/500 # error pages. 
It should likely stay at the top diff --git a/r2/r2/controllers/__init__.py b/r2/r2/controllers/__init__.py index bd02fd347..bb580fe5f 100644 --- a/r2/r2/controllers/__init__.py +++ b/r2/r2/controllers/__init__.py @@ -22,7 +22,6 @@ from listingcontroller import ListingController from listingcontroller import HotController from listingcontroller import SavedController -from listingcontroller import ToplinksController from listingcontroller import NewController from listingcontroller import BrowseController from listingcontroller import RecommendedController @@ -39,6 +38,7 @@ from feedback import FeedbackController from front import FrontController from health import HealthController from buttons import ButtonsController +from buttons import ButtonjsController from captcha import CaptchaController from embed import EmbedController from error import ErrorController @@ -46,6 +46,9 @@ from post import PostController from toolbar import ToolbarController from i18n import I18nController from awards import AwardsController +from ads import AdsController +from usage import UsageController +from errorlog import ErrorlogController from promotecontroller import PromoteController from mediaembed import MediaembedController diff --git a/r2/r2/controllers/ads.py b/r2/r2/controllers/ads.py new file mode 100644 index 000000000..1b2ebb648 --- /dev/null +++ b/r2/r2/controllers/ads.py @@ -0,0 +1,56 @@ +# The contents of this file are subject to the Common Public Attribution +# License Version 1.0. (the "License"); you may not use this file except in +# compliance with the License. You may obtain a copy of the License at +# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public +# License Version 1.1, but Sections 14 and 15 have been added to cover use of +# software over a computer network and provide for limited attribution for the +# Original Developer. In addition, Exhibit A has been modified to be consistent +# with Exhibit B. +# +# Software distributed under the License is distributed on an "AS IS" basis, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for +# the specific language governing rights and limitations under the License. +# +# The Original Code is Reddit. +# +# The Original Developer is the Initial Developer. The Initial Developer of the +# Original Code is CondeNet, Inc. +# +# All portions of the code written by CondeNet are Copyright (c) 2006-2010 +# CondeNet, Inc. All Rights Reserved. 
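For readers who don't have the Pylons/Routes conventions in their head, here is a minimal sketch of how the admin-ad routes added in routing.py above resolve to the AdsController methods defined in the new file below. The Mapper is a stand-in for make_map(), only the two routes from that hunk are wired up, the 'my_ad' codename is made up, and the printed dict is approximate.

from routes import Mapper

m = Mapper()
# same paths and requirements as the mc(...) calls added in routing.py
m.connect('/admin/ads', controller='ads')
m.connect('/admin/ads/:adcn/:action', controller='ads',
          requirements=dict(action='assign|srs'))

print m.match('/admin/ads/my_ad/assign')
# roughly {'controller': 'ads', 'action': 'assign', 'adcn': 'my_ad'};
# Pylons then prefixes the HTTP method, so a GET dispatches to
# AdsController.GET_assign(adcn='my_ad'), and 'srs' maps to GET_srs.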
+################################################################################ +from pylons import request, g +from reddit_base import RedditController +from r2.lib.pages import AdminPage, AdminAds, AdminAdAssign, AdminAdSRs +from validator import * + +class AdsController(RedditController): + + @validate(VSponsor()) + def GET_index(self): + res = AdminPage(content = AdminAds(), + show_sidebar = False, + title = 'ads').render() + return res + + @validate(VSponsor(), + ad = VAdByCodename('adcn')) + def GET_assign(self, ad): + if ad is None: + abort(404, 'page not found') + + res = AdminPage(content = AdminAdAssign(ad), + show_sidebar = False, + title='assign an ad to a community').render() + return res + + @validate(VSponsor(), + ad = VAdByCodename('adcn')) + def GET_srs(self, ad): + if ad is None: + abort(404, 'page not found') + + res = AdminPage(content = AdminAdSRs(ad), + show_sidebar = False, + title='ad srs').render() + return res diff --git a/r2/r2/controllers/api.py b/r2/r2/controllers/api.py index 4942ae785..2bdad90dd 100644 --- a/r2/r2/controllers/api.py +++ b/r2/r2/controllers/api.py @@ -50,10 +50,25 @@ from r2.lib.media import force_thumbnail, thumbnail_url from r2.lib.comment_tree import add_comment, delete_comment from r2.lib import tracking, cssfilter, emailer from r2.lib.subreddit_search import search_reddits +from r2.lib.log import log_text from datetime import datetime, timedelta from md5 import md5 +def reject_vote(thing): + voteword = request.params.get('dir') + + if voteword == '1': + voteword = 'upvote' + elif voteword == '0': + voteword = '0-vote' + elif voteword == '-1': + voteword = 'downvote' + + log_text ("rejected vote", "Rejected %s from %s (%s) on %s %s via %s" % + (voteword, c.user.name, request.ip, thing.__class__.__name__, + thing._id36, request.referer), "info") + class ApiController(RedditController): """ Controller which deals with almost all AJAX site interaction. @@ -73,6 +88,9 @@ class ApiController(RedditController): return abort(404, 'not found') links = link_from_url(request.params.get('url'), filter_spam = False) + if not links: + return abort(404, 'not found') + listing = wrap_links(links, num = count) return BoringPage(_("API"), content = listing).render() @@ -107,19 +125,19 @@ class ApiController(RedditController): VUser(), VModhash(), ip = ValidIP(), - to = VExistingUname('to'), + to = VMessageRecipent('to'), subject = VRequired('subject', errors.NO_SUBJECT), - body = VMessage(['text', 'message'])) + body = VMarkdown(['text', 'message'])) def POST_compose(self, form, jquery, to, subject, body, ip): """ handles message composition under /message/compose. 
""" if not (form.has_errors("to", errors.USER_DOESNT_EXIST, - errors.NO_USER) or + errors.NO_USER, errors.SUBREDDIT_NOEXIST) or form.has_errors("subject", errors.NO_SUBJECT) or form.has_errors("text", errors.NO_TEXT, errors.TOO_LONG) or form.has_errors("captcha", errors.BAD_CAPTCHA)): - + m, inbox_rel = Message._new(c.user, to, subject, body, ip) form.set_html(".status", _("your message has been delivered")) form.set_inputs(to = "", subject = "", text = "", captcha="") @@ -128,20 +146,20 @@ class ApiController(RedditController): @validatedForm(VUser(), VCaptcha(), - ValidDomain('url'), VRatelimit(rate_user = True, rate_ip = True, prefix = "rate_submit_"), ip = ValidIP(), sr = VSubmitSR('sr'), url = VUrl(['url', 'sr']), + banmsg = VOkayDomain('url'), title = VTitle('title'), save = VBoolean('save'), - selftext = VSelfText('text'), + selftext = VMarkdown('text'), kind = VOneOf('kind', ['link', 'self', 'poll']), then = VOneOf('then', ('tb', 'comments'), default='comments')) - def POST_submit(self, form, jquery, url, selftext, kind, title, save, - sr, ip, then): + def POST_submit(self, form, jquery, url, banmsg, selftext, kind, title, + save, sr, ip, then): #backwards compatability if url == 'self': kind = 'self' @@ -178,6 +196,11 @@ class ApiController(RedditController): elif form.has_errors("title", errors.NO_TEXT): pass +# Uncomment if we want to let spammers know we're on to them +# if banmsg: +# form.set_html(".field-url.BAD_URL", banmsg) +# return + elif kind == 'self' and form.has_errors('text', errors.TOO_LONG): pass @@ -194,6 +217,9 @@ class ApiController(RedditController): l = Link._submit(request.post.title, url if kind == 'link' else 'self', c.user, sr, ip) + if banmsg: + admintools.spam(l, banner = "domain (%s)" % banmsg) + if kind == 'self': l.url = l.make_permalink_slow() l.is_self = True @@ -264,31 +290,18 @@ class ApiController(RedditController): rem = VBoolean('rem'), reason = VReason('reason')) def POST_login(self, form, jquery, user, username, dest, rem, reason): + if reason and reason[0] == 'redirect': dest = reason[1] - hc_key = "login_attempts-%s" % request.ip - - # TODO: You-know-what (not mentioning it, just in case - # we accidentally release code with this comment in it) - - # Cache lifetime for login_attmempts - la_expire_time = 3600 * 8 - - recent_attempts = g.hardcache.add(hc_key, 0, time=la_expire_time) - - fake_failure = False - if recent_attempts >= 25: - g.log.error ("%s failed to login as %s (attempt #%d)" - % (request.ip, username, recent_attempts)) - fake_failure = True - - if fake_failure or form.has_errors("passwd", errors.WRONG_PASSWORD): + if login_throttle(username, wrong_password = form.has_errors("passwd", + errors.WRONG_PASSWORD)): VRatelimit.ratelimit(rate_ip = True, prefix = 'login_', seconds=1) - g.hardcache.incr(hc_key, time = la_expire_time) - else: - self._login(form, user, dest, rem) + c.errors.add(errors.WRONG_PASSWORD, field = "passwd") + + if not form.has_errors("passwd", errors.WRONG_PASSWORD): + self._login(form, user, dest, rem) @validatedForm(VCaptcha(), VRatelimit(rate_ip = True, prefix = "rate_register_"), @@ -310,11 +323,11 @@ class ApiController(RedditController): user = register(name, password) VRatelimit.ratelimit(rate_ip = True, prefix = "rate_register_") - + #anything else we know (email, languages)? 
if email: user.email = email - + user.pref_lang = c.lang if c.content_langs == 'all': user.pref_content_langs = 'all' @@ -322,10 +335,10 @@ class ApiController(RedditController): langs = list(c.content_langs) langs.sort() user.pref_content_langs = tuple(langs) - + d = c.user._dirties.copy() user._commit() - + c.user = user if reason: if reason[0] == 'redirect': @@ -333,7 +346,7 @@ class ApiController(RedditController): elif reason[0] == 'subscribe': for sr, sub in reason[1].iteritems(): self._subscribe(sr, sub) - + self._login(form, user, dest, rem) @noresponse(VUser(), @@ -530,7 +543,7 @@ class ApiController(RedditController): if isinstance(thing, Link): sr = thing.subreddit_slow expire_hot(sr) - queries.new_link(thing) + queries.delete_links(thing) #comments have special delete tasks elif isinstance(thing, Comment): @@ -567,11 +580,11 @@ class ApiController(RedditController): @validatedForm(VUser(), VModhash(), item = VByNameIfAuthor('thing_id'), - text = VComment('text')) + text = VMarkdown('text')) def POST_editusertext(self, form, jquery, item, text): - if not form.has_errors("text", - errors.NO_TEXT, errors.TOO_LONG, - errors.NOT_AUTHOR): + if (not form.has_errors("text", + errors.NO_TEXT, errors.TOO_LONG) and + not form.has_errors("thing_id", errors.NOT_AUTHOR)): if isinstance(item, Comment): kind = 'comment' @@ -580,7 +593,10 @@ class ApiController(RedditController): kind = 'link' item.selftext = text - if (item._date < timeago('60 seconds') + if item._deleted: + return abort(403, "forbidden") + + if (item._date < timeago('3 minutes') or (item._ups + item._downs > 2)): item.editted = True @@ -601,7 +617,7 @@ class ApiController(RedditController): prefix = "rate_comment_"), ip = ValidIP(), parent = VSubmitParent(['thing_id', 'parent']), - comment = VComment(['text', 'comment'])) + comment = VMarkdown(['text', 'comment'])) def POST_comment(self, commentform, jquery, parent, comment, ip): should_ratelimit = True #check the parent type here cause we need that for the @@ -633,7 +649,8 @@ class ApiController(RedditController): not commentform.has_errors("ratelimit", errors.RATELIMIT) and not commentform.has_errors("parent", - errors.DELETED_COMMENT)): + errors.DELETED_COMMENT, + errors.DELETED_LINK)): if is_message: to = Account._byID(parent.author_id) @@ -650,11 +667,9 @@ class ApiController(RedditController): queries.queue_vote(c.user, item, True, ip, cheater = (errors.CHEATER, None) in c.errors) - #update last modified - set_last_modified(link, 'comments') - - #update the comment cache - add_comment(item) + # adding to comments-tree is done as part of + # newcomments_q, so if they refresh immediately they + # won't see their comment # clean up the submission form and remove it from the DOM (if reply) t = commentform.find("textarea") @@ -666,7 +681,7 @@ class ApiController(RedditController): # insert the new comment jquery.insert_things(item) - + # remove any null listings that may be present jquery("#noresults").hide() @@ -751,14 +766,12 @@ class ApiController(RedditController): return if vote_type == "rejected": - g.log.error("POST_vote: rejected vote (%s) from '%s' on %s (%s)"% - (request.params.get('dir'), c.user.name, - thing._fullname, request.ip)) + reject_vote(thing) store = False # TODO: temporary hack until we migrate the rest of the vote data if thing._date < datetime(2009, 4, 17, 0, 0, 0, 0, g.tz): - g.log.error("POST_vote: ignoring old vote on %s" % thing._fullname) + g.log.debug("POST_vote: ignoring old vote on %s" % thing._fullname) store = False # in a lock to prevent 
duplicate votes from people @@ -982,21 +995,20 @@ class ApiController(RedditController): name = VSubredditName("name"), title = VLength("title", max_length = 100), domain = VCnameDomain("domain"), - description = VLength("description", max_length = 1000), + description = VMarkdown("description", max_length = 1000), lang = VLang("lang"), over_18 = VBoolean('over_18'), allow_top = VBoolean('allow_top'), show_media = VBoolean('show_media'), + use_whitelist = VBoolean('use_whitelist'), type = VOneOf('type', ('public', 'private', 'restricted')), ip = ValidIP(), - ad_type = VOneOf('ad', ('default', 'basic', 'custom')), - ad_file = VLength('ad-location', max_length = 500), sponsor_text =VLength('sponsorship-text', max_length = 500), sponsor_name =VLength('sponsorship-name', max_length = 500), sponsor_url = VLength('sponsorship-url', max_length = 500), css_on_cname = VBoolean("css_on_cname"), ) - def POST_site_admin(self, form, jquery, name, ip, sr, ad_type, ad_file, + def POST_site_admin(self, form, jquery, name, ip, sr, sponsor_text, sponsor_url, sponsor_name, **kw): # the status button is outside the form -- have to reset by hand form.parent().set_html('.status', "") @@ -1005,7 +1017,7 @@ class ApiController(RedditController): kw = dict((k, v) for k, v in kw.iteritems() if k in ('name', 'title', 'domain', 'description', 'over_18', 'show_media', 'type', 'lang', "css_on_cname", - 'allow_top')) + 'allow_top', 'use_whitelist')) #if a user is banned, return rate-limit errors if c.user._spam: @@ -1054,10 +1066,6 @@ class ApiController(RedditController): elif sr.is_moderator(c.user) or c.user_is_admin: if c.user_is_admin: - sr.ad_type = ad_type - if ad_type != "custom": - ad_file = Subreddit._defaults['ad_file'] - sr.ad_file = ad_file sr.sponsorship_text = sponsor_text or "" sr.sponsorship_url = sponsor_url or None sr.sponsorship_name = sponsor_name or None @@ -1137,50 +1145,58 @@ class ApiController(RedditController): if r: queries.new_savehide(r) - @noresponse(VUser(), - VModhash(), - thing = VByName('id', multiple = True)) - def POST_collapse_message(self, thing): - if not thing: + def collapse_handler(self, things, collapse): + if not things: return - for t in tup(thing): + things = tup(things) + srs = Subreddit._byID([t.sr_id for t in things if t.sr_id], + return_dict = True) + for t in things: if hasattr(t, "to_id") and c.user._id == t.to_id: - t.to_collapse = True + t.to_collapse = collapse elif hasattr(t, "author_id") and c.user._id == t.author_id: - t.author_collapse = True + t.author_collapse = collapse + elif isinstance(t, Message) and t.sr_id: + if srs[t.sr_id].is_moderator(c.user): + t.to_collapse = collapse t._commit() @noresponse(VUser(), VModhash(), - thing = VByName('id', multiple = True)) - def POST_uncollapse_message(self, thing): + things = VByName('id', multiple = True)) + def POST_collapse_message(self, things): + self.collapse_handler(things, True) + + @noresponse(VUser(), + VModhash(), + things = VByName('id', multiple = True)) + def POST_uncollapse_message(self, things): + self.collapse_handler(things, False) + + def unread_handler(self, thing, unread): if not thing: return - for t in tup(thing): - if hasattr(t, "to_id") and c.user._id == t.to_id: - t.to_collapse = False - elif hasattr(t, "author_id") and c.user._id == t.author_id: - t.author_collapse = False - t._commit() + # if the message has a recipient, try validating that + # desitination first (as it is cheaper and more common) + if not hasattr(thing, "to_id") or c.user._id == thing.to_id: + queries.set_unread(thing, 
c.user, unread) + # if the message is for a subreddit, check that next + if hasattr(thing, "sr_id"): + sr = thing.subreddit_slow + if sr and sr.is_moderator(c.user): + queries.set_unread(thing, sr, unread) @noresponse(VUser(), VModhash(), thing = VByName('id')) def POST_unread_message(self, thing): - if not thing: - return - if hasattr(thing, "to_id") and c.user._id != thing.to_id: - return - queries.set_unread(thing, True) + self.unread_handler(thing, True) @noresponse(VUser(), VModhash(), thing = VByName('id')) def POST_read_message(self, thing): - if not thing: return - if hasattr(thing, "to_id") and c.user._id != thing.to_id: - return - queries.set_unread(thing, False) + self.unread_handler(thing, False) @noresponse(VUser(), VModhash(), @@ -1203,10 +1219,14 @@ class ApiController(RedditController): @validatedForm(VUser(), parent = VByName('parent_id')) def POST_moremessages(self, form, jquery, parent): - if not parent.can_view(): + if not parent.can_view_slow(): return self.abort(403,'forbidden') - builder = MessageBuilder(c.user, parent = parent, skip = False) + if parent.sr_id: + builder = SrMessageBuilder(parent.subreddit_slow, + parent = parent, skip = False) + else: + builder = UserMessageBuilder(c.user, parent = parent, skip = False) listing = Listing(builder).listing() a = [] for item in listing.things: @@ -1221,19 +1241,18 @@ class ApiController(RedditController): @validatedForm(link = VByName('link_id'), sort = VMenu('where', CommentSortMenu), children = VCommentIDs('children'), - depth = VInt('depth', min = 0, max = 8), mc_id = nop('id')) def POST_morechildren(self, form, jquery, - link, sort, children, depth, mc_id): + link, sort, children, mc_id): user = c.user if c.user_is_loggedin else None if not link or not link.subreddit_slow.can_view(user): - return self.abort(403,'forbidden') + return abort(403,'forbidden') if children: builder = CommentBuilder(link, CommentSortMenu.operator(sort), children) listing = Listing(builder, nextprev = False) - items = listing.get_items(starting_depth = depth, num = 20) + items = listing.get_items(num = 20) def _children(cur_items): items = [] for cm in cur_items: @@ -1399,6 +1418,107 @@ class ApiController(RedditController): tr._is_enabled = True + @validatedForm(VAdmin(), + hexkey=VLength("hexkey", max_length=32), + nickname=VLength("nickname", max_length = 1000), + status = VOneOf("status", + ("new", "severe", "interesting", "normal", "fixed"))) + def POST_edit_error(self, form, jquery, hexkey, nickname, status): + if form.has_errors(("hexkey", "nickname", "status"), + errors.NO_TEXT, errors.INVALID_OPTION): + pass + + if form.has_error(): + return + + key = "error_nickname-%s" % str(hexkey) + g.hardcache.set(key, nickname, 86400 * 365) + + key = "error_status-%s" % str(hexkey) + g.hardcache.set(key, status, 86400 * 365) + + form.set_html(".status", _('saved')) + + @validatedForm(VSponsor(), + ad = VByName("fullname"), + colliding_ad=VAdByCodename(("codename", "fullname")), + codename = VLength("codename", max_length = 100), + imgurl = VLength("imgurl", max_length = 1000), + linkurl = VLength("linkurl", max_length = 1000)) + def POST_editad(self, form, jquery, ad, colliding_ad, codename, + imgurl, linkurl): + if form.has_errors(("codename", "imgurl", "linkurl"), + errors.NO_TEXT): + pass + + if form.has_errors(("codename"), errors.INVALID_OPTION): + form.set_html(".status", "some other ad has that codename") + pass + + if form.has_error(): + return + + if ad is None: + Ad._new(codename, imgurl, linkurl) + form.set_html(".status", "saved. 
reload to see it.") + return + + ad.codename = codename + ad.imgurl = imgurl + ad.linkurl = linkurl + ad._commit() + form.set_html(".status", _('saved')) + + @validatedForm(VSponsor(), + ad = VByName("fullname"), + sr = VSubmitSR("community"), + weight = VInt("weight", + coerce=False, min=0, max=100000), + ) + def POST_assignad(self, form, jquery, ad, sr, weight): + if form.has_errors("ad", errors.NO_TEXT): + pass + + if form.has_errors("community", errors.SUBREDDIT_REQUIRED, + errors.SUBREDDIT_NOEXIST, errors.SUBREDDIT_NOTALLOWED): + pass + + if form.has_errors("fullname", errors.NO_TEXT): + pass + + if form.has_errors("weight", errors.BAD_NUMBER): + pass + + if form.has_error(): + return + + if ad.codename == "DART" and sr.name == g.default_sr and weight != 100: + log_text("Bad default DART weight", + "The default DART weight can only be 100, not %s." + % weight, + "error") + abort(403, 'forbidden') + + existing = AdSR.by_ad_and_sr(ad, sr) + + if weight is not None: + if existing: + existing.weight = weight + existing._commit() + else: + AdSR._new(ad, sr, weight) + + form.set_html(".status", _('saved')) + + else: + if existing: + existing._delete() + AdSR.by_ad(ad, _update=True) + AdSR.by_sr(sr, _update=True) + + form.set_html(".status", _('deleted')) + + @validatedForm(VAdmin(), award = VByName("fullname"), colliding_award=VAwardByCodename(("codename", "fullname")), @@ -1448,10 +1568,8 @@ class ApiController(RedditController): if form.has_errors("award", errors.NO_TEXT): pass - if form.has_errors("recipient", errors.USER_DOESNT_EXIST): - pass - - if form.has_errors("recipient", errors.NO_USER): + if form.has_errors("recipient", errors.USER_DOESNT_EXIST, + errors.NO_USER): pass if form.has_errors("fullname", errors.NO_TEXT): @@ -1488,6 +1606,7 @@ class ApiController(RedditController): return self.abort404() recipient = trophy._thing1 award = trophy._thing2 + trophy._delete() Trophy.by_account(recipient, _update=True) Trophy.by_award(award, _update=True) @@ -1566,7 +1685,7 @@ class ApiController(RedditController): "%s_%s" % (s._fullname, s.sponsorship_name)) - @json_validate(query = nop('query')) + @json_validate(query = VPrintable('query', max_length = 50)) def POST_search_reddit_names(self, query): names = [] if query: diff --git a/r2/r2/controllers/buttons.py b/r2/r2/controllers/buttons.py index df99e0930..34f1b1995 100644 --- a/r2/r2/controllers/buttons.py +++ b/r2/r2/controllers/buttons.py @@ -19,7 +19,7 @@ # All portions of the code written by CondeNet are Copyright (c) 2006-2010 # CondeNet, Inc. All Rights Reserved. 
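POST_search_reddit_names in the api.py hunk above tightens its input from nop('query') to VPrintable('query', max_length = 50). VPrintable itself comes from the validator module and is not shown in this diff; as an assumption, the acceptance test it performs is roughly "printable ASCII, bounded length":

def looks_printable(query, max_length=50):
    # hypothetical stand-in for VPrintable's check, not the real validator
    if query is None or len(query) > max_length:
        return False
    return all(32 <= ord(ch) <= 126 for ch in query)

A query that fails a check like this presumably surfaces as the new BAD_STRING error ("you used a character here that we can't handle") added to errors.py further down.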
################################################################################ -from reddit_base import RedditController +from reddit_base import RedditController, MinimalController, make_key from r2.lib.pages import Button, ButtonNoBody, ButtonEmbed, ButtonLite, \ ButtonDemoPanel, WidgetDemoPanel, Bookmarklets, BoringPage from r2.lib.pages.things import wrap_links @@ -31,6 +31,55 @@ from pylons.i18n import _ from r2.lib.filters import spaceCompress from r2.controllers.listingcontroller import ListingController +class ButtonjsController(MinimalController): + def pre(self): + MinimalController.pre(self) + # override user loggedin behavior to ensure this page always + # uses the page cache + (user, maybe_admin) = \ + valid_cookie(c.cookies[g.login_cookie].value + if g.login_cookie in c.cookies + else '') + if user: + self.user_is_loggedin = True + + @validate(buttontype = VInt('t', 1, 5), + url = VSanitizedUrl("url"), + _height = VInt('height', 0, 300), + _width = VInt('width', 0, 800), + autohide = VBoolean("autohide")) + def GET_button_embed(self, buttontype, _height, _width, url, autohide): + # no buttons on domain listings + if isinstance(c.site, DomainSR): + return self.abort404() + c.render_style = 'js' + c.response_content_type = 'text/javascript; charset=UTF-8' + if not c.user_is_loggedin and autohide: + c.response.content = "void(0);" + return c.response + + buttontype = buttontype or 1 + width, height = ((120, 22), (51, 69), (69, 52), + (51, 52), (600, 52))[min(buttontype - 1, 4)] + if _width: width = _width + if _height: height = _height + + bjs = ButtonEmbed(button=buttontype, + width=width, + height=height, + url = url, + referer = request.referer).render() + return self.sendjs(bjs, callback='', escape=False) + + def request_key(self): + return make_key('button_request_key', + c.lang, + c.content_langs, + request.host, + c.cname, + request.referer, + request.fullpath) + class ButtonsController(RedditController): def buttontype(self): b = request.get.get('t') or 1 @@ -83,7 +132,6 @@ class ButtonsController(RedditController): width = VInt('width', 0, 800), l = VByName('id')) def GET_button_content(self, url, title, css, vote, newwindow, width, l): - # no buttons on domain listings if isinstance(c.site, DomainSR): c.site = Default @@ -108,12 +156,9 @@ class ButtonsController(RedditController): button = self.buttontype(), **kw) l = self.get_wrapped_link(url, l, wrapper) - res = l.render() - c.response.content = spaceCompress(res) - return c.response + return l.render() - @validate(buttontype = VInt('t', 1, 5), url = VSanitizedUrl("url"), _height = VInt('height', 0, 300), @@ -191,5 +236,3 @@ class ButtonsController(RedditController): return BoringPage(_("bookmarklets"), show_sidebar = False, content=Bookmarklets()).render() - - diff --git a/r2/r2/controllers/error.py b/r2/r2/controllers/error.py index b155e4687..c48942153 100644 --- a/r2/r2/controllers/error.py +++ b/r2/r2/controllers/error.py @@ -32,7 +32,7 @@ from r2.lib.filters import safemarkdown, unsafe try: # place all r2 specific imports in here. 
If there is a code error, it'll get caught and # the stack trace won't be presented to the user in production - from reddit_base import RedditController + from reddit_base import RedditController, Cookies from r2.models.subreddit import Default, Subreddit from r2.models.link import Link from r2.lib import pages @@ -122,7 +122,7 @@ class ErrorController(RedditController): c.site.name) message = (strings.banned_subreddit % dict(link = '/message/compose?to=%s&subject=%s' % - (g.admin_message_acct, + (url_escape(g.admin_message_acct), url_escape(subject)))) res = pages.RedditError(_('this reddit has been banned'), @@ -146,8 +146,8 @@ class ErrorController(RedditController): def GET_document(self): try: - #no cookies on errors - c.cookies.clear() + # clear cookies the old fashioned way + c.cookies = Cookies() code = request.GET.get('code', '') srname = request.GET.get('srname', '') @@ -155,7 +155,7 @@ class ErrorController(RedditController): if srname: c.site = Subreddit._by_name(srname) if c.render_style not in self.allowed_render_styles: - return str(code) + return str(int(code)) elif takedown and code == '404': link = Link._by_fullname(takedown) return pages.TakedownPage(link).render() diff --git a/r2/r2/controllers/errorlog.py b/r2/r2/controllers/errorlog.py new file mode 100644 index 000000000..d11f0c100 --- /dev/null +++ b/r2/r2/controllers/errorlog.py @@ -0,0 +1,34 @@ +# The contents of this file are subject to the Common Public Attribution +# License Version 1.0. (the "License"); you may not use this file except in +# compliance with the License. You may obtain a copy of the License at +# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public +# License Version 1.1, but Sections 14 and 15 have been added to cover use of +# software over a computer network and provide for limited attribution for the +# Original Developer. In addition, Exhibit A has been modified to be consistent +# with Exhibit B. +# +# Software distributed under the License is distributed on an "AS IS" basis, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for +# the specific language governing rights and limitations under the License. +# +# The Original Code is Reddit. +# +# The Original Developer is the Initial Developer. The Initial Developer of the +# Original Code is CondeNet, Inc. +# +# All portions of the code written by CondeNet are Copyright (c) 2006-2010 +# CondeNet, Inc. All Rights Reserved. 
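One small but easy-to-miss change in the error.py hunk above: GET_document now returns str(int(code)) instead of str(code). Since code comes straight from request.GET, the presumed intent is that a non-numeric value now raises instead of being echoed back to the client verbatim. A quick illustration:

def render_error_code(raw_code):
    # raw_code is untrusted, e.g. request.GET.get('code', '')
    return str(int(raw_code))

print render_error_code('404')      # '404'
# render_error_code('<script>')     # ValueError rather than reflected markup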
+################################################################################ +from pylons import request, g +from reddit_base import RedditController +from r2.lib.pages import AdminPage, AdminErrorLog +from validator import * + +class ErrorlogController(RedditController): + @validate(VAdmin()) + def GET_index(self): + res = AdminPage(content = AdminErrorLog(), + title = 'error log', + show_sidebar = False + ).render() + return res diff --git a/r2/r2/controllers/errors.py b/r2/r2/controllers/errors.py index 527a346e5..28bcacf10 100644 --- a/r2/r2/controllers/errors.py +++ b/r2/r2/controllers/errors.py @@ -24,8 +24,8 @@ from pylons.i18n import _ from copy import copy error_list = dict(( - ('USER_REQUIRED', _("please login to do that")), - ('VERIFIED_USER_REQUIRED', _("you need to set a valid email address to do that.")), + ('USER_REQUIRED', _("please login to do that")), + ('VERIFIED_USER_REQUIRED', _("you need to set a valid email address to do that.")), ('NO_URL', _('a url is required')), ('BAD_URL', _('you should check that url')), ('BAD_CAPTCHA', _('care to try these again?')), @@ -33,9 +33,10 @@ error_list = dict(( ('USERNAME_TAKEN', _('that username is already taken')), ('NO_THING_ID', _('id not specified')), ('NOT_AUTHOR', _("you can't do that")), + ('DELETED_LINK', _('the link you are commenting on has been deleted')), ('DELETED_COMMENT', _('that comment has been deleted')), - ('DELETED_THING', _('that element has been deleted.')), - ('BAD_PASSWORD', _('invalid password')), + ('DELETED_THING', _('that element has been deleted')), + ('BAD_PASSWORD', _('that password is unacceptable')), ('WRONG_PASSWORD', _('invalid password')), ('BAD_PASSWORD_MATCH', _('passwords do not match')), ('NO_NAME', _('please enter a name')), @@ -47,6 +48,7 @@ error_list = dict(( ('NO_USER', _('please enter a username')), ('INVALID_PREF', "that preference isn't valid"), ('BAD_NUMBER', _("that number isn't in the right range (%(min)d to %(max)d)")), + ('BAD_STRING', _("you used a character here that we can't handle")), ('BAD_BID', _("your bid must be at least $%(min)d per day and no more than to $%(max)d in total.")), ('ALREADY_SUB', _("that link has already been submitted")), ('SUBREDDIT_EXISTS', _('that reddit already exists')), @@ -58,7 +60,6 @@ error_list = dict(( ('EXPIRED', _('your session has expired')), ('DRACONIAN', _('you must accept the terms first')), ('BANNED_IP', "IP banned"), - ('BANNED_DOMAIN', "Domain banned"), ('BAD_CNAME', "that domain isn't going to work"), ('USED_CNAME', "that domain is already in use"), ('INVALID_OPTION', _('that option is not valid')), diff --git a/r2/r2/controllers/front.py b/r2/r2/controllers/front.py index 212e69736..362d159d9 100644 --- a/r2/r2/controllers/front.py +++ b/r2/r2/controllers/front.py @@ -89,13 +89,15 @@ class FrontController(RedditController): else: links = list(links)[:g.num_serendipity] + rand.shuffle(links) + builder = IDBuilder(links, skip = True, keep_fn = lambda x: x.fresh, - num = g.num_serendipity) + num = 1) links = builder.get_items()[0] if links: - l = rand.choice(links) + l = links[0] return self.redirect(add_sr("/tb/" + l._id36)) else: return self.redirect(add_sr('/')) @@ -274,8 +276,12 @@ class FrontController(RedditController): content.append(FriendList()) elif location == 'update': content = PrefUpdate() + elif location == 'feeds' and c.user.pref_private_feeds: + content = PrefFeeds() elif location == 'delete': content = PrefDelete() + else: + return self.abort404() return PrefsPage(content = content, infotext=infotext).render() @@ 
-310,7 +316,7 @@ class FrontController(RedditController): # moderator is either reddit's moderator or an admin is_moderator = c.user_is_loggedin and c.site.is_moderator(c.user) or c.user_is_admin - + extension_handling = False if is_moderator and location == 'edit': pane = PaneStack() if created == 'true': @@ -320,8 +326,11 @@ class FrontController(RedditController): pane = ModList(editable = is_moderator) elif is_moderator and location == 'banned': pane = BannedList(editable = is_moderator) - elif location == 'contributors' and c.site.type != 'public': - pane = ContributorList(editable = is_moderator) + elif (location == 'contributors' and + (c.site.type != 'public' or + (c.user_is_loggedin and c.site.use_whitelist and + (c.site.is_moderator(c.user) or c.user_is_admin)))): + pane = ContributorList(editable = is_moderator) elif (location == 'stylesheet' and c.site.can_change_stylesheet(c.user) and not g.css_killswitch): @@ -338,18 +347,35 @@ class FrontController(RedditController): else c.site.get_spam()) builder_cls = (QueryBuilder if isinstance(query, thing.Query) else IDBuilder) + def keep_fn(x): + # no need to bother mods with banned users, or deleted content + if x.hidden or x._deleted: + return False + if location == "reports" and not x._spam: + return (x.reported > 0) + if location == "spam": + return x._spam + return True + builder = builder_cls(query, + skip = True, num = num, after = after, + keep_fn = keep_fn, count = count, reverse = reverse, wrap = ListingController.builder_wrapper) listing = LinkListing(builder) pane = listing.listing() + if c.user.pref_private_feeds: + extension_handling = "private" elif is_moderator and location == 'traffic': pane = RedditTraffic() + elif c.user_is_sponsor and location == 'ads': + pane = RedditAds() else: return self.abort404() - return EditReddit(content = pane).render() + return EditReddit(content = pane, + extension_handling = extension_handling).render() def GET_awards(self): """The awards page.""" @@ -517,51 +543,50 @@ class FrontController(RedditController): return builder.total_num, timing, res - def GET_login(self): + @validate(dest = VDestination()) + def GET_login(self, dest): """The /login form. No link to this page exists any more on the site (all actions invoking it now go through the login cover). 
However, this page is still used for logging the user in during submission or voting from the bookmarklets.""" - # dest is the location to redirect to upon completion - dest = request.get.get('dest','') or request.referer or '/' if (c.user_is_loggedin and not request.environ.get('extension') == 'embed'): return self.redirect(dest) return LoginPage(dest = dest).render() - def GET_logout(self): - dest = request.referer or '/' + @validate(VUser(), + VModhash(), + dest = VDestination()) + def GET_logout(self, dest): return self.redirect(dest) @validate(VUser(), - VModhash()) - def POST_logout(self, dest = None): + VModhash(), + dest = VDestination()) + def POST_logout(self, dest): """wipe login cookie and redirect to referer.""" self.logout() - dest = request.post.get('dest','') or request.referer or '/' return self.redirect(dest) - - @validate(VUser()) - def GET_adminon(self): + + @validate(VUser(), + dest = VDestination()) + def GET_adminon(self, dest): """Enable admin interaction with site""" #check like this because c.user_is_admin is still false if not c.user.name in g.admins: return self.abort404() self.login(c.user, admin = True) - - dest = request.referer or '/' return self.redirect(dest) - @validate(VAdmin()) - def GET_adminoff(self): + @validate(VAdmin(), + dest = VDestination()) + def GET_adminoff(self, dest): """disable admin interaction with site.""" if not c.user.name in g.admins: return self.abort404() self.login(c.user, admin = False) - - dest = request.referer or '/' return self.redirect(dest) def GET_validuser(self): @@ -604,9 +629,9 @@ class FrontController(RedditController): captcha = Captcha() if c.user.needs_captcha() else None sr_names = (Subreddit.submit_sr_names(c.user) or Subreddit.submit_sr_names(None)) - - return FormPage(_("submit"), + return FormPage(_("submit"), + show_sidebar = True, content=NewLink(url=url or '', title=title or '', subreddits = sr_names, @@ -714,7 +739,3 @@ class FrontController(RedditController): def GET_site_traffic(self): return BoringPage("traffic", content = RedditTraffic()).render() - - - def GET_ad(self, reddit = None): - return Dart_Ad(reddit).render(style="html") diff --git a/r2/r2/controllers/health.py b/r2/r2/controllers/health.py index ce98150f7..0668245c0 100644 --- a/r2/r2/controllers/health.py +++ b/r2/r2/controllers/health.py @@ -8,6 +8,8 @@ from pylons import c, g from reddit_base import RedditController from r2.lib.amqp import worker +from validator import * + class HealthController(RedditController): def shutdown(self): thread_pool = c.thread_pool @@ -40,9 +42,12 @@ class HealthController(RedditController): c.response.content = "i'm still alive!" 
return c.response - def GET_shutdown(self): - if not g.allow_shutdown: + @validate(secret=nop('secret')) + def GET_shutdown(self, secret): + if not g.shutdown_secret: self.abort404() + if not secret or secret != g.shutdown_secret: + self.abort403() c.dontcache = True #the will make the next health-check initiate the shutdown diff --git a/r2/r2/controllers/listingcontroller.py b/r2/r2/controllers/listingcontroller.py index 23ca47d31..81a8a67be 100644 --- a/r2/r2/controllers/listingcontroller.py +++ b/r2/r2/controllers/listingcontroller.py @@ -122,7 +122,7 @@ class ListingController(RedditController): builder_cls = SearchBuilder elif isinstance(self.query_obj, iters): builder_cls = IDBuilder - elif isinstance(self.query_obj, queries.CachedResults): + elif isinstance(self.query_obj, (queries.CachedResults, queries.MergedCachedResults)): builder_cls = IDBuilder b = builder_cls(self.query_obj, @@ -253,7 +253,7 @@ class HotController(FixListing, ListingController): and not isinstance(c.site, FakeSubreddit) and self.after is None and self.count == 0): - return get_hot(c.site, only_fullnames = True) + return get_hot([c.site], only_fullnames = True)[0] else: return c.site.get_links('hot', 'all') @@ -286,16 +286,6 @@ class SavedController(ListingController): def GET_listing(self, **env): return ListingController.GET_listing(self, **env) -class ToplinksController(ListingController): - where = 'toplinks' - title_text = _('top scoring links') - - def query(self): - return c.site.get_links('toplinks', 'all') - - def GET_listing(self, **env): - return ListingController.GET_listing(self, **env) - class NewController(ListingController): where = 'new' title_text = _('newest submissions') @@ -503,11 +493,12 @@ class UserController(ListingController): class MessageController(ListingController): show_sidebar = False + show_nums = False render_cls = MessagePage @property def menus(self): - if self.where in ('inbox', 'messages', 'comments', + if c.default_sr and self.where in ('inbox', 'messages', 'comments', 'selfreply', 'unread'): buttons = (NavButton(_("all"), "inbox"), NavButton(_("unread"), "unread"), @@ -517,12 +508,27 @@ class MessageController(ListingController): return [NavMenu(buttons, base_path = '/message/', default = 'inbox', type = "flatlist")] + elif not c.default_sr or self.where == 'moderator': + buttons = (NavButton(_("all"), "inbox"), + NavButton(_("unread"), "unread")) + return [NavMenu(buttons, base_path = '/message/moderator/', + default = 'inbox', type = "flatlist")] return [] def title(self): return _('messages') + ': ' + _(self.where) + def keep_fn(self): + def keep(item): + wouldkeep = item.keep_item(item) + # don't show user their own unread stuff + if ((self.where == 'unread' or self.subwhere == 'unread') + and item.author_id == c.user._id): + return False + return wouldkeep + return keep + @staticmethod def builder_wrapper(thing): if isinstance(thing, Comment): @@ -539,24 +545,32 @@ class MessageController(ListingController): return w def builder(self): - if self.where == 'messages': + if (self.where == 'messages' or + (self.where == "moderator" and self.subwhere != "unread")): + root = c.user + message_cls = UserMessageBuilder + if not c.default_sr: + root = c.site + message_cls = SrMessageBuilder + elif self.where == 'moderator' and self.subwhere != 'unread': + message_cls = ModeratorMessageBuilder + + parent = None + skip = False if self.message: if self.message.first_message: parent = Message._byID(self.message.first_message) else: parent = self.message - return 
MessageBuilder(c.user, parent = parent, - skip = False, - focal = self.message, - wrap = self.builder_wrapper, - num = self.num) elif c.user.pref_threaded_messages: skip = (c.render_style == "html") - return MessageBuilder(c.user, wrap = self.builder_wrapper, - skip = skip, - num = self.num, - after = self.after, - reverse = self.reverse) + + return message_cls(root, wrap = self.builder_wrapper, + parent = parent, + skip = skip, + num = self.num, + after = self.after, + reverse = self.reverse) return ListingController.builder(self) def listing(self): @@ -578,7 +592,22 @@ class MessageController(ListingController): q = queries.get_unread_inbox(c.user) elif self.where == 'sent': q = queries.get_sent(c.user) - + elif self.where == 'moderator' and self.subwhere == 'unread': + if c.default_sr: + srids = Subreddit.reverse_moderator_ids(c.user) + srs = Subreddit._byID(srids, data = False, return_dict = False) + q = queries.merge_results( + *[queries.get_unread_subreddit_messages(s) for s in srs]) + else: + q = queries.get_unread_subreddit_messages(c.site) + elif self.where == 'moderator': + if c.have_mod_messages and self.mark != 'false': + c.user.modmsgtime = False + c.user._commit() + # the query is handled by the builder on the moderator page + return + else: + return self.abort404() if self.where != 'sent': #reset the inbox if c.have_messages and self.mark != 'false': @@ -590,11 +619,16 @@ class MessageController(ListingController): @validate(VUser(), message = VMessageID('mid'), mark = VOneOf('mark',('true','false'), default = 'true')) - def GET_listing(self, where, mark, message, **env): - self.where = where + def GET_listing(self, where, mark, message, subwhere = None, **env): + if not (c.default_sr or c.site.is_moderator(c.user) or c.user_is_admin): + abort(403, "forbidden") + if not c.default_sr: + self.where = "moderator" + else: + self.where = where + self.subwhere = subwhere self.mark = mark self.message = message - c.msg_location = where return ListingController.GET_listing(self, **env) @validate(VUser(), @@ -609,7 +643,7 @@ class MessageController(ListingController): message = message, success = success) return MessagePage(content = content).render() - + class RedditsController(ListingController): render_cls = SubredditsPage @@ -645,7 +679,8 @@ class MyredditsController(ListingController): NavButton(plurals.contributor, 'contributor'), NavButton(plurals.moderator, 'moderator')) - return [NavMenu(buttons, base_path = '/reddits/mine/', default = 'subscriber', type = "flatlist")] + return [NavMenu(buttons, base_path = '/reddits/mine/', + default = 'subscriber', type = "flatlist")] def title(self): return _('reddits: ') + self.where diff --git a/r2/r2/controllers/mediaembed.py b/r2/r2/controllers/mediaembed.py index 9cb4c5cf6..c2e30a11f 100644 --- a/r2/r2/controllers/mediaembed.py +++ b/r2/r2/controllers/mediaembed.py @@ -20,23 +20,24 @@ # CondeNet, Inc. All Rights Reserved. 
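The moderator "unread" branch in the listingcontroller.py hunk above builds one listing out of a separate unread queue for every community the user moderates, via queries.merge_results. Neither merge_results nor MergedCachedResults is shown in this section; conceptually the merge is just an ordered interleaving of several already-sorted (sort key, fullname) lists, along these lines (ignoring sort direction and the caching details):

import heapq

def merge_unread(per_sr_results):
    # per_sr_results: one sorted list of (sort_key, fullname) tuples per
    # subreddit.  NOT the real merge_results, just the idea, via heapq.
    return [fullname for _, fullname in heapq.merge(*per_sr_results)]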
################################################################################ from validator import * -from reddit_base import RedditController +from reddit_base import MinimalController from r2.lib.scraper import scrapers -from r2.lib.pages import MediaEmbedBody +from r2.lib.pages import MediaEmbedBody, ComScore, render_ad from pylons import request +from pylons.controllers.util import abort -class MediaembedController(RedditController): +class MediaembedController(MinimalController): @validate(link = VLink('link')) def GET_mediaembed(self, link): if request.host != g.media_domain: # don't serve up untrusted content except on our # specifically untrusted domain - return self.abort404() + abort(404) if not link or not link.media_object: - return self.abort404() + abort(404) if isinstance(link.media_object, basestring): # it's an old-style string @@ -50,3 +51,16 @@ class MediaembedController(RedditController): content = media_embed.content return MediaEmbedBody(body = content).render() + + def GET_ad(self, reddit_name = None): + c.render_style = "html" + return render_ad(reddit_name=reddit_name) + + def GET_ad_by_codename(self, codename = None): + if not codename: + abort(404) + c.render_style = "html" + return render_ad(codename=codename) + + def GET_comscore(self, reddit = None): + return ComScore().render(style="html") diff --git a/r2/r2/controllers/post.py b/r2/r2/controllers/post.py index 5736b7631..130ac3754 100644 --- a/r2/r2/controllers/post.py +++ b/r2/r2/controllers/post.py @@ -29,29 +29,10 @@ from pylons.i18n import _ from r2.models import * import sha -def to_referer(func, **params): - def _to_referer(self, *a, **kw): - res = func(self, *a, **kw) - dest = res.get('redirect') or request.referer or '/' - return self.redirect(dest + query_string(params)) - return _to_referer - - class PostController(ApiController): def api_wrapper(self, kw): return Storage(**kw) -#TODO: feature disabled for now -# @to_referer -# @validate(VUser(), -# key = VOneOf('key', ('pref_bio','pref_location', -# 'pref_url')), -# value = nop('value')) -# def POST_user_desc(self, key, value): -# setattr(c.user, key, value) -# c.user._commit() -# return {} - def set_options(self, all_langs, pref_lang, **kw): if c.errors.errors: print "fucker" @@ -87,7 +68,9 @@ class PostController(ApiController): self.set_options( all_langs, pref_lang) return self.redirect(request.referer) - @validate(pref_frame = VBoolean('frame'), + @validate(VUser(), + VModhash(), + pref_frame = VBoolean('frame'), pref_clickgadget = VBoolean('clickgadget'), pref_organic = VBoolean('organic'), pref_newwindow = VBoolean('newwindow'), @@ -110,6 +93,7 @@ class PostController(ApiController): pref_mark_messages_read = VBoolean("mark_messages_read"), pref_threaded_messages = VBoolean("threaded_messages"), pref_collapse_read_messages = VBoolean("collapse_read_messages"), + pref_private_feeds = VBoolean("private_feeds"), all_langs = nop('all-langs', default = 'all')) def POST_options(self, all_langs, pref_lang, **kw): #temporary. 
eventually we'll change pref_clickgadget to an @@ -176,12 +160,12 @@ class PostController(ApiController): msg_hash = msg_hash)).render() - def POST_login(self, *a, **kw): + @validate(dest = VDestination(default = "/")) + def POST_login(self, dest, *a, **kw): ApiController.POST_login(self, *a, **kw) c.render_style = "html" c.response_content_type = "" - dest = request.post.get('dest', request.referer or '/') errors = list(c.errors) if errors: for e in errors: @@ -190,18 +174,17 @@ class PostController(ApiController): c.errors.remove(e) c.errors.add(e[0], msg) - dest = request.post.get('dest', request.referer or '/') return LoginPage(user_login = request.post.get('user'), dest = dest).render() return self.redirect(dest) - def POST_reg(self, *a, **kw): + @validate(dest = VDestination(default = "/")) + def POST_reg(self, dest, *a, **kw): ApiController.POST_register(self, *a, **kw) c.render_style = "html" c.response_content_type = "" - dest = request.post.get('dest', request.referer or '/') errors = list(c.errors) if errors: for e in errors: diff --git a/r2/r2/controllers/promotecontroller.py b/r2/r2/controllers/promotecontroller.py index 31ff182cf..73539d5f6 100644 --- a/r2/r2/controllers/promotecontroller.py +++ b/r2/r2/controllers/promotecontroller.py @@ -168,7 +168,7 @@ class PromoteController(ListingController): promote.reject_promo(thing, reason = reason) # also reject anything that is live but has a reason given elif (c.user_is_sponsor and reason and - thing.promte_status == promote.STATUS.promoted): + thing.promote_status == promote.STATUS.promoted): promote.reject_promo(thing, reason = reason) # otherwise, mark it as "finished" else: @@ -220,6 +220,9 @@ class PromoteController(ListingController): # want the URL url = url[0].url + if form.has_errors('bid', errors.BAD_BID): + return + # check dates and date range start, end = [x.date() for x in dates] if dates else (None, None) if (not l or @@ -242,7 +245,6 @@ class PromoteController(ListingController): if (form.has_errors('title', errors.NO_TEXT, errors.TOO_LONG) or form.has_errors('url', errors.NO_URL, errors.BAD_URL) or - form.has_errors('bid', errors.BAD_BID) or (not l and jquery.has_errors('ratelimit', errors.RATELIMIT))): return elif l: diff --git a/r2/r2/controllers/reddit_base.py b/r2/r2/controllers/reddit_base.py index e291cbc45..a9e797acb 100644 --- a/r2/r2/controllers/reddit_base.py +++ b/r2/r2/controllers/reddit_base.py @@ -25,11 +25,11 @@ from pylons.controllers.util import abort, redirect_to from pylons.i18n import _ from pylons.i18n.translation import LanguageError from r2.lib.base import BaseController, proxyurl -from r2.lib import pages, utils, filters +from r2.lib import pages, utils, filters, amqp from r2.lib.utils import http_utils, UniqueIterator -from r2.lib.cache import LocalCache +from r2.lib.cache import LocalCache, make_key, MemcachedError import random as rand -from r2.models.account import valid_cookie, FakeAccount +from r2.models.account import valid_cookie, FakeAccount, valid_feed from r2.models.subreddit import Subreddit from r2.models import * from errors import ErrorSet @@ -37,12 +37,14 @@ from validator import * from r2.lib.template_helpers import add_sr from r2.lib.jsontemplates import api_type +from Cookie import CookieError from copy import copy from Cookie import CookieError from datetime import datetime -import sha, simplejson, locale +from hashlib import sha1, md5 from urllib import quote, unquote -from simplejson import dumps +import simplejson +import locale from r2.lib.tracking import encrypt, 
decrypt @@ -224,7 +226,7 @@ def over18(): else: if 'over18' in c.cookies: cookie = c.cookies['over18'].value - if cookie == sha.new(request.ip).hexdigest(): + if cookie == sha1(request.ip).hexdigest(): return True def set_subreddit(): @@ -281,6 +283,13 @@ def set_content_type(): return utils.to_js(content,callback = request.params.get( "callback", "document.write")) c.response_wrappers.append(to_js) + if ext in ("rss", "api", "json") and request.method.upper() == "GET": + user = valid_feed(request.GET.get("user"), + request.GET.get("feed"), + request.path) + if user: + c.user = user + c.user_is_loggedin = True def get_browser_langs(): browser_langs = [] @@ -403,6 +412,7 @@ def ratelimit_throttled(): if throttled(ip) or throttled(subnet): abort(503, 'service temporarily unavailable') + #TODO i want to get rid of this function. once the listings in front.py are #moved into listingcontroller, we shouldn't have a need for this #anymore @@ -411,13 +421,16 @@ def base_listing(fn): after = VByName('after'), before = VByName('before'), count = VCount('count'), - target = VTarget("target")) + target = VTarget("target"), + show = VLength('show', 3)) def new_fn(self, before, **env): if c.render_style == "htmllite": c.link_target = env.get("target") elif "target" in env: del env["target"] + if "show" in env and env['show'] == 'all': + c.ignore_hide_rules = True kw = build_arg_list(fn, env) #turn before into after/reverse @@ -429,40 +442,32 @@ def base_listing(fn): return fn(self, **kw) return new_fn -class RedditController(BaseController): +class MinimalController(BaseController): def request_key(self): # note that this references the cookie at request time, not # the current value of it - cookie_keys = [] - for x in cache_affecting_cookies: - cookie_keys.append(request.cookies.get(x,'')) + try: + cookies_key = [(x, request.cookies.get(x,'')) + for x in cache_affecting_cookies] + except CookieError: + cookies_key = '' - key = ''.join((str(c.lang), - str(c.content_langs), - request.host, - str(c.cname), - str(request.fullpath), - str(c.over18), - str(c.firsttime), - ''.join(cookie_keys))) - return key + return make_key('request_key', + c.lang, + c.content_langs, + request.host, + c.cname, + request.fullpath, + c.over18, + c.firsttime, + cookies_key) def cached_response(self): return c.response - @staticmethod - def login(user, admin = False, rem = False): - c.cookies[g.login_cookie] = Cookie(value = user.make_cookie(admin = admin), - expires = NEVER if rem else None) - - @staticmethod - def logout(admin = False): - c.cookies[g.login_cookie] = Cookie(value='') - def pre(self): c.start_time = datetime.now(g.tz) - g.reset_caches() c.domain_prefix = request.environ.get("reddit-domain-prefix", @@ -474,10 +479,106 @@ class RedditController(BaseController): # the domain has to be set before Cookies get initialized set_subreddit() + c.errors = ErrorSet() + c.cookies = Cookies() + + def try_pagecache(self): + #check content cache + if not c.user_is_loggedin: + r = g.rendercache.get(self.request_key()) + if r and request.method == 'GET': + response = c.response + response.headers = r.headers + response.content = r.content + + for x in r.cookies.keys(): + if x in cache_affecting_cookies: + cookie = r.cookies[x] + response.set_cookie(key = x, + value = cookie.value, + domain = cookie.get('domain',None), + expires = cookie.get('expires',None), + path = cookie.get('path',None)) + + response.status_code = r.status_code + request.environ['pylons.routes_dict']['action'] = 'cached_response' + # make sure to carry over 
the content type + c.response_content_type = r.headers['content-type'] + if r.headers.has_key('access-control'): + c.response_access_control = r.headers['access-control'] + c.used_cache = True + # response wrappers have already been applied before cache write + c.response_wrappers = [] + + + def post(self): + response = c.response + content = filter(None, response.content) + if isinstance(content, (list, tuple)): + content = ''.join(content) + for w in c.response_wrappers: + content = w(content) + response.content = content + if c.response_content_type: + response.headers['Content-Type'] = c.response_content_type + if c.response_access_control: + c.response.headers['Access-Control'] = c.response_access_control + + if c.user_is_loggedin: + response.headers['Cache-Control'] = 'no-cache' + response.headers['Pragma'] = 'no-cache' + + # send cookies + if not c.used_cache and c.cookies: + # if we used the cache, these cookies should be set by the + # cached response object instead + for k,v in c.cookies.iteritems(): + if v.dirty: + response.set_cookie(key = k, + value = quote(v.value), + domain = v.domain, + expires = v.expires) + + #return + #set content cache + if (g.page_cache_time + and request.method == 'GET' + and not c.user_is_loggedin + and not c.used_cache + and not c.dontcache + and response.status_code != 503 + and response.content and response.content[0]): + try: + g.rendercache.set(self.request_key(), + response, + g.page_cache_time) + except MemcachedError: + # the key was too big to set in the rendercache + g.log.debug("Ignored too-big render cache") + + if g.enable_usage_stats: + amqp.add_kw("usage_q", + start_time = c.start_time, + end_time = datetime.now(g.tz), + action = str(c.action) or "static") + +class RedditController(MinimalController): + + @staticmethod + def login(user, admin = False, rem = False): + c.cookies[g.login_cookie] = Cookie(value = user.make_cookie(admin = admin), + expires = NEVER if rem else None) + + @staticmethod + def logout(admin = False): + c.cookies[g.login_cookie] = Cookie(value='') + + def pre(self): + MinimalController.pre(self) + set_cnameframe() # populate c.cookies unless we're on the unsafe media_domain - c.cookies = Cookies() if request.host != g.media_domain or g.media_domain == g.domain: try: for k,v in request.cookies.iteritems(): @@ -489,7 +590,6 @@ class RedditController(BaseController): request.environ['HTTP_COOKIE'] = '' c.response_wrappers = [] - c.errors = ErrorSet() c.firsttime = firsttime() (c.user, maybe_admin) = \ valid_cookie(c.cookies[g.login_cookie].value @@ -515,6 +615,12 @@ class RedditController(BaseController): read_mod_cookie() if hasattr(c.user, 'msgtime') and c.user.msgtime: c.have_messages = c.user.msgtime + if hasattr(c.user, 'modmsgtime'): + c.show_mod_mail = True + if c.user.modmsgtime: + c.have_mod_messages = c.user.modmsgtime + else: + c.show_mod_mail = Subreddit.reverse_moderator_ids(c.user) c.user_is_admin = maybe_admin and c.user.name in g.admins c.user_is_sponsor = c.user_is_admin or c.user.name in g.sponsors if not g.disallow_db_writes: @@ -560,74 +666,6 @@ class RedditController(BaseController): elif c.site.domain and c.site.css_on_cname and not c.cname: c.allow_styles = False - #check content cache - if not c.user_is_loggedin: - r = g.rendercache.get(self.request_key()) - if r and request.method == 'GET': - response = c.response - response.headers = r.headers - response.content = r.content - - for x in r.cookies.keys(): - if x in cache_affecting_cookies: - cookie = r.cookies[x] - response.set_cookie(key = 
x, - value = cookie.value, - domain = cookie.get('domain',None), - expires = cookie.get('expires',None), - path = cookie.get('path',None)) - - response.status_code = r.status_code - request.environ['pylons.routes_dict']['action'] = 'cached_response' - # make sure to carry over the content type - c.response_content_type = r.headers['content-type'] - if r.headers.has_key('access-control'): - c.response_access_control = r.headers['access-control'] - c.used_cache = True - # response wrappers have already been applied before cache write - c.response_wrappers = [] - - def post(self): - response = c.response - content = filter(None, response.content) - if isinstance(content, (list, tuple)): - content = ''.join(content) - for w in c.response_wrappers: - content = w(content) - response.content = content - if c.response_content_type: - response.headers['Content-Type'] = c.response_content_type - if c.response_access_control: - c.response.headers['Access-Control'] = c.response_access_control - - if c.user_is_loggedin: - response.headers['Cache-Control'] = 'no-cache' - response.headers['Pragma'] = 'no-cache' - - # send cookies - if not c.used_cache and c.cookies: - # if we used the cache, these cookies should be set by the - # cached response object instead - for k,v in c.cookies.iteritems(): - if v.dirty: - response.set_cookie(key = k, - value = quote(v.value), - domain = v.domain, - expires = v.expires) - - #return - #set content cache - if (g.page_cache_time - and request.method == 'GET' - and not c.user_is_loggedin - and not c.used_cache - and not c.dontcache - and response.status_code != 503 - and response.content and response.content[0]): - g.rendercache.set(self.request_key(), - response, - g.page_cache_time) - def check_modified(self, thing, action): if c.user_is_loggedin: return @@ -644,6 +682,9 @@ class RedditController(BaseController): def abort404(self): abort(404, "not found") + def abort403(self): + abort(403, "forbidden") + def sendpng(self, string): c.response_content_type = 'image/png' c.response.content = string @@ -661,7 +702,7 @@ class RedditController(BaseController): return request.path + utils.query_string(merged) def api_wrapper(self, kw): - data = dumps(kw) + data = simplejson.dumps(kw) if request.method == "GET" and request.GET.get("callback"): return "%s(%s)" % (websafe_json(request.GET.get("callback")), websafe_json(data)) diff --git a/r2/r2/controllers/usage.py b/r2/r2/controllers/usage.py new file mode 100644 index 000000000..c2d54326f --- /dev/null +++ b/r2/r2/controllers/usage.py @@ -0,0 +1,34 @@ +# The contents of this file are subject to the Common Public Attribution +# License Version 1.0. (the "License"); you may not use this file except in +# compliance with the License. You may obtain a copy of the License at +# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public +# License Version 1.1, but Sections 14 and 15 have been added to cover use of +# software over a computer network and provide for limited attribution for the +# Original Developer. In addition, Exhibit A has been modified to be consistent +# with Exhibit B. +# +# Software distributed under the License is distributed on an "AS IS" basis, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for +# the specific language governing rights and limitations under the License. +# +# The Original Code is Reddit. +# +# The Original Developer is the Initial Developer. The Initial Developer of the +# Original Code is CondeNet, Inc. 
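A minimal sketch (an editor's illustration, not part of the patch) of the gate that the new MinimalController.post() applies before writing a rendered page into g.rendercache; the attribute names mirror the pylons globals used in the hunk above, while the helper function itself is hypothetical:

def should_cache_page(g, c, request, response):
    # True only when the response is safe to share between users:
    # caching is enabled, the request is a GET, nobody is logged in,
    # the response did not itself come from the cache, the handler
    # did not opt out, and there is real content to store.
    return bool(g.page_cache_time
                and request.method == 'GET'
                and not c.user_is_loggedin
                and not c.used_cache
                and not c.dontcache
                and response.status_code != 503
                and response.content and response.content[0])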
+# +# All portions of the code written by CondeNet are Copyright (c) 2006-2010 +# CondeNet, Inc. All Rights Reserved. +################################################################################ +from pylons import request, g +from reddit_base import RedditController +from r2.lib.pages import AdminPage, AdminUsage +from validator import * + +class UsageController(RedditController): + + @validate(VAdmin()) + def GET_index(self): + res = AdminPage(content = AdminUsage(), + show_sidebar = False, + title = 'usage').render() + return res diff --git a/r2/r2/controllers/validator/validator.py b/r2/r2/controllers/validator/validator.py index 519572bc1..7efb4be8b 100644 --- a/r2/r2/controllers/validator/validator.py +++ b/r2/r2/controllers/validator/validator.py @@ -24,11 +24,12 @@ from pylons.i18n import _ from pylons.controllers.util import abort from r2.lib import utils, captcha, promote from r2.lib.filters import unkeep_space, websafe, _force_unicode +from r2.lib.filters import markdown_souptest from r2.lib.db.operators import asc, desc from r2.lib.template_helpers import add_sr from r2.lib.jsonresponse import json_respond, JQueryResponse, JsonResponse from r2.lib.jsontemplates import api_type - +from r2.lib.log import log_text from r2.models import * from r2.lib.authorize import Address, CreditCard @@ -37,6 +38,7 @@ from r2.controllers.errors import VerifiedUserRequiredException from copy import copy from datetime import datetime, timedelta +from curses.ascii import isprint import re, inspect import pycountry @@ -123,7 +125,6 @@ def _make_validated_kw(fn, simple_vals, param_vals, env): for var, validator in param_vals.iteritems(): kw[var] = validator(env) return kw - def validate(*simple_vals, **param_vals): def val(fn): @@ -230,7 +231,7 @@ class VRequired(Validator): if not e: e = self._error if e: self.set_error(e) - + def run(self, item): if not item: self.error() @@ -266,6 +267,25 @@ class VCommentByID(VThing): def __init__(self, param, redirect = True, *a, **kw): VThing.__init__(self, param, Comment, redirect=redirect, *a, **kw) +class VAd(VThing): + def __init__(self, param, redirect = True, *a, **kw): + VThing.__init__(self, param, Ad, redirect=redirect, *a, **kw) + +class VAdByCodename(Validator): + def run(self, codename, required_fullname=None): + if not codename: + return self.set_error(errors.NO_TEXT) + + try: + a = Ad._by_codename(codename) + except NotFound: + a = None + + if a and required_fullname and a._fullname != required_fullname: + return self.set_error(errors.INVALID_OPTION) + else: + return a + class VAward(VThing): def __init__(self, param, redirect = True, *a, **kw): VThing.__init__(self, param, Award, redirect=redirect, *a, **kw) @@ -314,7 +334,7 @@ class VMessageID(Validator): try: cid = int(cid, 36) m = Message._byID(cid, True) - if not m.can_view(): + if not m.can_view_slow(): abort(403, 'forbidden') return m except (NotFound, ValueError): @@ -324,14 +344,23 @@ class VCount(Validator): def run(self, count): if count is None: count = 0 - return max(int(count), 0) + try: + return max(int(count), 0) + except ValueError: + return 0 class VLimit(Validator): def run(self, limit): if limit is None: - return c.user.pref_numsites - return min(max(int(limit), 1), 100) + return c.user.pref_numsites + + try: + i = int(limit) + except ValueError: + return c.user.pref_numsites + + return min(max(i, 1), 100) class VCssMeasure(Validator): measure = re.compile(r"^\s*[\d\.]+\w{0,3}\s*$") @@ -371,23 +400,47 @@ class VLength(Validator): self.set_error(self.length_error, 
{'max_length': self.max_length}) else: return text - + +class VPrintable(VLength): + def run(self, text, text2 = ''): + text = VLength.run(self, text, text2) + + if text is None: + return None + + try: + if all(isprint(str(x)) for x in text): + return str(text) + except UnicodeEncodeError: + pass + + self.set_error(errors.BAD_STRING) + return None + + class VTitle(VLength): def __init__(self, param, max_length = 300, **kw): VLength.__init__(self, param, max_length, **kw) - -class VComment(VLength): - def __init__(self, param, max_length = 10000, **kw): - VLength.__init__(self, param, max_length, **kw) - -class VSelfText(VLength): - def __init__(self, param, max_length = 10000, **kw): - VLength.__init__(self, param, max_length, **kw) - -class VMessage(VLength): + +class VMarkdown(VLength): def __init__(self, param, max_length = 10000, **kw): VLength.__init__(self, param, max_length, **kw) + def run(self, text, text2 = ''): + text = text or text2 + VLength.run(self, text) + try: + markdown_souptest(text) + return text + except ValueError: + import sys + user = "???" + if c.user_is_loggedin: + user = c.user.name + g.log.error("HAX by %s: %s" % (user, text)) + s = sys.exc_info() + # reraise the original error with the original stack trace + raise s[1], None, s[2] class VSubredditName(VRequired): def __init__(self, item, *a, **kw): @@ -422,7 +475,7 @@ class VSubredditDesc(Validator): class VAccountByName(VRequired): def __init__(self, param, error = errors.USER_DOESNT_EXIST, *a, **kw): VRequired.__init__(self, param, error, *a, **kw) - + def run(self, name): if name: try: @@ -486,7 +539,7 @@ class VUser(Validator): if (password is not None) and not valid_password(c.user, password): self.set_error(errors.WRONG_PASSWORD) - + class VModhash(Validator): default_param = 'uh' def run(self, uh): @@ -595,7 +648,10 @@ class VSubmitParent(VByName): if fullname: parent = VByName.run(self, fullname) if parent and parent._deleted: - self.set_error(errors.DELETED_COMMENT) + if isinstance(parent, Link): + self.set_error(errors.DELETED_LINK) + else: + self.set_error(errors.DELETED_COMMENT) if isinstance(parent, Message): return parent else: @@ -623,7 +679,7 @@ class VSubmitSR(Validator): self.set_error(errors.SUBREDDIT_NOTALLOWED) else: return sr - + pass_rx = re.compile(r"^.{3,20}$") def chkpass(x): @@ -633,12 +689,10 @@ class VPassword(Validator): def run(self, password, verify): if not chkpass(password): self.set_error(errors.BAD_PASSWORD) - return elif verify != password: self.set_error(errors.BAD_PASSWORD_MATCH) - return password else: - return password + return password.encode('utf8') user_rx = re.compile(r"^[\w-]{3,20}$", re.UNICODE) @@ -667,11 +721,15 @@ class VUname(VRequired): class VLogin(VRequired): def __init__(self, item, *a, **kw): VRequired.__init__(self, item, errors.WRONG_PASSWORD, *a, **kw) - + def run(self, user_name, password): user_name = chkuser(user_name) user = None if user_name: + try: + str(password) + except UnicodeEncodeError: + password = password.encode('utf8') user = valid_login(user_name, password) if not user: return self.error() @@ -698,7 +756,7 @@ class VUrl(VRequired): sr = None else: sr = None - + if not url: return self.error(errors.NO_URL) url = utils.sanitize_url(url) @@ -736,6 +794,21 @@ class VExistingUname(VRequired): return self.error(errors.USER_DOESNT_EXIST) self.error() +class VMessageRecipent(VExistingUname): + def run(self, name): + if not name: + return self.error() + if name.startswith('#'): + try: + s = Subreddit._by_name(name.strip('#')) + if isinstance(s, 
FakeSubreddit): + raise NotFound, "fake subreddit" + return s + except NotFound: + self.set_error(errors.SUBREDDIT_NOEXIST) + else: + return VExistingUname.run(self, name) + class VUserWithEmail(VExistingUname): def run(self, name): user = VExistingUname.run(self, name) @@ -901,9 +974,11 @@ class VRatelimit(Validator): class VCommentIDs(Validator): #id_str is a comma separated list of id36's def run(self, id_str): - cids = [int(i, 36) for i in id_str.split(',')] - comments = Comment._byID(cids, data=True, return_dict = False) - return comments + if id_str: + cids = [int(i, 36) for i in id_str.split(',')] + comments = Comment._byID(cids, data=True, return_dict = False) + return comments + return [] class CachedUser(object): @@ -1049,14 +1124,9 @@ class ValidIP(Validator): self.set_error(errors.BANNED_IP) return request.ip -class ValidDomain(Validator): +class VOkayDomain(Validator): def run(self, url): - if url and is_banned_domain(url): - self.set_error(errors.BANNED_DOMAIN) - - - - + return is_banned_domain(url) class VDate(Validator): """ @@ -1135,9 +1205,32 @@ class VDestination(Validator): def __init__(self, param = 'dest', default = "", **kw): self.default = default Validator.__init__(self, param, **kw) - + def run(self, dest): - return dest or request.referer or self.default + if not dest: + dest = request.referer or self.default or "/" + + ld = dest.lower() + if (ld.startswith("/") or + ld.startswith("http://") or + ld.startswith("https://")): + + u = UrlParser(dest) + + if u.is_reddit_url(): + return dest + + ip = getattr(request, "ip", "[unknown]") + fp = getattr(request, "fullpath", "[unknown]") + dm = c.domain or "[unknown]" + cn = c.cname or "[unknown]" + + log_text("invalid redirect", + "%s attempted to redirect from %s to %s with domain %s and cname %s" + % (ip, fp, dest, dm, cn), + "info") + + return "/" class ValidAddress(Validator): def __init__(self, param, usa_only = True): diff --git a/r2/r2/i18n/r2.pot b/r2/r2/i18n/r2.pot index 22e92c92c..8cbd4a200 100644 --- a/r2/r2/i18n/r2.pot +++ b/r2/r2/i18n/r2.pot @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: r2 0.0.0\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2010-01-25 14:44-0700\n" +"POT-Creation-Date: 2010-02-06 23:44-0700\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,19 +17,19 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 0.9.4\n" -#: r2/controllers/api.py:77 r2/controllers/listingcontroller.py:383 +#: r2/controllers/api.py:91 r2/controllers/listingcontroller.py:383 msgid "API" msgstr "" -#: r2/controllers/api.py:97 +#: r2/controllers/api.py:111 msgid "thanks for your message! you should hear back from us shortly." msgstr "" -#: r2/controllers/api.py:124 +#: r2/controllers/api.py:138 msgid "your message has been delivered" msgstr "" -#: r2/controllers/api.py:245 +#: r2/controllers/api.py:259 msgid "no title found" msgstr "" @@ -69,57 +69,57 @@ msgstr "" msgid "your link has been shared." 
msgstr "" -#: r2/controllers/api.py:803 +#: r2/controllers/api.py:801 msgid "validation errors" msgstr "" -#: r2/controllers/api.py:824 r2/controllers/api.py:972 r2/controllers/api.py:1080 -#: r2/controllers/api.py:1433 r2/controllers/api.py:1471 +#: r2/controllers/api.py:822 r2/controllers/api.py:970 r2/controllers/api.py:1080 +#: r2/controllers/api.py:1436 r2/controllers/api.py:1474 #: r2/controllers/listingcontroller.py:280 r2/controllers/promotecontroller.py:444 #: r2/lib/menus.py:49 r2/templates/frametoolbar.html:199 #: r2/templates/printablebuttons.html:122 msgid "saved" msgstr "" -#: r2/controllers/api.py:894 r2/lib/menus.py:151 +#: r2/controllers/api.py:892 r2/lib/menus.py:152 #: r2/templates/printablebuttons.html:36 r2/templates/subredditstylesheet.html:230 msgid "deleted" msgstr "" -#: r2/controllers/api.py:945 r2/templates/subredditstylesheet.html:189 +#: r2/controllers/api.py:943 r2/templates/subredditstylesheet.html:189 msgid "bad image name" msgstr "" -#: r2/controllers/api.py:952 r2/controllers/promotecontroller.py:437 +#: r2/controllers/api.py:950 r2/controllers/promotecontroller.py:437 msgid "bad image" msgstr "" -#: r2/controllers/api.py:956 +#: r2/controllers/api.py:954 #, python-format msgid "too many images (you only get %d)" msgstr "" -#: r2/controllers/api.py:1309 +#: r2/controllers/api.py:1312 msgid "an email will be sent to that account's address shortly" msgstr "" -#: r2/controllers/api.py:1593 +#: r2/controllers/api.py:1596 msgid "redirecting..." msgstr "" -#: r2/controllers/api.py:1596 +#: r2/controllers/api.py:1599 msgid "error (sorry)" msgstr "" -#: r2/controllers/buttons.py:177 +#: r2/controllers/buttons.py:180 msgid "reddit buttons" msgstr "" -#: r2/controllers/buttons.py:183 +#: r2/controllers/buttons.py:186 msgid "reddit widget" msgstr "" -#: r2/controllers/buttons.py:188 r2/lib/menus.py:93 +#: r2/controllers/buttons.py:191 r2/lib/menus.py:93 msgid "bookmarklets" msgstr "" @@ -135,7 +135,7 @@ msgstr "" msgid "just read this first." 
msgstr "" -#: r2/controllers/embed.py:64 r2/lib/menus.py:87 r2/lib/pages/pages.py:328 +#: r2/controllers/embed.py:64 r2/lib/menus.py:87 r2/lib/pages/pages.py:332 #: r2/templates/frametoolbar.html:110 msgid "help" msgstr "" @@ -337,6 +337,10 @@ msgstr "" msgid "help translate reddit into your language" msgstr "" +#: r2/controllers/feedback.py:41 r2/lib/menus.py:138 +msgid "help translate" +msgstr "" + #: r2/controllers/front.py:105 r2/templates/login.html:87 msgid "password" msgstr "" @@ -349,52 +353,52 @@ msgstr "" msgid "reset password" msgstr "" -#: r2/controllers/front.py:256 r2/controllers/listingcontroller.py:687 +#: r2/controllers/front.py:256 r2/controllers/listingcontroller.py:686 msgid "comments" msgstr "" -#: r2/controllers/front.py:286 r2/controllers/front.py:288 +#: r2/controllers/front.py:290 r2/controllers/front.py:292 #: r2/templates/createsubreddit.html:76 msgid "create a reddit" msgstr "" -#: r2/controllers/front.py:317 +#: r2/controllers/front.py:321 msgid "your reddit has been created" msgstr "" -#: r2/controllers/front.py:356 r2/lib/menus.py:138 +#: r2/controllers/front.py:378 r2/lib/menus.py:139 msgid "awards" msgstr "" -#: r2/controllers/front.py:386 r2/lib/menus.py:118 +#: r2/controllers/front.py:408 r2/lib/menus.py:119 msgid "related" msgstr "" -#: r2/controllers/front.py:404 +#: r2/controllers/front.py:426 msgid "other discussions" msgstr "" -#: r2/controllers/front.py:423 r2/controllers/front.py:470 +#: r2/controllers/front.py:445 r2/controllers/front.py:492 msgid "search results" msgstr "" -#: r2/controllers/front.py:507 r2/controllers/listingcontroller.py:167 +#: r2/controllers/front.py:529 r2/controllers/listingcontroller.py:167 msgid "search failed" msgstr "" -#: r2/controllers/front.py:599 +#: r2/controllers/front.py:620 msgid "seen it" msgstr "" -#: r2/controllers/front.py:609 r2/lib/menus.py:86 +#: r2/controllers/front.py:630 r2/lib/menus.py:86 msgid "submit" msgstr "" -#: r2/controllers/front.py:622 r2/controllers/post.py:163 +#: r2/controllers/front.py:643 r2/controllers/post.py:145 msgid "opt out" msgstr "" -#: r2/controllers/front.py:622 r2/controllers/post.py:173 +#: r2/controllers/front.py:643 r2/controllers/post.py:155 msgid "welcome back" msgstr "" @@ -453,7 +457,7 @@ msgstr "" msgid "profile for %(user)s" msgstr "" -#: r2/controllers/listingcontroller.py:512 r2/lib/menus.py:460 r2/lib/menus.py:510 +#: r2/controllers/listingcontroller.py:512 r2/lib/menus.py:463 r2/lib/menus.py:513 msgid "all" msgstr "" @@ -473,15 +477,15 @@ msgstr "" msgid "messages" msgstr "" -#: r2/controllers/listingcontroller.py:617 r2/templates/pagenamenav.html:33 +#: r2/controllers/listingcontroller.py:616 r2/templates/pagenamenav.html:33 msgid "reddits" msgstr "" -#: r2/controllers/listingcontroller.py:651 +#: r2/controllers/listingcontroller.py:650 msgid "reddits: " msgstr "" -#: r2/controllers/post.py:137 +#: r2/controllers/post.py:119 msgid "over 18?" msgstr "" @@ -511,43 +515,43 @@ msgstr "" msgid "failed to authenticate card. sorry." 
msgstr "" -#: r2/controllers/validator/validator.py:1154 +#: r2/controllers/validator/validator.py:1167 msgid "please provide a first name" msgstr "" -#: r2/controllers/validator/validator.py:1156 +#: r2/controllers/validator/validator.py:1169 msgid "please provide a last name" msgstr "" -#: r2/controllers/validator/validator.py:1158 +#: r2/controllers/validator/validator.py:1171 msgid "please provide an address" msgstr "" -#: r2/controllers/validator/validator.py:1160 +#: r2/controllers/validator/validator.py:1173 msgid "please provide your city" msgstr "" -#: r2/controllers/validator/validator.py:1162 +#: r2/controllers/validator/validator.py:1175 msgid "please provide your state" msgstr "" -#: r2/controllers/validator/validator.py:1164 +#: r2/controllers/validator/validator.py:1177 msgid "please provide your zip or post code" msgstr "" -#: r2/controllers/validator/validator.py:1167 +#: r2/controllers/validator/validator.py:1180 msgid "please pick a country" msgstr "" -#: r2/controllers/validator/validator.py:1191 +#: r2/controllers/validator/validator.py:1204 msgid "credit card numbers should be 13 to 16 digits" msgstr "" -#: r2/controllers/validator/validator.py:1194 +#: r2/controllers/validator/validator.py:1207 msgid "dates should be YYYY-MM" msgstr "" -#: r2/controllers/validator/validator.py:1196 +#: r2/controllers/validator/validator.py:1209 msgid "card verification codes should be 3 or 4 digits" msgstr "" @@ -671,7 +675,7 @@ msgstr "" msgid "turn admin off" msgstr "" -#: r2/lib/menus.py:85 r2/lib/pages/pages.py:476 r2/lib/pages/pages.py:487 +#: r2/lib/menus.py:85 r2/lib/pages/pages.py:480 r2/lib/pages/pages.py:495 msgid "preferences" msgstr "" @@ -732,190 +736,194 @@ msgid "options" msgstr "" #: r2/lib/menus.py:107 -msgid "friends" +msgid "RSS feeds" msgstr "" #: r2/lib/menus.py:108 +msgid "friends" +msgstr "" + +#: r2/lib/menus.py:109 msgid "password/email" msgstr "" -#: r2/lib/menus.py:109 r2/templates/prefdelete.html:54 +#: r2/lib/menus.py:110 r2/templates/prefdelete.html:54 #: r2/templates/printablebuttons.html:36 msgid "delete" msgstr "" -#: r2/lib/menus.py:112 +#: r2/lib/menus.py:113 msgid "compose" msgstr "" -#: r2/lib/menus.py:113 +#: r2/lib/menus.py:114 msgid "inbox" msgstr "" -#: r2/lib/menus.py:114 +#: r2/lib/menus.py:115 msgid "sent" msgstr "" -#: r2/lib/menus.py:117 +#: r2/lib/menus.py:118 msgid "comments {toolbar}" msgstr "" -#: r2/lib/menus.py:119 +#: r2/lib/menus.py:120 msgid "details" msgstr "" -#: r2/lib/menus.py:120 +#: r2/lib/menus.py:121 #, python-format msgid "other discussions (%(num)s)" msgstr "" -#: r2/lib/menus.py:121 +#: r2/lib/menus.py:122 msgid "shirt" msgstr "" -#: r2/lib/menus.py:122 +#: r2/lib/menus.py:123 msgid "traffic stats" msgstr "" -#: r2/lib/menus.py:125 +#: r2/lib/menus.py:126 msgid "home" msgstr "" -#: r2/lib/menus.py:126 +#: r2/lib/menus.py:127 msgid "about" msgstr "" -#: r2/lib/menus.py:127 +#: r2/lib/menus.py:128 msgid "edit this reddit" msgstr "" -#: r2/lib/menus.py:128 +#: r2/lib/menus.py:129 msgid "edit moderators" msgstr "" -#: r2/lib/menus.py:129 +#: r2/lib/menus.py:130 msgid "edit contributors" msgstr "" -#: r2/lib/menus.py:130 r2/lib/menus.py:131 r2/lib/pages/pages.py:1602 +#: r2/lib/menus.py:131 r2/lib/menus.py:132 r2/lib/pages/pages.py:1624 msgid "ban users" msgstr "" -#: r2/lib/menus.py:133 +#: r2/lib/menus.py:134 msgid "popular" msgstr "" -#: r2/lib/menus.py:134 r2/templates/admintranslations.html:33 -#: r2/templates/createsubreddit.html:345 +#: r2/lib/menus.py:135 r2/templates/admintranslations.html:33 +#: 
r2/templates/createsubreddit.html:353 msgid "create" msgstr "" -#: r2/lib/menus.py:135 r2/lib/pages/pages.py:987 +#: r2/lib/menus.py:136 r2/lib/pages/pages.py:998 msgid "my reddits" msgstr "" -#: r2/lib/menus.py:137 -msgid "help translate" -msgstr "" - -#: r2/lib/menus.py:139 +#: r2/lib/menus.py:140 msgid "promoted" msgstr "" -#: r2/lib/menus.py:140 +#: r2/lib/menus.py:141 msgid "reporters" msgstr "" -#: r2/lib/menus.py:141 +#: r2/lib/menus.py:142 msgid "reported links" msgstr "" -#: r2/lib/menus.py:142 +#: r2/lib/menus.py:143 msgid "reported authors" msgstr "" -#: r2/lib/menus.py:143 +#: r2/lib/menus.py:144 msgid "info" msgstr "" -#: r2/lib/menus.py:144 r2/templates/printablebuttons.html:112 +#: r2/lib/menus.py:145 r2/templates/printablebuttons.html:112 #: r2/templates/sharelink.html:94 msgid "share" msgstr "" -#: r2/lib/menus.py:146 +#: r2/lib/menus.py:147 msgid "overview" msgstr "" -#: r2/lib/menus.py:147 +#: r2/lib/menus.py:148 msgid "submitted" msgstr "" -#: r2/lib/menus.py:148 +#: r2/lib/menus.py:149 msgid "liked" msgstr "" -#: r2/lib/menus.py:149 +#: r2/lib/menus.py:150 msgid "disliked" msgstr "" -#: r2/lib/menus.py:150 +#: r2/lib/menus.py:151 msgid "hidden {toolbar}" msgstr "" -#: r2/lib/menus.py:152 r2/templates/printablebuttons.html:31 +#: r2/lib/menus.py:153 r2/templates/printablebuttons.html:31 msgid "reported" msgstr "" -#: r2/lib/menus.py:154 +#: r2/lib/menus.py:155 msgid "self-serve advertising" msgstr "" -#: r2/lib/menus.py:155 +#: r2/lib/menus.py:156 msgid "create promotion" msgstr "" -#: r2/lib/menus.py:156 +#: r2/lib/menus.py:157 msgid "my promoted links" msgstr "" -#: r2/lib/menus.py:157 +#: r2/lib/menus.py:158 msgid "all promoted links" msgstr "" -#: r2/lib/menus.py:158 +#: r2/lib/menus.py:159 msgid "unapproved" msgstr "" -#: r2/lib/menus.py:159 +#: r2/lib/menus.py:160 msgid "analytics" msgstr "" -#: r2/lib/menus.py:160 +#: r2/lib/menus.py:161 msgid "live" msgstr "" -#: r2/lib/menus.py:161 +#: r2/lib/menus.py:162 msgid "unpaid" msgstr "" -#: r2/lib/menus.py:162 +#: r2/lib/menus.py:163 msgid "pending" msgstr "" -#: r2/lib/menus.py:163 +#: r2/lib/menus.py:164 msgid "rejected" msgstr "" -#: r2/lib/menus.py:391 +#: r2/lib/menus.py:166 +msgid "whitelist" +msgstr "" + +#: r2/lib/menus.py:394 msgid "sorted by" msgstr "" -#: r2/lib/menus.py:455 +#: r2/lib/menus.py:458 msgid "kind" msgstr "" -#: r2/lib/menus.py:470 +#: r2/lib/menus.py:473 msgid "links from" msgstr "" @@ -1009,7 +1017,7 @@ msgstr "" #, python-format msgid "" "you have been added to the list of users able to see [traffic for the " -"sponsoted link \"%(title)s\"](%(traffic_url)s)." +"sponsored link \"%(title)s\"](%(traffic_url)s)." msgstr "" #: r2/lib/strings.py:88 @@ -1124,150 +1132,146 @@ msgstr "" #: r2/lib/strings.py:120 #, python-format msgid "" -"\n" -" The following is a sample of what Reddit users had to say about this\n" -" page. The full discussion is available [here](%(fd_link)s); you can\n" -" also get there by clicking the link's title\n" -" (in the middle of the toolbar, to the right of the comments button).\n" -" " +"The following is a sample of what Reddit users had to say about this page. " +"The full discussion is available [here](%(fd_link)s); you can also get there " +"by clicking the link's title (in the middle of the toolbar, to the right of " +"the comments button)." msgstr "" -#: r2/lib/strings.py:127 +#: r2/lib/strings.py:122 msgid "" "You are submitting a link. The key to a successful submission is interesting " "content and a descriptive title." 
msgstr "" -#: r2/lib/strings.py:128 +#: r2/lib/strings.py:123 msgid "" "You are submitting a text-based post. Speak your mind. A title is required, " "but expanding further in the text field is not. Beginning your title with " "\"vote up if\" is violation of intergalactic law." msgstr "" -#: r2/lib/strings.py:129 +#: r2/lib/strings.py:124 msgid "" "You should consider using [reddit's free iphone " "app](http://itunes.com/apps/iredditfree)." msgstr "" -#: r2/lib/strings.py:130 +#: r2/lib/strings.py:125 msgid "we're going to need to verify your email address for you to proceed." msgstr "" -#: r2/lib/strings.py:131 +#: r2/lib/strings.py:126 msgid "your email address has been verfied" msgstr "" -#: r2/lib/strings.py:132 +#: r2/lib/strings.py:127 msgid "Verification failed. Please try that again" msgstr "" -#: r2/lib/strings.py:133 +#: r2/lib/strings.py:128 #, python-format msgid "" "Our search machines are under too much load to handle your request right now." -" :( Sorry for the inconvenience.\n" -"\n" -"[Try again](%(link)s) in a little bit -- but please don't mash reload; that " -"only makes the problem worse." +" :( Sorry for the inconvenience. [Try again](%(link)s) in a little bit -- but" +" please don't mash reload; that only makes the problem worse." msgstr "" -#: r2/lib/strings.py:193 r2/lib/strings.py:198 r2/lib/template_helpers.py:132 +#: r2/lib/strings.py:188 r2/lib/strings.py:193 r2/lib/template_helpers.py:132 #: r2/templates/subredditstylesheet.html:264 msgid "comment" msgid_plural "comments" msgstr[0] "" msgstr[1] "" -#: r2/lib/strings.py:194 r2/templates/comment.htmllite:54 +#: r2/lib/strings.py:189 r2/templates/comment.htmllite:54 #: r2/templates/linkinfobar.html:40 msgid "point" msgid_plural "points" msgstr[0] "" msgstr[1] "" -#: r2/lib/strings.py:197 +#: r2/lib/strings.py:192 msgid "link" msgid_plural "links" msgstr[0] "" msgstr[1] "" -#: r2/lib/strings.py:199 r2/lib/pages/pages.py:534 +#: r2/lib/strings.py:194 r2/lib/pages/pages.py:545 msgid "message" msgid_plural "messages" msgstr[0] "" msgstr[1] "" -#: r2/lib/strings.py:200 +#: r2/lib/strings.py:195 msgid "subreddit" msgid_plural "subreddits" msgstr[0] "" msgstr[1] "" -#: r2/lib/strings.py:203 +#: r2/lib/strings.py:198 msgid "reader" msgid_plural "readers" msgstr[0] "" msgstr[1] "" -#: r2/lib/strings.py:204 +#: r2/lib/strings.py:199 msgid "subscriber" msgid_plural "subscribers" msgstr[0] "" msgstr[1] "" -#: r2/lib/strings.py:205 r2/templates/subreddit.html:80 +#: r2/lib/strings.py:200 r2/templates/subreddit.html:80 #: r2/templates/subreddit.html:81 msgid "contributor" msgid_plural "contributors" msgstr[0] "" msgstr[1] "" -#: r2/lib/strings.py:206 r2/templates/subreddit.html:74 +#: r2/lib/strings.py:201 r2/templates/subreddit.html:74 msgid "moderator" msgid_plural "moderators" msgstr[0] "" msgstr[1] "" -#: r2/lib/strings.py:209 +#: r2/lib/strings.py:204 msgid "milliseconds" msgid_plural "milliseconds" msgstr[0] "" msgstr[1] "" -#: r2/lib/strings.py:210 r2/lib/utils/utils.py:411 r2/templates/searchbar.html:43 +#: r2/lib/strings.py:205 r2/lib/utils/utils.py:419 r2/templates/searchbar.html:43 msgid "second" msgid_plural "seconds" msgstr[0] "" msgstr[1] "" -#: r2/lib/strings.py:211 r2/lib/utils/utils.py:410 +#: r2/lib/strings.py:206 r2/lib/utils/utils.py:418 msgid "minute" msgid_plural "minutes" msgstr[0] "" msgstr[1] "" -#: r2/lib/strings.py:212 r2/lib/utils/utils.py:409 +#: r2/lib/strings.py:207 r2/lib/utils/utils.py:417 msgid "hour" msgid_plural "hours" msgstr[0] "" msgstr[1] "" -#: r2/lib/strings.py:213 
r2/lib/utils/utils.py:408 r2/templates/paymentform.html:46 +#: r2/lib/strings.py:208 r2/lib/utils/utils.py:416 r2/templates/paymentform.html:46 #: r2/templates/promotedlink.html:74 msgid "day" msgid_plural "days" msgstr[0] "" msgstr[1] "" -#: r2/lib/strings.py:214 r2/lib/utils/utils.py:407 +#: r2/lib/strings.py:209 r2/lib/utils/utils.py:415 msgid "month" msgid_plural "months" msgstr[0] "" msgstr[1] "" -#: r2/lib/strings.py:215 r2/lib/utils/utils.py:406 +#: r2/lib/strings.py:210 r2/lib/utils/utils.py:414 msgid "year" msgid_plural "years" msgstr[0] "" @@ -1294,167 +1298,171 @@ msgstr "" msgid "%d %b %Y" msgstr "" -#: r2/lib/pages/pages.py:180 +#: r2/lib/pages/pages.py:184 msgid "moderators" msgstr "" -#: r2/lib/pages/pages.py:186 +#: r2/lib/pages/pages.py:190 msgid "admin box" msgstr "" -#: r2/lib/pages/pages.py:190 +#: r2/lib/pages/pages.py:194 msgid "Submit a link" msgstr "" -#: r2/lib/pages/pages.py:197 +#: r2/lib/pages/pages.py:201 msgid "Create your own reddit" msgstr "" -#: r2/lib/pages/pages.py:317 +#: r2/lib/pages/pages.py:321 msgid "site links" msgstr "" -#: r2/lib/pages/pages.py:321 +#: r2/lib/pages/pages.py:325 msgid "FAQ" msgstr "" -#: r2/lib/pages/pages.py:323 +#: r2/lib/pages/pages.py:327 msgid "reddiquette" msgstr "" -#: r2/lib/pages/pages.py:336 +#: r2/lib/pages/pages.py:340 msgid "reddit tools" msgstr "" -#: r2/lib/pages/pages.py:344 +#: r2/lib/pages/pages.py:348 msgid "job board" msgstr "" -#: r2/lib/pages/pages.py:346 +#: r2/lib/pages/pages.py:350 msgid "about us" msgstr "" -#: r2/lib/pages/pages.py:362 +#: r2/lib/pages/pages.py:366 msgid "brothers" msgstr "" -#: r2/lib/pages/pages.py:374 +#: r2/lib/pages/pages.py:378 msgid "sisters" msgstr "" -#: r2/lib/pages/pages.py:582 +#: r2/lib/pages/pages.py:593 msgid "login or register" msgstr "" -#: r2/lib/pages/pages.py:614 +#: r2/lib/pages/pages.py:625 msgid "bummer" msgstr "" -#: r2/lib/pages/pages.py:626 +#: r2/lib/pages/pages.py:637 msgid "this page is no longer available due to a copyright claim." msgstr "" -#: r2/lib/pages/pages.py:670 r2/templates/comment.html:77 +#: r2/lib/pages/pages.py:681 r2/templates/comment.html:77 #: r2/templates/comment.htmllite:46 r2/templates/comment.xml:30 #: r2/templates/comment.xml:31 msgid "[deleted]" msgstr "" -#: r2/lib/pages/pages.py:755 r2/templates/createsubreddit.html:66 +#: r2/lib/pages/pages.py:766 r2/templates/createsubreddit.html:66 msgid "manage your reddit" msgstr "" -#: r2/lib/pages/pages.py:756 +#: r2/lib/pages/pages.py:767 #, python-format msgid "about %(site)s" msgstr "" -#: r2/lib/pages/pages.py:782 +#: r2/lib/pages/pages.py:793 msgid "search reddits" msgstr "" -#: r2/lib/pages/pages.py:809 +#: r2/lib/pages/pages.py:820 msgid "your front page reddits" msgstr "" -#: r2/lib/pages/pages.py:872 r2/templates/organiclisting.html:60 +#: r2/lib/pages/pages.py:883 r2/templates/organiclisting.html:60 msgid "what's this?" msgstr "" -#: r2/lib/pages/pages.py:873 +#: r2/lib/pages/pages.py:884 msgid "trophy case" msgstr "" -#: r2/lib/pages/pages.py:935 +#: r2/lib/pages/pages.py:946 msgid "page not found" msgstr "" -#: r2/lib/pages/pages.py:946 +#: r2/lib/pages/pages.py:957 msgid "you aren't allowed to do that." 
msgstr "" -#: r2/lib/pages/pages.py:1089 +#: r2/lib/pages/pages.py:1100 msgid "try entering those letters again" msgstr "" -#: r2/lib/pages/pages.py:1141 +#: r2/lib/pages/pages.py:1152 msgid "previous search" msgstr "" -#: r2/lib/pages/pages.py:1171 +#: r2/lib/pages/pages.py:1182 #, python-format msgid "%(site_title)s via %(domain)s" msgstr "" -#: r2/lib/pages/pages.py:1450 +#: r2/lib/pages/pages.py:1470 msgid "This feature is currently unavailable. Sorry" msgstr "" -#: r2/lib/pages/pages.py:1553 +#: r2/lib/pages/pages.py:1573 msgid "add a friend" msgstr "" -#: r2/lib/pages/pages.py:1557 +#: r2/lib/pages/pages.py:1577 msgid "your friends" msgstr "" -#: r2/lib/pages/pages.py:1572 +#: r2/lib/pages/pages.py:1593 +msgid "add to whitelist" +msgstr "" + +#: r2/lib/pages/pages.py:1594 msgid "add contributor" msgstr "" -#: r2/lib/pages/pages.py:1576 +#: r2/lib/pages/pages.py:1598 #, python-format msgid "contributors to %(reddit)s" msgstr "" -#: r2/lib/pages/pages.py:1587 +#: r2/lib/pages/pages.py:1609 msgid "add moderator" msgstr "" -#: r2/lib/pages/pages.py:1591 +#: r2/lib/pages/pages.py:1613 #, python-format msgid "moderators to %(reddit)s" msgstr "" -#: r2/lib/pages/pages.py:1606 +#: r2/lib/pages/pages.py:1628 msgid "banned users" msgstr "" -#: r2/lib/pages/pages.py:1623 +#: r2/lib/pages/pages.py:1645 msgid "share traffic" msgstr "" -#: r2/lib/pages/pages.py:1627 +#: r2/lib/pages/pages.py:1649 msgid "current viewers" msgstr "" -#: r2/lib/utils/utils.py:424 +#: r2/lib/utils/utils.py:432 msgid "millisecond" msgid_plural "milliseconds" msgstr[0] "" msgstr[1] "" -#: r2/lib/utils/utils.py:435 r2/templates/comment.htmllite:46 +#: r2/lib/utils/utils.py:443 r2/templates/comment.htmllite:46 msgid "ago" msgstr "" @@ -1467,11 +1475,11 @@ msgstr "" msgid "label_template" msgstr "" -#: r2/models/link.py:845 +#: r2/models/link.py:851 msgid "comment reply" msgstr "" -#: r2/models/link.py:851 +#: r2/models/link.py:857 msgid "post reply" msgstr "" @@ -1539,11 +1547,11 @@ msgstr "" msgid "[i18n] translation offer from '%(user)s'" msgstr "" -#: r2/models/subreddit.py:602 +#: r2/models/subreddit.py:604 msgid "reddit.com: what's new online!" msgstr "" -#: r2/models/subreddit.py:650 +#: r2/models/subreddit.py:652 msgid "on reddit.com" msgstr "" @@ -1563,7 +1571,7 @@ msgstr "" msgid "enabled" msgstr "" -#: r2/templates/admintranslations.html:106 +#: r2/templates/admintranslations.html:100 msgid "add authors" msgstr "" @@ -1791,8 +1799,8 @@ msgstr "" msgid "that subreddit doesn't exist, but you can create it here." msgstr "" -#: r2/templates/createsubreddit.html:342 r2/templates/prefoptions.html:231 -#: r2/templates/promotelinkform.html:338 +#: r2/templates/createsubreddit.html:350 r2/templates/prefoptions.html:235 +#: r2/templates/promotelinkform.html:339 msgid "save options" msgstr "" @@ -2262,6 +2270,67 @@ msgstr "" msgid "deleting..." msgstr "" +#: r2/templates/preffeeds.html:29 +msgid "Private RSS feeds" +msgstr "" + +#: r2/templates/preffeeds.html:31 +msgid "" +"On this page are links to private RSS feeds so that you can get listings of " +"your content (personalized front page, message panel, saved listing, etc.) " +"without having to deal with cookies or other auth." +msgstr "" + +#: r2/templates/preffeeds.html:32 +msgid "" +"Keep in mind that these urls are intended to be private, so **share at your " +"own risk.**" +msgstr "" + +#: r2/templates/preffeeds.html:33 +msgid "All feeds are invalidated if you change your password, however." 
+msgstr "" + +#: r2/templates/preffeeds.html:54 +msgid "your front page" +msgstr "" + +#: r2/templates/preffeeds.html:57 +msgid "your saved links" +msgstr "" + +#: r2/templates/preffeeds.html:64 +msgid "links you've liked" +msgstr "" + +#: r2/templates/preffeeds.html:67 +msgid "links you've disliked" +msgstr "" + +#: r2/templates/preffeeds.html:70 +msgid "links you've hidden" +msgstr "" + +#: r2/templates/preffeeds.html:77 +msgid "everything" +msgstr "" + +#: r2/templates/preffeeds.html:80 +msgid "unread messages" +msgstr "" + +#: r2/templates/preffeeds.html:83 +msgid "messages only" +msgstr "" + +#: r2/templates/preffeeds.html:86 +msgid "comment replies only" +msgstr "" + +#: r2/templates/preffeeds.html:89 +msgid "self-post replies only" +msgstr "" + #: r2/templates/prefoptions.html:71 msgid "your preferences have been updated" msgstr "" @@ -2442,11 +2511,19 @@ msgstr "" msgid "label posts that are not safe for work (NSFW)" msgstr "" -#: r2/templates/prefoptions.html:223 +#: r2/templates/prefoptions.html:222 +msgid "enable private RSS feeds" +msgstr "" + +#: r2/templates/prefoptions.html:224 +msgid "(available from the 'RSS feed' tab in prefs)" +msgstr "" + +#: r2/templates/prefoptions.html:227 msgid "privacy options" msgstr "" -#: r2/templates/prefoptions.html:225 +#: r2/templates/prefoptions.html:229 msgid "make my votes public" msgstr "" @@ -2683,63 +2760,63 @@ msgstr "" msgid "(download as .csv)" msgstr "" -#: r2/templates/promotelinkform.html:58 +#: r2/templates/promotelinkform.html:59 msgid "create a promotion" msgstr "" -#: r2/templates/promotelinkform.html:58 +#: r2/templates/promotelinkform.html:59 msgid "edit promotion" msgstr "" -#: r2/templates/promotelinkform.html:72 +#: r2/templates/promotelinkform.html:73 msgid "NOTE:" msgstr "" -#: r2/templates/promotelinkform.html:78 +#: r2/templates/promotelinkform.html:79 msgid "" "once you set up payment, you will not be charged until the link is approved " "and scheduled for display" msgstr "" -#: r2/templates/promotelinkform.html:86 +#: r2/templates/promotelinkform.html:87 msgid "This promotion has been rejected. Please edit and resubmit." msgstr "" -#: r2/templates/promotelinkform.html:92 +#: r2/templates/promotelinkform.html:93 msgid "" "Your bid has been registered and your submission is awaiting review. We will" " notify you by email of status updates." msgstr "" -#: r2/templates/promotelinkform.html:98 +#: r2/templates/promotelinkform.html:99 msgid "" "NOTE: changes to this promotion will result in its status being reverted to " "'unapproved'" msgstr "" -#: r2/templates/promotelinkform.html:104 +#: r2/templates/promotelinkform.html:105 msgid "This promotion is finished. Edits would be a little pointless." msgstr "" -#: r2/templates/promotelinkform.html:322 +#: r2/templates/promotelinkform.html:323 msgid "" "You'll be able to submit an image for the thumbnail once the promotion is " "submitted." 
msgstr "" -#: r2/templates/promotelinkform.html:341 +#: r2/templates/promotelinkform.html:342 msgid "agree" msgstr "" -#: r2/templates/promotelinkform.html:355 +#: r2/templates/promotelinkform.html:356 msgid "make this a freebie" msgstr "" -#: r2/templates/reddit.html:135 r2/templates/reddit.html:147 +#: r2/templates/reddit.html:141 r2/templates/reddit.html:153 msgid "close this window" msgstr "" -#: r2/templates/reddit.html:173 +#: r2/templates/reddit.html:179 msgid "Recently viewed links" msgstr "" diff --git a/r2/r2/lib/amqp.py b/r2/r2/lib/amqp.py index 6c894422a..68b524bfd 100644 --- a/r2/r2/lib/amqp.py +++ b/r2/r2/lib/amqp.py @@ -29,10 +29,10 @@ import time import errno import socket import itertools +import pickle from amqplib import client_0_8 as amqp -from r2.lib.cache import LocalCache from pylons import g amqp_host = g.amqp_host @@ -88,7 +88,7 @@ def get_connection(): virtual_host = amqp_virtual_host, insist = False) except (socket.error, IOError): - print 'error connecting to amqp' + print 'error connecting to amqp %s @ %s' % (amqp_user, amqp_host) time.sleep(1) # don't run init_queue until someone actually needs it. this @@ -160,26 +160,42 @@ def add_item(routing_key, body, message_id = None): worker.do(_add_item, routing_key, body, message_id = message_id) -def handle_items(queue, callback, ack = True, limit = 1, drain = False): +def add_kw(routing_key, **kw): + add_item(routing_key, pickle.dumps(kw)) + +def handle_items(queue, callback, ack = True, limit = 1, drain = False, + verbose=True, sleep_time = 1): """Call callback() on every item in a particular queue. If the connection to the queue is lost, it will die. Intended to be used as a long-running process.""" chan = get_channel() + countdown = None + while True: + + # NB: None != 0, so we don't need an "is not None" check here + if countdown == 0: + break + msg = chan.basic_get(queue) if not msg and drain: return elif not msg: - time.sleep(1) + time.sleep(sleep_time) continue + if countdown is None and drain and 'message_count' in msg.delivery_info: + countdown = 1 + msg.delivery_info['message_count'] + g.reset_caches() items = [] - while msg: + while msg and countdown != 0: items.append(msg) + if countdown is not None: + countdown -= 1 if len(items) >= limit: break # the innermost loop only msg = chan.basic_get(queue) @@ -190,7 +206,8 @@ def handle_items(queue, callback, ack = True, limit = 1, drain = False): # the count from the last message, if the count is # available count_str = '(%d remaining)' % items[-1].delivery_info['message_count'] - print "%s: %d items %s" % (queue, len(items), count_str) + if verbose: + print "%s: %d items %s" % (queue, len(items), count_str) callback(items, chan) if ack: @@ -205,6 +222,7 @@ def handle_items(queue, callback, ack = True, limit = 1, drain = False): chan.basic_reject(item.delivery_tag, requeue = True) raise + def empty_queue(queue): """debug function to completely erase the contents of a queue""" chan = get_channel() diff --git a/r2/r2/lib/app_globals.py b/r2/r2/lib/app_globals.py index 104dc42a3..4da236076 100644 --- a/r2/r2/lib/app_globals.py +++ b/r2/r2/lib/app_globals.py @@ -23,8 +23,9 @@ from __future__ import with_statement from pylons import config import pytz, os, logging, sys, socket, re, subprocess from datetime import timedelta, datetime -from r2.lib.cache import LocalCache, Memcache, HardCache, CacheChain -from r2.lib.cache import SelfEmptyingCache +from r2.lib.cache import LocalCache, SelfEmptyingCache +from r2.lib.cache import Memcache, Permacache, HardCache +from 
r2.lib.cache import MemcacheChain, DoubleMemcacheChain, PermacacheChain, HardcacheChain from r2.lib.db.stats import QueryStats from r2.lib.translation import get_active_langs from r2.lib.lock import make_lock_factory @@ -57,7 +58,7 @@ class Globals(object): ] bool_props = ['debug', 'translator', - 'log_start', + 'log_start', 'sqlprinting', 'template_debug', 'uncompressedJS', @@ -67,7 +68,9 @@ class Globals(object): 'css_killswitch', 'db_create_tables', 'disallow_db_writes', - 'allow_shutdown'] + 'exception_logging', + 'enable_usage_stats', + ] tuple_props = ['memcaches', 'rec_cache', @@ -77,9 +80,9 @@ class Globals(object): 'sponsors', 'monitored_servers', 'automatic_reddits', - 'skip_precompute_queries', 'agents', - 'allowed_css_linked_domains'] + 'allowed_css_linked_domains', + 'authorized_cnames'] def __init__(self, global_conf, app_conf, paths, **extra): """ @@ -89,22 +92,22 @@ class Globals(object): One instance of Globals is created by Pylons during application initialization and is available during requests via the 'g' variable. - + ``global_conf`` The same variable used throughout ``config/middleware.py`` namely, the variables from the ``[DEFAULT]`` section of the configuration file. - + ``app_conf`` The same ``kw`` dictionary used throughout ``config/middleware.py`` namely, the variables from the section in the config file for your application. - + ``extra`` The configuration returned from ``load_config`` in ``config/middleware.py`` which may be of use in the setup of your global variables. - + """ # slop over all variables to start with @@ -122,21 +125,30 @@ class Globals(object): self.running_as_script = global_conf.get('running_as_script', False) - self.skip_precompute_queries = set(self.skip_precompute_queries) - # initialize caches. Any cache-chains built here must be added - # to reset_caches so that they can properly reset their local - # components - mc = Memcache(self.memcaches, pickleProtocol = 1) + # to cache_chains (closed around by reset_caches) so that they + # can properly reset their local components + + localcache_cls = SelfEmptyingCache if self.running_as_script else LocalCache + + # we're going to temporarily run the old memcached behind the + # new one so the caches can start warmer + # mc = Memcache(self.memcaches, debug=self.debug) + mc = Permacache(self.memcaches) + rec_cache = Permacache(self.rec_cache) + rmc = Permacache(self.rendercaches) + pmc = Permacache(self.permacaches) + # hardcache is done after the db info is loaded, and then the + # chains are reset to use the appropriate initial entries self.memcache = mc - self.cache = CacheChain((LocalCache(), mc)) - self.permacache = Memcache(self.permacaches, pickleProtocol = 1) - self.rendercache = Memcache(self.rendercaches, pickleProtocol = 1) + self.cache = PermacacheChain((localcache_cls(), mc)) + self.permacache = PermacacheChain((localcache_cls(), pmc)) + self.rendercache = PermacacheChain((localcache_cls(), rmc)) + self.rec_cache = rec_cache self.make_lock = make_lock_factory(mc) + cache_chains = [self.cache, self.permacache, self.rendercache] - self.rec_cache = Memcache(self.rec_cache, pickleProtocol = 1) - # set default time zone if one is not set tz = global_conf.get('timezone') dtz = global_conf.get('display_timezone', tz) @@ -148,9 +160,19 @@ class Globals(object): self.dbm = self.load_db_params(global_conf) # can't do this until load_db_params() has been called - self.hardcache = CacheChain((LocalCache(), mc, HardCache(self)), - cache_negative_results = True) + self.hardcache = 
HardcacheChain((localcache_cls(), mc, HardCache(self)), + cache_negative_results = True) + cache_chains.append(self.hardcache) + # I know this sucks, but we need non-request-threads to be + # able to reset the caches, so we need them be able to close + # around 'cache_chains' without being able to call getattr on + # 'g' + def reset_caches(): + for chain in cache_chains: + chain.reset() + + self.reset_caches = reset_caches self.reset_caches() #make a query cache @@ -169,6 +191,8 @@ class Globals(object): all_languages.sort() self.all_languages = all_languages + self.paths = paths + # load the md5 hashes of files under static static_files = os.path.join(paths.get('static_files'), 'static') self.static_md5 = {} @@ -185,6 +209,7 @@ class Globals(object): #set up the logging directory log_path = self.log_path process_iden = global_conf.get('scgi_port', 'default') + self.reddit_port = process_iden if log_path: if not os.path.exists(log_path): os.makedirs(log_path) @@ -207,7 +232,8 @@ class Globals(object): if not self.media_domain: self.media_domain = self.domain if self.media_domain == self.domain: - print "Warning: g.media_domain == g.domain. This may give untrusted content access to user cookies" + print ("Warning: g.media_domain == g.domain. " + + "This may give untrusted content access to user cookies") #read in our CSS so that it can become a default for subreddit #stylesheets @@ -253,12 +279,6 @@ class Globals(object): if self.log_start: self.log.error("reddit app started %s at %s" % (self.short_version, datetime.now())) - def reset_caches(self): - for ca in ('cache', 'hardcache'): - cache = getattr(self, ca) - new_cache = SelfEmptyingCache() if self.running_as_script else LocalCache() - cache.caches = (new_cache,) + cache.caches[1:] - @staticmethod def to_bool(x): return (x.lower() == 'true') if x else None diff --git a/r2/r2/lib/base.py b/r2/r2/lib/base.py index c1d4da074..02ef22a3b 100644 --- a/r2/r2/lib/base.py +++ b/r2/r2/lib/base.py @@ -28,10 +28,12 @@ from r2.lib.utils import to_js from r2.lib.filters import spaceCompress, _force_unicode from r2.lib.template_helpers import get_domain from utils import storify, string2js, read_http_date +from r2.lib.log import log_exception import re, md5 -from urllib import quote +from urllib import quote import urllib2 +import sys #TODO hack @@ -40,18 +42,22 @@ from r2.lib.utils import UrlParser, query_string logging.getLogger('scgi-wsgi').setLevel(logging.CRITICAL) class BaseController(WSGIController): - def __after__(self): - self.post() + def try_pagecache(self): + pass def __before__(self): self.pre() + self.try_pagecache() + + def __after__(self): + self.post() def __call__(self, environ, start_response): true_client_ip = environ.get('HTTP_TRUE_CLIENT_IP') ip_hash = environ.get('HTTP_TRUE_CLIENT_IP_HASH') forwarded_for = environ.get('HTTP_X_FORWARDED_FOR', ()) remote_addr = environ.get('REMOTE_ADDR') - + if (g.ip_hash and true_client_ip and ip_hash @@ -93,9 +99,18 @@ class BaseController(WSGIController): c.thread_pool = environ['paste.httpserver.thread_pool'] c.response = Response() - res = WSGIController.__call__(self, environ, start_response) + try: + res = WSGIController.__call__(self, environ, start_response) + except Exception as e: + if g.exception_logging: + try: + log_exception(e, *sys.exc_info()) + except Exception as f: + print "log_exception() freaked out: %r" % f + print "sorry for breaking the stack trace:" + raise return res - + def pre(self): pass def post(self): pass @@ -154,7 +169,7 @@ class BaseController(WSGIController): 
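A rough sketch (an editor's illustration, not part of the patch) of the exception_logging guard that the BaseController.__call__ change above wraps around the WSGI call; g, log_exception, and the print fallback follow the names in that hunk, while the standalone helper is hypothetical:

import sys

def call_with_exception_logging(g, log_exception, run):
    # run() stands in for the WSGIController.__call__ invocation; on
    # failure the error is logged (when exception_logging is enabled)
    # and then re-raised so normal error handling still happens.
    try:
        return run()
    except Exception as e:
        if g.exception_logging:
            try:
                log_exception(e, *sys.exc_info())
            except Exception as f:
                print "log_exception() freaked out: %r" % f
        raise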
Reformats the new Location (dest) using format_output_url and sends the user to that location with the provided HTTP code. """ - dest = cls.format_output_url(dest) + dest = cls.format_output_url(dest or "/") c.response.headers['Location'] = dest c.response.status_code = code return c.response diff --git a/r2/r2/lib/c/reddit-discount-wrapper.c b/r2/r2/lib/c/reddit-discount-wrapper.c new file mode 100644 index 000000000..2e263ddef --- /dev/null +++ b/r2/r2/lib/c/reddit-discount-wrapper.c @@ -0,0 +1,84 @@ +#include +#include +#include + +#include "mkdio.h" + +typedef struct rd_opts_s { + const char * target; + int nofollow; +} rd_opts_t; + +char * +cb_flagmaker (const char * text, const int size, void * arg) +{ + rd_opts_t * opts; + char * rv; + int rv_size; + int bytes_written; + +#define TARGET_TAG "target=" +#define NOFOLLOW " rel='nofollow'" + + opts = (rd_opts_t *) arg; + + if (opts->target == NULL) { + opts->target = ""; + } + + if (opts->target[0] == '\0') { + rv_size = 1; /* need room for a \0 */ + } else { + /* Need to add 2 more, for the surrounding quotes */ + rv_size = sizeof(TARGET_TAG) + strlen(opts->target) + 2; + } + + if (opts->nofollow) { + /* We can subtract 1 because the \0 is already accounted for */ + rv_size += sizeof(NOFOLLOW) - 1; + } + + rv = malloc(rv_size); + + bytes_written = 1 + sprintf (rv, "%s%s%s%s%s", + opts->target[0] == '\0' ? "" : TARGET_TAG, + opts->target[0] == '\0' ? "" : "'", + opts->target, + opts->target[0] == '\0' ? "" : "'", + opts->nofollow ? NOFOLLOW : ""); + + if (bytes_written > rv_size) { + fprintf (stderr, "Augh, allocated %d bytes and wrote %d bytes\n", + rv_size, bytes_written); + abort(); + } + return rv; +} + +void +reddit_discount_wrap(const char * text, int nofollow, const char * target, + void ** v_mmiot, char ** html, int * size) +{ + rd_opts_t opts; + MMIOT * mmiot; + + opts.target = target; + opts.nofollow = nofollow; + + mmiot = mkd_string((char *) text, strlen(text), 0); + + mkd_compile(mmiot, MKD_NOHTML | MKD_NOIMAGE | MKD_NOPANTS | MKD_NOHEADER | + MKD_NO_EXT | MKD_AUTOLINK | MKD_SAFELINK); + + mkd_e_flags (mmiot, &cb_flagmaker); + mkd_e_context(mmiot, &opts); + + *size = mkd_document(mmiot, html); + *v_mmiot = mmiot; +} + +void +reddit_discount_cleanup (void * v_mmiot) { + mkd_cleanup(v_mmiot); +} + diff --git a/r2/r2/lib/c_markdown.py b/r2/r2/lib/c_markdown.py index 120e70de2..16356db6d 100644 --- a/r2/r2/lib/c_markdown.py +++ b/r2/r2/lib/c_markdown.py @@ -1,2 +1,19 @@ +from ctypes import cdll, c_int, c_void_p, byref, string_at +from r2.lib.filters import _force_utf8 +from pylons import g + +libmd = cdll.LoadLibrary(g.paths['root'] + '/../reddit-discount.so') + def c_markdown(text, nofollow=False, target=None): - raise NotImplementedError() + u8 = _force_utf8(text) + size = c_int(len(u8)) + nofollow = 1 if nofollow else 0 + doc = c_void_p() + html = c_void_p() + + libmd.reddit_discount_wrap(u8, nofollow, target, + byref(doc), byref(html), byref(size)) + r = string_at(html, size) + libmd.reddit_discount_cleanup(doc) + + return r diff --git a/r2/r2/lib/cache.py b/r2/r2/lib/cache.py index a63d03f61..3d43a58eb 100644 --- a/r2/r2/lib/cache.py +++ b/r2/r2/lib/cache.py @@ -20,19 +20,22 @@ # CondeNet, Inc. All Rights Reserved. 
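A hedged usage sketch (an editor's illustration, not part of the patch) for the ctypes binding defined in c_markdown.py above; the sample input and the comments about the output are assumptions, since the exact HTML depends on the reddit-discount.so build:

from r2.lib.c_markdown import c_markdown

# nofollow and target map onto the rd_opts_t fields in
# reddit-discount-wrapper.c: cb_flagmaker appends rel='nofollow' and
# target='_top' to the links that discount generates.
html = c_markdown("*hello* [reddit](http://www.reddit.com/)",
                  nofollow=True, target="_top")
# html comes back as a UTF-8 byte string of rendered HTML; raw HTML and
# images in the input are suppressed by the MKD_NOHTML / MKD_NOIMAGE
# flags set on the C side.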
################################################################################ from threading import local +from hashlib import md5 -from utils import lstrips, in_chunks +import pylibmc +from _pylibmc import MemcachedError from contrib import memcache +from utils import lstrips, in_chunks, tup from r2.lib.hardcachebackend import HardCacheBackend class NoneResult(object): pass class CacheUtils(object): - def incr_multi(self, keys, delta=1, time=0, prefix=''): + def incr_multi(self, keys, delta=1, prefix=''): for k in keys: try: - self.incr(prefix + k, time=time, delta=delta) + self.incr(prefix + k, delta) except ValueError: pass @@ -53,9 +56,14 @@ class CacheUtils(object): return dict((key_map[k], r[k]) for k in r.keys()) -class Memcache(CacheUtils, memcache.Client): +class Permacache(CacheUtils, memcache.Client): + """We still use our patched python-memcache to talk to the + permacaches for legacy reasons""" simple_get_multi = memcache.Client.get_multi + def __init__(self, servers): + memcache.Client.__init__(self, servers, pickleProtocol = 1) + def set_multi(self, keys, prefix='', time=0): new_keys = {} @@ -79,6 +87,62 @@ class Memcache(CacheUtils, memcache.Client): memcache.Client.delete_multi(self, keys, time = time, key_prefix = prefix) + def get_local_client(self): + return self # memcache.py handles this itself + +class Memcache(CacheUtils, pylibmc.Client): + simple_get_multi = pylibmc.Client.get_multi + + def __init__(self, servers, + debug = False, + binary=True, + noreply=False): + pylibmc.Client.__init__(self, servers, binary=binary) + behaviors = {'no_block': True, # use async I/O + 'cache_lookups': True, # cache DNS lookups + 'tcp_nodelay': True, # no nagle + 'ketama': True, # consistant hashing + '_noreply': int(noreply), + 'verify_key': int(debug)} # spend the CPU to verify keys + self.behaviors.update(behaviors) + self.local_clients = local() + + def get_local_client(self): + # if this thread hasn't had one yet, make one + if not getattr(self.local_clients, 'client', None): + self.local_clients.client = self.clone() + return self.local_clients.client + + def set_multi(self, keys, prefix='', time=0): + new_keys = {} + for k,v in keys.iteritems(): + new_keys[str(k)] = v + pylibmc.Client.set_multi(self, new_keys, key_prefix = prefix, + time = time) + + def incr(self, key, delta=1, time=0): + # ignore the time on these + return pylibmc.Client.incr(self, key, delta) + + def add(self, key, val, time=0): + try: + return pylibmc.Client.add(self, key, val, time=time) + except pylibmc.DataExists: + return None + + def get(self, key, default=None): + r = pylibmc.Client.get(self, key) + if r is None: + return default + return r + + def set(self, key, val, time=0): + pylibmc.Client.set(self, key, val, time = time) + + def delete_multi(self, keys, prefix='', time=0): + pylibmc.Client.delete_multi(self, keys, time = time, + key_prefix = prefix) + class HardCache(CacheUtils): backend = None @@ -161,7 +225,6 @@ class LocalCache(dict, CacheUtils): for k in keys: if self.has_key(k): out[k] = self[k] -# print "Local cache answers: " + str(out) return out def set(self, key, val, time = 0): @@ -175,7 +238,9 @@ class LocalCache(dict, CacheUtils): def add(self, key, val, time = 0): self._check_key(key) - return self.setdefault(key, val) + was = key in self + self.setdefault(key, val) + return not was def delete(self, key): if self.has_key(key): @@ -221,6 +286,12 @@ class CacheChain(CacheUtils, local): return ret return fn + # note that because of the naive nature of `add' when used on a + # cache 
chain, its return value isn't reliable. if you need to + # verify its return value you'll either need to make it smarter or + # use the underlying cache directly + add = make_set_fn('add') + set = make_set_fn('set') append = make_set_fn('append') prepend = make_set_fn('prepend') @@ -234,33 +305,6 @@ class CacheChain(CacheUtils, local): flush_all = make_set_fn('flush_all') cache_negative_results = False - def add(self, key, val, time=0): - authority = self.caches[-1] - added_val = authority.add(key, val, time=time) - for cache in self.caches[:-1]: - # Calling set() rather than add() to ensure that all caches are - # in sync and that de-syncs repair themselves - cache.set(key, added_val, time=time) - return added_val - - def accrue(self, key, time=0, delta=1): - auth_value = self.caches[-1].get(key) - - if auth_value is None: - self.caches[-1].set(key, 0, time) - auth_value = 0 - - try: - auth_value = int(auth_value) - except ValueError: - raise ValueError("Can't accrue %s; it's a %s (%r)" % - (key, auth_value.__class__.__name__, auth_value)) - - for c in self.caches: - c.set(key, auth_value, time=time) - - self.incr(key, time=time, delta=delta) - def get(self, key, default = None, local = True): for c in self.caches: if not local and isinstance(c,LocalCache): @@ -271,12 +315,12 @@ class CacheChain(CacheUtils, local): if val is not None: #update other caches for d in self.caches: - if c == d: + if c is d: break # so we don't set caches later in the chain d.set(key, val) if self.cache_negative_results and val is NoneResult: - return None + return default else: return val @@ -293,12 +337,13 @@ class CacheChain(CacheUtils, local): need = set(keys) for c in self.caches: if len(out) == len(keys): + # we've found them all break r = c.simple_get_multi(need) #update other caches if r: for d in self.caches: - if c == d: + if c is d: break # so we don't set caches later in the chain d.set_multi(r) r.update(out) @@ -319,12 +364,89 @@ class CacheChain(CacheUtils, local): return out + def __repr__(self): + return '<%s>' % (self.__class__.__name__,) + def debug(self, key): print "Looking up [%r]" % key for i, c in enumerate(self.caches): print "[%d] %10s has value [%r]" % (i, c.__class__.__name__, c.get(key)) + def reset(self): + # the first item in a cache chain is a LocalCache + self.caches = (self.caches[0].__class__(),) + self.caches[1:] + +class MemcacheChain(CacheChain): + def __init__(self, caches): + CacheChain.__init__(self, caches) + self.mc_master = self.caches[-1] + + def reset(self): + CacheChain.reset(self) + localcache, old_mc = self.caches + self.caches = (localcache, self.mc_master.get_local_client()) + +class DoubleMemcacheChain(CacheChain): + """Temporary cache chain that places the new cache ahead of the + old one for easier deployment""" + def __init__(self, caches): + self.caches = localcache, memcache, permacache = caches + self.mc_master = memcache + + def reset(self): + CacheChain.reset(self) + self.caches = (self.caches[0], + self.mc_master.get_local_client(), + self.caches[2]) + +class PermacacheChain(CacheChain): + pass + +class HardcacheChain(CacheChain): + def __init__(self, caches, cache_negative_results = False): + CacheChain.__init__(self, caches, cache_negative_results) + localcache, memcache, hardcache = self.caches + self.mc_master = memcache + + def add(self, key, val, time=0): + authority = self.caches[-1] # the authority is the hardcache + # itself + added_val = authority.add(key, val, time=time) + for cache in self.caches[:-1]: + # Calling set() rather than add() to 
ensure that all caches are + # in sync and that de-syncs repair themselves + cache.set(key, added_val, time=time) + return added_val + + def accrue(self, key, time=0, delta=1): + auth_value = self.caches[-1].get(key) + + if auth_value is None: + self.caches[-1].set(key, 0, time) + auth_value = 0 + + try: + auth_value = int(auth_value) + delta + except ValueError: + raise ValueError("Can't accrue %s; it's a %s (%r)" % + (key, auth_value.__class__.__name__, auth_value)) + + for c in self.caches: + c.set(key, auth_value, time=time) + + @property + def backend(self): + # the hardcache is always the last item in a HardCacheChain + return self.caches[-1].backend + + def reset(self): + CacheChain.reset(self) + assert len(self.caches) == 3 + self.caches = (self.caches[0], + self.mc_master.get_local_client(), + self.caches[2]) + #smart get multi def sgm(cache, keys, miss_fn, prefix='', time=0): keys = set(keys) @@ -340,34 +462,56 @@ def sgm(cache, keys, miss_fn, prefix='', time=0): return dict((s_keys[k], v) for k,v in r.iteritems()) -def test_cache(cache): +def test_cache(cache, prefix=''): #basic set/get - cache.set('1', 1) - assert cache.get('1') == 1 + cache.set('%s1' % prefix, 1) + assert cache.get('%s1' % prefix) == 1 #python data - cache.set('2', [1,2,3]) - assert cache.get('2') == [1,2,3] + cache.set('%s2' % prefix, [1,2,3]) + assert cache.get('%s2' % prefix) == [1,2,3] #set multi, no prefix - cache.set_multi({'3':3, '4': 4}) - assert cache.get_multi(('3', '4')) == {'3':3, '4': 4} + cache.set_multi({'%s3' % prefix:3, '%s4' % prefix: 4}) + assert cache.get_multi(('%s3' % prefix, '%s4' % prefix)) == {'%s3' % prefix: 3, + '%s4' % prefix: 4} #set multi, prefix - cache.set_multi({'3':3, '4': 4}, prefix='p_') - assert cache.get_multi(('3', 4), prefix='p_') == {'3':3, 4: 4} - assert cache.get_multi(('p_3', 'p_4')) == {'p_3':3, 'p_4': 4} + cache.set_multi({'3':3, '4': 4}, prefix='%sp_' % prefix) + assert cache.get_multi(('3', 4), prefix='%sp_' % prefix) == {'3':3, 4: 4} + assert cache.get_multi(('%sp_3' % prefix, '%sp_4' % prefix)) == {'%sp_3'%prefix: 3, + '%sp_4'%prefix: 4} #incr - cache.set('5', 1) - cache.set('6', 1) - cache.incr('5') - assert cache.get('5') == 2 - cache.incr('5',2) - assert cache.get('5') == 4 - cache.incr_multi(('5', '6'), 1) - assert cache.get('5') == 5 - assert cache.get('6') == 2 + cache.set('%s5'%prefix, 1) + cache.set('%s6'%prefix, 1) + cache.incr('%s5'%prefix) + assert cache.get('%s5'%prefix) == 2 + cache.incr('%s5'%prefix,2) + assert cache.get('%s5'%prefix) == 4 + cache.incr_multi(('%s5'%prefix, '%s6'%prefix), 1) + assert cache.get('%s5'%prefix) == 5 + assert cache.get('%s6'%prefix) == 2 + +def test_multi(cache): + from threading import Thread + + num_threads = 100 + num_per_thread = 1000 + + threads = [] + for x in range(num_threads): + def _fn(prefix): + def __fn(): + for y in range(num_per_thread): + test_cache(cache,prefix=prefix) + return __fn + t = Thread(target=_fn(str(x))) + t.start() + threads.append(t) + + for thread in threads: + thread.join() # a cache that occasionally dumps itself to be used for long-running # processes @@ -379,9 +523,38 @@ class SelfEmptyingCache(LocalCache): if len(self) > self.max_size: self.clear() - def set(self,key,val,time = 0): + def set(self, key, val, time=0): self.maybe_reset() return LocalCache.set(self,key,val,time) - def add(self,key,val): - return self.set(key,val) + def add(self, key, val, time=0): + self.maybe_reset() + return LocalCache.add(self, key, val) + +def make_key(iden, *a, **kw): + """ + A helper function for 
making memcached-usable cache keys out of + arbitrary arguments. Hashes the arguments but leaves the `iden' + human-readable + """ + h = md5() + + def _conv(s): + if isinstance(s, str): + return s + elif isinstance(s, unicode): + return s.encode('utf-8') + elif isinstance(s, (tuple, list)): + return ','.join(_conv(x) for x in s) + elif isinstance(s, dict): + return ','.join('%s:%s' % (_conv(k), _conv(v)) + for (k, v) in sorted(s.iteritems())) + else: + return str(s) + + iden = _conv(iden) + h.update(iden) + h.update(_conv(a)) + h.update(_conv(kw)) + + return '%s(%s)' % (iden, h.hexdigest()) diff --git a/r2/r2/lib/captcha.py b/r2/r2/lib/captcha.py index 94bc9f6a3..182bee590 100644 --- a/r2/r2/lib/captcha.py +++ b/r2/r2/lib/captcha.py @@ -24,6 +24,8 @@ from pylons import g from Captcha.Base import randomIdentifier from Captcha.Visual import Text, Backgrounds, Distortions, ImageCaptcha +from r2.lib.cache import make_key + IDEN_LENGTH = 32 SOL_LENGTH = 6 @@ -47,21 +49,24 @@ def make_solution(): return randomIdentifier(alphabet=string.ascii_letters, length = SOL_LENGTH).upper() def get_image(iden): - solution = g.rendercache.get(str(iden)) + key = make_key(iden) + solution = g.rendercache.get(key) if not solution: solution = make_solution() - g.rendercache.set(str(iden), solution, time = 300) + g.rendercache.set(key, solution, time = 300) return RandCaptcha(solution=solution).render() def valid_solution(iden, solution): + key = make_key(iden) + if (not iden or not solution or len(iden) != IDEN_LENGTH or len(solution) != SOL_LENGTH - or solution.upper() != g.rendercache.get(str(iden))): + or solution.upper() != g.rendercache.get(key)): solution = make_solution() - g.rendercache.set(str(iden), solution, time = 300) + g.rendercache.set(key, solution, time = 300) return False else: - g.rendercache.delete(str(iden)) + g.rendercache.delete(key) return True diff --git a/r2/r2/lib/comment_tree.py b/r2/r2/lib/comment_tree.py index 5a701f977..773e64bf0 100644 --- a/r2/r2/lib/comment_tree.py +++ b/r2/r2/lib/comment_tree.py @@ -24,6 +24,7 @@ from __future__ import with_statement from pylons import g from itertools import chain from utils import tup +from cache import sgm def comments_key(link_id): return 'comments_' + str(link_id) @@ -86,10 +87,12 @@ def delete_comment(comment): #nothing really to do here, atm pass -def link_comments(link_id): +def link_comments(link_id, _update=False): key = comments_key(link_id) + r = g.permacache.get(key) - if r: + + if r and not _update: return r else: with g.make_lock(lock_key(link_id)): @@ -148,14 +151,22 @@ def add_message(message): # add the message to the author's list and the recipient with g.make_lock(messages_lock_key(message.author_id)): add_message_nolock(message.author_id, message) - with g.make_lock(messages_lock_key(message.to_id)): - add_message_nolock(message.to_id, message) + if message.to_id: + with g.make_lock(messages_lock_key(message.to_id)): + add_message_nolock(message.to_id, message) + if message.sr_id: + with g.make_lock(sr_messages_lock_key(message.sr_id)): + add_sr_message_nolock(message.sr_id, message) -def add_message_nolock(user_id, message): + +def _add_message_nolock(key, message): from r2.models import Account, Message - key = messages_key(user_id) trees = g.permacache.get(key) if not trees: + # in case an empty list got written at some point, delete it to + # force a recompute + if trees is not None: + g.permacache.delete(key) # no point computing it now. We'll do it when they go to # their message page. 
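An aside on the make_key() helper introduced above and used by captcha.py: the identifier stays human-readable while the remaining arguments are flattened and hashed, so equal arguments always yield the same memcached-safe key. A small sketch; the 'link_comments' label is arbitrary and the import assumes the r2 package and its cache dependencies are available.

    from r2.lib.cache import make_key

    k1 = make_key('link_comments', 1234, sort='hot')
    k2 = make_key('link_comments', 1234, sort='hot')
    assert k1 == k2                          # deterministic for equal arguments
    assert k1.startswith('link_comments(')   # the iden stays human-readable
    # lists and dicts are flattened before hashing, so list order matters:
    assert make_key('x', [1, 2]) != make_key('x', [2, 1])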
return @@ -184,10 +195,11 @@ def add_message_nolock(user_id, message): g.permacache.set(key, trees) -def conversation(user, parent): - from r2.models import Message - trees = dict(user_messages(user)) +def add_message_nolock(user_id, message): + return _add_message_nolock(messages_key(user_id), message) +def _conversation(trees, parent): + from r2.models import Message if parent._id in trees: convo = trees[parent._id] if convo: @@ -202,42 +214,95 @@ def conversation(user, parent): data = True) return compute_message_trees([parent] + list(m)) -def user_messages(user): +def conversation(user, parent): + trees = dict(user_messages(user)) + return _conversation(trees, parent) + + +def user_messages(user, update = False): key = messages_key(user._id) trees = g.permacache.get(key) - if trees is None: + if not trees or update: trees = user_messages_nocache(user) g.permacache.set(key, trees) return trees +def _process_message_query(inbox): + if hasattr(inbox, 'prewrap_fn'): + return [inbox.prewrap_fn(i) for i in inbox] + return list(inbox) + + +def _load_messages(mlist): + from r2.models import Message + m = {} + ids = [x for x in mlist if not isinstance(x, Message)] + if ids: + m = Message._by_fullname(ids, return_dict = True, data = True) + messages = [m.get(x, x) for x in mlist] + return messages + def user_messages_nocache(user): """ Just like user_messages, but avoiding the cache """ from r2.lib.db import queries - from r2.models import Message - - inbox = queries.get_inbox_messages(user) - if hasattr(inbox, 'prewrap_fn'): - inbox = [inbox.prewrap_fn(i) for i in inbox] - else: - inbox = list(inbox) - - sent = queries.get_sent(user) - if hasattr(sent, 'prewrap_fn'): - sent = [sent.prewrap_fn(i) for i in sent] - else: - sent = list(sent) - - m = {} - ids = [x for x in chain(inbox, sent) if not isinstance(x, Message)] - if ids: - m = Message._by_fullname(ids, return_dict = True, data = True) - - messages = [m.get(x, x) for x in chain(inbox, sent)] - + inbox = _process_message_query(queries.get_inbox_messages(user)) + sent = _process_message_query(queries.get_sent(user)) + messages = _load_messages(list(chain(inbox, sent))) return compute_message_trees(messages) +def sr_messages_key(sr_id): + return 'sr_messages_conversation_' + str(sr_id) + +def sr_messages_lock_key(sr_id): + return 'sr_messages_conversation_lock_' + str(sr_id) + + +def subreddit_messages(sr, update = False): + key = sr_messages_key(sr._id) + trees = g.permacache.get(key) + if not trees or update: + trees = subreddit_messages_nocache(sr) + g.permacache.set(key, trees) + return trees + +def moderator_messages(user): + from r2.models import Subreddit + sr_ids = Subreddit.reverse_moderator_ids(user) + + def multi_load_tree(sr_ids): + srs = Subreddit._byID(sr_ids, return_dict = False) + res = {} + for sr in srs: + trees = subreddit_messages_nocache(sr) + if trees: + res[sr._id] = trees + return res + + res = sgm(g.permacache, sr_ids, miss_fn = multi_load_tree, + prefix = sr_messages_key("")) + + return sorted(chain(*res.values()), key = tree_sort_fn, reverse = True) + +def subreddit_messages_nocache(sr): + """ + Just like user_messages, but avoiding the cache + """ + from r2.lib.db import queries + inbox = _process_message_query(queries.get_subreddit_messages(sr)) + messages = _load_messages(inbox) + return compute_message_trees(messages) + + +def add_sr_message_nolock(sr_id, message): + return _add_message_nolock(sr_messages_key(sr_id), message) + +def sr_conversation(sr, parent): + trees = dict(subreddit_messages(sr)) + return 
_conversation(trees, parent) + + def compute_message_trees(messages): from r2.models import Message roots = set() diff --git a/r2/r2/lib/contrib/discount-1.6.0/COPYRIGHT b/r2/r2/lib/contrib/discount-1.6.0/COPYRIGHT new file mode 100644 index 000000000..0cf98208a --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/COPYRIGHT @@ -0,0 +1,47 @@ +->Copyright (C) 2007 David Loren Parsons. +All rights reserved.<- + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation files +(the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicence, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution, and in the same place and form as other + copyright, license and disclaimer information. + + 3. The end-user documentation included with the redistribution, if + any, must include the following acknowledgment: + + This product includes software developed by + David Loren Parsons + + in the same place and form as other third-party acknowledgments. + Alternately, this acknowledgment may appear in the software + itself, in the same form and location as other such third-party + acknowledgments. + + 4. Except as contained in this notice, the name of David Loren + Parsons shall not be used in advertising or otherwise to promote + the sale, use or other dealings in this Software without prior + written authorization from David Loren Parsons. + +THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL DAVID LOREN PARSONS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/r2/r2/lib/contrib/discount-1.6.0/CREDITS b/r2/r2/lib/contrib/discount-1.6.0/CREDITS new file mode 100644 index 000000000..3ec9d8c14 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/CREDITS @@ -0,0 +1,33 @@ +Discount is primarily my work, but it has only reached the point +where it is via contributions, critiques, and bug reports from a +host of other people, some of which are listed before. If your +name isn't on this list, please remind me + -david parsons (orc@pell.chi.il.us) + + +Josh Wood -- Plan9 support. +Mike Schiraldi -- Reddit style automatic links, MANY MANY MANY + bug reports about boundary conditions and + places where I didn't get it right. +Jjgod Jiang -- Table of contents support. +Petite Abeille -- Many bug reports about places where I didn't + get it right. 
+Tim Channon -- inspiration for the `mkd_xhtmlpage()` function +Christian Herenz-- Many bug reports regarding my implementation of + `[]()` and `![]()` +A.S.Bradbury -- Portability bug reports for 64 bit systems. +Joyent -- Loan of a solaris box so I could get discount + working under solaris. +Ryan Tomayko -- Portability requests (and the rdiscount ruby + binding.) +yidabu -- feedback on the documentation, bug reports + against utf-8 support. +Pierre Joye -- bug reports, php discount binding. +Masayoshi Sekimura- perl discount binding. +Jeremy Hinegardner- bug reports about list handling. +Andrew White -- bug reports about the format of generated urls. +Steve Huff -- bug reports about Makefile portability (for Fink) +Ignacio Burgue?o-- bug reports about `>%class%` +Henrik Nyh -- bug reports about embedded html handling. + + diff --git a/r2/r2/lib/contrib/discount-1.6.0/Csio.c b/r2/r2/lib/contrib/discount-1.6.0/Csio.c new file mode 100644 index 000000000..4358b3338 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/Csio.c @@ -0,0 +1,61 @@ +#include +#include +#include +#include "cstring.h" +#include "markdown.h" +#include "amalloc.h" + + +/* putc() into a cstring + */ +void +Csputc(int c, Cstring *iot) +{ + EXPAND(*iot) = c; +} + + +/* printf() into a cstring + */ +int +Csprintf(Cstring *iot, char *fmt, ...) +{ + va_list ptr; + int siz=100; + + do { + RESERVE(*iot, siz); + va_start(ptr, fmt); + siz = vsnprintf(T(*iot)+S(*iot), ALLOCATED(*iot)-S(*iot), fmt, ptr); + va_end(ptr); + } while ( siz > (ALLOCATED(*iot)-S(*iot)) ); + + S(*iot) += siz; + return siz; +} + + +/* write() into a cstring + */ +int +Cswrite(Cstring *iot, char *bfr, int size) +{ + RESERVE(*iot, size); + memcpy(T(*iot)+S(*iot), bfr, size); + S(*iot) += size; + return size; +} + + +/* reparse() into a cstring + */ +void +Csreparse(Cstring *iot, char *buf, int size, int flags) +{ + MMIOT f; + ___mkd_initmmiot(&f, 0); + ___mkd_reparse(buf, size, 0, &f); + ___mkd_emblock(&f); + SUFFIX(*iot, T(f.out), S(f.out)); + ___mkd_freemmiot(&f, 0); +} diff --git a/r2/r2/lib/contrib/discount-1.6.0/INSTALL b/r2/r2/lib/contrib/discount-1.6.0/INSTALL new file mode 100644 index 000000000..c591387ab --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/INSTALL @@ -0,0 +1,41 @@ + + HOW TO BUILD AND INSTALL DISCOUNT + +1) Unpacking the distribution + +The DISCOUNT sources are distributed in tarballs. After extracting from +the tarball, you should end up with all the source and build files in the +directory + discount-(version) + +2) Installing the distribution + +DISCOUNT uses configure.sh to set itself up for compilation. To run +configure, just do ``./configure.sh'' and it will check your system for +build dependencies and build makefiles for you. If configure.sh finishes +without complaint, you can then do a ``make'' to compile everything and a +``make install'' to install the binaries. + +Configure.sh has a few options that can be set: + +--src=DIR where the source lives (.) 
+--prefix=DIR where to install the final product (/usr/local) +--execdir=DIR where to put executables (prefix/bin) +--sbindir=DIR where to put static executables (prefix/sbin) +--confdir=DIR where to put configuration information (/etc) +--libdir=DIR where to put libraries (prefix/lib) +--libexecdir=DIR where to put private executables +--mandir=DIR where to put manpages +--enable-dl-tag Use the DL tag extension +--enable-pandoc-header Use pandoc-style header blocks +--enable-superscript A^B expands to AB +--enable-amalloc Use a debugging memory allocator (to detect leaks) +--relaxed-emphasis Don't treat _ in the middle of a word as emphasis +--with-tabstops=N Set tabstops to N characters (default is 4) + +3) Installing sample programs and manpages + +The standard ``make install'' rule just installs the binaries. If you +want to install the sample programs, they are installed with +``make install.samples''; to install manpages, ``make install.man''. +A shortcut to install everything is ``make install.everything'' diff --git a/r2/r2/lib/contrib/discount-1.6.0/Makefile b/r2/r2/lib/contrib/discount-1.6.0/Makefile new file mode 100644 index 000000000..d412adb6a --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/Makefile @@ -0,0 +1,96 @@ +CC=cc -I. -L. +AR=/usr/bin/ar +RANLIB=/usr/bin/ranlib + +BINDIR=/usr/local/bin +MANDIR=/usr/local/man +LIBDIR=/usr/local/lib +INCDIR=/usr/local/include + +PGMS=markdown +SAMPLE_PGMS=mkd2html makepage +SAMPLE_PGMS+= theme +MKDLIB=libmarkdown.a +OBJS=mkdio.o markdown.o dumptree.o generate.o \ + resource.o docheader.o version.o toc.o css.o \ + xml.o Csio.o xmlpage.o + +all: $(PGMS) $(SAMPLE_PGMS) + +install: $(PGMS) + /usr/bin/install -s -m 755 $(PGMS) $(DESTDIR)/$(BINDIR) + /usr/bin/install -m 444 $(MKDLIB) $(DESTDIR)/$(LIBDIR) + /usr/bin/install -m 444 mkdio.h $(DESTDIR)/$(INCDIR) + +install.everything: install install.samples install.man + +install.samples: $(SAMPLE_PGMS) install + /usr/bin/install -s -m 755 $(SAMPLE_PGMS) $(DESTDIR)/$(BINDIR) + /home/raldi/reddit/r2/r2/lib/contrib/discount-1.6.0/config.md $(DESTDIR)/$(MANDIR)/man1 + /usr/bin/install -m 444 theme.1 $(DESTDIR)/$(MANDIR)/man1 + +install.man: + /home/raldi/reddit/r2/r2/lib/contrib/discount-1.6.0/config.md $(DESTDIR)/$(MANDIR)/man3 + /usr/bin/install -m 444 mkd-functions.3 markdown.3 mkd-line.3 $(DESTDIR)/$(MANDIR)/man3 + for x in mkd_line mkd_generateline; do \ + ( echo '.\"' ; echo ".so man3/mkd-line.3" ) > $(DESTDIR)/$(MANDIR)/man3/$$x.3;\ + done + for x in mkd_in mkd_string; do \ + ( echo '.\"' ; echo ".so man3/markdown.3" ) > $(DESTDIR)/$(MANDIR)/man3/$$x.3;\ + done + for x in mkd_compile mkd_css mkd_generatecss mkd_generatehtml mkd_cleanup mkd_doc_title mkd_doc_author mkd_doc_date; do \ + ( echo '.\"' ; echo ".so man3/mkd-functions.3" ) > $(DESTDIR)/$(MANDIR)/man3/$$x.3; \ + done + /home/raldi/reddit/r2/r2/lib/contrib/discount-1.6.0/config.md $(DESTDIR)/$(MANDIR)/man7 + /usr/bin/install -m 444 markdown.7 mkd-extensions.7 $(DESTDIR)/$(MANDIR)/man7 + /home/raldi/reddit/r2/r2/lib/contrib/discount-1.6.0/config.md $(DESTDIR)/$(MANDIR)/man1 + /usr/bin/install -m 444 markdown.1 $(DESTDIR)/$(MANDIR)/man1 + +install.everything: install install.man + +version.o: version.c VERSION + $(CC) -DVERSION=\"`cat VERSION`\" -c version.c + +markdown: main.o $(MKDLIB) + $(CC) -o markdown main.o -lmarkdown + +# example programs +theme: theme.o $(MKDLIB) mkdio.h + $(CC) -o theme theme.o -lmarkdown + + +mkd2html: mkd2html.o $(MKDLIB) mkdio.h + $(CC) -o mkd2html mkd2html.o -lmarkdown + +makepage: makepage.c 
$(MKDLIB) mkdio.h + $(CC) -o makepage makepage.c -lmarkdown + +main.o: main.c mkdio.h config.h + $(CC) -I. -c main.c + +$(MKDLIB): $(OBJS) + $(AR) crv $(MKDLIB) $(OBJS) + $(RANLIB) $(MKDLIB) + +test: $(PGMS) echo cols + @for x in tests/*.t; do \ + sh $$x || exit 1; \ + done + +cols: tools/cols.c + $(CC) -o cols tools/cols.c +echo: tools/echo.c + $(CC) -o echo tools/echo.c + +clean: + rm -f $(PGMS) $(SAMPLE_PGMS) *.o $(MKDLIB) + +distclean spotless: clean + rm -f Makefile version.c markdown.1 config.cmd config.sub config.h config.mak config.log config.md + +markdown.o: markdown.c config.h cstring.h markdown.h +generate.o: generate.c config.h cstring.h markdown.h +dumptree.o: dumptree.c cstring.h markdown.h +mkdio.o: mkdio.c mkdio.h cstring.h config.h +xmlpage.o: xmlpage.c mkdio.h cstring.h config.h +toc.o: toc.c mkdio.h cstring.h config.h diff --git a/r2/r2/lib/contrib/discount-1.6.0/Makefile.in b/r2/r2/lib/contrib/discount-1.6.0/Makefile.in new file mode 100644 index 000000000..9737e579a --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/Makefile.in @@ -0,0 +1,96 @@ +CC=@CC@ -I. -L. +AR=@AR@ +RANLIB=@RANLIB@ + +BINDIR=@exedir@ +MANDIR=@mandir@ +LIBDIR=@libdir@ +INCDIR=@prefix@/include + +PGMS=markdown +SAMPLE_PGMS=mkd2html makepage +@THEME@SAMPLE_PGMS+= theme +MKDLIB=libmarkdown.a +OBJS=mkdio.o markdown.o dumptree.o generate.o \ + resource.o docheader.o version.o toc.o css.o \ + xml.o Csio.o xmlpage.o @AMALLOC@ + +all: $(PGMS) $(SAMPLE_PGMS) + +install: $(PGMS) + @INSTALL_PROGRAM@ $(PGMS) $(DESTDIR)/$(BINDIR) + @INSTALL_DATA@ $(MKDLIB) $(DESTDIR)/$(LIBDIR) + @INSTALL_DATA@ mkdio.h $(DESTDIR)/$(INCDIR) + +install.everything: install install.samples install.man + +install.samples: $(SAMPLE_PGMS) install + @INSTALL_PROGRAM@ $(SAMPLE_PGMS) $(DESTDIR)/$(BINDIR) + @INSTALL_DIR@ $(DESTDIR)/$(MANDIR)/man1 + @INSTALL_DATA@ theme.1 $(DESTDIR)/$(MANDIR)/man1 + +install.man: + @INSTALL_DIR@ $(DESTDIR)/$(MANDIR)/man3 + @INSTALL_DATA@ mkd-functions.3 markdown.3 mkd-line.3 $(DESTDIR)/$(MANDIR)/man3 + for x in mkd_line mkd_generateline; do \ + ( echo '.\"' ; echo ".so man3/mkd-line.3" ) > $(DESTDIR)/$(MANDIR)/man3/$$x.3;\ + done + for x in mkd_in mkd_string; do \ + ( echo '.\"' ; echo ".so man3/markdown.3" ) > $(DESTDIR)/$(MANDIR)/man3/$$x.3;\ + done + for x in mkd_compile mkd_css mkd_generatecss mkd_generatehtml mkd_cleanup mkd_doc_title mkd_doc_author mkd_doc_date; do \ + ( echo '.\"' ; echo ".so man3/mkd-functions.3" ) > $(DESTDIR)/$(MANDIR)/man3/$$x.3; \ + done + @INSTALL_DIR@ $(DESTDIR)/$(MANDIR)/man7 + @INSTALL_DATA@ markdown.7 mkd-extensions.7 $(DESTDIR)/$(MANDIR)/man7 + @INSTALL_DIR@ $(DESTDIR)/$(MANDIR)/man1 + @INSTALL_DATA@ markdown.1 $(DESTDIR)/$(MANDIR)/man1 + +install.everything: install install.man + +version.o: version.c VERSION + $(CC) -DVERSION=\"`cat VERSION`\" -c version.c + +markdown: main.o $(MKDLIB) + $(CC) -o markdown main.o -lmarkdown @LIBS@ + +# example programs +@THEME@theme: theme.o $(MKDLIB) mkdio.h +@THEME@ $(CC) -o theme theme.o -lmarkdown @LIBS@ + + +mkd2html: mkd2html.o $(MKDLIB) mkdio.h + $(CC) -o mkd2html mkd2html.o -lmarkdown @LIBS@ + +makepage: makepage.c $(MKDLIB) mkdio.h + $(CC) -o makepage makepage.c -lmarkdown @LIBS@ + +main.o: main.c mkdio.h config.h + $(CC) -I. 
-c main.c + +$(MKDLIB): $(OBJS) + $(AR) crv $(MKDLIB) $(OBJS) + $(RANLIB) $(MKDLIB) + +test: $(PGMS) echo cols + @for x in tests/*.t; do \ + sh $$x || exit 1; \ + done + +cols: tools/cols.c + $(CC) -o cols tools/cols.c +echo: tools/echo.c + $(CC) -o echo tools/echo.c + +clean: + rm -f $(PGMS) $(SAMPLE_PGMS) *.o $(MKDLIB) + +distclean spotless: clean + rm -f @GENERATED_FILES@ @CONFIGURE_FILES@ + +markdown.o: markdown.c config.h cstring.h markdown.h +generate.o: generate.c config.h cstring.h markdown.h +dumptree.o: dumptree.c cstring.h markdown.h +mkdio.o: mkdio.c mkdio.h cstring.h config.h +xmlpage.o: xmlpage.c mkdio.h cstring.h config.h +toc.o: toc.c mkdio.h cstring.h config.h diff --git a/r2/r2/lib/contrib/discount-1.6.0/Plan9/README b/r2/r2/lib/contrib/discount-1.6.0/Plan9/README new file mode 100644 index 000000000..255b212ab --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/Plan9/README @@ -0,0 +1,40 @@ +% Discount on Plan 9 +% Josh Wood +% 2009-06-12 + +# *Discount* Markdown compiler on Plan 9 + +## Build + + % CONFIG='--enable-all-features' mk config + % mk install + % markdown -V + markdown: discount X.Y.Z DL_TAG HEADER DEBUG SUPERSCRIPT RELAXED DIV + +`--enable-all-features` may be replaced by zero or more of: + + --enable-dl-tag Use the DL tag extension + --enable-pandoc-header Use pandoc-style header blocks + --enable-superscript A^B becomes AB + --enable-amalloc Enable memory allocation debugging + --relaxed-emphasis underscores aren't special in the middle of words + --with-tabstops=N Set tabstops to N characters (default is 4) + --enable-div Enable >%id% divisions + --enable-alpha-list Enable (a)/(b)/(c) lists + --enable-all-features Turn on all stable optional features + +## Notes + +The supplied mkfile merely drives Discount's own configure script and +then APE's *psh* environment to build the Discount source, then copies +the result(s) to locations appropriate for system-wide use on Plan 9. +There are a few other *mk*(1) targets: + +`install.libs`: Discount includes a C library and header. +Installation is optional. Plan 9 binaries are statically linked. + +`install.man`: Add manual pages for markdown(1) and (6). + +`install.progs`: Extra programs. *makepage* writes complete XHTML +documents, rather than fragments. *mkd2html* is similar, but produces +HTML. diff --git a/r2/r2/lib/contrib/discount-1.6.0/Plan9/markdown.1 b/r2/r2/lib/contrib/discount-1.6.0/Plan9/markdown.1 new file mode 100644 index 000000000..b38947f92 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/Plan9/markdown.1 @@ -0,0 +1,169 @@ +.TH MARKDOWN 1 +.SH NAME +markdown \- convert Markdown text to HTML +.SH SYNOPSIS +.B markdown +[ +.B -dTV +] +[ +.BI -b " url-base +] +[ +.BI -F " bitmap +] +[ +.BI -f " flags +] +[ +.BI -o " ofile +] +[ +.BI -s " text +] +[ +.BI -t " text +] +[ +.I file +] +.SH DESCRIPTION +The +.I markdown +utility reads the +.IR Markdown (6)-formatted +.I file +(or standard input) and writes its +.SM HTML +fragment representation on standard output. +.PP +The options are: +.TF dfdoptions +.TP +.BI -b " url-base +Links in source begining with +.B / +will be prefixed with +.I url-base +in the output. +.TP +.B -d +Instead of printing an +.SM HTML +fragment, print a parse tree. +.TP +.BI -F " bitmap +Set translation flags. +.I Bitmap +is a bit map of the various configuration options described in +.IR markdown (2). +.TP +.BI -f " flags +Set or clear various translation +.IR flags , +described below. 
+.I Flags +are in a comma-delimited list, with an optional +.B + +(set) prefix on each flag. +.TP +.BI -o " ofile +Write the generated +.SM HTML +to +.IR ofile . +.TP +.BI -s " text +Use the +.IR markdown (2) +function to format the +.I text +on standard input. +.TP +.B -T +Under +.B -f +.BR toc , +print the table of contents as an unordered list before the usual +.SM HTML +output. +.TP +.BI -t " text +Use +.IR mkd_text +(in +.IR markdown (2)) +to format +.I text +instead of processing standard input with +.IR markdown . +.TP +.B -V +Show version number and configuration. If the version includes the string +.BR DL_TAG , +.I markdown +was configured with definition list support. If the version includes the string +.BR HEADER , +.I markdown +was configured to support pandoc header blocks. +.PD +.SS TRANSLATION FLAGS +The translation flags understood by +.B -f +are: +.TF \ noheader +.TP +.B noimage +Don't allow image tags. +.TP +.B nolinks +Don't allow links. +.TP +.B nohtml +Don't allow any embedded HTML. +.TP +.B cdata +Generate valid XML output. +.TP +.B noheader +Do not process pandoc headers. +.TP +.B notables +Do not process the syntax extension for tables. +.TP +.B tabstops +Use Markdown-standard 4-space tabstops. +.TP +.B strict +Disable superscript and relaxed emphasis. +.TP +.B relax +Enable superscript and relaxed emphasis (the default). +.TP +.B toc +Enable table of contents support, generated from headings (in +.IR markdown (6)) +in the source. +.TP +.B 1.0 +Revert to Markdown 1.0 compatibility. +.PD +.PP +For example, +.B -f nolinks,quot +tells +.I markdown +not to allow +.B +tags, and to expand double quotes. +.SH SOURCE +.B /sys/src/cmd/discount +.SH SEE ALSO +.IR markdown (2), +.IR markdown (6) +.PP +http://daringfireball.net/projects/markdown/, +``Markdown''. +.SH DIAGNOSTICS +.I Markdown +exits 0 on success and >0 if an error occurs. diff --git a/r2/r2/lib/contrib/discount-1.6.0/Plan9/markdown.2 b/r2/r2/lib/contrib/discount-1.6.0/Plan9/markdown.2 new file mode 100644 index 000000000..9cb1c9d4e --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/Plan9/markdown.2 @@ -0,0 +1,332 @@ +.TH MARKDOWN 2 +.SH NAME +mkd_in, mkd_string, markdown, mkd_compile, mkd_css, mkd_generatecss, +mkd_document, mkd_generatehtml, mkd_xhtmlpage, mkd_toc, mkd_generatetoc, +mkd_cleanup, mkd_doc_title, mkd_doc_author, mkd_doc_date, mkd_line, +mkd_generateline \- convert Markdown text to HTML +.SH SYNOPSIS +.ta \w'MMIOT* 'u +.B #include +.PP +.B +MMIOT* mkd_in(FILE *input, int flags) +.PP +.B +MMIOT* mkd_string(char *buf, int size, int flags) +.PP +.B +int markdown(MMIOT *doc, FILE *output, int flags) +.PP +.B +int mkd_compile(MMIOT *document, int flags) +.PP +.B +int mkd_css(MMIOT *document, char **doc) +.PP +.B +int mkd_generatecss(MMIOT *document, FILE *output) +.PP +.B +int mkd_document(MMIOT *document, char **doc) +.PP +.B +int mkd_generatehtml(MMIOT *document, FILE *output) +.PP +.B +int mkd_xhtmlpage(MMIOT *document, int flags, FILE *output) +.PP +.B +int mkd_toc(MMIOT *document, char **doc) +.PP +.B +int mkd_generatetoc(MMIOT *document, FILE *output) +.PP +.B +void mkd_cleanup(MMIOT*); +.PP +.B +char* mkd_doc_title(MMIOT*) +.PP +.B +char* mkd_doc_author(MMIOT*) +.PP +.B +char* mkd_doc_date(MMIOT*) +.PP +.B +int mkd_line(char *string, int size, char **doc, int flags) +.PP +.B +int mkd_generateline(char *string, int size, FILE *output, int flags) +.PD +.PP +.SH DESCRIPTION +These functions convert +.IR Markdown (6) +text into +.SM HTML +markup. 
+.PP +.I Mkd_in +reads the text referenced by pointer to +.B FILE +.I input +and returns a pointer to an +.B MMIOT +structure of the form expected by +.I markdown +and the other converters. +.I Mkd_string +accepts one +.I string +and returns a pointer to +.BR MMIOT . +.PP +After such preparation, +.I markdown +converts +.I doc +and writes the result to +.IR output , +while +.I mkd_compile +transforms +.I document +in-place. +.PP +One or more of the following +.I flags +(combined with +.BR OR ) +control +.IR markdown 's +processing of +.IR doc : +.TF MKD_NOIMAGE +.TP +.B MKD_NOIMAGE +Do not process +.B ![] +and remove +.B +tags from the output. +.TP +.B MKD_NOLINKS +Do not process +.B [] +and remove +.B +tags from the output. +.TP +.B MKD_NOPANTS +Suppress Smartypants-style replacement of quotes, dashes, or ellipses. +.TP +.B MKD_STRICT +Disable superscript and relaxed emphasis processing if configured; otherwise a no-op. +.TP +.B MKD_TAGTEXT +Process as inside an +.SM HTML +tag: no +.BR , +no +.BR , +no +.SM HTML +or +.B [] +expansion. +.TP +.B MKD_NO_EXT +Don't process pseudo-protocols (in +.IR markdown (6)). +.TP +.B MKD_CDATA +Generate code for +.SM XML +.B ![CDATA[...]] +element. +.TP +.B MKD_NOHEADER +Don't process Pandoc-style headers. +.TP +.B MKD_TABSTOP +When reading documents, expand tabs to 4 spaces, overriding any compile-time configuration. +.TP +.B MKD_TOC +Label headings for use with the +.I mkd_generatetoc +and +.I mkd_toc +functions. +.TP +.B MKD_1_COMPAT +MarkdownTest_1.0 compatibility. Trim trailing spaces from first line of code blocks and disable implicit reference links (in +.IR markdown (6)). +.TP +.B MKD_AUTOLINK +Greedy +.SM URL +generation. When set, any +.SM URL +is converted to a hyperlink, even those not encased in +.BR <> . +.TP +.B MKD_SAFELINK +Don't make hyperlinks from +.B [][] +links that have unknown +.SM URL +protocol types. +.TP +.B MKD_NOTABLES +Do not process the syntax extension for tables (in +.IR markdown (6)). +.TP +.B MKD_EMBED +All of +.BR MKD_NOLINKS , +.BR MKD_NOIMAGE , +and +.BR MKD_TAGTEXT . +.PD +.PP +This implementation supports +Pandoc-style +headers and inline +.SM CSS +.B +at the end of the line or at the beginning of a subsequent line. +.IP +Style blocks apply to the entire document regardless of where they are defined. +.TP +Image Dimensions +Image specification has been extended with an argument describing image dimensions: +.BI = height x width. +For an image 400 pixels high and 300 wide, the new syntax is: +.IP +.EX + ![Alt text](/path/to/image.jpg =400x300 "Title") +.EE +.TP +Pseudo-Protocols +Pseudo-protocols that may replace the common +.B http: +or +.B mailto: +have been added to the link syntax described above. +.IP +.BR abbr : +Text following is used as the +.B title +attribute of an +.B abbr +tag wrapping the link text. So +.B [LT](abbr:Link Text) +gives +.B LT. +.IP +.BR id : +The link text is marked up and written to the output, wrapped with +.B +and +.BR . +.IP +.BR class : + The link text is marked up and written to the output, wrapped with +.B +and +.BR . +.IP +.BR raw : +Text following is written to the output with no further processing. +The link text is discarded. +.TP +Alphabetic Lists +If +.I markdown +was configured with +.BR --enable-alpha-list , +.IP +.EX +a. this +b. is +c. an alphabetic +d. list +.EE +.IP +yields an +.SM HTML +.B ol +ordered list. +.TP +Definition Lists +If configured with +.BR --enable-dl-tag , +markup for definition lists is enabled. 
A definition list item is defined as +.IP +.EX +=term= + definition +.EE +.TP +Tables +Tables are specified with a pipe +.RB ( | ) +and dash +.RB ( - ) +marking. The markdown text +.IP +.EX +header0|header1 +-------|------- + textA|textB + textC|textD +.EE +.IP +will produce an +.SM HTML +.B table +of two columns and three rows. +A header row is designated by ``underlining'' with dashes. +Declare a column's alignment by affixing a colon +.RB ( : ) +to the left or right end of the dashes underlining its header. +In the output, this +yields the corresponding value for the +.B align +attribute on each +.B td +cell in the column. +A colon at both ends of a column's header dashes indicates center alignment. +.TP +Relaxed Emphasis +If configured with +.BR --relaxed-emphasis , +the rules for emphasis are changed so that a single +.B _ +will not count as an emphasis character in the middle of a word. +This is useful for documenting some code where +.B _ +appears frequently, and would normally require a backslash escape. +.PD +.SH SEE ALSO +.IR markdown (1), +.IR markdown (2) +.PP +http://daringfireball.net/projects/markdown/syntax/, +``Markdown: Syntax''. +.PP +http://daringfireball.net/projects/smartypants/, +``Smarty Pants''. +.PP +http://michelf.com/projects/php-markdown/extra/#table, +``PHP Markdown Extra: Tables''. diff --git a/r2/r2/lib/contrib/discount-1.6.0/Plan9/mkfile b/r2/r2/lib/contrib/discount-1.6.0/Plan9/mkfile new file mode 100644 index 000000000..189d7e928 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/Plan9/mkfile @@ -0,0 +1,37 @@ +BIN=/$objtype/bin +CC='cc -D_BSD_EXTENSION' + +markdown: + ape/psh -c 'cd .. && make' + +none:V: markdown + +test: markdown + ape/psh -c 'cd ..&& make test' + +install: markdown + cp ../markdown $BIN/markdown + +install.progs: install + cp ../makepage $BIN/makepage + cp ../mkd2html $BIN/mkd2html + +install.libs: install + cp ../mkdio.h /sys/include/ape/mkdio.h + cp ../libmarkdown.a /$objtype/lib/ape/libmarkdown.a + +install.man: install + cp markdown.1 /sys/man/1/markdown + cp markdown.2 /sys/man/2/markdown + cp markdown.6 /sys/man/6/markdown + +installall:V: install.libs install.man install.progs + +config: + ape/psh -c 'cd .. && ./configure.sh $CONFIG' + +clean: + ape/psh -c 'cd .. && make clean' + +nuke: + ape/psh -c 'cd .. && make distclean' diff --git a/r2/r2/lib/contrib/discount-1.6.0/README b/r2/r2/lib/contrib/discount-1.6.0/README new file mode 100644 index 000000000..84d38e9eb --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/README @@ -0,0 +1,16 @@ +DISCOUNT is a implementation of John Gruber's Markdown markup +language. It implements, as far as I can tell, all of the +language as described in + +and passes the Markdown test suite at + + +DISCOUNT is free software written by David Parsons ; +it is released under a BSD-style license that allows you to do +as you wish with it as long as you don't attempt to claim it as +your own work. + +Most of the programs included in the DISCOUNT distribution have +manual pages describing how they work. 
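The markdown(2) page above documents the same call sequence that reddit-discount-wrapper.c performs: build an MMIOT with mkd_string, run mkd_compile, pull the HTML out with mkd_document, and release it with mkd_cleanup. A hedged Python/ctypes sketch of that sequence follows; it assumes libmarkdown has been built as a shared object (the Makefile above only produces the static libmarkdown.a) and passes 0 for the flag bits, whose numeric values are not listed here.

    from ctypes import cdll, c_void_p, byref, string_at

    libmarkdown = cdll.LoadLibrary('libmarkdown.so')   # hypothetical shared build
    libmarkdown.mkd_string.restype = c_void_p          # MMIOT* must not be truncated

    text = '# Hello\n\nSome *Markdown* text.\n'
    doc = c_void_p(libmarkdown.mkd_string(text, len(text), 0))
    libmarkdown.mkd_compile(doc, 0)

    html = c_void_p()
    size = libmarkdown.mkd_document(doc, byref(html))
    print string_at(html, size)
    libmarkdown.mkd_cleanup(doc)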
+ +The file INSTALL describes how to build and install discount diff --git a/r2/r2/lib/contrib/discount-1.6.0/VERSION b/r2/r2/lib/contrib/discount-1.6.0/VERSION new file mode 100644 index 000000000..9c6d6293b --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/VERSION @@ -0,0 +1 @@ +1.6.1 diff --git a/r2/r2/lib/contrib/discount-1.6.0/amalloc.c b/r2/r2/lib/contrib/discount-1.6.0/amalloc.c new file mode 100644 index 000000000..d0e17fba5 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/amalloc.c @@ -0,0 +1,111 @@ +/* + * debugging malloc()/realloc()/calloc()/free() that attempts + * to keep track of just what's been allocated today. + */ + +#include +#include + +#define MAGIC 0x1f2e3d4c + +struct alist { int magic, size; struct alist *next, *last; }; + +static struct alist list = { 0, 0, 0, 0 }; + +static int mallocs=0; +static int reallocs=0; +static int frees=0; + +void * +acalloc(int size, int count) +{ + struct alist *ret = calloc(size + sizeof(struct alist), count); + + if ( ret ) { + ret->magic = MAGIC; + ret->size = size * count; + if ( list.next ) { + ret->next = list.next; + ret->last = &list; + ret->next->last = ret; + list.next = ret; + } + else { + ret->last = ret->next = &list; + list.next = list.last = ret; + } + ++mallocs; + return ret+1; + } + return 0; +} + + +void* +amalloc(int size) +{ + return acalloc(size,1); +} + + +void +afree(void *ptr) +{ + struct alist *p2 = ((struct alist*)ptr)-1; + + if ( p2->magic == MAGIC ) { + p2->last->next = p2->next; + p2->next->last = p2->last; + ++frees; + free(p2); + } + else + free(ptr); +} + + +void * +arealloc(void *ptr, int size) +{ + struct alist *p2 = ((struct alist*)ptr)-1; + struct alist save; + + if ( p2->magic == MAGIC ) { + save.next = p2->next; + save.last = p2->last; + p2 = realloc(p2, sizeof(*p2) + size); + + if ( p2 ) { + p2->size = size; + p2->next->last = p2; + p2->last->next = p2; + ++reallocs; + return p2+1; + } + else { + save.next->last = save.last; + save.last->next = save.next; + return 0; + } + } + return realloc(ptr, size); +} + + +void +adump() +{ + struct alist *p; + + + for ( p = list.next; p && (p != &list); p = p->next ) { + fprintf(stderr, "allocated: %d byte%s\n", p->size, (p->size==1) ? "" : "s"); + fprintf(stderr, " [%.*s]\n", p->size, p+1); + } + + if ( getenv("AMALLOC_STATISTICS") ) { + fprintf(stderr, "%d malloc%s\n", mallocs, (mallocs==1)?"":"s"); + fprintf(stderr, "%d realloc%s\n", reallocs, (reallocs==1)?"":"s"); + fprintf(stderr, "%d free%s\n", frees, (frees==1)?"":"s"); + } +} diff --git a/r2/r2/lib/contrib/discount-1.6.0/amalloc.h b/r2/r2/lib/contrib/discount-1.6.0/amalloc.h new file mode 100644 index 000000000..43ca98586 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/amalloc.h @@ -0,0 +1,29 @@ +/* + * debugging malloc()/realloc()/calloc()/free() that attempts + * to keep track of just what's been allocated today. + */ +#ifndef AMALLOC_D +#define AMALLOC_D + +#include "config.h" + +#ifdef USE_AMALLOC + +extern void *amalloc(int); +extern void *acalloc(int,int); +extern void *arealloc(void*,int); +extern void afree(void*); +extern void adump(); + +#define malloc amalloc +#define calloc acalloc +#define realloc arealloc +#define free afree + +#else + +#define adump() (void)1 + +#endif + +#endif/*AMALLOC_D*/ diff --git a/r2/r2/lib/contrib/discount-1.6.0/config.cmd b/r2/r2/lib/contrib/discount-1.6.0/config.cmd new file mode 100755 index 000000000..ed15bbbbd --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/config.cmd @@ -0,0 +1,2 @@ +#! 
/bin/sh + ./configure.sh diff --git a/r2/r2/lib/contrib/discount-1.6.0/config.h b/r2/r2/lib/contrib/discount-1.6.0/config.h new file mode 100644 index 000000000..7ffdf5fab --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/config.h @@ -0,0 +1,29 @@ +/* + * configuration for markdown, generated Fri Jan 29 13:52:23 PST 2010 + * by raldi@zork + */ +#ifndef __AC_MARKDOWN_D +#define __AC_MARKDOWN_D 1 + + +#define OS_LINUX 1 +#define DWORD unsigned long +#define WORD unsigned short +#define BYTE unsigned char +#define HAVE_BASENAME 1 +#define HAVE_LIBGEN_H 1 +#define HAVE_PWD_H 1 +#define HAVE_GETPWUID 1 +#define HAVE_SRANDOM 1 +#define INITRNG(x) srandom((unsigned int)x) +#define HAVE_BZERO 1 +#define HAVE_RANDOM 1 +#define COINTOSS() (random()&1) +#define HAVE_STRCASECMP 1 +#define HAVE_STRNCASECMP 1 +#define HAVE_FCHDIR 1 +#define TABSTOP 4 +#define HAVE_MALLOC_H 1 +#define PATH_SED "/bin/sed" + +#endif/* __AC_MARKDOWN_D */ diff --git a/r2/r2/lib/contrib/discount-1.6.0/config.log b/r2/r2/lib/contrib/discount-1.6.0/config.log new file mode 100644 index 000000000..9b3c03dff --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/config.log @@ -0,0 +1,31 @@ +[echo -n] works +Configuring for [markdown] +Looking for cpp +CPP=[/lib/cpp], CPPFLAGS=[] +looking for install + (/usr/bin/install) +checking out the C compiler +checking for "volatile" keyword +checking for "const" keyword +defining WORD & DWORD scalar types +/tmp/pd18169.c: In function 'main': +/tmp/pd18169.c:13: warning: incompatible implicit declaration of built-in function 'exit' +/tmp/ngc18169.c: In function 'main': +/tmp/ngc18169.c:5: warning: initialization makes pointer from integer without a cast +/tmp/ngc18169.c:6: warning: initialization makes pointer from integer without a cast +looking for header libgen.h +looking for header pwd.h +looking for the getpwuid function +looking for the srandom function +looking for the bzero function +/tmp/ngc18169.c: In function 'main': +/tmp/ngc18169.c:4: warning: incompatible implicit declaration of built-in function 'bzero' +looking for the random function +looking for the strcasecmp function +looking for the strncasecmp function +looking for the fchdir function +looking for header malloc.h +sed is /bin/sed +generating Makefile +generating version.c +generating markdown.1 diff --git a/r2/r2/lib/contrib/discount-1.6.0/config.mak b/r2/r2/lib/contrib/discount-1.6.0/config.mak new file mode 100644 index 000000000..52cf9be61 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/config.mak @@ -0,0 +1 @@ +HAVE_SED = 1 diff --git a/r2/r2/lib/contrib/discount-1.6.0/config.md b/r2/r2/lib/contrib/discount-1.6.0/config.md new file mode 100755 index 000000000..226af3738 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/config.md @@ -0,0 +1,5 @@ +#! 
/bin/sh +# script generated Fri Jan 29 13:52:24 PST 2010 by configure.sh + +test -d "$1" || mkdir -p "$1" +exit 0 diff --git a/r2/r2/lib/contrib/discount-1.6.0/config.sub b/r2/r2/lib/contrib/discount-1.6.0/config.sub new file mode 100644 index 000000000..7bd1f9a90 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/config.sub @@ -0,0 +1,27 @@ +s;@CPP@;/lib/cpp;g +s;@CPPFLAGS@;;g +s;@INSTALL@;/usr/bin/install;g +s;@INSTALL_PROGRAM@;/usr/bin/install -s -m 755;g +s;@INSTALL_DATA@;/usr/bin/install -m 444;g +s;@INSTALL_DIR@;/home/raldi/reddit/r2/r2/lib/contrib/discount-1.6.0/config.md;g +s;@CC@;cc;g +s;@AR@;/usr/bin/ar;g +s;@RANLIB@;/usr/bin/ranlib;g +s;@THEME@;;g +s;@TABSTOP@;4;g +s;@AMALLOC@;;g +s;@STRICT@;.\";g +s;@LIBS@;;g +s;@CONFIGURE_FILES@;config.cmd config.sub config.h config.mak config.log config.md;g +s;@GENERATED_FILES@;Makefile version.c markdown.1;g +s;@CFLAGS@;-g;g +s;@LDFLAGS@;-g;g +s;@srcdir@;/home/raldi/reddit/r2/r2/lib/contrib/discount-1.6.0;g +s;@prefix@;/usr/local;g +s;@exedir@;/usr/local/bin;g +s;@sbindir@;/usr/local/sbin;g +s;@libdir@;/usr/local/lib;g +s;@libexec@;/usr/local/lib;g +s;@confdir@;/etc;g +s;@mandir@;/usr/local/man;g +s;@SED@;/bin/sed;g diff --git a/r2/r2/lib/contrib/discount-1.6.0/configure.inc b/r2/r2/lib/contrib/discount-1.6.0/configure.inc new file mode 100755 index 000000000..705128bda --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/configure.inc @@ -0,0 +1,1465 @@ +# @(#) configure.inc 1.42@(#) +# Copyright (c) 1999-2007 David Parsons. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# 3. My name may not be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY DAVID PARSONS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAVID +# PARSONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. +# + + +# +# this preamble code is executed when this file is sourced and it picks +# interesting things off the command line. +# +ac_default_path="/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/bin:/usr/X11R6/bin" + +ac_standard="--src=DIR where the source lives (.) 
+--prefix=DIR where to install the final product (/usr/local) +--execdir=DIR where to put executables (prefix/bin) +--sbindir=DIR where to put static executables (prefix/sbin) +--confdir=DIR where to put configuration information (/etc) +--libdir=DIR where to put libraries (prefix/lib) +--libexecdir=DIR where to put private executables +--mandir=DIR where to put manpages" + +__fail=exit + +if dirname B/A 2>/dev/null >/dev/null; then +__ac_dirname() { + dirname "$1" +} +else +__ac_dirname() { + echo "$1" | sed -e 's:/[^/]*$::' +} +fi + +ac_progname=$0 +ac_configure_command= +Q=\' +for x in "$@"; do + ac_configure_command="$ac_configure_command $Q$x$Q" +done +# ac_configure_command="$*" + +__d=`__ac_dirname "$ac_progname"` +if [ "$__d" = "$ac_progname" ]; then + AC_SRCDIR=`pwd` +else + AC_SRCDIR=`cd $__d;pwd` +fi + +__ac_dir() { + if test -d "$1"; then + (cd "$1";pwd) + else + echo "$1"; + fi +} + +# +# echo w/o newline +# +echononl() +{ + ${ac_echo:-echo} "${@}$ac_echo_nonl" +} + +# +# log something to the terminal and to a logfile. +# +LOG () { + echo "$@" + echo "$@" 1>&5 +} + +# +# log something to the terminal without a newline, and to a logfile with +# a newline +# +LOGN () { + echononl "$@" 1>&5 + echo "$@" +} + +# +# log something to the terminal +# +TLOG () { + echo "$@" 1>&5 +} + +# +# log something to the terminal, no newline +# +TLOGN () { + echononl "$@" 1>&5 +} + + +# +# AC_CONTINUE tells configure not to bomb if something fails, but to +# continue blithely along +# +AC_CONTINUE () { + __fail="return" +} + + +# +# generate a .o file from sources +# +__MAKEDOTO() { + AC_PROG_CC + + if $AC_CC -c -o /tmp/doto$$.o "$@" $AC_LIBS 2>/tmp/doto$$.err; then + rm -f /tmp/doto$$.o /tmp/doto$$.err + TLOG " (found)" + return 0 + fi + rm -f /tmp/doto$$.o + TLOG " (not found)" + echo "test failed: command was $AC_CC -c -o /tmp/doto$$.o" "$@" $AC_LIBS + echo "output:" + cat /tmp/doto$$.err + rm -f /tmp/doto$$.err + echo "offending sources:" + for x in "$@"; do + echo "$x:" + cat $x + done + return 1 +} + + +# +# Emulate gnu autoconf's AC_CHECK_HEADERS() function +# +AC_CHECK_HEADERS () { + + echo "/* AC_CHECK_HEADERS */" > /tmp/ngc$$.c + for hdr in $*; do + echo "#include <$hdr>" >> /tmp/ngc$$.c + done + echo "main() { }" >> /tmp/ngc$$.c + + LOGN "looking for header $hdr" + + if __MAKEDOTO /tmp/ngc$$.c; then + AC_DEFINE 'HAVE_'`echo $hdr | $AC_UPPERCASE | tr './' '_'` 1 + rc=0 + else + rc=1 + fi + rm -f /tmp/ngc$$.c + return $rc +} + + +# +# emulate GNU autoconf's AC_CHECK_FUNCS function +# +AC_CHECK_FUNCS () { + AC_PROG_CC + + B=`echo "$1" | sed -e 's/(.*)//'` + + case "$B" in + "$1") F="$1()" ;; + *) F="$1" ;; + esac + + shift + rm -f /tmp/ngc$$.c + + while [ "$1" ]; do + echo "#include <$1>" >> /tmp/ngc$$.c + shift + done + + cat >> /tmp/ngc$$.c << EOF +main() +{ + + $F; +} +EOF + + LOGN "looking for the $B function" + + if $AC_CC -o /tmp/ngc$$ /tmp/ngc$$.c $LIBS; then + AC_DEFINE `echo ${2:-HAVE_$B} | $AC_UPPERCASE` 1 + TLOG " (found)" + rc=0 + else + echo "offending command was:" + cat /tmp/ngc$$.c + echo "$AC_CC -o /tmp/ngc$$ /tmp/ngc$$.c $LIBS" + TLOG " (not found)" + rc=1 + fi + rm -f /tmp/ngc$$.c /tmp/ngc$$ + return $rc +} + + +# +# check to see if some structure exists +# +# usage: AC_CHECK_STRUCT structure {include ...} +# +AC_CHECK_STRUCT () { + struct=$1 + shift + + rm -f /tmp/ngc$$.c + + for include in $*; do + echo "#include <$include>" >> /tmp/ngc$$.c + done + + cat >> /tmp/ngc$$.c << EOF +main() +{ + struct $struct foo; +} +EOF + + LOGN "looking for struct $struct" + + if 
__MAKEDOTO /tmp/ngc$$.c; then + AC_DEFINE HAVE_STRUCT_`echo ${struct} | $AC_UPPERCASE` + rc=0 + else + rc=1 + fi + rm -f /tmp/ngc$$.c + return $rc +} + + +# +# check to see if some type exists +# +# usage: AC_CHECK_TYPE type {include ...} +# +AC_CHECK_TYPE () { + type=$1 + shift + + rm -f /tmp/ngc$$.c + + for include in $*; do + echo "#include <$include>" >> /tmp/ngc$$.c + done + + cat >> /tmp/ngc$$.c << EOF +main() +{ + $type foo; +} +EOF + + LOGN "looking for $type type" + + if __MAKEDOTO /tmp/ngc$$.c; then + AC_DEFINE HAVE_TYPE_`echo ${type} | $AC_UPPERCASE` + rc=0 + else + rc=1 + fi + rm -f /tmp/ngc$$.c + return $rc +} + + +# +# check to see if some structure contains a field +# +# usage: AC_CHECK_FIELD structure field {include ...} +# +AC_CHECK_FIELD () { + + struct=$1 + field=$2 + shift 2 + + rm -f /tmp/ngc$$.c + + for include in $*;do + echo "#include <$include>" >> /tmp/ngc$$.c + done + + cat >> /tmp/ngc$$.c << EOF +main() +{ + struct $struct foo; + + foo.$field; +} +EOF + + LOGN "checking that struct $struct has a $field field" + + if __MAKEDOTO /tmp/ngc$$.c; then + AC_DEFINE HAVE_`echo ${struct}_$field | $AC_UPPERCASE` + rc=0 + else + rc=1 + fi + rm -f /tmp/ngc$$.c + return $rc +} + + +# +# check that the C compiler works +# +AC_PROG_CC () { + test "$AC_CC" && return 0 + + cat > /tmp/ngc$$.c << \EOF +#include +main() +{ + puts("hello, sailor"); +} +EOF + + TLOGN "checking the C compiler" + + unset AC_CFLAGS AC_LDFLAGS + + if [ "$CC" ] ; then + AC_CC="$CC" + elif [ "$WITH_PATH" ]; then + AC_CC=`acLookFor cc` + elif [ "`acLookFor cc`" ]; then + # don't specify the full path if the user is looking in their $PATH + # for a C compiler. + AC_CC=cc + fi + + # finally check for POSIX c89 + test "$AC_CC" || AC_CC=`acLookFor c89` + + if [ ! "$AC_CC" ]; then + TLOG " (no C compiler found)" + $__fail 1 + fi + echo "checking out the C compiler" + + $AC_CC -o /tmp/ngc$$ /tmp/ngc$$.c + status=$? + + TLOGN " ($AC_CC)" + if [ $status -eq 0 ]; then + if $AC_CC -v 2>&1 | grep 'gcc version' >/dev/null; then + TLOG " oh ick, it looks like gcc" + IS_BROKEN_CC=T + else + TLOG " ok" + fi + + # check that the CFLAGS and LDFLAGS aren't bogus + + unset AC_CFLAGS AC_LDFLAGS + + if [ "$CFLAGS" ]; then + test "$CFLAGS" && echo "validating CFLAGS=${CFLAGS}" + if $AC_CC $CFLAGS -o /tmp/ngc$$.o /tmp/ngc$$.c ; then + AC_CFLAGS=${CFLAGS:-"-g"} + test "$CFLAGS" && echo "CFLAGS=\"${CFLAGS}\" are okay" + elif [ "$CFLAGS" ]; then + echo "ignoring bogus CFLAGS=\"${CFLAGS}\"" + fi + else + AC_CFLAGS=-g + fi + if [ "$LDFLAGS" ]; then + test "$LDFLAGS" && echo "validating LDFLAGS=${LDFLAGS}" + if $AC_CC $LDFLAGS -o /tmp/ngc$$ /tmp/ngc$$.o; then + AC_LDFLAGS=${LDFLAGS:-"-g"} + test "$LDFLAGS" && TLOG "LDFLAGS=\"${LDFLAGS}\" are okay" + elif [ "$LDFLAGS" ]; then + TLOG "ignoring bogus LDFLAGS=\"${LDFLAGS}\"" + fi + else + AC_LDFLAGS=${CFLAGS:-"-g"} + fi + else + AC_FAIL " does not compile code properly" + fi + + AC_SUB 'CC' "$AC_CC" + + rm -f /tmp/ngc$$ /tmp/ngc$$.c /tmp/ngc$$.o + + return $status +} + + +# +# acLookFor actually looks for a program, without setting anything. 
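+#
+# ----------------------------------------------------------------------
+# Usage sketch for the AC_CHECK_* probes defined above (illustration
+# only; these exact invocations are not part of the upstream file, and
+# unistd.h / the stat examples are just placeholder names -- only the
+# getpwuid check actually appears in configure.sh later in this patch).
+# Each probe writes a tiny C program to /tmp, tries to build it, and
+# records the verdict with AC_DEFINE, returning 0 on success so calls
+# can be chained with &&:
+#
+#   AC_CHECK_HEADERS sys/types.h unistd.h     # -> #define HAVE_UNISTD_H 1
+#   AC_CHECK_FUNCS getpwuid pwd.h             # -> #define HAVE_GETPWUID 1
+#   AC_CHECK_STRUCT stat sys/stat.h           # -> #define HAVE_STRUCT_STAT 1
+#   AC_CHECK_FIELD stat st_mtime sys/stat.h   # -> #define HAVE_STAT_ST_MTIME 1
+# ----------------------------------------------------------------------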
+# +acLookFor () { + path=${AC_PATH:-$ac_default_path} + case "X$1" in + X-[rx]) __mode=$1 + shift + ;; + *) __mode=-x + ;; + esac + oldifs="$IFS" + for program in $*; do + IFS=":" + for x in $path; do + if [ $__mode $x/$program -a -f $x/$program ]; then + echo $x/$program + break 2 + fi + done + done + IFS="$oldifs" + unset __mode +} + + +# +# check that a program exists and set its path +# +MF_PATH_INCLUDE () { + SYM=$1; shift + + case X$1 in + X-[rx]) __mode=$1 + shift + ;; + *) unset __mode + ;; + esac + + TLOGN "looking for $1" + + DEST=`acLookFor $__mode $*` + + __sym=`echo "$SYM" | $AC_UPPERCASE` + if [ "$DEST" ]; then + TLOG " ($DEST)" + echo "$1 is $DEST" + AC_MAK $SYM + AC_DEFINE PATH_$__sym \""$DEST"\" + AC_SUB $__sym "$DEST" + eval CF_$SYM=$DEST + return 0 + else + #AC_SUB $__sym '' + echo "$1 is not found" + TLOG " (not found)" + return 1 + fi +} + +# +# AC_INIT starts the ball rolling +# +# After AC_INIT, fd's 1 and 2 point to config.log +# and fd 5 points to what used to be fd 1 +# +AC_INIT () { + __config_files="config.cmd config.sub config.h config.mak config.log" + rm -f $__config_files + __cwd=`pwd` + exec 5>&1 1>$__cwd/config.log 2>&1 + AC_CONFIGURE_FOR=__AC_`echo $1 | sed -e 's/\..$//' | $AC_UPPERCASE | tr ' ' '_'`_D + + # check to see whether to use echo -n or echo ...\c + # + echo -n hello > $$ + echo world >> $$ + if grep "helloworld" $$ >/dev/null; then + ac_echo="echo -n" + echo "[echo -n] works" + else + ac_echo="echo" + echo 'hello\c' > $$ + echo 'world' >> $$ + if grep "helloworld" $$ >/dev/null; then + ac_echo_nonl='\c' + echo "[echo ...\\c] works" + fi + fi + rm -f $$ + + LOG "Configuring for [$1]" + + cat > $__cwd/config.h << EOF +/* + * configuration for $1${2:+" ($2)"}, generated `date` + * by ${LOGNAME:-`whoami`}@`hostname` + */ +#ifndef $AC_CONFIGURE_FOR +#define $AC_CONFIGURE_FOR 1 + + +EOF + + unset __share + if [ -d $AC_PREFIX/share/man ]; then + for t in 1 2 3 4 5 6 7 8 9; do + if [ -d $AC_PREFIX/share/man/man$t ]; then + __share=/share + elif [ -d $AC_PREFIX/share/man/cat$t ]; then + __share=/share + fi + done + else + __share= + fi + + if [ -d $AC_PREFIX/libexec ]; then + __libexec=libexec + else + __libexec=lib + fi + + + AC_PREFIX=${AC_PREFIX:-/usr/local} + AC_EXECDIR=${AC_EXECDIR:-$AC_PREFIX/bin} + AC_SBINDIR=${AC_SBINDIR:-$AC_PREFIX/sbin} + AC_LIBDIR=${AC_LIBDIR:-$AC_PREFIX/lib} + AC_MANDIR=${AC_MANDIR:-$AC_PREFIX$__share/man} + AC_LIBEXEC=${AC_LIBEXEC:-$AC_PREFIX/$__libexec} + AC_CONFDIR=${AC_CONFDIR:-/etc} + + AC_PATH=${WITH_PATH:-$PATH} + AC_PROG_CPP + AC_PROG_INSTALL + + ac_os=`uname -s` + _os=`echo $ac_os | $AC_UPPERCASE | sed -e 's/[^A-Z0-9_].*$//'` + AC_DEFINE OS_$_os 1 + eval OS_${_os}=1 + unset _os +} + + +# +# AC_LIBRARY checks to see if a given library exists and contains the +# given function. +# usage: AC_LIBRARY function library [alternate ...] +# +AC_LIBRARY() { + SRC=$1 + shift + + __acllibs= + __aclhdrs= + + for x in "$@"; do + case X"$x" in + X-l*) __acllibs="$__acllibs $x" ;; + *) __aclhdrs="$__aclhdrs $x" ;; + esac + done + + # first see if the function can be found in any of the + # current libraries + AC_QUIET AC_CHECK_FUNCS $SRC $__aclhdrs && return 0 + + # then search through the list of libraries + __libs="$LIBS" + for x in $__acllibs; do + LIBS="$__libs $x" + if AC_QUIET AC_CHECK_FUNCS $SRC $__aclhdrs; then + AC_LIBS="$AC_LIBS $x" + return 0 + fi + done + return 1 +} + + +# +# AC_PROG_LEX checks to see if LEX exists, and if it's lex or flex. 
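+#
+# ----------------------------------------------------------------------
+# Sketch of the two locator helpers defined above (the TAR invocation is
+# hypothetical; the yywrap check is the one AC_PROG_LEX below really
+# performs):
+#
+#   MF_PATH_INCLUDE TAR tar gtar     # on success: HAVE_TAR in config.mak,
+#                                    # PATH_TAR in config.h, @TAR@ in
+#                                    # config.sub, and CF_TAR in the shell
+#   AC_LIBRARY yywrap -ll -lfl       # appends -ll or -lfl to AC_LIBS only
+#                                    # if yywrap() is not already linkable
+# ----------------------------------------------------------------------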
+# +AC_PROG_LEX() { + TLOGN "looking for lex " + + DEST=`acLookFor lex` + if [ "$DEST" ]; then + AC_MAK LEX + AC_DEFINE PATH_LEX \"$DEST\" + AC_SUB 'LEX' "$DEST" + echo "lex is $DEST" + else + DEST=`acLookFor flex` + if [ "$DEST" ]; then + AC_MAK FLEX + AC_DEFINE 'LEX' \"$DEST\" + AC_SUB 'LEX', "$DEST" + echo "lex is $DEST" + else + AC_SUB LEX '' + echo "neither lex or flex found" + TLOG " (not found)" + return 1 + fi + fi + + if AC_LIBRARY yywrap -ll -lfl; then + TLOG "($DEST)" + return 0 + fi + TLOG "(no lex library found)" + return 1 +} + + +# +# AC_PROG_YACC checks to see if YACC exists, and if it's bison or +# not. +# +AC_PROG_YACC () { + + TLOGN "looking for yacc " + + DEST=`acLookFor yacc` + if [ "$DEST" ]; then + AC_MAK YACC + AC_DEFINE PATH_YACC \"$DEST\" + AC_SUB 'YACC' "$DEST" + TLOG "($DEST)" + echo "yacc is $DEST" + else + DEST=`acLookFor bison` + if [ "$DEST" ]; then + AC_MAK BISON + AC_DEFINE 'YACC' \"$DEST\" + AC_SUB 'YACC' "$DEST -y" + echo "yacc is $DEST -y" + TLOG "($DEST -y)" + else + AC_SUB 'YACC' '' + echo "neither yacc or bison found" + TLOG " (not found)" + return 1 + fi + fi + return 0 +} + + +# +# AC_PROG looks for a program +# +AC_PROG () { + PN=`basename $1 | $AC_UPPERCASE | tr -dc $AC_UPPER_PAT` + TLOGN "looking for $1" + DEST=`acLookFor $1` + if [ "$DEST" ]; then + AC_SUB $PN $DEST + TLOG " ($DEST)" + return 0 + fi + AC_SUN $PN true + TLOG " (not found)" + return 1 +} + + +# +# AC_PROG_LN_S checks to see if ln exists, and, if so, if ln -s works +# +AC_PROG_LN_S () { + test "$AC_FIND_PROG" || AC_PROG_FIND + + test "$AC_FIND_PROG" || return 1 + + TLOGN "looking for \"ln -s\"" + DEST=`acLookFor ln` + + if [ "$DEST" ]; then + rm -f /tmp/b$$ + $DEST -s /tmp/a$$ /tmp/b$$ + if [ "`$AC_FIND_PROG /tmp/b$$ -type l -print`" ]; then + TLOG " ($DEST)" + echo "$DEST exists, and ln -s works" + AC_SUB 'LN_S' "$DEST -s" + rm -f /tmp/b$$ + else + AC_SUB 'LN_S' '' + TLOG " ($DEST exists, but -s does not seem to work)" + echo "$DEST exists, but ln -s doesn't seem to work" + rm -f /tmp/b$$ + return 1 + fi + else + AC_SUB 'LN_S' '' + echo "ln not found" + TLOG " (not found)" + return 1 + fi +} + + +# +# AC_PROG_FIND looks for the find program and sets the FIND environment +# variable +# +AC_PROG_FIND () { + if test -z "$AC_FIND_PROG"; then + MF_PATH_INCLUDE FIND find + rc=$? + AC_FIND_PROG=$DEST + return $rc + fi + return 0 +} + + +# +# AC_PROG_AWK looks for the awk program and sets the AWK environment +# variable +# +AC_PROG_AWK () { + if test -z "$AC_AWK_PROG"; then + MF_PATH_INCLUDE AWK awk + rc=$? + AC_AWK_PROG=$DEST + return $rc + fi + return 0 +} + + +# +# AC_PROG_SED looks for the sed program and sets the SED environment +# variable +# +AC_PROG_SED () { + if test -z "$AC_SED_PROG"; then + MF_PATH_INCLUDE SED sed + rc=$? + AC_SED_PROG=$DEST + return $rc + fi + return 0 +} + + +# +# AC_HEADER_SYS_WAIT looks for sys/wait.h +# +AC_HEADER_SYS_WAIT () { + AC_CHECK_HEADERS sys/wait.h || return 1 +} + +# +# AC_TYPE_PID_T checks to see if the pid_t type exists +# +AC_TYPE_PID_T () { + + AC_CHECK_TYPE pid_t sys/types.h + return $? 
+} + + +# +# AC_C_CONST checks to see if the compiler supports the const keyword +# +AC_C_CONST () { + cat > /tmp/pd$$.c << EOF +const char me=1; +EOF + LOGN "checking for \"const\" keyword" + + if __MAKEDOTO /tmp/pd$$.c; then + rc=0 + else + AC_DEFINE 'const' '/**/' + rc=1 + fi + rm -f /tmp/pd$$.c + return $rc +} + + +# +# AC_C_VOLATILE checks to see if the compiler supports the volatile keyword +# +AC_C_VOLATILE () { + cat > /tmp/pd$$.c << EOF +f() { volatile char me=1; } +EOF + LOGN "checking for \"volatile\" keyword" + + if __MAKEDOTO /tmp/pd$$.c; then + rc=0 + else + AC_DEFINE 'volatile' '/**/' + rc=1 + fi + rm -f /tmp/pd$$.c + return $rc +} + + +# +# AC_SCALAR_TYPES checks to see if the compiler can generate 2 and 4 byte ints. +# +AC_SCALAR_TYPES () { + cat > /tmp/pd$$.c << EOF +#include +main() +{ + unsigned long v_long; + unsigned int v_int; + unsigned short v_short; + + if (sizeof v_long == 4) + puts("#define DWORD unsigned long"); + else if (sizeof v_int == 4) + puts("#define DWORD unsigned int"); + else + exit(1); + + if (sizeof v_int == 2) + puts("#define WORD unsigned int"); + else if (sizeof v_short == 2) + puts("#define WORD unsigned short"); + else + exit(2); + puts("#define BYTE unsigned char"); + exit(0); +} +EOF + rc=1 + LOGN "defining WORD & DWORD scalar types" + if $AC_CC /tmp/pd$$.c -o /tmp/pd$$; then + if /tmp/pd$$ >> $__cwd/config.h; then + rc=0 + fi + fi + case "$rc" in + 0) TLOG "" ;; + *) TLOG " ** FAILED **" ;; + esac + rm -f /tmp/pd$$ /tmp/pd$$.c +} + + +# +# AC_OUTPUT generates makefiles from makefile.in's +# +AC_OUTPUT () { + cd $__cwd + AC_SUB 'LIBS' "$AC_LIBS" + AC_SUB 'CONFIGURE_FILES' "$__config_files" + AC_SUB 'GENERATED_FILES' "$*" + AC_SUB 'CFLAGS' "$AC_CFLAGS" + AC_SUB 'LDFLAGS' "$AC_LDFLAGS" + AC_SUB 'srcdir' "$AC_SRCDIR" + AC_SUB 'prefix' "$AC_PREFIX" + AC_SUB 'exedir' "$AC_EXECDIR" + AC_SUB 'sbindir' "$AC_SBINDIR" + AC_SUB 'libdir' "$AC_LIBDIR" + AC_SUB 'libexec' "$AC_LIBEXEC" + AC_SUB 'confdir' "$AC_CONFDIR" + AC_SUB 'mandir' "$AC_MANDIR" + + if [ -r config.sub ]; then + test "$AC_SED_PROG" || AC_PROG_SED + test "$AC_SED_PROG" || return 1 + + echo >> config.h + echo "#endif/* ${AC_CONFIGURE_FOR} */" >> config.h + + rm -f config.cmd + Q=\' + cat - > config.cmd << EOF +#! /bin/sh +${CC:+CC=${Q}${CC}${Q}} ${CFLAGS:+CFLAGS=${Q}${CFLAGS}${Q}} $ac_progname $ac_configure_command +EOF + chmod +x config.cmd + + __d=$AC_SRCDIR + for makefile in $*;do + if test -r $__d/${makefile}.in; then + LOG "generating $makefile" + ./config.md `__ac_dirname ./$makefile` 2>/dev/null + $AC_SED_PROG -f config.sub < $__d/${makefile}.in > $makefile + __config_files="$__config_files $makefile" + else + LOG "WARNING: ${makefile}.in does not exist!" + fi + done + unset __d + + else + echo + fi +} + +# +# AC_CHECK_FLOCK checks to see if flock() exists and if the LOCK_NB argument +# works properly. 
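+#
+# ----------------------------------------------------------------------
+# How AC_OUTPUT (above) ties the generated files together -- a rough
+# sketch, using the actual calls made by configure.sh later in this
+# patch:
+#
+#   AC_SUB 'TABSTOP' 4                       # appends  s;@TABSTOP@;4;g  to config.sub
+#   AC_OUTPUT Makefile version.c markdown.1  # runs  sed -f config.sub  over each
+#                                            # corresponding *.in template
+#
+# which is why the committed config.sub near the top of this patch is
+# nothing but a list of sed substitutions.
+# ----------------------------------------------------------------------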
+# +AC_CHECK_FLOCK() { + + AC_CHECK_HEADERS sys/types.h sys/file.h fcntl.h + + cat << EOF > $$.c +#include +#include +#include +#include + +main() +{ + int x = open("$$.c", O_RDWR, 0666); + int y = open("$$.c", O_RDWR, 0666); + + if (flock(x, LOCK_EX) != 0) + exit(1); + if (flock(y, LOCK_EX|LOCK_NB) == 0) + exit(1); + exit(0); +} +EOF + + LOGN "checking flock() sanity" + HAS_FLOCK=0 + if $AC_CC -o flock $$.c ; then + if ./flock ; then + LOG " (good)" + HAS_FLOCK=1 + AC_DEFINE HAS_FLOCK + else + LOG " (bad)" + fi + else + LOG " (not found)" + fi + + rm -f flock $$.c + + case "$HAS_FLOCK" in + 0) return 1 ;; + *) return 0 ;; + esac +} + + +# +# AC_CHECK_RESOLVER finds out whether the berkeley resolver is +# present on this system. +# +AC_CHECK_RESOLVER () { + AC_PROG_CC + + TLOGN "looking for the Berkeley resolver library" + + __ACR_rc=0 + + cat > /tmp/ngc$$.c << EOF +#include +#include +#include +#include + +main() +{ + char bfr[256]; + + res_init(); + res_query("hello", C_IN, T_A, bfr, sizeof bfr); +} +EOF + + if $AC_CC -o /tmp/ngc$$ /tmp/ngc$$.c; then + TLOG " (found)" + elif $AC_CC -o /tmp/ngc$$ /tmp/ngc$$.c -lresolv; then + TLOG " (found, needs -lresolv)" + AC_LIBS="$AC_LIBS -lresolv" + elif $AC_CC -DBIND_8_COMPAT -o /tmp/ngc$$ /tmp/ngc$$.c; then + TLOG " (found, needs BIND_8_COMPAT)" + AC_DEFINE BIND_8_COMPAT 1 + elif $AC_CC -DBIND_8_COMPAT -o /tmp/ngc$$ /tmp/ngc$$.c -lresolv; then + TLOG " (found, needs BIND_8_COMPAT & -lresolv)" + AC_DEFINE BIND_8_COMPAT 1 + else + TLOG " (not found)" + __ACR_rc=1 + fi + rm -f /tmp/ngc$$.c + return $__ACR_rc +} + + +# +# AC_CHECK_ALLOCA looks for alloca +# +AC_CHECK_ALLOCA () { + + AC_PROG_CC + AC_CHECK_HEADERS stdlib.h + + cat - > /tmp/ngc$$.c << EOF +#if T +# include +#else +# include +#endif +main() +{ + alloca(10); +} +EOF + + LOGN "looking for the alloca function" + if $AC_CC -DT /tmp/ngc$$.c -o /tmp/ngc$$; then + AC_DEFINE 'HAVE_ALLOCA_H' 1 + status=0 + TLOG " (found in alloca.h)" + elif $AC_CC /tmp/ngc$$.c -o /tmp/ngc$$; then + TLOG " (found)" + status=0 + else + TLOG " (not found)" + status=1 + fi + rm -f /tmp/ngc$$.c /tmp/ngc + return $status + +} + + +# +# AC_CHECK_BASENAME looks for a copy of basename that does NOT use +# a local static buffer to hold results in. +# +AC_CHECK_BASENAME() { + TLOGN "looking for a reentrant basename " + + cat > /tmp/ngc$$.c << EOF +#include + +main() +{ + char *a = basename("/a/test"); + char *b = basename("/a/nother"); + + return (strcmp(a,b) != 0) ? 0 : 1; + +} +EOF + + if $AC_CC -o /tmp/ngc$$ /tmp/ngc$$.c $LIBS; then + if /tmp/ngc$$; then + TLOG "(found)" + AC_DEFINE 'HAVE_BASENAME' 1 + AC_CHECK_HEADERS libgen.h + else + TLOG "(broken)" + fi + else + TLOG "(not found)" + fi + rm -f /tmp/ngc$$ /tmp/ngc$$.c +} + + +# +# AC_PROG_INSTALL finds the install program and guesses whether it's a +# Berkeley or GNU install program +# +AC_PROG_INSTALL () { + + DEST=`acLookFor install` + + LOGN "looking for install" + unset IS_BSD + if [ "$DEST" ]; then + # BSD install or GNU install? Let's find out... 
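+        # (added explanation, not in the upstream file) classic BSD install
+        # moves its source file unless told -c, while GNU install always
+        # copies, so install a scratch file and check whether the source
+        # survives; if it vanished this is a BSD-style install and "-c" is
+        # appended to PROG_INSTALL below.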
+ touch /tmp/a$$ + + $DEST /tmp/a$$ /tmp/b$$ + + if test -r /tmp/a$$; then + LOG " ($DEST)" + else + IS_BSD=1 + LOG " ($DEST) bsd install" + fi + rm -f /tmp/a$$ /tmp/b$$ + else + DEST=`acLookFor ginstall` + if [ "$DEST" ]; then + LOG " ($DEST)" + else + DEST="false" + LOG " (not found)" + fi + fi + + if [ "$IS_BSD" ]; then + PROG_INSTALL="$DEST -c" + else + PROG_INSTALL="$DEST" + fi + + AC_SUB 'INSTALL' "$PROG_INSTALL" + AC_SUB 'INSTALL_PROGRAM' "$PROG_INSTALL -s -m 755" + AC_SUB 'INSTALL_DATA' "$PROG_INSTALL -m 444" + + # finally build a little directory installer + # if mkdir -p works, use that, otherwise use install -d, + # otherwise build a script to do it by hand. + # in every case, test to see if the directory exists before + # making it. + + if mkdir -p $$a/b; then + # I like this method best. + __mkdir="mkdir -p" + rmdir $$a/b + rmdir $$a + elif $PROG_INSTALL -d $$a/b; then + __mkdir="$PROG_INSTALL -d" + rmdir $$a/b + rmdir $$a + fi + + __config_files="$__config_files config.md" + AC_SUB 'INSTALL_DIR' "$__cwd/config.md" + echo "#! /bin/sh" > $__cwd/config.md + echo "# script generated" `date` "by configure.sh" >> $__cwd/config.md + echo >> $__cwd/config.md + if [ "$__mkdir" ]; then + echo "test -d \"\$1\" || $__mkdir \"\$1\"" >> $__cwd/config.md + echo "exit $?" >> $__cwd/config.md + else + cat - >> $__cwd/config.md << \EOD +pieces=`IFS=/; for x in $1; do echo $x; done` +dir= +for x in $pieces; do + dir="$dir$x" + mkdir $dir || exit 1 + dir="$dir/" +done +exit 0 +EOD + fi + chmod +x $__cwd/config.md +} + +# +# acCheckCPP is a local that runs a C preprocessor with a given set of +# compiler options +# +acCheckCPP () { + cat > /tmp/ngc$$.c << EOF +#define FOO BAR + +FOO +EOF + + if $1 $2 /tmp/ngc$$.c > /tmp/ngc$$.o; then + if grep -v '#define' /tmp/ngc$$.o | grep -s BAR >/dev/null; then + echo "CPP=[$1], CPPFLAGS=[$2]" + AC_SUB 'CPP' "$1" + AC_SUB 'CPPFLAGS' "$2" + rm /tmp/ngc$$.c /tmp/ngc$$.o + return 0 + fi + fi + rm /tmp/ngc$$.c /tmp/ngc$$.o + return 1 +} + + +# +# AC_PROG_CPP checks for cpp, then checks to see which CPPFLAGS are needed +# to run it as a filter. 
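+#
+# ----------------------------------------------------------------------
+# Aside on config.md: AC_PROG_INSTALL above also writes the little
+# directory-installer script.  Note that in  echo "exit $?"  the $? is
+# expanded while the script is being generated, which is why the
+# committed config.md at the very top of this patch reads, on a system
+# with a working "mkdir -p":
+#
+#   #! /bin/sh
+#   # script generated <date> by configure.sh
+#
+#   test -d "$1" || mkdir -p "$1"
+#   exit 0
+# ----------------------------------------------------------------------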
+# +AC_PROG_CPP () { + if [ "$AC_CPP_PROG" ]; then + DEST=$AC_CPP_PROG + else + __ac_path="$AC_PATH" + AC_PATH="/lib:/usr/lib:${__ac_path:-$ac_default_path}" + DEST=`acLookFor cpp` + AC_PATH="$__ac_path" + fi + + unset fail + LOGN "Looking for cpp" + if [ "$DEST" ]; then + TLOGN " ($DEST)" + acCheckCPP $DEST "$CPPFLAGS" || \ + acCheckCPP $DEST -traditional-cpp -E || \ + acCheckCPP $DEST -E || \ + acCheckCPP $DEST -traditional-cpp -pipe || \ + acCheckCPP $DEST -pipe || fail=1 + + if [ "$fail" ]; then + AC_FAIL " (can't run cpp as a pipeline)" + else + TLOG " ok" + return 0 + fi + fi + AC_FAIL " (not found)" +} + +# +# AC_FAIL spits out an error message, then __fail's +AC_FAIL() { + LOG "$*" + $__fail 1 +} + +# +# AC_SUB writes a substitution into config.sub +AC_SUB() { + ( _subst=`echo $2 | sed -e 's/;/\\;/g'` + echo "s;@$1@;$_subst;g" ) >> $__cwd/config.sub +} + +# +# AC_MAK writes a define into config.mak +AC_MAK() { + echo "HAVE_$1 = 1" >> $__cwd/config.mak +} + +# +# AC_DEFINE adds a #define to config.h +AC_DEFINE() { + echo "#define $1 ${2:-1}" >> $__cwd/config.h +} + +# +# AC_INCLUDE adds a #include to config.h +AC_INCLUDE() { + echo "#include \"$1\"" >> $__cwd/config.h +} + +# +# AC_CONFIG adds a configuration setting to all the config files +AC_CONFIG() { + AC_DEFINE "PATH_$1" \""$2"\" + AC_MAK "$1" + AC_SUB "$1" "$2" +} + +# +# AC_QUIET does something quietly +AC_QUIET() { + eval $* 5>/dev/null +} + + +AC_TR=`acLookFor tr` +if [ "$AC_TR" ]; then + # try posix-style tr + ABC=`echo abc | tr abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ` + if [ "$ABC" = "ABC" ]; then + AC_UPPERCASE="$AC_TR abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ" + AC_UPPER_PAT="ABCDEFGHIJKLMNOPQRSTUVWXYZ" + else + ABC=`echo abc | tr a-z A-Z` + if [ "$ABC" = "ABC" ]; then + AC_UPPERCASE="$AC_TR a-z A-Z" + AC_UPPER_PAT="A-Z" + else + ABC=`echo abc | tr '[a-z]' '[A-Z]'` + if [ "$ABC" = "ABC" ]; then + AC_UPPERCASE="$AC_TR '[a-z]' '[A-Z]'" + AC_UPPER_PAT="'[A-Z]'" + else + AC_FAIL "$AC_TR cannot translate lowercase to uppercase" + return 0 + fi + fi + fi +else + AC_FAIL "configure requires a functional version of tr" +fi + +while [ $# -gt 0 ]; do + unset matched + + case X"$1" in + X--src|X--srcdir) + AC_SRCDIR=`__ac_dir "$2"` + _set_srcdir=1 + shift 2;; + + X--src=*|X--srcdir=*) + __d=`echo "$1" | sed -e 's/^[^=]*=//'` + AC_SRCDIR=`__ac_dir "$__d"` + _set_srcdir=1 + shift 1 ;; + + X--prefix) + AC_PREFIX=`__ac_dir "$2"` + _set_prefix=1 + shift 2;; + + X--prefix=*) + __d=`echo "$1"| sed -e 's/^[^=]*=//'` + AC_PREFIX=`__ac_dir "$__d"` + _set_prefix=1 + shift 1;; + + X--confdir) + AC_CONFDIR=`__ac_dir "$2"` + _set_confdir=1 + shift 2;; + + X--confdir=*) + __d=`echo "$1" | sed -e 's/^[^=]*=//'` + AC_CONFDIR=`__ac_dir "$__d"` + _set_confdir=1 + shift 1;; + + X--libexec|X--libexecdir) + AC_LIBEXEC=`__ac_dir "$2"` + _set_libexec=1 + shift 2;; + + X--libexec=*|X--libexecdir=*) + __d=`echo "$1" | sed -e 's/^[^=]*=//'` + AC_LIBEXEC=`__ac_dir "$__d"` + _set_libexec=1 + shift 1;; + + X--lib|X--libdir) + AC_LIBDIR=`__ac_dir "$2"` + _set_libdir=1 + shift 2;; + + X--lib=*|X--libdir=*) + __d=`echo "$1" | sed -e 's/^[^=]*=//'` + AC_LIBDIR=`__ac_dir "$__d"` + _set_libdir=1 + shift 1;; + + X--exec|X--execdir) + AC_EXECDIR=`__ac_dir "$2"` + _set_execdir=1 + shift 2;; + + X--exec=*|X--execdir=*) + __d=`echo "$1" | sed -e 's/^[^=]*=//'` + AC_EXECDIR=`__ac_dir "$__d"` + _set_execdir=1 + shift 1;; + + X--sbin|X--sbindir) + AC_SBINDIR=`__ac_dir "$2"` + _set_sbindir=1 + shift 2;; + + X--sbin=*|X--sbindir=*) + __d=`echo "$1" 
| sed -e 's/^[^=]*=//'` + AC_SBINDIR=`__ac_dir "$__d"` + _set_sbindir=1 + shift 1;; + + X--man|X--mandir) + AC_MANDIR=`__ac_dir "$2"` + _set_mandir=1 + shift 2;; + + X--man=*|X--mandir=*) + __d=`echo "$1" | sed -e 's/^[^=]*=//'` + AC_MANDIR=`__ac_dir "$__d"` + _set_mandir=1 + shift 1;; + + X--use-*=*) + _var=`echo "$1"| sed -n 's/^--use-\([A-Za-z][-A-Za-z0-9_]*\)=.*$/\1/p'` + if [ "$_var" ]; then + _val=`echo "$1" | sed -e 's/^--use-[^=]*=\(.*\)$/\1/'` + _v=`echo $_var | $AC_UPPERCASE | tr '-' '_'` + case X"$_val" in + X[Yy][Ee][Ss]|X[Tt][Rr][Uu][Ee]) eval USE_${_v}=T ;; + X[Nn][Oo]|X[Ff][Aa][Ll][Ss][Ee]) eval unset USE_${_v} ;; + *) echo "Bad value for --use-$_var ; must be yes or no" + exit 1 ;; + esac + else + echo "Bad option $1. Use --help to show options" 1>&2 + exit 1 + fi + shift 1 ;; + + X--use-*) + _var=`echo "$1"|sed -n 's/^--use-\([A-Za-z][-A-Za-z0-9_]*\)$/\1/p'` + _v=`echo $_var | $AC_UPPERCASE | tr '-' '_'` + eval USE_${_v}=T + shift 1;; + + X--with-*=*) + _var=`echo "$1"| sed -n 's/^--with-\([A-Za-z][-A-Za-z0-9_]*\)=.*$/\1/p'` + if [ "$_var" ]; then + _val=`echo "$1" | sed -e 's/^--with-[^=]*=\(.*\)$/\1/'` + _v=`echo $_var | $AC_UPPERCASE | tr '-' '_'` + eval WITH_${_v}=\"$_val\" + else + echo "Bad option $1. Use --help to show options" 1>&2 + exit 1 + fi + shift 1 ;; + + X--with-*) + _var=`echo "$1" | sed -n 's/^--with-\([A-Za-z][A-Za-z0-9_-]*\)$/\1/p'` + if [ "$_var" ]; then + _v=`echo $_var | $AC_UPPERCASE | tr '-' '_'` + eval WITH_${_v}=1 + else + echo "Bad option $1. Use --help to show options" 1>&2 + exit 1 + fi + shift 1 ;; + + X--help) + echo "$ac_standard" + test "$ac_help" && echo "$ac_help" + exit 0;; + + *) if [ "$LOCAL_AC_OPTIONS" ]; then + eval "$LOCAL_AC_OPTIONS" + else + ac_error=T + fi + if [ "$ac_error" ]; then + echo "Bad option $1. Use --help to show options" 1>&2 + exit 1 + fi ;; + esac +done + diff --git a/r2/r2/lib/contrib/discount-1.6.0/configure.sh b/r2/r2/lib/contrib/discount-1.6.0/configure.sh new file mode 100755 index 000000000..caf2c8a83 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/configure.sh @@ -0,0 +1,152 @@ +#! /bin/sh + +# local options: ac_help is the help message that describes them +# and LOCAL_AC_OPTIONS is the script that interprets them. LOCAL_AC_OPTIONS +# is a script that's processed with eval, so you need to be very careful to +# make certain that what you quote is what you want to quote. + +# load in the configuration file +# +ac_help='--enable-dl-tag Use the DL tag extension +--enable-pandoc-header Use pandoc-style header blocks +--enable-superscript A^B becomes AB +--enable-amalloc Enable memory allocation debugging +--relaxed-emphasis underscores aren'\''t special in the middle of words +--with-tabstops=N Set tabstops to N characters (default is 4) +--enable-div Enable >%id% divisions +--enable-alpha-list Enable (a)/(b)/(c) lists +--enable-all-features Turn on all stable optional features' + +LOCAL_AC_OPTIONS=' +set=`locals $*`; +if [ "$set" ]; then + eval $set + shift 1 +else + ac_error=T; +fi' + +locals() { + K=`echo $1 | $AC_UPPERCASE` + case "$K" in + --RELAXED-EMPHAS*) + echo RELAXED_EMPHASIS=T + ;; + --ENABLE-ALL|--ENABLE-ALL-FEATURES) + echo WITH_DL_TAG=T + echo RELAXED_EMPHASIS=T + echo WITH_PANDOC_HEADER=T + echo WITH_SUPERSCRIPT=T + echo WITH_AMALLOC=T + echo WITH_DIV=T + #echo WITH_ALPHA_LIST=T + ;; + --ENABLE-*) enable=`echo $K | sed -e 's/--ENABLE-//' | tr '-' '_'` + echo WITH_${enable}=T ;; + esac +} + +TARGET=markdown +. 
./configure.inc + +AC_INIT $TARGET + +AC_PROG_CC + +case "$AC_CC $AC_CFLAGS" in +*-Wall*) AC_DEFINE 'while(x)' 'while( (x) != 0 )' + AC_DEFINE 'if(x)' 'if( (x) != 0 )' ;; +esac + +AC_PROG ar || AC_FAIL "$TARGET requires ar" +AC_PROG ranlib + +AC_C_VOLATILE +AC_C_CONST +AC_SCALAR_TYPES +AC_CHECK_BASENAME + +AC_CHECK_HEADERS sys/types.h pwd.h && AC_CHECK_FUNCS getpwuid + +if AC_CHECK_FUNCS srandom; then + AC_DEFINE 'INITRNG(x)' 'srandom((unsigned int)x)' +elif AC_CHECK_FUNCS srand; then + AC_DEFINE 'INITRNG(x)' 'srand((unsigned int)x)' +else + AC_DEFINE 'INITRNG(x)' '(void)1' +fi + +if AC_CHECK_FUNCS 'bzero((char*)0,0)'; then + : # Yay +elif AC_CHECK_FUNCS 'memset((char*)0,0,0)'; then + AC_DEFINE 'bzero(p,s)' 'memset(p,s,0)' +else + AC_FAIL "$TARGET requires bzero or memset" +fi + +if AC_CHECK_FUNCS random; then + AC_DEFINE 'COINTOSS()' '(random()&1)' +elif AC_CHECK_FUNCS rand; then + AC_DEFINE 'COINTOSS()' '(rand()&1)' +else + AC_DEFINE 'COINTOSS()' '1' +fi + +if AC_CHECK_FUNCS strcasecmp; then + : +elif AC_CHECK_FUNCS stricmp; then + AC_DEFINE strcasecmp stricmp +else + AC_FAIL "$TARGET requires either strcasecmp() or stricmp()" +fi + +if AC_CHECK_FUNCS strncasecmp; then + : +elif AC_CHECK_FUNCS strnicmp; then + AC_DEFINE strncasecmp strnicmp +else + AC_FAIL "$TARGET requires either strncasecmp() or strnicmp()" +fi + +if AC_CHECK_FUNCS fchdir || AC_CHECK_FUNCS getcwd ; then + AC_SUB 'THEME' '' +else + AC_SUB 'THEME' '#' +fi + +if [ -z "$WITH_TABSTOPS" ]; then + TABSTOP=4 +elif [ "$WITH_TABSTOPS" -eq 1 ]; then + TABSTOP=8 +else + TABSTOP=$WITH_TABSTOPS +fi +AC_DEFINE 'TABSTOP' $TABSTOP +AC_SUB 'TABSTOP' $TABSTOP + +test -z "$WITH_SUPERSCRIPT" || AC_DEFINE 'SUPERSCRIPT' 1 +test -z "$RELAXED_EMPHASIS" || AC_DEFINE 'RELAXED_EMPHASIS' 1 +test -z "$WITH_DIV" || AC_DEFINE 'DIV_QUOTE' 1 +test -z "$WITH_ALPHA_LIST" || AC_DEFINE 'ALPHA_LIST' 1 + + +if [ "$WITH_AMALLOC" ]; then + AC_DEFINE 'USE_AMALLOC' 1 + AC_SUB 'AMALLOC' 'amalloc.o' +else + AC_SUB 'AMALLOC' '' +fi + +if [ "$RELAXED_EMPHASIS" -o "$WITH_SUPERSCRIPT" ]; then + AC_SUB 'STRICT' '' +else + AC_SUB 'STRICT' '.\"' +fi + + +[ "$OS_FREEBSD" -o "$OS_DRAGONFLY" ] || AC_CHECK_HEADERS malloc.h + +[ "$WITH_DL_TAG" ] && AC_DEFINE 'DL_TAG_EXTENSION' '1' +[ "$WITH_PANDOC_HEADER" ] && AC_DEFINE 'PANDOC_HEADER' '1' + +AC_OUTPUT Makefile version.c markdown.1 diff --git a/r2/r2/lib/contrib/discount-1.6.0/css.c b/r2/r2/lib/contrib/discount-1.6.0/css.c new file mode 100644 index 000000000..39b0414a6 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/css.c @@ -0,0 +1,76 @@ +/* markdown: a C implementation of John Gruber's Markdown markup language. + * + * Copyright (C) 2009 David L Parsons. + * The redistribution terms are provided in the COPYRIGHT file that must + * be distributed with this source code. + */ +#include +#include +#include +#include +#include +#include + +#include "config.h" + +#include "cstring.h" +#include "markdown.h" +#include "amalloc.h" + + +/* + * dump out stylesheet sections. + */ +static void +stylesheets(Paragraph *p, Cstring *f) +{ + Line* q; + + for ( ; p ; p = p->next ) { + if ( p->typ == STYLE ) { + for ( q = p->text; q ; q = q->next ) + Cswrite(f, T(q->text), S(q->text)); + Csputc('\n', f); + } + if ( p->down ) + stylesheets(p->down, f); + } +} + + +/* dump any embedded styles to a string + */ +int +mkd_css(Document *d, char **res) +{ + Cstring f; + + if ( res && *res && d && d->compiled ) { + CREATE(f); + RESERVE(f, 100); + stylesheets(d->code, &f); + + /* HACK ALERT! HACK ALERT! HACK ALERT! 
*/ + *res = T(f); /* we know that a T(Cstring) is a character pointer */ + /* so we can simply pick it up and carry it away, */ + return S(f); /* leaving the husk of the Ctring on the stack */ + /* END HACK ALERT */ + } + return EOF; +} + + +/* dump any embedded styles to a file + */ +int +mkd_generatecss(Document *d, FILE *f) +{ + char *res; + int written = EOF, size = mkd_css(d, &res); + + if ( size > 0 ) + written = fwrite(res, size, 1, f); + if ( res ) + free(res); + return (written == size) ? size : EOF; +} diff --git a/r2/r2/lib/contrib/discount-1.6.0/cstring.h b/r2/r2/lib/contrib/discount-1.6.0/cstring.h new file mode 100644 index 000000000..164e75bb6 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/cstring.h @@ -0,0 +1,75 @@ +/* two template types: STRING(t) which defines a pascal-style string + * of element (t) [STRING(char) is the closest to the pascal string], + * and ANCHOR(t) which defines a baseplate that a linked list can be + * built up from. [The linked list /must/ contain a ->next pointer + * for linking the list together with.] + */ +#ifndef _CSTRING_D +#define _CSTRING_D + +#include +#include + +#include "amalloc.h" + +/* expandable Pascal-style string. + */ +#define STRING(type) struct { type *text; int size, alloc; } + +#define CREATE(x) T(x) = (void*)(S(x) = (x).alloc = 0) +#define EXPAND(x) (S(x)++)[(S(x) < (x).alloc) \ + ? (T(x)) \ + : (T(x) = T(x) ? realloc(T(x), sizeof T(x)[0] * ((x).alloc += 100)) \ + : malloc(sizeof T(x)[0] * ((x).alloc += 100)) )] + +#define DELETE(x) ALLOCATED(x) ? (free(T(x)), S(x) = (x).alloc = 0) \ + : ( S(x) = 0 ) +#define CLIP(t,i,sz) \ + ( ((i) >= 0) && ((sz) > 0) && (((i)+(sz)) <= S(t)) ) ? \ + (memmove(&T(t)[i], &T(t)[i+sz], (S(t)-(i+sz)+1)*sizeof(T(t)[0])), \ + S(t) -= (sz)) : -1 + +#define RESERVE(x, sz) T(x) = ((x).alloc > S(x) + (sz) \ + ? T(x) \ + : T(x) \ + ? realloc(T(x), sizeof T(x)[0] * ((x).alloc = 100+(sz)+S(x))) \ + : malloc(sizeof T(x)[0] * ((x).alloc = 100+(sz)+S(x)))) +#define SUFFIX(t,p,sz) \ + memcpy(((S(t) += (sz)) - (sz)) + \ + (T(t) = T(t) ? realloc(T(t), sizeof T(t)[0] * ((t).alloc += sz)) \ + : malloc(sizeof T(t)[0] * ((t).alloc += sz))), \ + (p), sizeof(T(t)[0])*(sz)) + +#define PREFIX(t,p,sz) \ + RESERVE( (t), (sz) ); \ + if ( S(t) ) { memmove(T(t)+(sz), T(t), S(t)); } \ + memcpy( T(t), (p), (sz) ); \ + S(t) += (sz) + +/* reference-style links (and images) are stored in an array + */ +#define T(x) (x).text +#define S(x) (x).size +#define ALLOCATED(x) (x).alloc + +/* abstract anchor type that defines a list base + * with a function that attaches an element to + * the end of the list. + * + * the list base field is named .text so that the T() + * macro will work with it. + */ +#define ANCHOR(t) struct { t *text, *end; } +#define E(t) ((t).end) + +#define ATTACH(t, p) ( T(t) ? ( (E(t)->next = (p)), (E(t) = (p)) ) \ + : ( (T(t) = E(t) = (p)) ) ) + +typedef STRING(char) Cstring; + +extern void Csputc(int, Cstring *); +extern int Csprintf(Cstring *, char *, ...); +extern int Cswrite(Cstring *, char *, int); +extern void Csreparse(Cstring *, char *, int, int); + +#endif/*_CSTRING_D*/ diff --git a/r2/r2/lib/contrib/discount-1.6.0/docheader.c b/r2/r2/lib/contrib/discount-1.6.0/docheader.c new file mode 100644 index 000000000..0b5611406 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/docheader.c @@ -0,0 +1,43 @@ +/* + * docheader -- get values from the document header + * + * Copyright (C) 2007 David L Parsons. 
+ * The redistribution terms are provided in the COPYRIGHT file that must + * be distributed with this source code. + */ +#include "config.h" +#include +#include +#include + +#include "cstring.h" +#include "markdown.h" +#include "amalloc.h" + +#define afterdle(t) (T((t)->text) + (t)->dle) + +char * +mkd_doc_title(Document *doc) +{ + if ( doc && doc->headers ) + return afterdle(doc->headers); + return 0; +} + + +char * +mkd_doc_author(Document *doc) +{ + if ( doc && doc->headers && doc->headers->next ) + return afterdle(doc->headers->next); + return 0; +} + + +char * +mkd_doc_date(Document *doc) +{ + if ( doc && doc->headers && doc->headers->next && doc->headers->next->next ) + return afterdle(doc->headers->next->next); + return 0; +} diff --git a/r2/r2/lib/contrib/discount-1.6.0/dumptree.c b/r2/r2/lib/contrib/discount-1.6.0/dumptree.c new file mode 100755 index 000000000..ecace98aa --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/dumptree.c @@ -0,0 +1,151 @@ +/* markdown: a C implementation of John Gruber's Markdown markup language. + * + * Copyright (C) 2007 David L Parsons. + * The redistribution terms are provided in the COPYRIGHT file that must + * be distributed with this source code. + */ +#include +#include "markdown.h" +#include "cstring.h" +#include "amalloc.h" + +struct frame { + int indent; + char c; +}; + +typedef STRING(struct frame) Stack; + +static char * +Pptype(int typ) +{ + switch (typ) { + case WHITESPACE: return "whitespace"; + case CODE : return "code"; + case QUOTE : return "quote"; + case MARKUP : return "markup"; + case HTML : return "html"; + case DL : return "dl"; + case UL : return "ul"; + case OL : return "ol"; + case LISTITEM : return "item"; + case HDR : return "header"; + case HR : return "hr"; + case TABLE : return "table"; + case SOURCE : return "source"; + default : return "mystery node!"; + } +} + +static void +pushpfx(int indent, char c, Stack *sp) +{ + struct frame *q = &EXPAND(*sp); + + q->indent = indent; + q->c = c; +} + + +static void +poppfx(Stack *sp) +{ + S(*sp)--; +} + + +static void +changepfx(Stack *sp, char c) +{ + char ch; + + if ( !S(*sp) ) return; + + ch = T(*sp)[S(*sp)-1].c; + + if ( ch == '+' || ch == '|' ) + T(*sp)[S(*sp)-1].c = c; +} + + +static void +printpfx(Stack *sp, FILE *f) +{ + int i; + char c; + + if ( !S(*sp) ) return; + + c = T(*sp)[S(*sp)-1].c; + + if ( c == '+' || c == '-' ) { + fprintf(f, "--%c", c); + T(*sp)[S(*sp)-1].c = (c == '-') ? ' ' : '|'; + } + else + for ( i=0; i < S(*sp); i++ ) { + if ( i ) + fprintf(f, " "); + fprintf(f, "%*s%c", T(*sp)[i].indent + 2, " ", T(*sp)[i].c); + if ( T(*sp)[i].c == '`' ) + T(*sp)[i].c = ' '; + } + fprintf(f, "--"); +} + + +static void +dumptree(Paragraph *pp, Stack *sp, FILE *f) +{ + int count; + Line *p; + int d; + static char *Begin[] = { 0, "P", "center" }; + + while ( pp ) { + if ( !pp->next ) + changepfx(sp, '`'); + printpfx(sp, f); + + d = fprintf(f, "[%s", Pptype(pp->typ)); + if ( pp->ident ) + d += fprintf(f, " %s", pp->ident); + if ( pp->align ) + d += fprintf(f, ", <%s>", Begin[pp->align]); + + for (count=0, p=pp->text; p; ++count, (p = p->next) ) + ; + + if ( count ) + d += fprintf(f, ", %d line%s", count, (count==1)?"":"s"); + + d += fprintf(f, "]"); + + if ( pp->down ) { + pushpfx(d, pp->down->next ? 
'+' : '-', sp); + dumptree(pp->down, sp, f); + poppfx(sp); + } + else fputc('\n', f); + pp = pp->next; + } +} + + +int +mkd_dump(Document *doc, FILE *out, int flags, char *title) +{ + Stack stack; + + if (mkd_compile(doc, flags) ) { + + CREATE(stack); + pushpfx(fprintf(out, "%s", title), doc->code->next ? '+' : '-', &stack); + dumptree(doc->code, &stack, out); + DELETE(stack); + + mkd_cleanup(doc); + return 0; + } + return -1; +} diff --git a/r2/r2/lib/contrib/discount-1.6.0/generate.c b/r2/r2/lib/contrib/discount-1.6.0/generate.c new file mode 100644 index 000000000..64428d724 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/generate.c @@ -0,0 +1,1635 @@ +/* markdown: a C implementation of John Gruber's Markdown markup language. + * + * Copyright (C) 2007 David L Parsons. + * The redistribution terms are provided in the COPYRIGHT file that must + * be distributed with this source code. + */ +#include +#include +#include +#include +#include +#include + +#include "config.h" + +#include "cstring.h" +#include "markdown.h" +#include "amalloc.h" + +typedef int (*stfu)(const void*,const void*); + + +/* forward declarations */ +static int iscodeblock(MMIOT*); +static void code(int, MMIOT*); +static void text(MMIOT *f); +static Paragraph *display(Paragraph*, MMIOT*); + +/* externals from markdown.c */ +int __mkd_footsort(Footnote *, Footnote *); + +/* + * push text into the generator input buffer + */ +static void +push(char *bfr, int size, MMIOT *f) +{ + while ( size-- > 0 ) + EXPAND(f->in) = *bfr++; +} + + +/* look characters ahead of the cursor. + */ +static int +peek(MMIOT *f, int i) +{ + + i += (f->isp-1); + + return (i >= 0) && (i < S(f->in)) ? T(f->in)[i] : EOF; +} + + +/* pull a byte from the input buffer + */ +static int +pull(MMIOT *f) +{ + return ( f->isp < S(f->in) ) ? T(f->in)[f->isp++] : EOF; +} + + +/* return a pointer to the current position in the input buffer. + */ +static char* +cursor(MMIOT *f) +{ + return T(f->in) + f->isp; +} + + +static int +isthisspace(MMIOT *f, int i) +{ + int c = peek(f, i); + + return isspace(c) || (c == EOF); +} + + +static int +isthisalnum(MMIOT *f, int i) +{ + int c = peek(f, i); + + return (c != EOF) && isalnum(c); +} + + +static int +isthisnonword(MMIOT *f, int i) +{ + return isthisspace(f, i) || ispunct(peek(f,i)); +} + + +/* return/set the current cursor position + */ +#define mmiotseek(f,x) (f->isp = x) +#define mmiottell(f) (f->isp) + + +/* move n characters forward ( or -n characters backward) in the input buffer. + */ +static void +shift(MMIOT *f, int i) +{ + if (f->isp + i >= 0 ) + f->isp += i; +} + + +/* Qchar() + */ +static void +Qchar(int c, MMIOT *f) +{ + block *cur; + + if ( S(f->Q) == 0 ) { + cur = &EXPAND(f->Q); + memset(cur, 0, sizeof *cur); + cur->b_type = bTEXT; + } + else + cur = &T(f->Q)[S(f->Q)-1]; + + EXPAND(cur->b_text) = c; + +} + + +/* Qstring() + */ +static void +Qstring(char *s, MMIOT *f) +{ + while (*s) + Qchar(*s++, f); +} + + +/* Qwrite() + */ +static void +Qwrite(char *s, int size, MMIOT *f) +{ + while (size-- > 0) + Qchar(*s++, f); +} + + +/* Qprintf() + */ +static void +Qprintf(MMIOT *f, char *fmt, ...) +{ + char bfr[80]; + va_list ptr; + + va_start(ptr,fmt); + vsnprintf(bfr, sizeof bfr, fmt, ptr); + va_end(ptr); + Qstring(bfr, f); +} + + +/* Qem() + */ +static void +Qem(MMIOT *f, char c, int count) +{ + block *p = &EXPAND(f->Q); + + memset(p, 0, sizeof *p); + p->b_type = (c == '*') ? 
bSTAR : bUNDER; + p->b_char = c; + p->b_count = count; + + memset(&EXPAND(f->Q), 0, sizeof(block)); +} + + +/* empair() + */ +static int +empair(MMIOT *f, int go, int level) +{ + + int i; + block *begin, *p; + + begin = &T(f->Q)[go]; + for (i=go+1; i < S(f->Q); i++) { + p = &T(f->Q)[i]; + + if ( (p->b_type != bTEXT) && (p->b_count <= 0) ) + break; + + if ( p->b_type == begin->b_type ) { + if ( p->b_count == level ) /* exact match */ + return i-go; + + if ( p->b_count > 2 ) /* fuzzy match */ + return i-go; + } + } + return EOF; +} + + + +static struct emtags { + char open[10]; + char close[10]; + int size; +} emtags[] = { { "" , "", 5 }, { "", "", 9 } }; + + +static void +emclose(Cstring *s, int level) +{ + PREFIX(*s, emtags[level-1].close, emtags[level-1].size); +} + + +static void +emopen(Cstring *s, int level) +{ + SUFFIX(*s, emtags[level-1].open, emtags[level-1].size-1); +} + + +/* emmatch() + */ +static void +emmatch(MMIOT *f, int go) +{ + block *start = &T(f->Q)[go], *end; + int e, e2, i, match; + + while ( start->b_count ) { + switch (start->b_count) { + case 2: e = empair(f,go,match=2); + if ( e != EOF ) break; + case 1: e = empair(f,go,match=1); break; + default: + e = empair(f,go,1); + e2= empair(f,go,2); + + if ( e == EOF || ((e2 != EOF) && (e2 >= e)) ) { + e = e2; + match = 2; + } + else + match = 1; + } + if ( e != EOF ) { + end = &T(f->Q)[go+e]; + emclose(&end->b_post, match); + emopen(&start->b_text, match); + end->b_count -= match; + } + else { + for (i=0; i < match; i++) + EXPAND(start->b_text) = start->b_char; + } + + start->b_count -= match; + } +} + + +/* ___mkd_emblock() + */ +void +___mkd_emblock(MMIOT *f) +{ + int i; + block *p; + + for (i=0; i < S(f->Q); i++) { + p = &T(f->Q)[i]; + + if ( p->b_type != bTEXT ) emmatch(f, i); + + if ( S(p->b_post) ) { SUFFIX(f->out, T(p->b_post), S(p->b_post)); + DELETE(p->b_post); } + if ( S(p->b_text) ) { SUFFIX(f->out, T(p->b_text), S(p->b_text)); + DELETE(p->b_text); } + } + S(f->Q) = 0; +} + + +/* generate html from a markup fragment + */ +void +___mkd_reparse(char *bfr, int size, int flags, MMIOT *f) +{ + MMIOT sub; + + ___mkd_initmmiot(&sub, f->footnotes); + + sub.flags = f->flags | flags; + sub.base = f->base; + + push(bfr, size, &sub); + EXPAND(sub.in) = 0; + S(sub.in)--; + + text(&sub); + ___mkd_emblock(&sub); + + Qwrite(T(sub.out), S(sub.out), f); + + ___mkd_freemmiot(&sub, f->footnotes); +} + + +/* + * write out a url, escaping problematic characters + */ +static void +puturl(char *s, int size, MMIOT *f, int display) +{ + unsigned char c; + + while ( size-- > 0 ) { + c = *s++; + + if ( c == '\\' && size-- > 0 ) { + c = *s++; + + if ( !( ispunct(c) || isspace(c) ) ) + Qchar('\\', f); + } + + if ( c == '&' ) + Qstring("&", f); + else if ( c == '<' ) + Qstring("<", f); + else if ( c == '"' ) + Qstring("%22", f); + else if ( isalnum(c) || ispunct(c) || (display && isspace(c)) ) + Qchar(c, f); + else + Qprintf(f, "%%%02X", c); + } +} + + +/* advance forward until the next character is not whitespace + */ +static int +eatspace(MMIOT *f) +{ + int c; + + for ( ; ((c=peek(f, 1)) != EOF) && isspace(c); pull(f) ) + ; + return c; +} + + +/* (match (a (nested (parenthetical (string.))))) + */ +static int +parenthetical(int in, int out, MMIOT *f) +{ + int size, indent, c; + + for ( indent=1,size=0; indent; size++ ) { + if ( (c = pull(f)) == EOF ) + return EOF; + else if ( c == in ) + ++indent; + else if ( (c == '\\') && (peek(f,1) == out) ) { + ++size; + pull(f); + } + else if ( c == out ) + --indent; + } + return size ? 
(size-1) : 0; +} + + +/* extract a []-delimited label from the input stream. + */ +static int +linkylabel(MMIOT *f, Cstring *res) +{ + char *ptr = cursor(f); + int size; + + if ( (size = parenthetical('[',']',f)) != EOF ) { + T(*res) = ptr; + S(*res) = size; + return 1; + } + return 0; +} + + +/* see if the quote-prefixed linky segment is actually a title. + */ +static int +linkytitle(MMIOT *f, char quote, Footnote *ref) +{ + int whence = mmiottell(f); + char *title = cursor(f); + char *e; + register int c; + + while ( (c = pull(f)) != EOF ) { + e = cursor(f); + if ( c == quote ) { + if ( (c = eatspace(f)) == ')' ) { + T(ref->title) = 1+title; + S(ref->title) = (e-title)-2; + return 1; + } + } + } + mmiotseek(f, whence); + return 0; +} + + +/* extract a =HHHxWWW size from the input stream + */ +static int +linkysize(MMIOT *f, Footnote *ref) +{ + int height=0, width=0; + int whence = mmiottell(f); + int c; + + if ( isspace(peek(f,0)) ) { + pull(f); /* eat '=' */ + + for ( c = pull(f); isdigit(c); c = pull(f)) + width = (width * 10) + (c - '0'); + + if ( c == 'x' ) { + for ( c = pull(f); isdigit(c); c = pull(f)) + height = (height*10) + (c - '0'); + + if ( isspace(c) ) + c = eatspace(f); + + if ( (c == ')') || ((c == '\'' || c == '"') && linkytitle(f, c, ref)) ) { + ref->height = height; + ref->width = width; + return 1; + } + } + } + mmiotseek(f, whence); + return 0; +} + + +/* extract a (-prefixed url from the input stream. + * the label is either of the format ``, where I + * extract until I find a >, or it is of the format + * `text`, where I extract until I reach a ')', a quote, + * or (if image) a '=' + */ +static int +linkyurl(MMIOT *f, int image, Footnote *p) +{ + int c; + int mayneedtotrim=0; + + if ( (c = eatspace(f)) == EOF ) + return 0; + + if ( c == '<' ) { + pull(f); + mayneedtotrim=1; + } + + T(p->link) = cursor(f); + for ( S(p->link)=0; (c = peek(f,1)) != ')'; ++S(p->link) ) { + if ( c == EOF ) + return 0; + else if ( (c == '"' || c == '\'') && linkytitle(f, c, p) ) + break; + else if ( image && (c == '=') && linkysize(f, p) ) + break; + else if ( (c == '\\') && ispunct(peek(f,2)) ) { + ++S(p->link); + pull(f); + } + pull(f); + } + if ( peek(f, 1) == ')' ) + pull(f); + + ___mkd_tidy(&p->link); + + if ( mayneedtotrim && (T(p->link)[S(p->link)-1] == '>') ) + --S(p->link); + + return 1; +} + + + +/* prefixes for + */ +static struct { + char *name; + int nlen; +} protocol[] = { +#define _aprotocol(x) { x, (sizeof x)-1 } + _aprotocol( "http://" ), + _aprotocol( "https://" ), + _aprotocol( "ftp://" ), + _aprotocol( "news://" ), +#undef _aprotocol +}; +#define NRPROTOCOLS (sizeof protocol / sizeof protocol[0]) + + +static int +isautoprefix(char *text) +{ + int i; + + for (i=0; i < NRPROTOCOLS; i++) + if ( strncasecmp(text, protocol[i].name, protocol[i].nlen) == 0 ) + return 1; + return 0; +} + + +/* + * all the tag types that linkylinky can produce are + * defined by this structure. + */ +typedef struct linkytype { + char *pat; + int szpat; + char *link_pfx; /* tag prefix and link pointer (eg: "" */ + char *text_sfx; /* text suffix (eg: "" */ + int flags; /* reparse flags */ + int kind; /* tag is url or something else? 
*/ +#define IS_URL 0x01 +} linkytype; + +static linkytype imaget = { 0, 0, "\"",", DENY_IMG|INSIDE_TAG, IS_URL }; +static linkytype linkt = { 0, 0, "", "", DENY_A, IS_URL }; + +/* + * pseudo-protocols for [][]; + * + * id: generates tag + * class: generates tag + * raw: just dump the link without any processing + */ +static linkytype specials[] = { + { "id:", 3, "", "", 0, IS_URL }, + { "class:", 6, "", "", 0, 0 }, + { "raw:", 4, 0, 0, 0, 0, 0, DENY_HTML, 0 }, + { "abbr:", 5, "", "", 0, 0 }, +} ; + +#define NR(x) (sizeof x / sizeof x[0]) + +/* see if t contains one of our pseudo-protocols. + */ +static linkytype * +pseudo(Cstring t) +{ + int i; + linkytype *r; + + for ( i=0; i < NR(specials); i++ ) { + r = &specials[i]; + if ( (S(t) > r->szpat) && (strncasecmp(T(t), r->pat, r->szpat) == 0) ) + return r; + } + return 0; +} + + +/* print out a linky (or fail if it's Not Allowed) + */ +static int +linkyformat(MMIOT *f, Cstring text, int image, Footnote *ref) +{ + linkytype *tag; + char *edit; + + if ( image ) + tag = &imaget; + else if ( tag = pseudo(ref->link) ) { + if ( f->flags & (NO_PSEUDO_PROTO|SAFELINK) ) + return 0; + } + else if ( (f->flags & SAFELINK) && T(ref->link) + && (T(ref->link)[0] != '/') + && !isautoprefix(T(ref->link)) ) + /* if SAFELINK, only accept links that are local or + * a well-known protocol + */ + return 0; + else + tag = &linkt; + + if ( f->flags & tag->flags ) + return 0; + + if ( tag->link_pfx ) { + Qstring(tag->link_pfx, f); + + if ( tag->kind & IS_URL ) { + if ( f->e_url && (edit = (*f->e_url)(T(ref->link), S(ref->link), f->e_context)) ) { + puturl(edit, strlen(edit), f, 0); + if ( f->e_free ) (*f->e_free)(edit, f->e_context); + } + else { + if ( f->base && T(ref->link) && (T(ref->link)[tag->szpat] == '/') ) + puturl(f->base, strlen(f->base), f, 0); + puturl(T(ref->link) + tag->szpat, S(ref->link) - tag->szpat, f, 0); + } + } + else + ___mkd_reparse(T(ref->link) + tag->szpat, S(ref->link) - tag->szpat, INSIDE_TAG, f); + + Qstring(tag->link_sfx, f); + + if ( f->e_flags && (edit = (*f->e_flags)(T(ref->link), S(ref->link), f->e_context)) ) { + Qchar(' ', f); + Qstring(edit, f); + if ( f->e_free ) (*f->e_free)(edit, f->e_context); + } + + if ( tag->WxH) { + if ( ref->height) Qprintf(f," height=\"%d\"", ref->height); + if ( ref->width) Qprintf(f, " width=\"%d\"", ref->width); + } + + if ( S(ref->title) ) { + Qstring(" title=\"", f); + ___mkd_reparse(T(ref->title), S(ref->title), INSIDE_TAG, f); + Qchar('"', f); + } + + Qstring(tag->text_pfx, f); + ___mkd_reparse(T(text), S(text), tag->flags, f); + Qstring(tag->text_sfx, f); + } + else + Qwrite(T(ref->link) + tag->szpat, S(ref->link) - tag->szpat, f); + + return 1; +} /* linkyformat */ + + +/* + * process embedded links and images + */ +static int +linkylinky(int image, MMIOT *f) +{ + int start = mmiottell(f); + Cstring name; + Footnote key, *ref; + + int status = 0; + + CREATE(name); + memset(&key, 0, sizeof key); + + if ( linkylabel(f, &name) ) { + if ( peek(f,1) == '(' ) { + pull(f); + if ( linkyurl(f, image, &key) ) + status = linkyformat(f, name, image, &key); + } + else { + int goodlink, implicit_mark = mmiottell(f); + + if ( eatspace(f) == '[' ) { + pull(f); /* consume leading '[' */ + goodlink = linkylabel(f, &key.tag); + } + else { + /* new markdown implicit name syntax doesn't + * require a second [] + */ + mmiotseek(f, implicit_mark); + goodlink = !(f->flags & MKD_1_COMPAT); + } + + if ( goodlink ) { + if ( !S(key.tag) ) { + DELETE(key.tag); + T(key.tag) = T(name); + S(key.tag) = S(name); + } + + if ( ref 
= bsearch(&key, T(*f->footnotes), S(*f->footnotes), + sizeof key, (stfu)__mkd_footsort) ) + status = linkyformat(f, name, image, ref); + } + } + } + + DELETE(name); + ___mkd_freefootnote(&key); + + if ( status == 0 ) + mmiotseek(f, start); + + return status; +} + + +/* write a character to output, doing text escapes ( & -> &, + * > -> > < -> < ) + */ +static void +cputc(int c, MMIOT *f) +{ + switch (c) { + case '&': Qstring("&", f); break; + case '>': Qstring(">", f); break; + case '<': Qstring("<", f); break; + default : Qchar(c, f); break; + } +} + + +/* + * convert an email address to a string of nonsense + */ +static void +mangle(char *s, int len, MMIOT *f) +{ + while ( len-- > 0 ) { + Qstring("&#", f); + Qprintf(f, COINTOSS() ? "x%02x;" : "%02d;", *((unsigned char*)(s++)) ); + } +} + + +/* before letting a tag through, validate against + * DENY_A and DENY_IMG + */ +static int +forbidden_tag(MMIOT *f) +{ + int c = toupper(peek(f, 1)); + + if ( f->flags & DENY_HTML ) + return 1; + + if ( c == 'A' && (f->flags & DENY_A) && !isthisalnum(f,2) ) + return 1; + if ( c == 'I' && (f->flags & DENY_IMG) + && strncasecmp(cursor(f)+1, "MG", 2) == 0 + && !isthisalnum(f,4) ) + return 1; + return 0; +} + + +/* Check a string to see if it looks like a mail address + * "looks like a mail address" means alphanumeric + some + * specials, then a `@`, then alphanumeric + some specials, + * but with a `.` + */ +static int +maybe_address(char *p, int size) +{ + int ok = 0; + + for ( ;size && (isalnum(*p) || strchr("._-+*", *p)); ++p, --size) + ; + + if ( ! (size && *p == '@') ) + return 0; + + --size, ++p; + + if ( size && *p == '.' ) return 0; + + for ( ;size && (isalnum(*p) || strchr("._-+", *p)); ++p, --size ) + if ( *p == '.' && size > 1 ) ok = 1; + + return size ? 0 : ok; +} + + +/* The size-length token at cursor(f) is either a mailto:, an + * implicit mailto:, one of the approved url protocols, or just + * plain old text. If it's a mailto: or an approved protocol, + * linkify it, otherwise say "no" + */ +static int +process_possible_link(MMIOT *f, int size) +{ + int address= 0; + int mailto = 0; + char *text = cursor(f); + + if ( f->flags & DENY_A ) return 0; + + if ( (size > 7) && strncasecmp(text, "mailto:", 7) == 0 ) { + /* if it says it's a mailto, it's a mailto -- who am + * I to second-guess the user? + */ + address = 1; + mailto = 7; /* 7 is the length of "mailto:"; we need this */ + } + else + address = maybe_address(text, size); + + if ( address ) { + Qstring("", f); + mangle(text+mailto, size-mailto, f); + Qstring("", f); + return 1; + } + else if ( isautoprefix(text) ) { + char *edit; + Qstring("e_url && (edit = (*f->e_url)(text, size, f->e_context)) ) { + puturl(edit, strlen(edit), f, 0); + if ( f->e_free ) (*f->e_free)(edit, f->e_context); + } + else + puturl(text,size,f, 0); + if ( f->e_flags && (edit = (*f->e_flags)(text, size, f->e_context)) ) { + Qstring("\" ", f); + Qstring(edit, f); + if ( f->e_free ) (*f->e_free)(edit, f->e_context); + Qchar('>', f); + } + else + Qstring("\">", f); + puturl(text,size,f, 1); + Qstring("", f); + return 1; + } + return 0; +} /* process_possible_link */ + + +/* a < may be just a regular character, the start of an embedded html + * tag, or the start of an . If it's an automatic + * link, we also need to know if it's an email address because if it + * is we need to mangle it in our futile attempt to cut down on the + * spaminess of the rendered page. 
+ */ +static int +maybe_tag_or_link(MMIOT *f) +{ + int c, size; + int maybetag = 1; + + if ( f->flags & INSIDE_TAG ) + return 0; + + for ( size=0; (c = peek(f, size+1)) != '>'; size++) { + if ( c == EOF ) + return 0; + else if ( c == '\\' ) { + maybetag=0; + if ( peek(f, size+2) != EOF ) + size++; + } + else if ( isspace(c) ) + break; + else if ( ! (c == '/' || isalnum(c) ) ) + maybetag=0; + } + + if ( size ) { + if ( maybetag || (size >= 3 && strncmp(cursor(f), "!--", 3) == 0) ) { + + /* It is not a html tag unless we find the closing '>' in + * the same block. + */ + while ( (c = peek(f, size+1)) != '>' ) + if ( c == EOF ) + return 0; + else + size++; + + if ( forbidden_tag(f) ) + return 0; + + Qchar('<', f); + while ( ((c = peek(f, 1)) != EOF) && (c != '>') ) + Qchar(pull(f), f); + return 1; + } + else if ( !isspace(c) && process_possible_link(f, size) ) { + shift(f, size+1); + return 1; + } + } + + return 0; +} + + +/* autolinking means that all inline html is . A + * autolink url is alphanumerics, slashes, periods, underscores, + * the at sign, colon, and the % character. + */ +static int +maybe_autolink(MMIOT *f) +{ + register int c; + int size; + + /* greedily scan forward for the end of a legitimate link. + */ + for ( size=0; (c=peek(f, size+1)) != EOF; size++ ) + if ( c == '\\' ) { + if ( peek(f, size+2) != EOF ) + ++size; + } + else if ( isspace(c) || strchr("'\"()[]{}<>`", c) ) + break; + + if ( (size > 1) && process_possible_link(f, size) ) { + shift(f, size); + return 1; + } + return 0; +} + + +/* smartyquote code that's common for single and double quotes + */ +static int +smartyquote(int *flags, char typeofquote, MMIOT *f) +{ + int bit = (typeofquote == 's') ? 0x01 : 0x02; + + if ( bit & (*flags) ) { + if ( isthisnonword(f,1) ) { + Qprintf(f, "&r%cquo;", typeofquote); + (*flags) &= ~bit; + return 1; + } + } + else if ( isthisnonword(f,-1) && peek(f,1) != EOF ) { + Qprintf(f, "&l%cquo;", typeofquote); + (*flags) |= bit; + return 1; + } + return 0; +} + + +static int +islike(MMIOT *f, char *s) +{ + int len; + int i; + + if ( s[0] == '<' ) { + if ( !isthisnonword(f, -1) ) + return 0; + ++s; + } + + if ( !(len = strlen(s)) ) + return 0; + + if ( s[len-1] == '>' ) { + if ( !isthisnonword(f,len-1) ) + return 0; + len--; + } + + for (i=1; i < len; i++) + if (tolower(peek(f,i)) != s[i]) + return 0; + return 1; +} + + +static struct smarties { + char c0; + char *pat; + char *entity; + int shift; +} smarties[] = { + { '\'', "'s>", "rsquo", 0 }, + { '\'', "'t>", "rsquo", 0 }, + { '-', "--", "mdash", 1 }, + { '-', "<->", "ndash", 0 }, + { '.', "...", "hellip", 2 }, + { '.', ". . 
.", "hellip", 4 }, + { '(', "(c)", "copy", 2 }, + { '(', "(r)", "reg", 2 }, + { '(', "(tm)", "trade", 3 }, + { '3', "<3/4>", "frac34", 2 }, + { '3', "<3/4ths>", "frac34", 2 }, + { '1', "<1/2>", "frac12", 2 }, + { '1', "<1/4>", "frac14", 2 }, + { '1', "<1/4th>", "frac14", 2 }, + { '&', "�", 0, 3 }, +} ; +#define NRSMART ( sizeof smarties / sizeof smarties[0] ) + + +/* Smarty-pants-style chrome for quotes, -, ellipses, and (r)(c)(tm) + */ +static int +smartypants(int c, int *flags, MMIOT *f) +{ + int i; + + if ( f->flags & (DENY_SMARTY|INSIDE_TAG) ) + return 0; + + for ( i=0; i < NRSMART; i++) + if ( (c == smarties[i].c0) && islike(f, smarties[i].pat) ) { + if ( smarties[i].entity ) + Qprintf(f, "&%s;", smarties[i].entity); + shift(f, smarties[i].shift); + return 1; + } + + switch (c) { + case '<' : return 0; + case '\'': if ( smartyquote(flags, 's', f) ) return 1; + break; + + case '"': if ( smartyquote(flags, 'd', f) ) return 1; + break; + + case '`': if ( peek(f, 1) == '`' ) { + int j = 2; + + while ( (c=peek(f,j)) != EOF ) { + if ( c == '\\' ) + j += 2; + else if ( c == '`' ) + break; + else if ( c == '\'' && peek(f, j+1) == '\'' ) { + Qstring("“", f); + ___mkd_reparse(cursor(f)+1, j-2, 0, f); + Qstring("”", f); + shift(f,j+1); + return 1; + } + else ++j; + } + + } + break; + } + return 0; +} /* smartypants */ + + +#define tag_text(f) (f->flags & INSIDE_TAG) + + +static void +text(MMIOT *f) +{ + int c, j; + int rep; + int smartyflags = 0; + + while (1) { + if ( (f->flags & AUTOLINK) && isalpha(peek(f,1)) && !tag_text(f) ) + maybe_autolink(f); + + c = pull(f); + + if (c == EOF) + break; + + if ( smartypants(c, &smartyflags, f) ) + continue; + switch (c) { + case 0: break; + + case 3: Qstring("
", f); + break; + + case '>': if ( tag_text(f) ) + Qstring(">", f); + else + Qchar(c, f); + break; + + case '"': if ( tag_text(f) ) + Qstring(""", f); + else + Qchar(c, f); + break; + + case '!': if ( peek(f,1) == '[' ) { + pull(f); + if ( tag_text(f) || !linkylinky(1, f) ) + Qstring("![", f); + } + else + Qchar(c, f); + break; + case '[': if ( tag_text(f) || !linkylinky(0, f) ) + Qchar(c, f); + break; +#if SUPERSCRIPT + /* A^B -> AB */ + case '^': if ( (f->flags & (STRICT|INSIDE_TAG)) || isthisspace(f,-1) || isthisspace(f,1) ) + Qchar(c,f); + else { + char *sup = cursor(f); + int len = 0; + Qstring("",f); + while ( !isthisspace(f,1+len) ) { + ++len; + } + shift(f,len); + ___mkd_reparse(sup, len, 0, f); + Qstring("", f); + } + break; +#endif + case '_': +#if RELAXED_EMPHASIS + /* Underscores don't count if they're in the middle of a word */ + if ( !(f->flags & STRICT) && isthisalnum(f,-1) + && isthisalnum(f,1) ) { + Qchar(c, f); + break; + } +#endif + case '*': +#if RELAXED_EMPHASIS + /* Underscores & stars don't count if they're out in the middle + * of whitespace */ + if ( !(f->flags & STRICT) && isthisspace(f,-1) + && isthisspace(f,1) ) { + Qchar(c, f); + break; + } + /* else fall into the regular old emphasis case */ +#endif + if ( tag_text(f) ) + Qchar(c, f); + else { + for (rep = 1; peek(f,1) == c; pull(f) ) + ++rep; + Qem(f,c,rep); + } + break; + + case '`': if ( tag_text(f) || !iscodeblock(f) ) + Qchar(c, f); + else { + Qstring("", f); + if ( peek(f, 1) == '`' ) { + pull(f); + code(2, f); + } + else + code(1, f); + Qstring("", f); + } + break; + + case '\\': switch ( c = pull(f) ) { + case '&': Qstring("&", f); + break; + case '<': Qstring("<", f); + break; + case '>': case '#': case '.': case '-': + case '+': case '{': case '}': case ']': + case '!': case '[': case '*': case '_': + case '\\':case '(': case ')': + case '`': Qchar(c, f); + break; + default: + Qchar('\\', f); + if ( c != EOF ) + shift(f,-1); + break; + } + break; + + case '<': if ( !maybe_tag_or_link(f) ) + Qstring("<", f); + break; + + case '&': j = (peek(f,1) == '#' ) ? 
2 : 1; + while ( isthisalnum(f,j) ) + ++j; + + if ( peek(f,j) != ';' ) + Qstring("&", f); + else + Qchar(c, f); + break; + + default: Qchar(c, f); + break; + } + } + /* truncate the input string after we've finished processing it */ + S(f->in) = f->isp = 0; +} /* text */ + + +static int +iscodeblock(MMIOT *f) +{ + int i=1, single = 1, c; + + if ( peek(f,i) == '`' ) { + single=0; + i++; + } + while ( (c=peek(f,i)) != EOF ) { + if ( (c == '`') && (single || peek(f,i+1) == '`') ) + return 1; + else if ( c == '\\' ) + i++; + i++; + } + return 0; + +} + +static int +endofcode(int escape, int offset, MMIOT *f) +{ + switch (escape) { + case 2: if ( peek(f, offset+1) == '`' ) { + shift(f,1); + case 1: shift(f,offset); + return 1; + } + default:return 0; + } +} + + +/* the only characters that have special meaning in a code block are + * `<' and `&' , which are /always/ expanded to < and & + */ +static void +code(int escape, MMIOT *f) +{ + int c; + + if ( escape && (peek(f,1) == ' ') ) + shift(f,1); + + while ( (c = pull(f)) != EOF ) { + switch (c) { + case ' ': if ( peek(f,1) == '`' && endofcode(escape, 1, f) ) + return; + Qchar(c, f); + break; + + case '`': if ( endofcode(escape, 0, f) ) + return; + Qchar(c, f); + break; + + case '\\': cputc(c, f); + if ( peek(f,1) == '>' || (c = pull(f)) == EOF ) + break; + + case 003: /* ^C; expand back to spaces */ + Qstring(" ", f); + break; + + default: cputc(c, f); + break; + } + } +} /* code */ + + +/* print a header block + */ +static void +printheader(Paragraph *pp, MMIOT *f) +{ + Qprintf(f, "hnumber); + if ( f->flags & TOC ) { + Qprintf(f, " id=\"", pp->hnumber); + mkd_string_to_anchor(T(pp->text->text), S(pp->text->text), Qchar, f); + Qchar('"', f); + } + Qchar('>', f); + push(T(pp->text->text), S(pp->text->text), f); + text(f); + Qprintf(f, "", pp->hnumber); +} + + +enum e_alignments { a_NONE, a_CENTER, a_LEFT, a_RIGHT }; + +static char* alignments[] = { "", " align=\"center\"", " align=\"left\"", + " align=\"right\"" }; + +typedef STRING(int) Istring; + +static int +splat(Line *p, char *block, Istring align, int force, MMIOT *f) +{ + int first, + idx = 0, + colno = 0; + + Qstring("\n", f); + while ( idx < S(p->text) ) { + first = idx; + if ( force && (colno >= S(align)-1) ) + idx = S(p->text); + else + while ( (idx < S(p->text)) && (T(p->text)[idx] != '|') ) + ++idx; + + Qprintf(f, "<%s%s>", + block, + alignments[ (colno < S(align)) ? T(align)[colno] : a_NONE ]); + ___mkd_reparse(T(p->text)+first, idx-first, 0, f); + Qprintf(f, "\n", block); + idx++; + colno++; + } + if ( force ) + while (colno < S(align) ) { + Qprintf(f, "<%s>\n", block, block); + ++colno; + } + Qstring("\n", f); + return colno; +} + +static int +printtable(Paragraph *pp, MMIOT *f) +{ + /* header, dashes, then lines of content */ + + Line *hdr, *dash, *body; + Istring align; + int start; + int hcols; + char *p; + + if ( !(pp->text && pp->text->next) ) + return 0; + + hdr = pp->text; + dash= hdr->next; + body= dash->next; + + /* first figure out cell alignments */ + + CREATE(align); + + for (p=T(dash->text), start=0; start < S(dash->text); ) { + char first, last; + int end; + + last=first=0; + for (end=start ; (end < S(dash->text)) && p[end] != '|'; ++ end ) { + if ( !isspace(p[end]) ) { + if ( !first) first = p[end]; + last = p[end]; + } + } + EXPAND(align) = ( first == ':' ) ? (( last == ':') ? a_CENTER : a_LEFT) + : (( last == ':') ? 
a_RIGHT : a_NONE ); + start = 1+end; + } + + Qstring("\n", f); + Qstring("\n", f); + hcols = splat(hdr, "th", align, 0, f); + Qstring("\n", f); + + if ( hcols < S(align) ) + S(align) = hcols; + else + while ( hcols > S(align) ) + EXPAND(align) = a_NONE; + + Qstring("\n", f); + for ( ; body; body = body->next) + splat(body, "td", align, 1, f); + Qstring("\n", f); + Qstring("
\n", f); + + DELETE(align); + return 1; +} + + +static int +printblock(Paragraph *pp, MMIOT *f) +{ + Line *t = pp->text; + static char *Begin[] = { "", "

", "

" }; + static char *End[] = { "", "

","
" }; + + while (t) { + if ( S(t->text) ) { + if ( t->next && S(t->text) > 2 + && T(t->text)[S(t->text)-2] == ' ' + && T(t->text)[S(t->text)-1] == ' ' ) { + push(T(t->text), S(t->text)-2, f); + push("\003\n", 2, f); + } + else { + ___mkd_tidy(&t->text); + push(T(t->text), S(t->text), f); + if ( t->next ) + push("\n", 1, f); + } + } + t = t->next; + } + Qstring(Begin[pp->align], f); + text(f); + Qstring(End[pp->align], f); + return 1; +} + + +static void +printcode(Line *t, MMIOT *f) +{ + int blanks; + + for ( blanks = 0; t ; t = t->next ) + if ( S(t->text) > t->dle ) { + while ( blanks ) { + push("\n", 1, f); + --blanks; + } + push(T(t->text), S(t->text), f); + push("\n", 1, f); + } + else blanks++; + + Qstring("
", f);
+    code(0, f);
+    Qstring("
", f); +} + + +static void +printhtml(Line *t, MMIOT *f) +{ + int blanks; + + for ( blanks=0; t ; t = t->next ) + if ( S(t->text) ) { + for ( ; blanks; --blanks ) + Qchar('\n', f); + + Qwrite(T(t->text), S(t->text), f); + Qchar('\n', f); + } + else + blanks++; +} + + +static void +htmlify(Paragraph *p, char *block, char *arguments, MMIOT *f) +{ + ___mkd_emblock(f); + if ( block ) + Qprintf(f, arguments ? "<%s %s>" : "<%s>", block, arguments); + ___mkd_emblock(f); + + while (( p = display(p, f) )) { + ___mkd_emblock(f); + Qstring("\n\n", f); + } + + if ( block ) + Qprintf(f, "", block); + ___mkd_emblock(f); +} + + +#if DL_TAG_EXTENSION +static void +definitionlist(Paragraph *p, MMIOT *f) +{ + Line *tag; + + if ( p ) { + Qstring("
\n", f); + + for ( ; p ; p = p->next) { + for ( tag = p->text; tag; tag = tag->next ) { + Qstring("
", f); + ___mkd_reparse(T(tag->text), S(tag->text), 0, f); + Qstring("
\n", f); + } + + htmlify(p->down, "dd", p->ident, f); + Qchar('\n', f); + } + + Qstring("
", f); + } +} +#endif + + +static void +listdisplay(int typ, Paragraph *p, MMIOT* f) +{ + if ( p ) { + Qprintf(f, "<%cl", (typ==UL)?'u':'o'); + if ( typ == AL ) + Qprintf(f, " type=a"); + Qprintf(f, ">\n"); + + for ( ; p ; p = p->next ) { + htmlify(p->down, "li", p->ident, f); + Qchar('\n', f); + } + + Qprintf(f, "\n", (typ==UL)?'u':'o'); + } +} + + +/* dump out a Paragraph in the desired manner + */ +static Paragraph* +display(Paragraph *p, MMIOT *f) +{ + if ( !p ) return 0; + + switch ( p->typ ) { + case STYLE: + case WHITESPACE: + break; + + case HTML: + printhtml(p->text, f); + break; + + case CODE: + printcode(p->text, f); + break; + + case QUOTE: + htmlify(p->down, p->ident ? "div" : "blockquote", p->ident, f); + break; + + case UL: + case OL: + case AL: + listdisplay(p->typ, p->down, f); + break; + +#if DL_TAG_EXTENSION + case DL: + definitionlist(p->down, f); + break; +#endif + + case HR: + Qstring("
", f); + break; + + case HDR: + printheader(p, f); + break; + + case TABLE: + printtable(p, f); + break; + + case SOURCE: + htmlify(p->down, 0, 0, f); + break; + + default: + printblock(p, f); + break; + } + return p->next; +} + + +/* return a pointer to the compiled markdown + * document. + */ +int +mkd_document(Document *p, char **res) +{ + if ( p && p->compiled ) { + if ( ! p->html ) { + htmlify(p->code, 0, 0, p->ctx); + p->html = 1; + } + + *res = T(p->ctx->out); + return S(p->ctx->out); + } + return EOF; +} + diff --git a/r2/r2/lib/contrib/discount-1.6.0/main.c b/r2/r2/lib/contrib/discount-1.6.0/main.c new file mode 100644 index 000000000..4a50efe1b --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/main.c @@ -0,0 +1,211 @@ +/* + * markdown: convert a single markdown document into html + */ +/* + * Copyright (C) 2007 David L Parsons. + * The redistribution terms are provided in the COPYRIGHT file that must + * be distributed with this source code. + */ +#include +#include +#include +#include +#include +#include +#include + +#include "config.h" +#include "amalloc.h" + +#if HAVE_LIBGEN_H +#include +#endif + +#ifndef HAVE_BASENAME +#include + +char* +basename(char *p) +{ + char *ret = strrchr(p, '/'); + + return ret ? (1+ret) : p; +} +#endif + + +char *pgm = "markdown"; + +static struct { + char *name; + int off; + int flag; +} opts[] = { + { "tabstop", 0, MKD_TABSTOP }, + { "image", 1, MKD_NOIMAGE }, + { "links", 1, MKD_NOLINKS }, + { "relax", 1, MKD_STRICT }, + { "strict", 0, MKD_STRICT }, + { "tables", 1, MKD_NOTABLES }, + { "header", 1, MKD_NOHEADER }, + { "html", 1, MKD_NOHTML }, + { "ext", 1, MKD_NO_EXT }, + { "cdata", 0, MKD_CDATA }, + { "pants", 1, MKD_NOPANTS }, + { "smarty", 1, MKD_NOPANTS }, + { "toc", 0, MKD_TOC }, + { "autolink",0, MKD_AUTOLINK }, + { "safelink",0, MKD_SAFELINK }, + { "1.0", 0, MKD_1_COMPAT }, +} ; + +#define NR(x) (sizeof x / sizeof x[0]) + + +void +set(int *flags, char *optionstring) +{ + int i; + int enable; + char *arg; + + for ( arg = strtok(optionstring, ","); arg; arg = strtok(NULL, ",") ) { + if ( *arg == '+' || *arg == '-' ) + enable = (*arg++ == '+') ? 
1 : 0; + else if ( strncasecmp(arg, "no", 2) == 0 ) { + arg += 2; + enable = 0; + } + else + enable = 1; + + for ( i=0; i < NR(opts); i++ ) + if ( strcasecmp(arg, opts[i].name) == 0 ) + break; + + if ( i < NR(opts) ) { + if ( opts[i].off ) + enable = !enable; + + if ( enable ) + *flags |= opts[i].flag; + else + *flags &= ~opts[i].flag; + } + else + fprintf(stderr, "%s: unknown option <%s>\n", pgm, arg); + } +} + + +char * +e_flags(char *text, int size, char *context) +{ + return context; +} + + +int +main(int argc, char **argv) +{ + int opt; + int rc; + int flags = 0; + int debug = 0; + int toc = 0; + int use_mkd_line = 0; + char *urlflags = 0; + char *text = 0; + char *ofile = 0; + char *urlbase = 0; + char *q; + MMIOT *doc; + + if ( q = getenv("MARKDOWN_FLAGS") ) + flags = strtol(q, 0, 0); + + pgm = basename(argv[0]); + opterr = 1; + + while ( (opt=getopt(argc, argv, "b:df:F:o:s:t:TVZ:")) != EOF ) { + switch (opt) { + case 'b': urlbase = optarg; + break; + case 'd': debug = 1; + break; + case 'V': printf("%s: discount %s\n", pgm, markdown_version); + exit(0); + case 'F': flags = strtol(optarg, 0, 0); + break; + case 'f': set(&flags, optarg); + break; + case 't': text = optarg; + use_mkd_line = 1; + break; + case 'T': toc = 1; + break; + case 's': text = optarg; + break; + case 'o': if ( ofile ) { + fprintf(stderr, "Too many -o options\n"); + exit(1); + } + if ( !freopen(ofile = optarg, "w", stdout) ) { + perror(ofile); + exit(1); + } + break; + case 'Z': urlflags = optarg; + break; + default: fprintf(stderr, "usage: %s [-dTV] [-b url-base]" + " [-F bitmap] [-f {+-}flags]" + " [-o ofile] [-s text]" + " [-t text] [file]\n", pgm); + exit(1); + } + } + argc -= optind; + argv += optind; + + if ( use_mkd_line ) + rc = mkd_generateline( text, strlen(text), stdout, flags); + else { + if ( text ) { + if ( (doc = mkd_string(text, strlen(text), flags)) == 0 ) { + perror(text); + exit(1); + } + } + else { + if ( argc && !freopen(argv[0], "r", stdin) ) { + perror(argv[0]); + exit(1); + } + if ( (doc = mkd_in(stdin,flags)) == 0 ) { + perror(argc ? argv[0] : "stdin"); + exit(1); + } + } + if ( urlbase ) + mkd_basename(doc, urlbase); + + if ( debug ) + rc = mkd_dump(doc, stdout, 0, argc ? basename(argv[0]) : "stdin"); + else { + rc = 1; + if ( mkd_compile(doc, flags) ) { + if ( urlflags ) { + mkd_e_context(doc, urlflags); + mkd_e_flags(doc, e_flags); + } + rc = 0; + if ( toc ) + mkd_generatetoc(doc, stdout); + mkd_generatehtml(doc, stdout); + mkd_cleanup(doc); + } + } + } + adump(); + exit( (rc == 0) ? 0 : errno ); +} diff --git a/r2/r2/lib/contrib/discount-1.6.0/makepage.c b/r2/r2/lib/contrib/discount-1.6.0/makepage.c new file mode 100644 index 000000000..8fa55acd3 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/makepage.c @@ -0,0 +1,27 @@ +/* + * makepage: Use mkd_xhtmlpage() to convert markdown input to a + * fully-formed xhtml page. + */ +#include +#include +#include + +int +main(argc, argv) +int argc; +char **argv; +{ + MMIOT *doc; + + if ( (argc > 1) && !freopen(argv[1], "r", stdin) ) { + perror(argv[1]); + exit(1); + } + + if ( (doc = mkd_in(stdin, 0)) == 0 ) { + perror( (argc > 1) ?
argv[1] : "stdin" ); + exit(1); + } + + exit(mkd_xhtmlpage(doc, 0, stdout)); +} diff --git a/r2/r2/lib/contrib/discount-1.6.0/markdown.1 b/r2/r2/lib/contrib/discount-1.6.0/markdown.1 new file mode 100644 index 000000000..8f5ea3ee7 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/markdown.1 @@ -0,0 +1,128 @@ +.\" %A% +.\" +.Dd January 7, 2008 +.Dt MARKDOWN 1 +.Os MASTODON +.Sh NAME +.Nm markdown +.Nd text to html conversion tool +.Sh SYNOPSIS +.Nm +.Op Fl d +.Op Fl T +.Op Fl V +.Op Fl b Ar url-base +.Op Fl F Pa bitmap +.Op Fl f Ar flags +.Op Fl o Pa file +.Op Fl s Pa text +.Op Fl t Pa text +.Op Pa textfile +.Sh DESCRIPTION +The +.Nm +utility reads the +.Xr markdown 7 Ns -formatted +.Pa textfile +.Pq or stdin if not specified, +compiles it, and writes the html output +to stdout. +.Pp +The options are as follows: +.Bl -tag -width "-o file" +.It Fl b Ar url-base +Links in source begining with / will be prefixed with +.Ar url-base +in the output. +.It Fl d +Instead of writing the html file, dump a parse +tree to stdout. +.It Fl f Ar flags +Set or clear various translation flags. The flags +are in a comma-delimited list, with an optional +.Ar + +(set) prefix on each flag. +.Bl -tag -width "NOHEADER" +.It Ar noimage +Don't allow image tags. +.It Ar nolinks +Don't allow links. +.It Ar nohtml +Don't allow +.B any +embedded html. +.It Ar cdata +Generate valid XML output. +.It Ar noheader +Do not process pandoc headers. +.It Ar notables +Do not process Markdown Extra-style tables. +.It Ar tabstops +Use markdown-standard 4-space tabstops. +.".It Ar strict +."Disable superscript and relaxed emphasis. +.".It Ar relax +."Enable superscript and relaxed emphasis (this is the default.) +.It Ar toc +Enable table-of-contents support +.It Ar 1.0 +Revert to Markdown 1.0 compatability. +.El +.Pp +As an example, the option +.Fl f Ar nolinks,quot +tells +.Nm +to not allow \
0 if an error occurs. +.Sh SEE ALSO +.Xr markdown 3 , +.Xr markdown 7 , +.Xr mkd-extensions 7 . +.Sh AUTHOR +.An David Parsons +.Pq Li orc@pell.chi.il.us diff --git a/r2/r2/lib/contrib/discount-1.6.0/markdown.1.in b/r2/r2/lib/contrib/discount-1.6.0/markdown.1.in new file mode 100644 index 000000000..781a23614 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/markdown.1.in @@ -0,0 +1,128 @@ +.\" %A% +.\" +.Dd January 7, 2008 +.Dt MARKDOWN 1 +.Os MASTODON +.Sh NAME +.Nm markdown +.Nd text to html conversion tool +.Sh SYNOPSIS +.Nm +.Op Fl d +.Op Fl T +.Op Fl V +.Op Fl b Ar url-base +.Op Fl F Pa bitmap +.Op Fl f Ar flags +.Op Fl o Pa file +.Op Fl s Pa text +.Op Fl t Pa text +.Op Pa textfile +.Sh DESCRIPTION +The +.Nm +utility reads the +.Xr markdown 7 Ns -formatted +.Pa textfile +.Pq or stdin if not specified, +compiles it, and writes the html output +to stdout. +.Pp +The options are as follows: +.Bl -tag -width "-o file" +.It Fl b Ar url-base +Links in source begining with / will be prefixed with +.Ar url-base +in the output. +.It Fl d +Instead of writing the html file, dump a parse +tree to stdout. +.It Fl f Ar flags +Set or clear various translation flags. The flags +are in a comma-delimited list, with an optional +.Ar + +(set) prefix on each flag. +.Bl -tag -width "NOHEADER" +.It Ar noimage +Don't allow image tags. +.It Ar nolinks +Don't allow links. +.It Ar nohtml +Don't allow +.B any +embedded html. +.It Ar cdata +Generate valid XML output. +.It Ar noheader +Do not process pandoc headers. +.It Ar notables +Do not process Markdown Extra-style tables. +.It Ar tabstops +Use markdown-standard 4-space tabstops. +@STRICT@.It Ar strict +@STRICT@Disable superscript and relaxed emphasis. +@STRICT@.It Ar relax +@STRICT@Enable superscript and relaxed emphasis (this is the default.) +.It Ar toc +Enable table-of-contents support +.It Ar 1.0 +Revert to Markdown 1.0 compatability. +.El +.Pp +As an example, the option +.Fl f Ar nolinks,quot +tells +.Nm +to not allow \0 if an error occurs. +.Sh SEE ALSO +.Xr markdown 3 , +.Xr markdown 7 , +.Xr mkd-extensions 7 . +.Sh AUTHOR +.An David Parsons +.Pq Li orc@pell.chi.il.us diff --git a/r2/r2/lib/contrib/discount-1.6.0/markdown.3 b/r2/r2/lib/contrib/discount-1.6.0/markdown.3 new file mode 100644 index 000000000..8022f8050 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/markdown.3 @@ -0,0 +1,123 @@ +.\" +.Dd December 20, 2007 +.Dt MARKDOWN 3 +.Os Mastodon +.Sh NAME +.Nm markdown +.Nd process Markdown documents +.Sh LIBRARY +Markdown +.Pq libmarkdown , -lmarkdown +.Sh SYNOPSIS +.Fd #include +.Ft MMIOT +.Fn *mkd_in "FILE *input" "int flags" +.Ft MMIOT +.Fn *mkd_string "char *string" "int size" "int flags" +.Ft int +.Fn markdown "MMIOT *doc" "FILE *output" "int flags" +.Sh DESCRIPTION +These functions +convert +.Em Markdown +documents and strings into HTML. +.Fn markdown +processes an entire document, while +.Fn mkd_text +processes a single string. +.Pp +To process a file, you pass a FILE* to +.Fn mkd_in , +and if it returns a nonzero value you pass that in to +.Fn markdown , +which then writes the converted document to the specified +.Em FILE* . +If your input has already been written into a string (generated +input or a file opened +with +.Xr mmap 2 ) +you can feed that string to +.Fn mkd_string +and pass its return value to +.Fn markdown. 
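A minimal usage sketch of that call sequence (an illustration, not part of the manual page: it assumes the library header is mkdio.h, which this diff does not show, and relies only on the mkd_string()/markdown() signatures from the SYNOPSIS above):

    #include <stdio.h>
    #include <mkdio.h>

    int
    main(void)
    {
        char text[] = "Hello, *world*.\n";
        /* compile the in-memory string; flags = 0 selects default behaviour */
        MMIOT *doc = mkd_string(text, sizeof(text) - 1, 0);

        if ( doc == 0 )
            return 1;

        /* markdown() writes the generated HTML to the given FILE* and,
         * per the note at the end of this page, also disposes of the
         * MMIOT that mkd_string() created; it returns 0 on success. */
        return markdown(doc, stdout, 0);
    }

The same sequence applies to mkd_in() when the input is a FILE* rather than an in-memory string.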
+.Pp +.Fn Markdown +accepts the following flag values (or-ed together if needed) +to restrict how it processes input: +.Bl -tag -width MKD_SAFELINK -compact +.It Ar MKD_NOIMAGE +Do not process `![]' and +remove +.Em \ +tags from the output. +.It Ar MKD_NOLINKS +Do not process `[]' and remove +.Em \ +tags from the output. +.It Ar MKD_NOPANTS +Do not do Smartypants-style mangling of quotes, dashes, or ellipses. +.It Ar MKD_STRICT +Disable superscript and relaxed emphasis processing (if they are configured; +otherwise it's a no-op.) +.\" .It Ar MKD_QUOT +.\" Expand +.\" .Ar \&" +.\" to \&". +.It Ar MKD_NOHEADER +Do not attempt to parse any Pandoc-style headers. +.It Ar MKD_TABSTOP +When reading documents, expand tabs to +.Em 4 +spaces instead of whatever +.Nm +was originally configured for. +.It Ar MKD_TOC +Label all headers for use with the +.Fn mkd_generatetoc +and +.Fn mkd_toc +functions. +.It Ar MKD_1_COMPAT +MarkdownTest_1.0 compatability flag; trim trailing spaces from the +first line of code blocks and disable implicit reference links. +.It Ar MKD_AUTOLINK +Greedily urlify links -- if +.Em MKD_AUTOLINK +is set, urls will be converted into hyperlinks even if they +aren't encased in +.Em <> . +.It Ar MKD_SAFELINK +Don't make hyperlinks from +.Em [][] +links that have unknown url types. +.It Ar MKD_NOTABLES +Don't process tables. +.El +.Sh RETURN VALUES +.Fn markdown +returns 0 on success, 1 on failure. +The +.Fn mkd_in +and +.Fn mkd_string +functions return a MMIOT* on success, null on failure. +.Sh SEE ALSO +.Xr markdown 1 , +.Xr mkd-functions 3 , +.Xr mkd-line 3 , +.Xr markdown 7 , +.Xr mkd-extensions 7 , +.Xr mmap 2 . +.Pp +http://daringfireball.net/projects/markdown/syntax +.Sh BUGS +Error handling is minimal at best. +.Pp +The +.Ar MMIOT +created by +.Fn mkd_string +is deleted by the +.Nm +function. + diff --git a/r2/r2/lib/contrib/discount-1.6.0/markdown.7 b/r2/r2/lib/contrib/discount-1.6.0/markdown.7 new file mode 100644 index 000000000..be844da01 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/markdown.7 @@ -0,0 +1,1019 @@ +.\" +.Dd Dec 22, 2007 +.Dt MARKDOWN 7 +.Os MASTODON +.Sh NAME +.Nm Markdown +.Nd The Markdown text formatting syntax +.Sh DESCRIPTION +.Ss Philosophy +.Nm Markdown +is intended to be as easy-to-read and easy-to-write as is feasible. +.Pp +Readability, however, is emphasized above all else. A Markdown-formatted +document should be publishable as-is, as plain text, without looking +like it's been marked up with tags or formatting instructions. While +Markdown's syntax has been influenced by several existing text-to-HTML +filters -- including +.Em Setext , +.Em atx , +.Em Textile , +.Em reStructuredText , +.Em Grutatext , +and +.Em EtText +\-\- the single biggest source of +inspiration for +Markdown's +syntax is the format of plain text email. +.Pp +To this end, Markdown's syntax is comprised entirely of punctuation +characters, which punctuation characters have been carefully chosen so +as to look like what they mean. E.g., asterisks around a word actually +look like *emphasis*. Markdown lists look like, well, lists. Even +blockquotes look like quoted passages of text, assuming you've ever +used email. +.Ss Inline HTML +Markdown's syntax is intended for one purpose: to be used as a +format for +.Em writing +for the web. +.Pp +.Nm +is not a replacement for HTML, or even close to it. Its +syntax is very small, corresponding only to a very small subset of +HTML tags. The idea is +.Em not +to create a syntax that makes it easier +to insert HTML tags. 
In my opinion, HTML tags are already easy to +insert. The idea for Markdown is to make it easy to read, write, and +edit prose. HTML is a +.Em publishing +format; Markdown is a +.Em writing +format. Thus, Markdown's formatting syntax only addresses issues that +can be conveyed in plain text. +.Pp +For any markup that is not covered by Markdown's syntax, you simply +use HTML itself. There's no need to preface it or delimit it to +indicate that you're switching from Markdown to HTML; you just use +the tags. +.Pp +The only restrictions are that block-level HTML elements -- e.g. +.Li \
<div\> , +.Li \<table\> , +.Li \<pre\> , +.Li \<p\> , +etc. -- must be separated from surrounding +content by blank lines, and the start and end tags of the block should +not be indented with tabs or spaces. Markdown is smart enough not +to add extra (unwanted) +.Li \<p\>
+tags around HTML block-level tags. +.Pp +For example, to add an HTML table to a Markdown article: +.Bd -literal -offset indent + This is a regular paragraph. + +

+    <table>
+        <tr>
+            <td>Foo</td>
+        </tr>
+    </table>
+ + This is another regular paragraph. +.Ed +.Pp +Note that Markdown formatting syntax is not processed within block-level +HTML tags. E.g., you can't use Markdown-style +.Li *emphasis* +inside an HTML block. +.Pp +Span-level HTML tags -- e.g. +.Li \ , +.Li \ , +or +.Li \ +\-\- can be +used anywhere in a Markdown paragraph, list item, or header. If you +want, you can even use HTML tags instead of Markdown formatting; e.g. if +you'd prefer to use HTML +.Li \
+or +.Li \ +tags instead of Markdown's +link or image syntax, go right ahead. +.Pp +Unlike block-level HTML tags, Markdown syntax *is* processed within +span-level tags. +.Ss Automatic Escaping for Special Characters +In HTML, there are two characters that demand special treatment: `<` +and `&`. Left angle brackets are used to start tags; ampersands are +used to denote HTML entities. If you want to use them as literal +characters, you must escape them as entities, e.g. `<`, and +`&`. +.Pp +Ampersands in particular are bedeviling for web writers. If you want to +write about 'AT&T', you need to write '`AT&T`'. You even need to +escape ampersands within URLs. Thus, if you want to link to: +.Bd -literal -offset indent + http://images.google.com/images?num=30&q=larry+bird +.Ed +.Pp +you need to encode the URL as: +.Bd -literal -offset indent + http://images.google.com/images?num=30&q=larry+bird +.Ed +.Pp +in your anchor tag `href` attribute. Needless to say, this is easy to +forget, and is probably the single most common source of HTML validation +errors in otherwise well-marked-up web sites. +.Pp +.Nm +allows you to use these characters naturally, taking care of +all the necessary escaping for you. If you use an ampersand as part of +an HTML entity, it remains unchanged; otherwise it will be translated +into `&`. +.Pp +So, if you want to include a copyright symbol in your article, you can write: +.Bd -literal -offset indent + © +.Ed +.Pp +and Markdown will leave it alone. But if you write: +.Bd -literal -offset indent + AT&T +.Ed +.Pp +.Nm +will translate it to: +.Bd -literal -offset indent + AT&T +.Ed +.Pp +Similarly, because Markdown supports inline HTML, if you use +angle brackets as delimiters for HTML tags, Markdown will treat them as +such. But if you write: +.Bd -literal -offset indent + 4 < 5 +.Ed +.Pp +.Nm +will translate it to: +.Bd -literal -offset indent + 4 < 5 +.Ed +.Pp +However, inside Markdown code spans and blocks, angle brackets and +ampersands are *always* encoded automatically. This makes it easy to use +Markdown to write about HTML code. (As opposed to raw HTML, which is a +terrible format for writing about HTML syntax, because every single `<` +and `&` in your example code needs to be escaped.) +.Sh Block Elements +.Ss Paragraphs and Line Breaks +.Pp +A paragraph is simply one or more consecutive lines of text, separated +by one or more blank lines. (A blank line is any line that looks like a +blank line -- a line containing nothing but spaces or tabs is considered +blank.) Normal paragraphs should not be indented with spaces or tabs. +.Pp +The implication of the +.Qq one or more consecutive lines of text +rule is +that Markdown supports +.Qq hard-wrapped +Dtext paragraphs. This differs +significantly from most other text-to-HTML formatters (including Movable +Type's +.Qq Convert Line Breaks +option) which translate every line break +character in a paragraph into a `
<br />` tag. +.Pp +When you *do* want to insert a `<br />` break tag using Markdown, you +end a line with two or more spaces, then type return. +.Pp +Yes, this takes a tad more effort to create a `<br />`, but a simplistic +"every line break is a `<br />
`" rule wouldn't work for Markdown. +Markdown's email-style +.Sx blockquoting + and multi-paragraph +.Sx list items +work best -- and look better -- when you format them with hard breaks. +.Ss Headers +.Nm +supports two styles of headers, +.Em Setext +and +.Em atx . +.Pp +Setext-style headers are +.Sq underlined +using equal signs (for first-level +headers) and dashes (for second-level headers). For example: +.Bd -literal -offset indent + This is an H1 + ============= + + This is an H2 + ------------- +.Ed +.Pp +Any number of underlining `=`'s or `-`'s will work. +.Pp +Atx-style headers use 1-6 hash characters at the start of the line, +corresponding to header levels 1-6. For example: +.Bd -literal -offset indent + # This is an H1 + + ## This is an H2 + + ###### This is an H6 +.Ed +.Pp +Optionally, you may +.Qq close +atx-style headers. This is purely +cosmetic -- you can use this if you think it looks better. The +closing hashes don't even need to match the number of hashes +used to open the header. (The number of opening hashes +determines the header level.) : +.Bd -literal -offset indent + # This is an H1 # + + ## This is an H2 ## + + ### This is an H3 ###### +.Ed +.Pp +.Ss Blockquotes +.Nm +uses email-style `>` characters for blockquoting. If you're +familiar with quoting passages of text in an email message, then you +know how to create a blockquote in Markdown. It looks best if you hard +wrap the text and put a `>` before every line: +.Bd -literal -offset indent + > This is a blockquote with two paragraphs. Lorem ipsum + > dolor sit amet, consectetuer adipiscing elit. Aliquam + > hendrerit mi posuere lectus. Vestibulum enim wisi, + > viverra nec, fringilla in, laoreet vitae, risus. + > + > Donec sit amet nisl. Aliquam semper ipsum sit amet + > velit. Suspendisse id sem consectetuer libero luctus + > adipiscing. +.Ed +.Pp +.Nm +allows you to be lazy and only put the `>` before the first +line of a hard-wrapped paragraph: +.Bd -literal -offset indent + > This is a blockquote with two paragraphs. Lorem ipsum + dolor sit amet, consectetuer adipiscing elit. Aliquam + hendrerit mi posuere lectus. Vestibulum enim wisi, + viverra nec, fringilla in, laoreet vitae, risus. + + > Donec sit amet nisl. Aliquam semper ipsum sit amet + velit. Suspendisse id sem consectetuer libero luctus + adipiscing. +.Ed +.Pp +Blockquotes can be nested (i.e. a blockquote-in-a-blockquote) by +adding additional levels of `>`: +.Bd -literal -offset indent + > This is the first level of quoting. + > + > > This is nested blockquote. + > + > Back to the first level. +.Ed +.Pp +Blockquotes can contain other Markdown elements, including headers, lists, +and code blocks: +.Bd -literal -offset indent + > ## This is a header. + > + > 1. This is the first list item. + > 2. This is the second list item. + > + > Here's some example code: + > + > return shell_exec("echo $input | $markdown_script"); +.Ed +.Pp +Any decent text editor should make email-style quoting easy. For +example, with BBEdit, you can make a selection and choose Increase +Quote Level from the Text menu. +.Ss Lists +.Nm +supports ordered (numbered) and unordered (bulleted) lists. 
+.Pp +Unordered lists use asterisks, pluses, and hyphens -- interchangably +\-- as list markers: +.Bd -literal -offset indent + * Red + * Green + * Blue +.Ed +.Pp +is equivalent to: +.Bd -literal -offset indent + + Red + + Green + + Blue +.Ed +.Pp +and: +.Bd -literal -offset indent + - Red + - Green + - Blue +.Ed +.Pp +Ordered lists use numbers followed by periods: +.Bd -literal -offset indent + 1. Bird + 2. McHale + 3. Parish +.Ed +.Pp +It's important to note that the actual numbers you use to mark the +list have no effect on the HTML output Markdown produces. The HTML +Markdown produces from the above list is: +.Bd -literal -offset indent +
    +
+    <ol>
+    <li>Bird</li>
+    <li>McHale</li>
+    <li>Parish</li>
+    </ol>
+.Ed +.Pp +If you instead wrote the list in Markdown like this: +.Bd -literal -offset indent + 1. Bird + 1. McHale + 1. Parish +.Ed +.Pp +or even: +.Bd -literal -offset indent + 3. Bird + 1. McHale + 8. Parish +.Ed +.Pp +you'd get the exact same HTML output. The point is, if you want to, +you can use ordinal numbers in your ordered Markdown lists, so that +the numbers in your source match the numbers in your published HTML. +But if you want to be lazy, you don't have to. +.Pp +If you do use lazy list numbering, however, you should still start the +list with the number 1. At some point in the future, Markdown may support +starting ordered lists at an arbitrary number. +.Pp +List markers typically start at the left margin, but may be indented by +up to three spaces. List markers must be followed by one or more spaces +or a tab. +.Pp +To make lists look nice, you can wrap items with hanging indents: +.Bd -literal -offset indent + * Lorem ipsum dolor sit amet, consectetuer adipiscing + elit. Aliquam hendrerit mi posuere lectus. Vestibulum + enim wisi, viverra nec, fringilla in, laoreet vitae, + risus. + * Donec sit amet nisl. Aliquam semper ipsum sit amet + velit. Suspendisse id sem consectetuer libero luctus + adipiscing. +.Ed +.Pp +But if you want to be lazy, you don't have to: +.Bd -literal -offset indent + * Lorem ipsum dolor sit amet, consectetuer adipiscing + elit. Aliquam hendrerit mi posuere lectus. Vestibulum + enim wisi, viverra nec, fringilla in, laoreet vitae, + risus. + * Donec sit amet nisl. Aliquam semper ipsum sit amet + velit. Suspendisse id sem consectetuer libero luctus + adipiscing. +.Ed +.Pp +If list items are separated by blank lines, Markdown will wrap the +items in `

` tags in the HTML output. For example, this input: +.Bd -literal -offset indent + * Bird + * Magic +.Ed +.Pp +will turn into: +.Bd -literal -offset indent +

    +
+    <ul>
+    <li>Bird</li>
+    <li>Magic</li>
+    </ul>
+.Ed +.Pp +But this: +.Bd -literal -offset indent + * Bird + + * Magic +.Ed +.Pp +will turn into: +.Bd -literal -offset indent +
    +
+    <ul>
+    <li><p>Bird</p></li>
+    <li><p>Magic</p></li>
+    </ul>
+.Ed +.Pp +List items may consist of multiple paragraphs. Each subsequent +paragraph in a list item must be intended by either 4 spaces +or one tab: +.Bd -literal -offset indent + 1. This is a list item with two paragraphs. Lorem ipsum + dolor sit amet, consectetuer adipiscing elit. Aliquam + hendrerit mi posuere lectus. + + Vestibulum enim wisi, viverra nec, fringilla in, + laoreet vitae, risus. Donec sit amet nisl. Aliquam + semper ipsum sit amet velit. + + 2. Suspendisse id sem consectetuer libero luctus + adipiscing. +.Ed +.Pp +It looks nice if you indent every line of the subsequent +paragraphs, but here again, Markdown will allow you to be +lazy: +.Bd -literal -offset indent + * This is a list item with two paragraphs. + + This is the second paragraph in the list item. + You're only required to indent the first line. Lorem + ipsum dolor sit amet, consectetuer adipiscing elit. + + * Another item in the same list. +.Ed +.Pp +To put a blockquote within a list item, the blockquote's `>` +delimiters need to be indented: +.Bd -literal -offset indent + * A list item with a blockquote: + + > This is a blockquote + > inside a list item. +.Ed +.Pp +To put a code block within a list item, the code block needs +to be indented *twice* -- 8 spaces or two tabs: +.Bd -literal -offset indent + * A list item with a code block: + + +.Ed +.Pp +It's worth noting that it's possible to trigger an ordered list by +accident, by writing something like this: +.Bd -literal -offset indent + 1986. What a great season. +.Ed +.Pp +In other words, a *number-period-space* sequence at the beginning of a +line. To avoid this, you can backslash-escape the period: +.Bd -literal -offset indent + 1986\\. What a great season. +.Ed +.Pp +.Ss Code Blocks +Pre-formatted code blocks are used for writing about programming or +markup source code. Rather than forming normal paragraphs, the lines +of a code block are interpreted literally. Markdown wraps a code block +in both `
` and `` tags.
+.Pp
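A small sketch from the C side (an illustration, not part of this manual page: it assumes the mkdio.h header and relies only on the mkd_string() and markdown() calls documented in markdown(3)) showing an indented line coming back wrapped in a pre/code block:

    #include <stdio.h>
    #include <mkdio.h>

    int
    main(void)
    {
        /* a normal paragraph followed by a four-space-indented code line */
        char text[] = "This is a normal paragraph:\n\n    This is a code block.\n";
        MMIOT *doc = mkd_string(text, sizeof(text) - 1, 0);

        /* markdown() emits the compiled HTML, including the wrapping
         * pre/code block, on stdout */
        return doc ? markdown(doc, stdout, 0) : 1;
    }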
+To produce a code block in Markdown, simply indent every line of the
+block by at least 4 spaces or 1 tab. For example, given this input:
+.Bd -literal -offset indent
+    This is a normal paragraph:
+
+        This is a code block.
+.Ed
+.Pp
+.Nm
+will generate:
+.Bd -literal -offset indent
+    

+    <p>This is a normal paragraph:</p>
+
+    <pre><code>This is a code block.
+    </code></pre>
+.Ed +.Pp +One level of indentation -- 4 spaces or 1 tab -- is removed from each +line of the code block. For example, this: +.Bd -literal -offset indent + Here is an example of AppleScript: + + tell application "Foo" + beep + end tell +.Ed +.Pp +will turn into: +.Bd -literal -offset indent +

+    <p>Here is an example of AppleScript:</p>
+
+    <pre><code>tell application "Foo"
+        beep
+    end tell
+    </code></pre>
+.Ed +.Pp +A code block continues until it reaches a line that is not indented +(or the end of the article). +.Pp +Within a code block, ampersands (`&`) and angle brackets (`<` and `>`) +are automatically converted into HTML entities. This makes it very +easy to include example HTML source code using Markdown -- just paste +it and indent it, and Markdown will handle the hassle of encoding the +ampersands and angle brackets. For example, this: +.Bd -literal -offset indent + +.Ed +.Pp +will turn into: +.Bd -literal -offset indent +
+    <pre><code>&lt;div class="footer"&gt;
+        &amp;copy; 2004 Foo Corporation
+    &lt;/div&gt;
+    </code></pre>
+.Ed +.Pp +Regular Markdown syntax is not processed within code blocks. E.g., +asterisks are just literal asterisks within a code block. This means +it's also easy to use Markdown to write about Markdown's own syntax. +.Ss Horizontal Rules +You can produce a horizontal rule tag (`<hr />
`) by placing three or +more hyphens, asterisks, or underscores on a line by themselves. If you +wish, you may use spaces between the hyphens or asterisks. Each of the +following lines will produce a horizontal rule: +.Bd -literal -offset indent + * * * + + *** + + ***** + + - - - + + --------------------------------------- +.Ed +.Pp +.Sh Span Elements +.Ss Links +.Nm +supports two style of links: +.Em inline +and +.Em reference . +.Pp +In both styles, the link text is delimited by [square brackets]. +.Pp +To create an inline link, use a set of regular parentheses immediately +after the link text's closing square bracket. Inside the parentheses, +put the URL where you want the link to point, along with an *optional* +title for the link, surrounded in quotes. For example: +.Bd -literal -offset indent + This is [an example](http://example.com/ "Title") inline link. + + [This link](http://example.net/) has no title attribute. +.Ed +.Pp +Will produce: +.Bd -literal -offset indent +

+    <p>This is <a href="http://example.com/" title="Title">
+    an example</a> inline link.</p>
+
+    <p><a href="http://example.net/">This link</a> has no
+    title attribute.</p>

+.Ed +.Pp +If you're referring to a local resource on the same server, you can +use relative paths: +.Bd -literal -offset indent + See my [About](/about/) page for details. +.Ed +.Pp +Reference-style links use a second set of square brackets, inside +which you place a label of your choosing to identify the link: +.Bd -literal -offset indent + This is [an example][id] reference-style link. +.Ed +.Pp +You can optionally use a space to separate the sets of brackets: +.Bd -literal -offset indent + This is [an example] [id] reference-style link. +.Ed +.Pp +Then, anywhere in the document, you define your link label like this, +on a line by itself: +.Bd -literal -offset indent + [id]: http://example.com/ "Optional Title Here" +.Ed +.Pp +That is: +.Bl -bullet +.It +Square brackets containing the link identifier (optionally +indented from the left margin using up to three spaces); +.It +followed by a colon; +.It +followed by one or more spaces (or tabs); +.It +followed by the URL for the link; +.It +optionally followed by a title attribute for the link, enclosed +in double or single quotes, or enclosed in parentheses. +.El +.Pp +The following three link definitions are equivalent: +.Bd -literal -offset indent + [foo]: http://example.com/ "Optional Title Here" + [foo]: http://example.com/ 'Optional Title Here' + [foo]: http://example.com/ (Optional Title Here) +.Ed +.Pp +.Em Note : +There is a known bug in Markdown.pl 1.0.1 which prevents +single quotes from being used to delimit link titles. +.Pp +The link URL may, optionally, be surrounded by angle brackets: +.Bd -literal -offset indent + [id]: "Optional Title Here" +.Ed +.Pp +You can put the title attribute on the next line and use extra spaces +or tabs for padding, which tends to look better with longer URLs: +.Bd -literal -offset indent + [id]: http://example.com/longish/path/to/resource/here + "Optional Title Here" +.Ed +.Pp +Link definitions are only used for creating links during Markdown +processing, and are stripped from your document in the HTML output. +.Pp +Link definition names may constist of letters, numbers, spaces, and +punctuation -- but they are +.Em not +case sensitive. E.g. these two +links: +.Bd -literal -offset indent + [link text][a] + [link text][A] +.Ed +.Pp +are equivalent. +.Pp +The +.Em implicit link name +shortcut allows you to omit the name of the +link, in which case the link text itself is used as the name. +Just use an empty set of square brackets -- e.g., to link the word +.Qq Google +to the google.com web site, you could simply write: +.Bd -literal -offset indent + [Google][] +.Ed +.Pp +And then define the link: +.Bd -literal -offset indent + [Google]: http://google.com/ +.Ed +.Pp +Because link names may contain spaces, this shortcut even works for +multiple words in the link text: +.Bd -literal -offset indent + Visit [Daring Fireball][] for more information. +.Ed +.Pp +And then define the link: +.Bd -literal -offset indent + [Daring Fireball]: http://daringfireball.net/ +.Ed +.Pp +Link definitions can be placed anywhere in your Markdown document. I +tend to put them immediately after each paragraph in which they're +used, but if you want, you can put them all at the end of your +document, sort of like footnotes. +.Pp +Here's an example of reference links in action: +.Bd -literal -offset indent + I get 10 times more traffic from [Google] [1] than from + [Yahoo] [2] or [MSN] [3]. 
+ + [1]: http://google.com/ "Google" + [2]: http://search.yahoo.com/ "Yahoo Search" + [3]: http://search.msn.com/ "MSN Search" +.Ed +.Pp +Using the implicit link name shortcut, you could instead write: +.Bd -literal -offset indent + I get 10 times more traffic from [Google][] than from + [Yahoo][] or [MSN][]. + + [google]: http://google.com/ "Google" + [yahoo]: http://search.yahoo.com/ "Yahoo Search" + [msn]: http://search.msn.com/ "MSN Search" +.Ed +.Pp +Both of the above examples will produce the following HTML output: +.Bd -literal -offset indent +

+    <p>I get 10 times more traffic from <a href="http://google.com/"
+    title="Google">Google</a> than from
+    <a href="http://search.yahoo.com/" title="Yahoo Search">Yahoo</a>
+    or
+    <a href="http://search.msn.com/" title="MSN Search">MSN</a>.</p>

+.Ed +.Pp +For comparison, here is the same paragraph written using +Markdown's inline link style: +.Bd -literal -offset indent + I get 10 times more traffic from + [Google](http://google.com/ "Google") than from + [Yahoo](http://search.yahoo.com/ "Yahoo Search") or + [MSN](http://search.msn.com/ "MSN Search"). +.Ed +.Pp +The point of reference-style links is not that they're easier to +write. The point is that with reference-style links, your document +source is vastly more readable. Compare the above examples: using +reference-style links, the paragraph itself is only 81 characters +long; with inline-style links, it's 176 characters; and as raw HTML, +it's 234 characters. In the raw HTML, there's more markup than there +is text. +.Pp +With Markdown's reference-style links, a source document much more +closely resembles the final output, as rendered in a browser. By +allowing you to move the markup-related metadata out of the paragraph, +you can add links without interrupting the narrative flow of your +prose. +.Ss Emphasis +Markdown treats asterisks (`*`) and underscores (`_`) as indicators of +emphasis. Text wrapped with one `*` or `_` will be wrapped with an +HTML `` tag; double `*`'s or `_`'s will be wrapped with an HTML +`` tag. E.g., this input: +.Bd -literal -offset indent + *single asterisks* + + _single underscores_ + + **double asterisks** + + __double underscores__ +.Ed +.Pp +will produce: +.Bd -literal -offset indent + single asterisks + + single underscores + + double asterisks + + double underscores +.Ed +.Pp +You can use whichever style you prefer; the lone restriction is that +the same character must be used to open and close an emphasis span. +.Pp +Emphasis can be used in the middle of a word: +.Bd -literal -offset indent + un*fucking*believable +.Ed +.Pp +But if you surround an `*` or `_` with spaces, it'll be treated as a +literal asterisk or underscore. +.Pp +To produce a literal asterisk or underscore at a position where it +would otherwise be used as an emphasis delimiter, you can backslash +escape it: +.Bd -literal -offset indent + \\*this text is surrounded by literal asterisks\\* +.Ed +.Pp +.Ss Code +To indicate a span of code, wrap it with backtick quotes (`` ` ``). +Unlike a pre-formatted code block, a code span indicates code within a +normal paragraph. For example: +.Bd -literal -offset indent + Use the `printf()` function. +.Ed +.Pp +will produce: +.Bd -literal -offset indent +

+    <p>Use the <code>printf()</code> function.</p>

+.Ed +.Pp +To include a literal backtick character within a code span, you can use +multiple backticks as the opening and closing delimiters: +.Bd -literal -offset indent + ``There is a literal backtick (`) here.`` +.Ed +.Pp +which will produce this: +.Bd -literal -offset indent +

+    <p><code>There is a literal backtick (`) here.</code></p>

+.Ed +.Pp +The backtick delimiters surrounding a code span may include spaces -- +one after the opening, one before the closing. This allows you to place +literal backtick characters at the beginning or end of a code span: +.Bd -literal -offset indent + A single backtick in a code span: `` ` `` + + A backtick-delimited string in a code span: `` `foo` `` +.Ed +.Pp +will produce: +.Bd -literal -offset indent +

+    <p>A single backtick in a code span: <code>`</code></p>
+
+    <p>A backtick-delimited string in a code span: <code>`foo`</code></p>

+.Ed +.Pp +With a code span, ampersands and angle brackets are encoded as HTML +entities automatically, which makes it easy to include example HTML +tags. Markdown will turn this: +.Bd -literal -offset indent + Please don't use any `` tags. +.Ed +.Pp +into: +.Bd -literal -offset indent +

+    <p>Please don't use any <code>&lt;blink&gt;</code> tags.</p>

+.Ed +.Pp +You can write this: +.Bd -literal -offset indent + `—` is the decimal-encoded equivalent of `—`. +.Ed +.Pp +to produce: +.Bd -literal -offset indent +

+    <p><code>&amp;#8212;</code> is the decimal-encoded
+    equivalent of <code>&amp;mdash;</code>.</p>

+.Ed +.Pp +.Ss Images +Admittedly, it's fairly difficult to devise a +.Qq natural +syntax for placing images into a plain text document format. +.Pp +Markdown uses an image syntax that is intended to resemble the syntax +for links, allowing for two styles: +.Em inline +and +.Em reference . +.Pp +Inline image syntax looks like this: +.Bd -literal -offset indent + ![Alt text](/path/to/img.jpg) + + ![Alt text](/path/to/img.jpg =Optional size "Optional title") +.Ed +.Pp +That is: +.Bl -bullet +.It +An exclamation mark: `!`; +.It +followed by a set of square brackets, containing the `alt` +attribute text for the image; +.It +followed by a set of parentheses, containing the URL or path to +the image, an optional `size` attribute (in +.Ar width Li c Ar height +format) prefixed with a `=`, +and an optional `title` attribute enclosed in double +or single quotes. +.El +.Pp +Reference-style image syntax looks like this: +.Bd -literal -offset indent + ![Alt text][id] +.Ed +.Pp +Where +.Qq id +is the name of a defined image reference. Image references +are defined using syntax identical to link references: +.Bd -literal -offset indent + [id]: url/to/image =Optional size "Optional title attribute" +.Ed +.Pp +.Sh Miscellaneous +.Ss Automatic Links +.Nm +supports a shortcut style for creating +.Qq automatic +links for URLs and email addresses: simply surround the URL or email +address with angle brackets. What this means is that if you want to + show the actual text of a URL or email address, and also have it be + a clickable link, you can do this: +.Bd -literal -offset indent + +.Ed +.Pp +.Nm +will turn this into: +.Bd -literal -offset indent + http://example.com/ +.Ed +.Pp +Automatic links for email addresses work similarly, except that +Markdown will also perform a bit of randomized decimal and hex +entity-encoding to help obscure your address from address-harvesting +spambots. For example, Markdown will turn this: +.Bd -literal -offset indent + +.Ed +.Pp +into something like this: +.Bd -literal -offset indent + address@exa + mple.com +.Ed +.Pp +which will render in a browser as a clickable link to +.Qq address@example.com . +.Pp +(This sort of entity-encoding trick will indeed fool many, if not +most, address-harvesting bots, but it definitely won't fool all of +them. It's better than nothing, but an address published in this way +will probably eventually start receiving spam.) +.Ss Backslash Escapes +.Nm +allows you to use backslash escapes to generate literal +characters which would otherwise have special meaning in Markdown's +formatting syntax. For example, if you wanted to surround a word with +literal asterisks (instead of an HTML `` tag), you add backslashes +before the asterisks, like this: +.Bd -literal -offset indent + \\*literal asterisks\\* +.Ed +.Pp +.Nm +provides backslash escapes for the following characters: +.Bl -tag -compact +.It \&\ +backslash +.It \` +backtick +.It * +asterisk +.It _ +underscore +.It \{\} +curly braces +.It [] +square brackets +.It () +parentheses +.It # +hash mark +.It + +plus sign +.It \- +minus sign (hyphen) +.It \. +dot +.It \! +exclamation mark +.El +.Sh BUGS +.Nm +assumes that tabs are set to 4 spaces. +.Sh AUTHOR +John Gruber +.%T http://daringfireball.net/ +.Sh SEE ALSO +.Xr markdown 1 , +.Xr markdown 3 , +.Xr mkd-functions 3 , +.Xr mkd-extensions 7 . 
+.Pp +.%T http://daringfireball.net/projects/markdown +.br +.%T http://docutils.sourceforge.net/mirror/setext.html +.br +.%T http://www.aaronsw.com/2002/atx/ +.br +.%T http://textism.com/tools/textile/ +.br +.%T http://docutils.sourceforge.net/rst.html +.br +.%T http://www.triptico.com/software/grutatxt.html +.br +.%T http://ettext.taint.org/doc/ diff --git a/r2/r2/lib/contrib/discount-1.6.0/markdown.c b/r2/r2/lib/contrib/discount-1.6.0/markdown.c new file mode 100644 index 000000000..e021fb770 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/markdown.c @@ -0,0 +1,1101 @@ +/* markdown: a C implementation of John Gruber's Markdown markup language. + * + * Copyright (C) 2007 David L Parsons. + * The redistribution terms are provided in the COPYRIGHT file that must + * be distributed with this source code. + */ +#include +#include +#include +#include +#include +#include + +#include "config.h" + +#include "cstring.h" +#include "markdown.h" +#include "amalloc.h" + +/* block-level tags for passing html blocks through the blender + */ +struct kw { + char *id; + int size; + int selfclose; +} ; + +#define KW(x) { x, sizeof(x)-1, 0 } +#define SC(x) { x, sizeof(x)-1, 1 } + +static struct kw blocktags[] = { KW("!--"), KW("STYLE"), KW("SCRIPT"), + KW("ADDRESS"), KW("BDO"), KW("BLOCKQUOTE"), + KW("CENTER"), KW("DFN"), KW("DIV"), KW("H1"), + KW("H2"), KW("H3"), KW("H4"), KW("H5"), + KW("H6"), KW("LISTING"), KW("NOBR"), + KW("UL"), KW("P"), KW("OL"), KW("DL"), + KW("PLAINTEXT"), KW("PRE"), KW("TABLE"), + KW("WBR"), KW("XMP"), SC("HR"), SC("BR"), + KW("IFRAME"), KW("MAP") }; +#define SZTAGS (sizeof blocktags / sizeof blocktags[0]) +#define MAXTAG 11 /* sizeof "BLOCKQUOTE" */ + +typedef int (*stfu)(const void*,const void*); + +typedef ANCHOR(Paragraph) ParagraphRoot; + + +/* case insensitive string sort (for qsort() and bsearch() of block tags) + */ +static int +casort(struct kw *a, struct kw *b) +{ + if ( a->size != b->size ) + return a->size - b->size; + return strncasecmp(a->id, b->id, b->size); +} + + +/* case insensitive string sort for Footnote tags. + */ +int +__mkd_footsort(Footnote *a, Footnote *b) +{ + int i; + char ac, bc; + + if ( S(a->tag) != S(b->tag) ) + return S(a->tag) - S(b->tag); + + for ( i=0; i < S(a->tag); i++) { + ac = tolower(T(a->tag)[i]); + bc = tolower(T(b->tag)[i]); + + if ( isspace(ac) && isspace(bc) ) + continue; + if ( ac != bc ) + return ac - bc; + } + return 0; +} + + +/* find the first blank character after position + */ +static int +nextblank(Line *t, int i) +{ + while ( (i < S(t->text)) && !isspace(T(t->text)[i]) ) + ++i; + return i; +} + + +/* find the next nonblank character after position + */ +static int +nextnonblank(Line *t, int i) +{ + while ( (i < S(t->text)) && isspace(T(t->text)[i]) ) + ++i; + return i; +} + + +/* find the first nonblank character on the Line. + */ +int +mkd_firstnonblank(Line *p) +{ + return nextnonblank(p,0); +} + + +static int +blankline(Line *p) +{ + return ! 
(p && (S(p->text) > p->dle) ); +} + + +static Line * +skipempty(Line *p) +{ + while ( p && (p->dle == S(p->text)) ) + p = p->next; + return p; +} + + +void +___mkd_tidy(Cstring *t) +{ + while ( S(*t) && isspace(T(*t)[S(*t)-1]) ) + --S(*t); +} + + +static struct kw * +isopentag(Line *p) +{ + int i=0, len; + struct kw key, *ret; + + if ( !p ) return 0; + + len = S(p->text); + + if ( len < 3 || T(p->text)[0] != '<' ) + return 0; + + /* find how long the tag is so we can check to see if + * it's a block-level tag + */ + for ( i=1; i < len && T(p->text)[i] != '>' + && T(p->text)[i] != '/' + && !isspace(T(p->text)[i]); ++i ) + ; + + key.id = T(p->text)+1; + key.size = i-1; + + if ( ret = bsearch(&key, blocktags, SZTAGS, sizeof key, (stfu)casort)) + return ret; + + return 0; +} + + +typedef struct _flo { + Line *t; + int i; +} FLO; + + +static int +flogetc(FLO *f) +{ + if ( f && f->t ) { + if ( f->i < S(f->t->text) ) + return T(f->t->text)[f->i++]; + f->t = f->t->next; + f->i = 0; + return flogetc(f); + } + return EOF; +} + + +static Line * +htmlblock(Paragraph *p, struct kw *tag) +{ + Line *ret; + FLO f = { p->text, 0 }; + int c; + int i, closing, depth=0; + + if ( tag->selfclose || (tag->size >= MAXTAG) ) { + ret = f.t->next; + f.t->next = 0; + return ret; + } + + while ( (c = flogetc(&f)) != EOF ) { + if ( c == '<' ) { + /* tag? */ + c = flogetc(&f); + if ( c == '!' ) { /* comment? */ + if ( flogetc(&f) == '-' && flogetc(&f) == '-' ) { + /* yes */ + while ( (c = flogetc(&f)) != EOF ) { + if ( c == '-' && flogetc(&f) == '-' + && flogetc(&f) == '>') + /* consumed whole comment */ + break; + } + } + } + else { + if ( closing = (c == '/') ) c = flogetc(&f); + + for ( i=0; i < tag->size; c=flogetc(&f) ) { + if ( tag->id[i++] != toupper(c) ) + break; + } + + if ( (i == tag->size) && !isalnum(c) ) { + depth = depth + (closing ? -1 : 1); + if ( depth == 0 ) { + while ( c != EOF && c != '>' ) { + /* consume trailing gunk in close tag */ + c = flogetc(&f); + } + if ( !f.t ) + return 0; + ret = f.t->next; + f.t->next = 0; + return ret; + } + } + } + } + } + return 0; +} + + +static Line * +comment(Paragraph *p) +{ + Line *t, *ret; + + for ( t = p->text; t ; t = t->next) { + if ( strstr(T(t->text), "-->") ) { + ret = t->next; + t->next = 0; + return ret; + } + } + return t; + +} + + +/* tables look like + * header|header{|header} + * ------|------{|......} + * {body lines} + */ +static int +istable(Line *t) +{ + char *p; + Line *dashes = t->next; + int contains = 0; /* found character bits; 0x01 is |, 0x02 is - */ + + /* two lines, first must contain | */ + if ( !(dashes && memchr(T(t->text), '|', S(t->text))) ) + return 0; + + /* second line must contain - or | and nothing + * else except for whitespace or : + */ + for ( p = T(dashes->text)+S(dashes->text)-1; p >= T(dashes->text); --p) + if ( *p == '|' ) + contains |= 0x01; + else if ( *p == '-' ) + contains |= 0x02; + else if ( ! 
((*p == ':') || isspace(*p)) ) + return 0; + + return (contains & 0x03); +} + + +/* footnotes look like ^{0,3}[stuff]: $ + */ +static int +isfootnote(Line *t) +{ + int i; + + if ( ( (i = t->dle) > 3) || (T(t->text)[i] != '[') ) + return 0; + + for ( ++i; i < S(t->text) ; ++i ) { + if ( T(t->text)[i] == '[' ) + return 0; + else if ( T(t->text)[i] == ']' && T(t->text)[i+1] == ':' ) + return 1; + } + return 0; +} + + +static int +isquote(Line *t) +{ + int j; + + for ( j=0; j < 4; j++ ) + if ( T(t->text)[j] == '>' ) + return 1; + else if ( !isspace(T(t->text)[j]) ) + return 0; + return 0; +} + + +static int +dashchar(char c) +{ + return (c == '*') || (c == '-') || (c == '_'); +} + + +static int +iscode(Line *t) +{ + return (t->dle >= 4); +} + + +static int +ishr(Line *t) +{ + int i, count=0; + char dash = 0; + char c; + + if ( iscode(t) ) return 0; + + for ( i = 0; i < S(t->text); i++) { + c = T(t->text)[i]; + if ( (dash == 0) && dashchar(c) ) + dash = c; + + if ( c == dash ) ++count; + else if ( !isspace(c) ) + return 0; + } + return (count >= 3); +} + + +static int +ishdr(Line *t, int *htyp) +{ + int i; + + + /* first check for etx-style ###HEADER### + */ + + /* leading run of `#`'s ? + */ + for ( i=0; T(t->text)[i] == '#'; ++i) + ; + + /* ANY leading `#`'s make this into an ETX header + */ + if ( i && (i < S(t->text) || i > 1) ) { + *htyp = ETX; + return 1; + } + + /* then check for setext-style HEADER + * ====== + */ + + if ( t->next ) { + char *q = T(t->next->text); + int last = S(t->next->text); + + if ( (*q == '=') || (*q == '-') ) { + /* ignore trailing whitespace */ + while ( (last > 1) && isspace(q[last-1]) ) + --last; + + for (i=1; i < last; i++) + if ( q[0] != q[i] ) + return 0; + *htyp = SETEXT; + return 1; + } + } + return 0; +} + + +static int +isdefinition(Line *t) +{ +#if DL_TAG_EXTENSION + return t && t->next + && (S(t->text) > 2) + && (t->dle == 0) + && (T(t->text)[0] == '=') + && (T(t->text)[S(t->text)-1] == '=') + && ( (t->next->dle >= 4) || isdefinition(t->next) ); +#else + return 0; +#endif +} + + +static int +islist(Line *t, int *trim) +{ + int i, j; + char *q; + + if ( iscode(t) || blankline(t) || ishdr(t,&i) || ishr(t) ) + return 0; + + if ( isdefinition(t) ) { + *trim = 4; + return DL; + } + + if ( strchr("*-+", T(t->text)[t->dle]) && isspace(T(t->text)[t->dle+1]) ) { + i = nextnonblank(t, t->dle+1); + *trim = (i > 4) ? 4 : i; + return UL; + } + + if ( (j = nextblank(t,t->dle)) > t->dle ) { + if ( T(t->text)[j-1] == '.' ) { +#if ALPHA_LIST + if ( (j == t->dle + 2) && isalpha(T(t->text)[t->dle]) ) { + j = nextnonblank(t,j); + *trim = j; + return AL; + } +#endif + strtoul(T(t->text)+t->dle, &q, 10); + if ( (q > T(t->text)+t->dle) && (q == T(t->text) + (j-1)) ) { + j = nextnonblank(t,j); + *trim = j; + return OL; + } + } + } + return 0; +} + + +static Line * +headerblock(Paragraph *pp, int htyp) +{ + Line *ret = 0; + Line *p = pp->text; + int i, j; + + switch (htyp) { + case SETEXT: + /* p->text is header, p->next->text is -'s or ='s + */ + pp->hnumber = (T(p->next->text)[0] == '=') ? 
1 : 2; + + ret = p->next->next; + ___mkd_freeLine(p->next); + p->next = 0; + break; + + case ETX: + /* p->text is ###header###, so we need to trim off + * the leading and trailing `#`'s + */ + + for (i=0; (T(p->text)[i] == T(p->text)[0]) && (i < S(p->text)-1); i++) + ; + + pp->hnumber = i; + + while ( (i < S(p->text)) && isspace(T(p->text)[i]) ) + ++i; + + CLIP(p->text, 0, i); + + for (j=S(p->text); (j > 1) && (T(p->text)[j-1] == '#'); --j) + ; + + while ( j && isspace(T(p->text)[j-1]) ) + --j; + + S(p->text) = j; + + ret = p->next; + p->next = 0; + break; + } + return ret; +} + + +static Line * +codeblock(Paragraph *p) +{ + Line *t = p->text, *r; + + for ( ; t; t = r ) { + CLIP(t->text,0,4); + t->dle = mkd_firstnonblank(t); + + if ( !( (r = skipempty(t->next)) && iscode(r)) ) { + ___mkd_freeLineRange(t,r); + t->next = 0; + return r; + } + } + return t; +} + + +static int +centered(Line *first, Line *last) +{ + + if ( first&&last ) { + int len = S(last->text); + + if ( (len > 2) && (strncmp(T(first->text), "->", 2) == 0) + && (strncmp(T(last->text)+len-2, "<-", 2) == 0) ) { + CLIP(first->text, 0, 2); + S(last->text) -= 2; + return CENTER; + } + } + return 0; +} + + +static int +endoftextblock(Line *t, int toplevelblock) +{ + int z; + + if ( blankline(t)||isquote(t)||iscode(t)||ishdr(t,&z)||ishr(t) ) + return 1; + + /* HORRIBLE STANDARDS KLUDGE: Toplevel paragraphs eat absorb adjacent + * list items, but sublevel blocks behave properly. + */ + return toplevelblock ? 0 : islist(t,&z); +} + + +static Line * +textblock(Paragraph *p, int toplevel) +{ + Line *t, *next; + + for ( t = p->text; t ; t = next ) { + if ( ((next = t->next) == 0) || endoftextblock(next, toplevel) ) { + p->align = centered(p->text, t); + t->next = 0; + return next; + } + } + return t; +} + + +/* length of the id: or class: kind in a special div-not-quote block + */ +static int +szmarkerclass(char *p) +{ + if ( strncasecmp(p, "id:", 3) == 0 ) + return 3; + if ( strncasecmp(p, "class:", 6) == 0 ) + return 6; + return 0; +} + + +/* + * check if the first line of a quoted block is the special div-not-quote + * marker %[kind:]name% + */ +static int +isdivmarker(Line *p, int start) +{ +#if DIV_QUOTE + char *s = T(p->text); + int len = S(p->text); + int i; + + if ( !(len && s[start] == '%' && s[len-1] == '%') ) return 0; + + i = szmarkerclass(s+start+1)+start; + len -= start+1; + + while ( ++i < len ) + if ( !isalnum(s[i]) ) + return 0; + + return 1; +#else + return 0; +#endif +} + + +/* + * accumulate a blockquote. + * + * one sick horrible thing about blockquotes is that even though + * it just takes ^> to start a quote, following lines, if quoted, + * assume that the prefix is ``>''. This means that code needs + * to be indented *5* spaces from the leading '>', but *4* spaces + * from the start of the line. This does not appear to be + * documented in the reference implementation, but it's the + * way the markdown sample web form at Daring Fireball works. 
+ */ +static Line * +quoteblock(Paragraph *p) +{ + Line *t, *q; + int qp; + + for ( t = p->text; t ; t = q ) { + if ( isquote(t) ) { + /* clip leading spaces */ + for (qp = 0; T(t->text)[qp] != '>'; qp ++) + /* assert: the first nonblank character on this line + * will be a > + */; + /* clip '>' */ + qp++; + /* clip next space, if any */ + if ( T(t->text)[qp] == ' ' ) + qp++; + CLIP(t->text, 0, qp); + t->dle = mkd_firstnonblank(t); + } + + q = skipempty(t->next); + + if ( (q == 0) || ((q != t->next) && (!isquote(q) || isdivmarker(q,1))) ) { + ___mkd_freeLineRange(t, q); + t = q; + break; + } + } + if ( isdivmarker(p->text,0) ) { + char *prefix = "class"; + int i; + + q = p->text; + p->text = p->text->next; + + if ( (i = szmarkerclass(1+T(q->text))) == 3 ) + /* and this would be an "%id:" prefix */ + prefix="id"; + + if ( p->ident = malloc(4+strlen(prefix)+S(q->text)) ) + sprintf(p->ident, "%s=\"%.*s\"", prefix, S(q->text)-(i+2), + T(q->text)+(i+1) ); + + ___mkd_freeLine(q); + } + return t; +} + + +/* + * A table block starts with a table header (see istable()), and continues + * until EOF or a line that /doesn't/ contain a |. + */ +static Line * +tableblock(Paragraph *p) +{ + Line *t, *q; + + for ( t = p->text; t && (q = t->next); t = t->next ) { + if ( !memchr(T(q->text), '|', S(q->text)) ) { + t->next = 0; + return q; + } + } + return 0; +} + + +static Paragraph *Pp(ParagraphRoot *, Line *, int); +static Paragraph *compile(Line *, int, MMIOT *); + + +/* + * pull in a list block. A list block starts with a list marker and + * runs until the next list marker, the next non-indented paragraph, + * or EOF. You do not have to indent nonblank lines after the list + * marker, but multiple paragraphs need to start with a 4-space indent. + */ +static Line * +listitem(Paragraph *p, int indent) +{ + Line *t, *q; + int clip = indent; + int z; + + for ( t = p->text; t ; t = q) { + CLIP(t->text, 0, clip); + t->dle = mkd_firstnonblank(t); + + if ( (q = skipempty(t->next)) == 0 ) { + ___mkd_freeLineRange(t,q); + return 0; + } + + /* after a blank line, the next block needs to start with a line + * that's indented 4(? -- reference implementation allows a 1 + * character indent, but that has unfortunate side effects here) + * spaces, but after that the line doesn't need any indentation + */ + if ( q != t->next ) { + if (q->dle < indent) { + q = t->next; + t->next = 0; + return q; + } + /* indent as far as the initial line was indented. */ + indent = clip; + } + + if ( (q->dle < indent) && (ishr(q) || islist(q,&z)) && !ishdr(q,&z) ) { + q = t->next; + t->next = 0; + return q; + } + + clip = (q->dle > indent) ? 
indent : q->dle; + } + return t; +} + + +static Line * +listblock(Paragraph *top, int trim, MMIOT *f) +{ + ParagraphRoot d = { 0, 0 }; + Paragraph *p; + Line *q = top->text, *text, *label; + int isdl = (top->typ == DL), + para = 0, + ltype; + + while (( text = q )) { + if ( top->typ == DL ) { + Line *lp; + + for ( lp = label = text; lp ; lp = lp->next ) { + text = lp->next; + CLIP(lp->text, 0, 1); + S(lp->text)--; + if ( !isdefinition(lp->next) ) + lp->next = 0; + } + } + else label = 0; + + p = Pp(&d, text, LISTITEM); + text = listitem(p, trim); + + p->down = compile(p->text, 0, f); + p->text = label; + + if ( para && (top->typ != DL) && p->down ) p->down->align = PARA; + + if ( !(q = skipempty(text)) || ((ltype = islist(q, &trim)) == 0) + || (isdl != (ltype == DL)) ) + break; + + if ( para = (q != text) ) { + Line anchor; + + anchor.next = text; + ___mkd_freeLineRange(&anchor, q); + } + + if ( para && (top->typ != DL) && p->down ) p->down->align = PARA; + } + top->text = 0; + top->down = T(d); + return text; +} + + +static int +tgood(char c) +{ + switch (c) { + case '\'': + case '"': return c; + case '(': return ')'; + } + return 0; +} + + +/* + * add a new (image or link) footnote to the footnote table + */ +static Line* +addfootnote(Line *p, MMIOT* f) +{ + int j, i; + int c; + Line *np = p->next; + + Footnote *foot = &EXPAND(*f->footnotes); + + CREATE(foot->tag); + CREATE(foot->link); + CREATE(foot->title); + foot->height = foot->width = 0; + + for (j=i=p->dle+1; T(p->text)[j] != ']'; j++) + EXPAND(foot->tag) = T(p->text)[j]; + + EXPAND(foot->tag) = 0; + S(foot->tag)--; + j = nextnonblank(p, j+2); + + while ( (j < S(p->text)) && !isspace(T(p->text)[j]) ) + EXPAND(foot->link) = T(p->text)[j++]; + EXPAND(foot->link) = 0; + S(foot->link)--; + j = nextnonblank(p,j); + + if ( T(p->text)[j] == '=' ) { + sscanf(T(p->text)+j, "=%dx%d", &foot->width, &foot->height); + while ( (j < S(p->text)) && !isspace(T(p->text)[j]) ) + ++j; + j = nextnonblank(p,j); + } + + + if ( (j >= S(p->text)) && np && np->dle && tgood(T(np->text)[np->dle]) ) { + ___mkd_freeLine(p); + p = np; + np = p->next; + j = p->dle; + } + + if ( (c = tgood(T(p->text)[j])) ) { + /* Try to take the rest of the line as a comment; read to + * EOL, then shrink the string back to before the final + * quote. + */ + ++j; /* skip leading quote */ + + while ( j < S(p->text) ) + EXPAND(foot->title) = T(p->text)[j++]; + + while ( S(foot->title) && T(foot->title)[S(foot->title)-1] != c ) + --S(foot->title); + if ( S(foot->title) ) /* skip trailing quote */ + --S(foot->title); + EXPAND(foot->title) = 0; + --S(foot->title); + } + + ___mkd_freeLine(p); + return np; +} + + +/* + * allocate a paragraph header, link it to the + * tail of the current document + */ +static Paragraph * +Pp(ParagraphRoot *d, Line *ptr, int typ) +{ + Paragraph *ret = calloc(sizeof *ret, 1); + + ret->text = ptr; + ret->typ = typ; + + return ATTACH(*d, ret); +} + + + +static Line* +consume(Line *ptr, int *eaten) +{ + Line *next; + int blanks=0; + + for (; ptr && blankline(ptr); ptr = next, blanks++ ) { + next = ptr->next; + ___mkd_freeLine(ptr); + } + if ( ptr ) *eaten = blanks; + return ptr; +} + + +/* + * top-level compilation; break the document into + * style, html, and source blocks with footnote links + * weeded out. 
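+ *
+ * (Roughly: html/style blocks and footnote definitions are pulled out
+ * here, and everything else is batched up and handed to compile(),
+ * which splits it into CODE, QUOTE, HDR, HR, list, TABLE, and MARKUP
+ * paragraphs.)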
+ */ +static Paragraph * +compile_document(Line *ptr, MMIOT *f) +{ + ParagraphRoot d = { 0, 0 }; + ANCHOR(Line) source = { 0, 0 }; + Paragraph *p = 0; + struct kw *tag; + int eaten; + + while ( ptr ) { + if ( !(f->flags & DENY_HTML) && (tag = isopentag(ptr)) ) { + /* If we encounter a html/style block, compile and save all + * of the cached source BEFORE processing the html/style. + */ + if ( T(source) ) { + E(source)->next = 0; + p = Pp(&d, 0, SOURCE); + p->down = compile(T(source), 1, f); + T(source) = E(source) = 0; + } + p = Pp(&d, ptr, strcmp(tag->id, "STYLE") == 0 ? STYLE : HTML); + if ( strcmp(tag->id, "!--") == 0 ) + ptr = comment(p); + else + ptr = htmlblock(p, tag); + } + else if ( isfootnote(ptr) ) { + /* footnotes, like cats, sleep anywhere; pull them + * out of the input stream and file them away for + * later processing + */ + ptr = consume(addfootnote(ptr, f), &eaten); + } + else { + /* source; cache it up to wait for eof or the + * next html/style block + */ + ATTACH(source,ptr); + ptr = ptr->next; + } + } + if ( T(source) ) { + /* if there's any cached source at EOF, compile + * it now. + */ + E(source)->next = 0; + p = Pp(&d, 0, SOURCE); + p->down = compile(T(source), 1, f); + } + return T(d); +} + + +/* + * break a collection of markdown input into + * blocks of lists, code, html, and text to + * be marked up. + */ +static Paragraph * +compile(Line *ptr, int toplevel, MMIOT *f) +{ + ParagraphRoot d = { 0, 0 }; + Paragraph *p = 0; + Line *r; + int para = toplevel; + int blocks = 0; + int hdr_type, list_type, indent; + + ptr = consume(ptr, ¶); + + while ( ptr ) { + if ( iscode(ptr) ) { + p = Pp(&d, ptr, CODE); + + if ( f->flags & MKD_1_COMPAT) { + /* HORRIBLE STANDARDS KLUDGE: the first line of every block + * has trailing whitespace trimmed off. + */ + ___mkd_tidy(&p->text->text); + } + + ptr = codeblock(p); + } + else if ( ishr(ptr) ) { + p = Pp(&d, 0, HR); + r = ptr; + ptr = ptr->next; + ___mkd_freeLine(r); + } + else if (( list_type = islist(ptr, &indent) )) { + p = Pp(&d, ptr, list_type); + ptr = listblock(p, indent, f); + } + else if ( isquote(ptr) ) { + p = Pp(&d, ptr, QUOTE); + ptr = quoteblock(p); + p->down = compile(p->text, 1, f); + p->text = 0; + } + else if ( ishdr(ptr, &hdr_type) ) { + p = Pp(&d, ptr, HDR); + ptr = headerblock(p, hdr_type); + } + else if ( istable(ptr) && !(f->flags & (STRICT|NOTABLES)) ) { + p = Pp(&d, ptr, TABLE); + ptr = tableblock(p); + } + else { + p = Pp(&d, ptr, MARKUP); + ptr = textblock(p, toplevel); + } + + if ( (para||toplevel) && !p->align ) + p->align = PARA; + + blocks++; + para = toplevel || (blocks > 1); + ptr = consume(ptr, ¶); + + if ( para && !p->align ) + p->align = PARA; + + } + return T(d); +} + + +static void +initialize() +{ + static int first = 1; + + if ( first-- > 0 ) { + first = 0; + INITRNG(time(0)); + qsort(blocktags, SZTAGS, sizeof blocktags[0], (stfu)casort); + } +} + + +/* + * the guts of the markdown() function, ripped out so I can do + * debugging. + */ + +/* + * prepare and compile `text`, returning a Paragraph tree. 
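+ *
+ * (Note that mkd_compile() is idempotent: a Document that has already
+ * been compiled is left untouched and 1 is returned.  On the first
+ * call it builds the Paragraph tree via compile_document() and sorts
+ * the collected footnotes.)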
+ */ +int +mkd_compile(Document *doc, int flags) +{ + if ( !doc ) + return 0; + + if ( doc->compiled ) + return 1; + + doc->compiled = 1; + memset(doc->ctx, 0, sizeof(MMIOT) ); + doc->ctx->flags = flags & USER_FLAGS; + doc->ctx->base = doc->base; + CREATE(doc->ctx->in); + doc->ctx->footnotes = malloc(sizeof doc->ctx->footnotes[0]); + CREATE(*doc->ctx->footnotes); + + initialize(); + + doc->code = compile_document(T(doc->content), doc->ctx); + qsort(T(*doc->ctx->footnotes), S(*doc->ctx->footnotes), + sizeof T(*doc->ctx->footnotes)[0], + (stfu)__mkd_footsort); + memset(&doc->content, 0, sizeof doc->content); + return 1; +} + diff --git a/r2/r2/lib/contrib/discount-1.6.0/markdown.h b/r2/r2/lib/contrib/discount-1.6.0/markdown.h new file mode 100644 index 000000000..1999152ad --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/markdown.h @@ -0,0 +1,151 @@ +#ifndef _MARKDOWN_D +#define _MARKDOWN_D + +#include "cstring.h" + +/* reference-style links (and images) are stored in an array + * of footnotes. + */ +typedef struct footnote { + Cstring tag; /* the tag for the reference link */ + Cstring link; /* what this footnote points to */ + Cstring title; /* what it's called (TITLE= attribute) */ + int height, width; /* dimensions (for image link) */ + int dealloc; /* deallocation needed? */ +} Footnote; + +/* each input line is read into a Line, which contains the line, + * the offset of the first non-space character [this assumes + * that all tabs will be expanded to spaces!], and a pointer to + * the next line. + */ +typedef struct line { + Cstring text; + struct line *next; + int dle; +} Line; + + +/* a paragraph is a collection of Lines, with links to the next paragraph + * and (if it's a QUOTE, UL, or OL) to the reparsed contents of this + * paragraph. 
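+ *
+ * (For example, a QUOTE paragraph ends up with its re-parsed contents
+ * hanging off ->down while ->text is cleared, and a HDR paragraph
+ * keeps its text in ->text with the header level in ->hnumber.)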
+ */ +typedef struct paragraph { + struct paragraph *next; /* next paragraph */ + struct paragraph *down; /* recompiled contents of this paragraph */ + struct line *text; /* all the text in this paragraph */ + char *ident; /* %id% tag for QUOTE */ + enum { WHITESPACE=0, CODE, QUOTE, MARKUP, + HTML, STYLE, DL, UL, OL, AL, LISTITEM, + HDR, HR, TABLE, SOURCE } typ; + enum { IMPLICIT=0, PARA, CENTER} align; + int hnumber; /* for typ == HDR */ +} Paragraph; + +enum { ETX, SETEXT }; /* header types */ + + +typedef struct block { + enum { bTEXT, bSTAR, bUNDER } b_type; + int b_count; + char b_char; + Cstring b_text; + Cstring b_post; +} block; + +typedef STRING(block) Qblock; + + +/* a magic markdown io thing holds all the data structures needed to + * do the backend processing of a markdown document + */ +typedef char* (*e_func)(const char*, const int, void*); + +typedef struct mmiot { + Cstring out; + Cstring in; + Qblock Q; + int isp; + STRING(Footnote) *footnotes; + int flags; +#define DENY_A 0x0001 +#define DENY_IMG 0x0002 +#define DENY_SMARTY 0x0004 +#define DENY_HTML 0x0008 +#define STRICT 0x0010 +#define INSIDE_TAG 0x0020 +#define NO_PSEUDO_PROTO 0x0040 +#define CDATA_OUTPUT 0x0080 +#define NOTABLES 0x0400 +#define TOC 0x1000 +#define MKD_1_COMPAT 0x2000 +#define AUTOLINK 0x4000 +#define SAFELINK 0x8000 +#define USER_FLAGS 0xFCFF +#define EMBEDDED DENY_A|DENY_IMG|NO_PSEUDO_PROTO|CDATA_OUTPUT + char *base; + void *e_context; + e_func e_url, e_flags; + void (*e_free)(void*,void*); +} MMIOT; + + +/* + * the mkdio text input functions return a document structure, + * which contains a header (retrieved from the document if + * markdown was configured * with the * --enable-pandoc-header + * and the document begins with a pandoc-style header) and the + * root of the linked list of Lines. + */ +typedef struct document { + Line *headers; /* title -> author(s) -> date */ + ANCHOR(Line) content; /* uncompiled text, not valid after compile() */ + Paragraph *code; /* intermediate code generated by compile() */ + int compiled; /* set after mkd_compile() */ + int html; /* set after (internal) htmlify() */ + int tabstop; /* for properly expanding tabs (ick) */ + MMIOT *ctx; /* backend buffers, flags, and structures */ + char *base; /* url basename for url fragments */ +} Document; + + +extern int mkd_firstnonblank(Line *); +extern int mkd_compile(Document *, int); +extern int mkd_document(Document *, char **); +extern int mkd_generatehtml(Document *, FILE *); +extern int mkd_css(Document *, char **); +extern int mkd_generatecss(Document *, FILE *); +#define mkd_style mkd_generatecss +extern int mkd_xml(char *, int , char **); +extern int mkd_generatexml(char *, int, FILE *); +extern void mkd_cleanup(Document *); +extern int mkd_line(char *, int, char **, int); +extern int mkd_generateline(char *, int, FILE*, int); +#define mkd_text mkd_generateline +extern void mkd_basename(Document*, char *); +extern void mkd_string_to_anchor(char*,int, void(*)(int,void*), void*); + +extern Document *mkd_in(FILE *, int); +extern Document *mkd_string(char*,int, int); + +#define NO_HEADER 0x0100 +#define STD_TABSTOP 0x0200 +#define INPUT_MASK (NO_HEADER|STD_TABSTOP) + + +/* internal resource handling functions. 
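+ *
+ * (___mkd_freeLine releases a single Line, ___mkd_freeLines a whole
+ * chain, and ___mkd_freeLineRange the Lines strictly between its two
+ * arguments; all three are defined in resource.c.)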
+ */ +extern void ___mkd_freeLine(Line *); +extern void ___mkd_freeLines(Line *); +extern void ___mkd_freeParagraph(Paragraph *); +extern void ___mkd_freefootnote(Footnote *); +extern void ___mkd_freefootnotes(MMIOT *); +extern void ___mkd_initmmiot(MMIOT *, void *); +extern void ___mkd_freemmiot(MMIOT *, void *); +extern void ___mkd_freeLineRange(Line *, Line *); +extern void ___mkd_xml(char *, int, FILE *); +extern void ___mkd_reparse(char *, int, int, MMIOT*); +extern void ___mkd_emblock(MMIOT*); +extern void ___mkd_tidy(Cstring *); + +#endif/*_MARKDOWN_D*/ diff --git a/r2/r2/lib/contrib/discount-1.6.0/mkd-extensions.7 b/r2/r2/lib/contrib/discount-1.6.0/mkd-extensions.7 new file mode 100644 index 000000000..91d9156cf --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/mkd-extensions.7 @@ -0,0 +1,178 @@ +.\" +.Dd Dec 22, 2007 +.Dt MKD-EXTENSIONS 7 +.Os MASTODON +.Sh NAME +.Nm mkd-extensions +.Nd Extensions to the Markdown text formatting syntax +.Sh DESCRIPTION +This version of markdown has been extended in a few ways by +extending existing markup, creating new markup from scratch, +and borrowing markup from other markup languages. +.Ss Image dimensions +Markdown embedded images have been extended to allow specifying +the dimensions of the image by adding a new argument +.Em =/height/x/width/ +to the link description. +.Pp +The new image syntax is +.nf + ![alt text](image =/height/x/width/ "title") +.fi +.Ss pseudo-protocols +Three pseudo-protocols have been added to links +.Bl -tag -width XXXXX +.It Ar id: +The +.Ar "alt text" +is marked up and written to the output, wrapped with +.Em "" +and +.Em "" . +.It Ar class: +The +.Ar "alt text" +is marked up and written to the output, wrapped with +.Em "" +and +.Em "" . +.It Ar raw: +The +.Ar title +is written +.Em -- with no further processing -- +to the output. The +.Ar "alt text" +is discarded. +.It Ar abbr: +The +.Ar "alt text" +is marked up and written to the output, wrapped with +.Em " +and +.Em "" . +.El +.Ss Pandoc headers +If markdown was configured with +.Ar --enable-pandoc-header , +the markdown source document can have a 3-line +.Xr Pandoc +header in the format of +.nf + % title + % author(s) + % date +.fi +which will be made available to the +.Fn mkd_doc_title , +.Fn mkd_doc_author , +and +.Fn mkd_doc_date +functions. +.Ss Definition lists +If markdown was configured with +.Ar --enable-dl-tag , +markup for definition lists is enabled. A definition list item +is defined as +.nf +=tag= + description +.fi +(that is a +.Ar = , +followed by text, another +.Ar = , +a newline, 4 spaces of intent, and then more text.) +.Pp +.Ss embedded stylesheets +Stylesheets may be defined and modified in a +.Em +at the end of the line or a +.Em +at the beginning of a subsequent line. +.Pp +Be warned that style blocks work like footnote links -- no matter +where you define them they are valid for the entire document. +.Ss relaxed emphasis +If markdown was configured with +.Ar --relaxed-emphasis , +the rules for emphasis are changed so that a single +.Ar _ +will +.Em not +count as a emphasis character if it's in the middle of a word. +This is primarily for documenting code, if you don't wish to +have to backquote all code references. +.Ss alpha lists +If markdown was configured with +.Ar --enable-alpha-list , +alphabetic lists (like regular numeric lists, but with alphabetic +items) are supported. So: +.nf + a. this + b. is + c. an alphabetic + d. list +.fi +will produce: +.nf +
+<ol type=a>
+<li>this</li>
+<li>is</li>
+<li>an alphabetic</li>
+<li>list</li>
+</ol>
+.fi +.Ss tables +.Ar PHP Markdown Extra -style +tables are supported; input of the form +.nf + header|header + ------|------ + text | text +.fi +will produce: +.nf + + + + + + + + + + + + + +
headerheader
texttext
+.fi +The dashed line can also contain +.Em : +characters for formatting; if a +.Em : +is at the start of a column, it tells +.Nm discount +to align the cell contents to the left; if it's at the end, it +aligns right, and if there's one at the start and at the +end, it centers. +.Sh AUTHOR +David Parsons +.%T http://www.pell.portland.or.us/~orc/ +.Sh SEE ALSO +.Xr markdown 1 , +.Xr markdown 3 , +.Xr mkd-functions 3 , +.Xr mkd-line 3 , +.Xr mkd-extensions 7 . +.Pp +.%T http://daringfireball.net/projects/markdown +.Pp +.%T http://michelf.com/projects/php-markdown diff --git a/r2/r2/lib/contrib/discount-1.6.0/mkd-functions.3 b/r2/r2/lib/contrib/discount-1.6.0/mkd-functions.3 new file mode 100644 index 000000000..2bbaf2af5 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/mkd-functions.3 @@ -0,0 +1,180 @@ +.\" +.Dd January 18, 2008 +.Dt MKD_FUNCTIONS 3 +.Os Mastodon +.Sh NAME +.Nm mkd_functions +.Nd access and process Markdown documents. +.Sh LIBRARY +Markdown +.Pq libmarkdown , -lmarkdown +.Sh SYNOPSIS +.Fd #include +.Ft int +.Fn mkd_compile "MMIOT *document" "int flags" +.Ft int +.Fn mkd_css "MMIOT *document" "char **doc" +.Ft int +.Fn mkd_generatecss "MMIOT *document" "FILE *output" +.Ft int +.Fn mkd_document "MMIOT *document" "char **doc" +.Ft int +.Fn mkd_generatehtml "MMIOT *document" "FILE *output" +.Ft int +.Fn mkd_xhtmlpage "MMIOT *document" "int flags" "FILE *output" +.Ft int +.Fn mkd_toc "MMIOT *document" "char **doc" +.Ft void +.Fn mkd_generatetoc "MMIOT *document" "FILE *output" +.Ft void +.Fn mkd_cleanup "MMIOT*" +.Ft char* +.Fn mkd_doc_title "MMIOT*" +.Ft char* +.Fn mkd_doc_author "MMIOT*" +.Ft char* +.Fn mkd_doc_date "MMIOT*" +.Sh DESCRIPTION +.Pp +The +.Nm markdown +format supported in this implementation includes +Pandoc-style header and inline +.Ar \ +blocks, and the standard +.Xr markdown 3 +functions do not provide access to +the data provided by either of those extensions. +These functions give you access to that data, plus +they provide a finer-grained way of converting +.Em Markdown +documents into HTML. +.Pp +Given a +.Ar MMIOT* +generated by +.Fn mkd_in +or +.Fn mkd_string , +.Fn mkd_compile +compiles the document into +.Em \ , +.Em Pandoc , +and +.Em html +sections. +.Pp +Once compiled, the document can be examined and written +by the +.Fn mkd_css , +.Fn mkd_document , +.Fn mkd_generatecss , +.Fn mkd_generatehtml , +.Fn mkd_generatetoc , +.Fn mkd_toc , +.Fn mkd_xhtmlpage , +.Fn mkd_doc_title , +.Fn mkd_doc_author , +and +.Fn mkd_doc_date +functions. +.Pp +.Fn mkd_css +allocates a string and populates it with any \ sections +provided in the document, +.Fn mkd_generatecss +writes any \ sections to the output, +.Fn mkd_document +points +.Ar text +to the text of the document and returns the +size of the document, +.Fn mkd_generatehtml +writes the rest of the document to the output, +and +.Fn mkd_doc_title , +.Fn mkd_doc_author , +.Fn mkd_doc_date +are used to read the contents of a Pandoc header, +if any. +.Pp +.Fn mkd_xhtmlpage +writes a xhtml page containing the document. The regular set of +flags can be passed. +.Pp +.Fn mkd_toc +writes a document outline, in the form of a collection of nested +lists with links to each header in the document, into a string +allocated with +.Fn malloc , +and returns the size. +.Pp +.Fn mkd_generatetoc +is like +.Fn mkd_toc , +except that it writes the document outline to the given +.Pa FILE* +argument. +.Pp +.Fn mkd_cleanup +deletes a +.Ar MMIOT* +after processing is done. 
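+.Pp
+As a rough sketch of the intended call sequence, assuming an already
+opened input stream
+.Ar in
+and output stream
+.Ar out :
+.nf
+    #include <mkdio.h>
+
+    /* in and out are assumed to be already-opened FILE pointers */
+    MMIOT *doc = mkd_in(in, 0);
+
+    if ( doc && mkd_compile(doc, 0) ) {
+        mkd_generatehtml(doc, out);
+        mkd_cleanup(doc);
+    }
+.fi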
+.Pp +.Fn mkd_compile +accepts the same flags that +.Fn markdown +and +.Fn mkd_string +do; +.Bl -tag -width MKD_NOIMAGE -compact +.It Ar MKD_NOIMAGE +Do not process `![]' and +remove +.Em \ +tags from the output. +.It Ar MKD_NOLINKS +Do not process `[]' and remove +.Em \ +tags from the output. +.It Ar MKD_NOPANTS +Do not do Smartypants-style mangling of quotes, dashes, or ellipses. +.It Ar MKD_TAGTEXT +Process the input as if you were inside a html tag. This means that +no html tags will be generated, and +.Fn mkd_compile +will attempt to escape anything that might terribly confuse a +web browser. +.It Ar MKD_NO_EXT +Do not process any markdown pseudo-protocols when +handing +.Ar [][] +links. +.It Ar MKD_NOHEADER +Do not attempt to parse any Pandoc-style headers. +.It Ar MKD_TOC +Label all headers for use with the +.Fn mkd_generatetoc +function. +.It Ar MKD_1_COMPAT +MarkdownTest_1.0 compatability flag; trim trailing spaces from the +first line of code blocks and disable implicit reference links. +.El +.Sh RETURN VALUES +The functions +.Fn mkd_compile , +.Fn mkd_style , +and +.Fn mkd_generatehtml +return 0 on success, -1 on failure. +.Sh SEE ALSO +.Xr markdown 1 , +.Xr markdown 3 , +.Xr mkd-line 3 , +.Xr markdown 7 , +.Xr mkd-extensions 7 , +.Xr mmap 2 . +.Pp +http://daringfireball.net/projects/markdown/syntax +.Sh BUGS +Error handling is minimal at best. diff --git a/r2/r2/lib/contrib/discount-1.6.0/mkd-line.3 b/r2/r2/lib/contrib/discount-1.6.0/mkd-line.3 new file mode 100644 index 000000000..7a3544623 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/mkd-line.3 @@ -0,0 +1,41 @@ +.\" +.Dd January 18, 2008 +.Dt MKD_LINE 3 +.Os Mastodon +.Sh NAME +.Nm mkd_line +.Nd do Markdown translation of small items +.Sh LIBRARY +Markdown +.Pq libmarkdown , -lmarkdown +.Sh SYNOPSIS +.Fd #include +.Ft int +.Fn mkd_line "char *string" "int size" "char **doc" "int flags" +.Ft int +.Fn mkd_generateline "char *string" "int size" "FILE *output" "int flags" +.Sh DESCRIPTION +.Pp +Occasionally one might want to do markdown translations on fragments of +data, like the title of an weblog article, a date, or a simple signature +line. +.Nm mkd_line +and +.Nm mkd_generateline +allow you to do markdown translations on small blocks of text. +.Nm mkd_line +allocates a buffer, then writes the translated text into that buffer, +and +.Nm mkd_generateline +writes the output to the specified +.Ar FILE* . +.Sh SEE ALSO +.Xr markdown 1 , +.Xr markdown 3 , +.Xr markdown 7 , +.Xr mkd-extensions 7 , +.Xr mmap 2 . +.Pp +http://daringfireball.net/projects/markdown/syntax +.Sh BUGS +Error handling is minimal at best. diff --git a/r2/r2/lib/contrib/discount-1.6.0/mkd2html.c b/r2/r2/lib/contrib/discount-1.6.0/mkd2html.c new file mode 100644 index 000000000..ae1532006 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/mkd2html.c @@ -0,0 +1,185 @@ +/* + * mkd2html: parse a markdown input file and generate a web page. + * + * usage: mkd2html [options] filename + * or mkd2html [options] < markdown > html + * + * options + * -css css-file + * -header line-to-add-to-
+ * -footer line-to-add-before- + * + * example: + * + * mkd2html -cs /~orc/pages.css syntax + * ( read syntax OR syntax.text, write syntax.html ) + */ +/* + * Copyright (C) 2007 David L Parsons. + * The redistribution terms are provided in the COPYRIGHT file that must + * be distributed with this source code. + */ +#include "config.h" + +#include +#include +#include +#ifdef HAVE_BASENAME +# ifdef HAVE_LIBGEN_H +# include +# else +# include +# endif +#endif +#include + +#include "mkdio.h" +#include "cstring.h" +#include "amalloc.h" + +char *pgm = "mkd2html"; + +#ifndef HAVE_BASENAME +char * +basename(char *path) +{ + char *p; + + if (( p = strrchr(path, '/') )) + return 1+p; + return path; +} +#endif + +void +fail(char *why, ...) +{ + va_list ptr; + + va_start(ptr,why); + fprintf(stderr, "%s: ", pgm); + vfprintf(stderr, why, ptr); + fputc('\n', stderr); + va_end(ptr); + exit(1); +} + + +void +main(argc, argv) +char **argv; +{ + char *h; + char *source = 0, *dest = 0; + MMIOT *mmiot; + int i; + FILE *input, *output; + STRING(char*) css, headers, footers; + + + CREATE(css); + CREATE(headers); + CREATE(footers); + pgm = basename(argv[0]); + + while ( argc > 2 ) { + if ( strcmp(argv[1], "-css") == 0 ) { + EXPAND(css) = argv[2]; + argc -= 2; + argv += 2; + } + else if ( strcmp(argv[1], "-header") == 0 ) { + EXPAND(headers) = argv[2]; + argc -= 2; + argv += 2; + } + else if ( strcmp(argv[1], "-footer") == 0 ) { + EXPAND(footers) = argv[2]; + argc -= 2; + argv += 2; + } + } + + + if ( argc > 1 ) { + char *p, *dot; + + source = malloc(strlen(argv[1]) + 6); + dest = malloc(strlen(argv[1]) + 6); + + if ( !(source && dest) ) + fail("out of memory allocating name buffers"); + + strcpy(source, argv[1]); + if (( p = strrchr(source, '/') )) + p = source; + else + ++p; + + if ( (input = fopen(source, "r")) == 0 ) { + strcat(source, ".text"); + if ( (input = fopen(source, "r")) == 0 ) + fail("can't open either %s or %s", argv[1], source); + } + strcpy(dest, source); + + if (( dot = strrchr(dest, '.') )) + *dot = 0; + strcat(dest, ".html"); + + if ( (output = fopen(dest, "w")) == 0 ) + fail("can't write to %s", dest); + } + else { + input = stdin; + output = stdout; + } + + if ( (mmiot = mkd_in(input, 0)) == 0 ) + fail("can't read %s", source ? source : "stdin"); + + if ( !mkd_compile(mmiot, 0) ) + fail("couldn't compile input"); + + + h = mkd_doc_title(mmiot); + + /* print a header */ + + fprintf(output, + "\n" + "\n" + "\n" + " \n", markdown_version); + + fprintf(output," "); + + for ( i=0; i < S(css); i++ ) + fprintf(output, " \n", T(css)[i]); + + if ( h ) { + fprintf(output," "); + mkd_generateline(h, strlen(h), output, 0); + fprintf(output, "\n"); + } + for ( i=0; i < S(headers); i++ ) + fprintf(output, " %s\n", T(headers)[i]); + fprintf(output, "\n" + "\n"); + + /* print the compiled body */ + + mkd_generatehtml(mmiot, output); + + for ( i=0; i < S(footers); i++ ) + fprintf(output, "%s\n", T(footers)[i]); + + fprintf(output, "\n" + "\n"); + + mkd_cleanup(mmiot); + exit(0); +} diff --git a/r2/r2/lib/contrib/discount-1.6.0/mkdio.c b/r2/r2/lib/contrib/discount-1.6.0/mkdio.c new file mode 100644 index 000000000..86542b73a --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/mkdio.c @@ -0,0 +1,339 @@ +/* + * mkdio -- markdown front end input functions + * + * Copyright (C) 2007 David L Parsons. + * The redistribution terms are provided in the COPYRIGHT file that must + * be distributed with this source code. 
+ */ +#include "config.h" +#include +#include +#include + +#include "cstring.h" +#include "markdown.h" +#include "amalloc.h" + +typedef ANCHOR(Line) LineAnchor; + +/* create a new blank Document + */ +static Document* +new_Document() +{ + Document *ret = calloc(sizeof(Document), 1); + + if ( ret ) { + if (( ret->ctx = calloc(sizeof(MMIOT), 1) )) + return ret; + free(ret); + } + return 0; +} + + +/* add a line to the markdown input chain + */ +static void +queue(Document* a, Cstring *line) +{ + Line *p = calloc(sizeof *p, 1); + unsigned char c; + int xp = 0; + int size = S(*line); + unsigned char *str = (unsigned char*)T(*line); + + CREATE(p->text); + ATTACH(a->content, p); + + while ( size-- ) { + if ( (c = *str++) == '\t' ) { + /* expand tabs into ->tabstop spaces. We use ->tabstop + * because the ENTIRE FREAKING COMPUTER WORLD uses editors + * that don't do ^T/^D, but instead use tabs for indentation, + * and, of course, set their tabs down to 4 spaces + */ + do { + EXPAND(p->text) = ' '; + } while ( ++xp % a->tabstop ); + } + else if ( c >= ' ' ) { + EXPAND(p->text) = c; + ++xp; + } + } + EXPAND(p->text) = 0; + S(p->text)--; + p->dle = mkd_firstnonblank(p); +} + + +#ifdef PANDOC_HEADER +/* trim leading blanks from a header line + */ +static void +snip(Line *p) +{ + CLIP(p->text, 0, 1); + p->dle = mkd_firstnonblank(p); +} +#endif + + +/* build a Document from any old input. + */ +typedef int (*getc_func)(void*); + +Document * +populate(getc_func getc, void* ctx, int flags) +{ + Cstring line; + Document *a = new_Document(); + int c; +#ifdef PANDOC_HEADER + int pandoc = 0; +#endif + + if ( !a ) return 0; + + a->tabstop = (flags & STD_TABSTOP) ? 4 : TABSTOP; + + CREATE(line); + + while ( (c = (*getc)(ctx)) != EOF ) { + if ( c == '\n' ) { +#ifdef PANDOC_HEADER + if ( pandoc != EOF && pandoc < 3 ) { + if ( S(line) && (T(line)[0] == '%') ) + pandoc++; + else + pandoc = EOF; + } +#endif + queue(a, &line); + S(line) = 0; + } + else if ( isprint(c) || isspace(c) || (c & 0x80) ) + EXPAND(line) = c; + } + + if ( S(line) ) + queue(a, &line); + + DELETE(line); + +#ifdef PANDOC_HEADER + if ( (pandoc == 3) && !(flags & NO_HEADER) ) { + /* the first three lines started with %, so we have a header. + * clip the first three lines out of content and hang them + * off header. + */ + a->headers = T(a->content); + T(a->content) = a->headers->next->next->next; + a->headers->next->next->next = 0; + snip(a->headers); + snip(a->headers->next); + snip(a->headers->next->next); + } +#endif + + return a; +} + + +/* convert a file into a linked list + */ +Document * +mkd_in(FILE *f, int flags) +{ + return populate((getc_func)fgetc, f, flags & INPUT_MASK); +} + + +/* return a single character out of a buffer + */ +struct string_ctx { + char *data; /* the unread data */ + int size; /* and how much is there? 
*/ +} ; + + +static int +strget(struct string_ctx *in) +{ + if ( !in->size ) return EOF; + + --(in->size); + + return *(in->data)++; +} + + +/* convert a block of text into a linked list + */ +Document * +mkd_string(char *buf, int len, int flags) +{ + struct string_ctx about; + + about.data = buf; + about.size = len; + + return populate((getc_func)strget, &about, flags & INPUT_MASK); +} + + +/* write the html to a file (xmlified if necessary) + */ +int +mkd_generatehtml(Document *p, FILE *output) +{ + char *doc; + int szdoc; + + if ( (szdoc = mkd_document(p, &doc)) != EOF ) { + if ( p->ctx->flags & CDATA_OUTPUT ) + mkd_generatexml(doc, szdoc, output); + else + fwrite(doc, szdoc, 1, output); + putc('\n', output); + return 0; + } + return -1; +} + + +/* convert some markdown text to html + */ +int +markdown(Document *document, FILE *out, int flags) +{ + if ( mkd_compile(document, flags) ) { + mkd_generatehtml(document, out); + mkd_cleanup(document); + return 0; + } + return -1; +} + + +void +mkd_basename(Document *document, char *base) +{ + if ( document ) + document->base = base; +} + + +/* write out a Cstring, mangled into a form suitable for ` 0; ) { + c = *s++; + if ( c == ' ' || c == '&' || c == '<' || c == '"' ) + (*outchar)('+', out); + else if ( isalnum(c) || ispunct(c) || (c & 0x80) ) + (*outchar)(c, out); + else + (*outchar)('~',out); + } +} + + +/* ___mkd_reparse() a line + */ +static void +mkd_parse_line(char *bfr, int size, MMIOT *f, int flags) +{ + ___mkd_initmmiot(f, 0); + f->flags = flags & USER_FLAGS; + ___mkd_reparse(bfr, size, 0, f); + ___mkd_emblock(f); +} + + +/* ___mkd_reparse() a line, returning it in malloc()ed memory + */ +int +mkd_line(char *bfr, int size, char **res, int flags) +{ + MMIOT f; + int len; + + mkd_parse_line(bfr, size, &f, flags); + + if ( len = S(f.out) ) { + /* kludge alert; we know that T(f.out) is malloced memory, + * so we can just steal it away. This is awful -- there + * should be an opaque method that transparently moves + * the pointer out of the embedded Cstring. 
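+ *
+ * (Callers should note that the string handed back in *res is
+ * malloc()ed storage and is theirs to free() when they are done
+ * with it.)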
+ */ + *res = T(f.out); + T(f.out) = 0; + S(f.out) = 0; + } + else { + *res = 0; + len = EOF; + } + ___mkd_freemmiot(&f, 0); + return len; +} + + +/* ___mkd_reparse() a line, writing it to a FILE + */ +int +mkd_generateline(char *bfr, int size, FILE *output, int flags) +{ + MMIOT f; + + mkd_parse_line(bfr, size, &f, flags); + if ( flags & CDATA_OUTPUT ) + mkd_generatexml(T(f.out), S(f.out), output); + else + fwrite(T(f.out), S(f.out), 1, output); + + ___mkd_freemmiot(&f, 0); + return 0; +} + + +/* set the url display callback + */ +void +mkd_e_url(Document *f, e_func edit) +{ + if ( f && f->ctx ) f->ctx->e_url = edit; +} + + +/* set the url options callback + */ +void +mkd_e_flags(Document *f, e_func edit) +{ + if ( f && f->ctx ) f->ctx->e_flags = edit; +} + + +/* set the url display/options deallocator + */ +void +mkd_e_free(Document *f, void (*dealloc)(void*,void*)) +{ + if ( f && f->ctx ) f->ctx->e_free = dealloc; +} + + +/* set the url display/options context field + */ +void +mkd_e_context(Document *f, void *context) +{ + if ( f && f->ctx ) f->ctx->e_context = context; +} diff --git a/r2/r2/lib/contrib/discount-1.6.0/mkdio.h b/r2/r2/lib/contrib/discount-1.6.0/mkdio.h new file mode 100644 index 000000000..fc9ac3e7a --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/mkdio.h @@ -0,0 +1,87 @@ +#ifndef _MKDIO_D +#define _MKDIO_D + +#include + +typedef void MMIOT; + +/* line builder for markdown() + */ +MMIOT *mkd_in(FILE*,int); /* assemble input from a file */ +MMIOT *mkd_string(char*,int,int); /* assemble input from a buffer */ + +void mkd_basename(MMIOT*,char*); + +/* compilation, debugging, cleanup + */ +int mkd_compile(MMIOT*, int); +int mkd_cleanup(MMIOT*); + +/* markup functions + */ +int mkd_dump(MMIOT*, FILE*, int, char*); +int markdown(MMIOT*, FILE*, int); +int mkd_line(char *, int, char **, int); +void mkd_string_to_anchor(char *, int, int (*)(int,void*), void*); +int mkd_xhtmlpage(MMIOT*,int,FILE*); + +/* header block access + */ +char* mkd_doc_title(MMIOT*); +char* mkd_doc_author(MMIOT*); +char* mkd_doc_date(MMIOT*); + +/* compiled data access + */ +int mkd_document(MMIOT*, char**); +int mkd_toc(MMIOT*, char**); +int mkd_css(MMIOT*, char **); +int mkd_xml(char *, int, char **); + +/* write-to-file functions + */ +int mkd_generatehtml(MMIOT*,FILE*); +int mkd_generatetoc(MMIOT*,FILE*); +int mkd_generatexml(char *, int,FILE*); +int mkd_generatecss(MMIOT*,FILE*); +#define mkd_style mkd_generatecss +int mkd_generateline(char *, int, FILE*, int); +#define mkd_text mkd_generateline + +/* url generator callbacks + */ +typedef char * (e_func)(char*, int, void*); +void mkd_e_url(void *, e_func); +void mkd_e_flags(void *, e_func); +void mkd_e_free(void *, void (*dealloc)(void*, void*) ); +void mkd_e_context(void *, void *); + +/* version#. 
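+ * (a human-readable version string; mkd2html.c, for instance,
+ * interpolates it into the page headers it generates.)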
+ */ +extern char markdown_version[]; + +/* special flags for markdown() and mkd_text() + */ +#define MKD_NOLINKS 0x0001 /* don't do link processing, block tags */ +#define MKD_NOIMAGE 0x0002 /* don't do image processing, block */ +#define MKD_NOPANTS 0x0004 /* don't run smartypants() */ +#define MKD_NOHTML 0x0008 /* don't allow raw html through AT ALL */ +#define MKD_STRICT 0x0010 /* disable SUPERSCRIPT, RELAXED_EMPHASIS */ +#define MKD_TAGTEXT 0x0020 /* process text inside an html tag; no + * , no , no html or [] expansion */ +#define MKD_NO_EXT 0x0040 /* don't allow pseudo-protocols */ +#define MKD_CDATA 0x0080 /* generate code for xml ![CDATA[...]] */ +#define MKD_NOTABLES 0x0400 /* disallow tables */ +#define MKD_TOC 0x1000 /* do table-of-contents processing */ +#define MKD_1_COMPAT 0x2000 /* compatability with MarkdownTest_1.0 */ +#define MKD_AUTOLINK 0x4000 /* make http://foo.com link even without <>s */ +#define MKD_SAFELINK 0x8000 /* paranoid check for link protocol */ +#define MKD_EMBED MKD_NOLINKS|MKD_NOIMAGE|MKD_TAGTEXT + +/* special flags for mkd_in() and mkd_string() + */ +#define MKD_NOHEADER 0x0100 /* don't process header blocks */ +#define MKD_TABSTOP 0x0200 /* expand tabs to 4 spaces */ + + +#endif/*_MKDIO_D*/ diff --git a/r2/r2/lib/contrib/discount-1.6.0/resource.c b/r2/r2/lib/contrib/discount-1.6.0/resource.c new file mode 100644 index 000000000..3e5628a96 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/resource.c @@ -0,0 +1,155 @@ +/* markdown: a C implementation of John Gruber's Markdown markup language. + * + * Copyright (C) 2007 David L Parsons. + * The redistribution terms are provided in the COPYRIGHT file that must + * be distributed with this source code. + */ +#include +#include +#include +#include +#include +#include + +#include "config.h" + +#include "cstring.h" +#include "markdown.h" +#include "amalloc.h" + +/* free a (single) line + */ +void +___mkd_freeLine(Line *ptr) +{ + DELETE(ptr->text); + free(ptr); +} + + +/* free a list of lines + */ +void +___mkd_freeLines(Line *p) +{ + if (p->next) + ___mkd_freeLines(p->next); + ___mkd_freeLine(p); +} + + +/* bye bye paragraph. + */ +void +___mkd_freeParagraph(Paragraph *p) +{ + if (p->next) + ___mkd_freeParagraph(p->next); + if (p->down) + ___mkd_freeParagraph(p->down); + if (p->text) + ___mkd_freeLines(p->text); + if (p->ident) + free(p->ident); + free(p); +} + + +/* bye bye footnote. + */ +void +___mkd_freefootnote(Footnote *f) +{ + DELETE(f->tag); + DELETE(f->link); + DELETE(f->title); +} + + +/* bye bye footnotes. + */ +void +___mkd_freefootnotes(MMIOT *f) +{ + int i; + + if ( f->footnotes ) { + for (i=0; i < S(*f->footnotes); i++) + ___mkd_freefootnote( &T(*f->footnotes)[i] ); + DELETE(*f->footnotes); + free(f->footnotes); + } +} + + +/* initialize a new MMIOT + */ +void +___mkd_initmmiot(MMIOT *f, void *footnotes) +{ + if ( f ) { + memset(f, 0, sizeof *f); + CREATE(f->in); + CREATE(f->out); + CREATE(f->Q); + if ( footnotes ) + f->footnotes = footnotes; + else { + f->footnotes = malloc(sizeof f->footnotes[0]); + CREATE(*f->footnotes); + } + } +} + + +/* free the contents of a MMIOT, but leave the object alone. + */ +void +___mkd_freemmiot(MMIOT *f, void *footnotes) +{ + if ( f ) { + DELETE(f->in); + DELETE(f->out); + DELETE(f->Q); + if ( f->footnotes != footnotes ) + ___mkd_freefootnotes(f); + memset(f, 0, sizeof *f); + } +} + + +/* free lines up to an barrier. 
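+ *
+ * (i.e. every Line after `anchor' up to, but not including, `stop'
+ * is released, and anchor->next is reset to 0.)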
+ */ +void +___mkd_freeLineRange(Line *anchor, Line *stop) +{ + Line *r = anchor->next; + + if ( r != stop ) { + while ( r && (r->next != stop) ) + r = r->next; + if ( r ) r->next = 0; + ___mkd_freeLines(anchor->next); + } + anchor->next = 0; +} + + +/* clean up everything allocated in __mkd_compile() + */ +void +mkd_cleanup(Document *doc) +{ + if ( doc ) { + if ( doc->ctx ) { + ___mkd_freemmiot(doc->ctx, 0); + free(doc->ctx); + } + + if ( doc->code) ___mkd_freeParagraph(doc->code); + if ( doc->headers ) ___mkd_freeLines(doc->headers); + if ( T(doc->content) ) ___mkd_freeLines(T(doc->content)); + memset(doc, 0, sizeof doc[0]); + free(doc); + } +} diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/autolink.t b/r2/r2/lib/contrib/discount-1.6.0/tests/autolink.t new file mode 100644 index 000000000..a471fea85 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/autolink.t @@ -0,0 +1,46 @@ +./echo 'Reddit-style automatic links' +rc=0 + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try -fautolink 'single link' \ + 'http://www.pell.portland.or.us/~orc/Code/discount' \ + '' + +try -fautolink '[!](http://a.com "http://b.com")' \ + '[!](http://a.com "http://b.com")' \ + '

!

' + +try -fautolink 'link surrounded by text' \ + 'here http://it is?' \ + '

here http://it is?

' + +try -fautolink 'naked @' '@' '

@

' + +try -fautolink 'parenthesised (url)' \ + '(http://here)' \ + '

(http://here)

' + +try -fautolink 'token with trailing @' 'orc@' '

orc@

' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/automatic.t b/r2/r2/lib/contrib/discount-1.6.0/tests/automatic.t new file mode 100644 index 000000000..89c3e6ad9 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/automatic.t @@ -0,0 +1,53 @@ +./echo "automatic links" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +match() { + ./echo -n " $1" '..................................' | ./cols 36 + + if ./echo "$2" | ./markdown | grep "$3" >/dev/null; then + ./echo " ok" + else + ./echo " FAILED" + rc=1 + fi +} + +try 'http url' '' '

http://here

' +try 'ftp url' '' '

ftp://here

' +match '' '' '' '' '' '' '

<orc@>

' +try 'invalid <@pell>' '<@pell>' '

<@pell>

' +try 'invalid ' '' '

<orc@pell>

' +try 'invalid ' '' '

<orc@.pell>

' +try 'invalid ' '' '

<orc@pell.>

' +match '' '' '
' '' '' '' '' '' 'foo

' + +try -fautolink 'autolink url with trailing \' \ + 'http://a.com/\' \ + '

http://a.com/\

' + + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/chrome.text b/r2/r2/lib/contrib/discount-1.6.0/tests/chrome.text new file mode 100644 index 000000000..d97b47b9f --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/chrome.text @@ -0,0 +1,13 @@ +->###chrome with my markdown###<- + +1. `(c)` -> `©` (c) +2. `(r)` -> `®` (r) +3. `(tm)` -> `™` (tm) +4. `...` -> `…` ... +5. `--` -> `&emdash;` -- +6. `-` -> `–` - (but not if it's between-words) +7. "fancy quoting" +8. 'fancy quoting (#2)' +9. don't do it unless it's a real quote. +10. `` (`) `` + diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/code.t b/r2/r2/lib/contrib/discount-1.6.0/tests/code.t new file mode 100644 index 000000000..ed460f959 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/code.t @@ -0,0 +1,39 @@ +./echo "code blocks" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try 'format for code block html' \ +' this is + code' \ + '
this is
+code
+
' + +try 'unclosed single backtick' '`hi there' '

`hi there

' +try 'unclosed double backtick' '``hi there' '

``hi there

' +try 'remove space around code' '`` hi there ``' '

hi there

' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/compat.t b/r2/r2/lib/contrib/discount-1.6.0/tests/compat.t new file mode 100644 index 000000000..2ce7a14f7 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/compat.t @@ -0,0 +1,47 @@ +./echo "markdown 1.0 compatability" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +LINKY='[this] is a test + +[this]: /this' + +try 'implicit reference links' "$LINKY" '

this is a test

' +try -f1.0 'implicit reference links (-f1.0)' "$LINKY" '

[this] is a test

' + +WSP=' ' +WHITESPACE=" + white space$WSP + and more" + +try 'trailing whitespace' "$WHITESPACE" '
white space ''
+and more
+
' + +try -f1.0 'trailing whitespace (-f1.0)' "$WHITESPACE" '
white space''
+and more
+
' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/crash.t b/r2/r2/lib/contrib/discount-1.6.0/tests/crash.t new file mode 100644 index 000000000..c0443ee34 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/crash.t @@ -0,0 +1,57 @@ +./echo "crashes" + +rc=0 +MARKDOWN_FLAGS= + +./echo -n ' zero-length input ................ ' + +if ./markdown < /dev/null >/dev/null; then + ./echo "ok" +else + ./echo "FAILED" + rc=1 +fi + +./echo -n ' hanging quote in list ............ ' + +./markdown >/dev/null 2>/dev/null << EOF + * > this should not die + +no. +EOF + +if [ "$?" -eq 0 ]; then + ./echo "ok" +else + ./echo "FAILED" + rc=1 +fi + +./echo -n ' dangling list item ............... ' + +if ./echo ' - ' | ./markdown >/dev/null 2>/dev/null; then + ./echo "ok" +else + ./echo "FAILED" + rc=1 +fi + +./echo -n ' empty []() with baseurl .......... ' + +if ./markdown -bHOHO -s '[]()' >/dev/null 2>/dev/null; then + ./echo "ok" +else + ./echo "FAILED" + rc=1 +fi + +./echo -n ' unclosed html block .............. ' + +if ./echo '/dev/null 2>/dev/null; then + ./echo 'ok' +else + ./echo "FAILED" + rc=1 +fi + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/div.t b/r2/r2/lib/contrib/discount-1.6.0/tests/div.t new file mode 100644 index 000000000..6c3e812d2 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/div.t @@ -0,0 +1,67 @@ +./markdown -V | grep DIV >/dev/null || exit 0 + +./echo "%div% blocks" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try 'simple >%div% block' \ +'>%this% +this this' \ +'

this this

' + +try 'two >%div% blocks in a row' \ +'>%this% +this this + +>%that% +that that' \ +'

this this

+ +

that that

' + +try '>%class:div%' \ +'>%class:this% +this this' \ +'

this this

' + +try '>%id:div%' \ +'>%id:this% +this this' \ +'

this this

' + +try 'nested >%div%' \ +'>%this% +>>%that% +>>that + +>%more% +more' \ +'

that

+ +

more

' + + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/dl.t b/r2/r2/lib/contrib/discount-1.6.0/tests/dl.t new file mode 100644 index 000000000..4c3a53a6e --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/dl.t @@ -0,0 +1,69 @@ +./echo "definition lists" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +SRC=' +=this= + is an ugly +=test= + eh?' + +RSLT='
+
this
+
is an ugly
+
test
+
eh?
+
' + +if ./markdown -V | grep DL_TAG >/dev/null; then + + try '=tag= generates definition lists' "$SRC" "$RSLT" + + try 'one item with two =tags=' \ + '=this= +=is= + A test, eh?' \ + '
+
this
+
is
+
A test, eh?
+
' + + +else + try '=tag= does nothing' "$SRC" \ + '

=this=

+ +
is an ugly
+
+ +

=test=

+ +
eh?
+
' + +fi + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/embedlinks.text b/r2/r2/lib/contrib/discount-1.6.0/tests/embedlinks.text new file mode 100644 index 000000000..a79ef1b20 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/embedlinks.text @@ -0,0 +1,9 @@ +* [![an image](http://dustmite.org/mite.jpg =50x50)] (http://dustmite.org) +* [[an embedded link](http://wontwork.org)](http://willwork.org) +* [![dustmite][]] (http:/dustmite.org) +* ![dustmite][] +* ![dustmite][dustmite] +* [cheat me](http://I.am.cheating) + +[dustmite]: http://dustmite.org/mite.jpg =25x25 "here I am!" + diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/emphasis.t b/r2/r2/lib/contrib/discount-1.6.0/tests/emphasis.t new file mode 100644 index 000000000..60e631fb8 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/emphasis.t @@ -0,0 +1,40 @@ +./echo "emphasis" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + + +try '*hi* -> hi' '*hi*' '

hi

' +try '* -> *' 'A * A' '

A * A

' +try -fstrict '***A**B*' '***A**B*' '

AB

' +try -fstrict '***A*B**' '***A*B**' '

AB

' +try -fstrict '**A*B***' '**A*B***' '

AB

' +try -fstrict '*A**B***' '*A**B***' '

AB

' + +if ./markdown -V | grep RELAXED >/dev/null; then + try -frelax '_A_B with -frelax' '_A_B' '

_A_B

' + try -fstrict '_A_B with -fstrict' '_A_B' '

AB

' +fi + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/flow.t b/r2/r2/lib/contrib/discount-1.6.0/tests/flow.t new file mode 100644 index 000000000..3541727ac --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/flow.t @@ -0,0 +1,52 @@ +./echo "paragraph flow" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try 'header followed by paragraph' \ + '###Hello, sailor### +And how are you today?' \ + '

Hello, sailor

+ +

And how are you today?

' + +try 'two lists punctuated with a HR' \ + '* A +* * * +* B +* C' \ + '
    +
  • A
  • +
+ + +
+ +
    +
  • B
  • +
  • C
  • +
' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/footnotes.t b/r2/r2/lib/contrib/discount-1.6.0/tests/footnotes.t new file mode 100644 index 000000000..2fa9e94e9 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/footnotes.t @@ -0,0 +1,34 @@ +./echo "footnotes" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try 'a line with multiple []s' '[a][] [b][]:' '

[a][] [b][]:

' +try 'a valid footnote' \ + '[alink][] + +[alink]: link_me' \ + '

alink

' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/header.t b/r2/r2/lib/contrib/discount-1.6.0/tests/header.t new file mode 100644 index 000000000..aaab824d6 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/header.t @@ -0,0 +1,46 @@ +./echo "headers" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + S=`./echo -n "$1" '..................................' | ./cols 34` + ./echo -n " $S " + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo "ok" + else + ./echo "FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try 'single #' '#' '

#

' +try 'empty ETX' '##' '

#

' +try 'single-char ETX (##W)' '##W' '

W

' +try 'single-char ETX (##W )' '##W ' '

W

' +try 'single-char ETX (## W)' '## W' '

W

' +try 'single-char ETX (## W )' '## W ' '

W

' +try 'single-char ETX (##W##)' '##W##' '

W

' +try 'single-char ETX (##W ##)' '##W ##' '

W

' +try 'single-char ETX (## W##)' '## W##' '

W

' +try 'single-char ETX (## W ##)' '## W ##' '

W

' + +try 'multiple-char ETX (##Hello##)' '##Hello##' '

Hello

' + +try 'SETEXT with trailing whitespace' \ +'hello +===== ' '

hello

' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/html.t b/r2/r2/lib/contrib/discount-1.6.0/tests/html.t new file mode 100644 index 000000000..9f75895c8 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/html.t @@ -0,0 +1,112 @@ +./echo "html blocks" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try 'self-closing block tags (hr)' \ + '
+ +text' \ + '
+ + +

text

' + +try 'self-closing block tags (hr/)' \ + '
+ +text' \ + '
+ + +

text

' + +try 'self-closing block tags (br)' \ + '
+ +text' \ + '
+ + +

text

' + +try 'html comments' \ + '' \ + '' + +try 'no smartypants inside tags (#1)' \ + '' \ + '

' + +try 'no smartypants inside tags (#2)' \ + ':)' \ + '

:)

' + +try -fnohtml 'block html with -fnohtml' 'hi!' '

<b>hi!</b>

' +try -fnohtml 'malformed tag injection' '' '

<x <script>

' +try -fhtml 'allow html with -fhtml' 'hi!' '

hi!

' + + +# check that nested raw html blocks terminate properly. +# +BLOCK1SRC='Markdown works fine *here*. + +*And* here. + +
+
+ +Markdown here is *not* parsed by RDiscount. + +Nor in *this* paragraph, and there are no paragraph breaks.' + +BLOCK1OUT='

Markdown works fine here.

+ +

And here.

+ +
+
+ + +

Markdown here is not parsed by RDiscount.

+ +

Nor in this paragraph, and there are no paragraph breaks.

' + +try 'nested html blocks (1)' "$BLOCK1SRC" "$BLOCK1OUT" + +try 'nested html blocks (2)' \ + '
This is inside a html block +
This is, too
and +so is this
' \ + '
This is inside a html block +
This is, too
and +so is this
' + +try 'unfinished tags' '<foo bar

' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/links.text b/r2/r2/lib/contrib/discount-1.6.0/tests/links.text new file mode 100644 index 000000000..ca4043997 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/links.text @@ -0,0 +1,14 @@ + 1. + 2. [automatic] (http://automatic "automatic link") + 3. [automatic](http://automatic "automatic link") + 4. [automatic](http://automatic) + 5. [automatic] (http://automatic) + 6. [automatic] [] + 7. [automatic][] + 8. [my][automatic] + 9. [my] [automatic] + + [automatic]: http://automatic "footnote" + + + [automatic] [ diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/linkylinky.t b/r2/r2/lib/contrib/discount-1.6.0/tests/linkylinky.t new file mode 100644 index 000000000..5761f2ad1 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/linkylinky.t @@ -0,0 +1,124 @@ +./echo "embedded links" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try 'url contains &' '[hehehe](u&rl)' '

hehehe

' +try 'url contains +' '[hehehe](u+rl)' '

hehehe

' +try 'url contains "' '[hehehe](u"rl)' '

hehehe

' +try 'url contains <' '[hehehe](uhehehe

' +try 'url contains whitespace' '[ha](r u)' '

ha

' + +try 'url contains whitespace & title' \ + '[hehehe](r u "there")' \ + '

hehehe

' + +try 'url contains escaped )' \ + '[hehehe](u\))' \ + '

hehehe

' + +try 'image label contains <' \ + '![hehe<he<he

' + +try 'image label contains >' \ + '![he>he>he](url)' \ + '

he>he>he

' + +try 'sloppy context link' \ + '[heh]( url "how about it?" )' \ + '

heh

' + +try 'footnote urls formed properly' \ + '[hehehe]: hohoho "ha ha" + +[hehehe][]' \ + '

hehehe

' + +try 'linky-like []s work' \ + '[foo]' \ + '

[foo]

' + +try 'pseudo-protocol "id:"'\ + '[foo](id:bar)' \ + '

foo

' + +try 'pseudo-protocol "class:"' \ + '[foo](class:bar)' \ + '

foo

' + +try 'pseudo-protocol "abbr:"'\ + '[foo](abbr:bar)' \ + '

foo

' + +try 'nested [][]s' \ + '[[z](y)](x)' \ + '

[z](y)

' + +try 'empty [][] tags' \ + '[![][1]][2] + +[1]: image1 +[2]: image2' \ + '

' + +try 'footnote cuddled up to text' \ +'foo +[bar]:bar' \ + '

foo

' + +try 'mid-paragraph footnote' \ +'talk talk talk talk +[bar]: bar +talk talk talk talk' \ +'

talk talk talk talk +talk talk talk talk

' + +try 'mid-blockquote footnote' \ +'>blockquote! +[footnote]: here! +>blockquote!' \ +'

blockquote! +blockquote!

' + +try 'end-blockquote footnote' \ +'>blockquote! +>blockquote! +[footnote]: here!' \ +'

blockquote! +blockquote!

' + +try 'start-blockquote footnote' \ +'[footnote]: here! +>blockquote! +>blockquote!' \ +'

blockquote! +blockquote!

' + +try '[text] (text) not a link' \ +'[test] (me)' \ +'

[test] (me)

' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/linkypix.t b/r2/r2/lib/contrib/discount-1.6.0/tests/linkypix.t new file mode 100644 index 000000000..59bdfbfc4 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/linkypix.t @@ -0,0 +1,31 @@ +./echo "embedded images" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try 'image with size extension' \ + '![picture](pic =200x200)' \ + '

picture

' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/list.t b/r2/r2/lib/contrib/discount-1.6.0/tests/list.t new file mode 100644 index 000000000..8ffacda2a --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/list.t @@ -0,0 +1,178 @@ +./echo "lists" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try 'two separated items' \ + ' * A + +* B' \ + '
    +
  • A

  • +
  • B

  • +
' + +try 'two adjacent items' \ + ' * A + * B' \ + '
    +
  • A
  • +
  • B
  • +
' + + +try 'two adjacent items, then space' \ + ' * A +* B + +space, the final frontier' \ + '
    +
  • A
  • +
  • B
  • +
+ + +

space, the final frontier

' + +try 'nested lists (1)' \ + ' * 1. Sub (list) + 2. Two (items) + 3. Here' \ + '
    +
    1. +
    2. Sub (list)
    3. +
    4. Two (items)
    5. +
    6. Here
    7. +
    +
  • +
' + +try 'nested lists (2)' \ + ' * A (list) + + 1. Sub (list) + 2. Two (items) + 3. Here + + Here + * B (list)' \ + '
    +
  • A (list)

    + +
      +
    1. Sub (list)
    2. +
    3. Two (items)
    4. +
    5. Here
    6. +
    + + +

    Here

  • +
  • B (list)
  • +
' + +try 'list inside blockquote' \ + '>A (list) +> +>1. Sub (list) +>2. Two (items) +>3. Here' \ + '

A (list)

+ +
    +
  1. Sub (list)
  2. +
  3. Two (items)
  4. +
  5. Here
  6. +
+
' + +try 'blockquote inside list' \ + ' * A (list) + + > quote + > me + + dont quote me' \ + '
    +
  • A (list)

    + +

    quote +me

    + +

    dont quote me

  • +
' + +try 'empty list' \ +' +- + +- +' \ +'
    +
  • +
  • +
' + + +try 'blockquote inside a list' \ +' * This is a list item. + + > This is a quote insde a list item. ' \ +'
    +
  • This is a list item.

    + +

    This is a quote insde a list item.

  • +
' + +if ./markdown -V | grep DL_TAG >/dev/null; then + + try 'dl followed by non-dl' \ + '=a= + test +2. here' \ +'
+
a
+
test
+
+ +
    +
  1. here
  2. +
' + + try 'non-dl followed by dl' \ + '1. hello +=sailor= + hi!' \ +'
    +
  1. hello
  2. +
+ + +
+
sailor
+
hi!
+
' + +fi + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/list3deep.t b/r2/r2/lib/contrib/discount-1.6.0/tests/list3deep.t new file mode 100644 index 000000000..f28d57fa1 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/list3deep.t @@ -0,0 +1,57 @@ +./echo "deeply nested lists" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +LIST=' + * top-level list ( list 1) + + second-level list (list 2) + * first item third-level list (list 3) + + * second item, third-level list, first item. (list 4) + * second item, third-level list, second item. + * top-level list again.' + +RSLT='
    +
  • top-level list ( list 1) + +
      +
    • second-level list (list 2) + +
        +
      • first item third-level list (list 3)
      • +
      +
    • +
      • +
      • second item, third-level list, first item. (list 4)
      • +
      • second item, third-level list, second item.
      • +
      +
    • +
    +
  • +
  • top-level list again.
  • +
' + +try 'thrice-nested lists' "$LIST" "$RSLT" + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/misc.t b/r2/r2/lib/contrib/discount-1.6.0/tests/misc.t new file mode 100644 index 000000000..e437a8712 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/misc.t @@ -0,0 +1,33 @@ +./echo "misc" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try 'single paragraph' 'AAA' '

<p>AAA</p>

' +try '< -> <' '<' '

<p>&lt;</p>

' +try '`>` -> >' '`>`' '

<p><code>&gt;</code></p>

' +try '`` ` `` -> `' '`` ` ``' '

<p><code>`</code></p>

' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/pandoc.t b/r2/r2/lib/contrib/discount-1.6.0/tests/pandoc.t new file mode 100644 index 000000000..9c1e7fb57 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/pandoc.t @@ -0,0 +1,74 @@ +./echo "pandoc headers" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + + +HEADER='% title +% author(s) +% date' + + +if ./markdown -V | grep HEADER > /dev/null; then + + try 'valid header' "$HEADER" '' + try -F0x0100 'valid header with -F0x0100' "$HEADER" '

<p>% title +% author(s) +% date</p>

' + + try 'invalid header' \ + '% title +% author(s) +a pony!' \ + '

<p>% title +% author(s) +a pony!</p>

' + + try 'offset header' \ + ' +% title +% author(s) +% date' \ + '

<p>% title +% author(s) +% date</p>

' + + try 'indented header' \ + ' % title +% author(s) +% date' \ + '

<p>% title +% author(s) +% date</p>

' + +else + + try 'ignore headers' "$HEADER" '

<p>% title +% author(s) +% date</p>

' + +fi + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/para.t b/r2/r2/lib/contrib/discount-1.6.0/tests/para.t new file mode 100644 index 000000000..d867454df --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/para.t @@ -0,0 +1,38 @@ +./echo "paragraph blocking" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try 'paragraph followed by code' \ + 'a + b' \ + '

<p>a</p>

+ +<pre><code>b +</code></pre>
' + +try 'single-line paragraph' 'a' '

<p>a</p>

' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/paranoia.t b/r2/r2/lib/contrib/discount-1.6.0/tests/paranoia.t new file mode 100644 index 000000000..f2b102484 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/paranoia.t @@ -0,0 +1,31 @@ +./echo "paranoia" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try -fsafelink 'bogus url (-fsafelink)' '[test](bad:protocol)' '

<p>[test](bad:protocol)</p>

' +try -fnosafelink 'bogus url (-fnosafelink)' '[test](bad:protocol)' '

<p><a href="bad:protocol">test</a></p>

' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/peculiarities.t b/r2/r2/lib/contrib/discount-1.6.0/tests/peculiarities.t new file mode 100644 index 000000000..8a199bd3d --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/peculiarities.t @@ -0,0 +1,66 @@ +./echo "markup peculiarities" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try 'list followed by header .......... ' \ + " +- AAA +- BBB +-" \ + '
    +
  • AAA + +

    – BBB

  • +
' + +try 'ul with mixed item prefixes' \ + ' +- A +1. B' \ + '
    +
  • A
  • +
  • B
  • +
' + +try 'ol with mixed item prefixes' \ + ' +1. A +- B +' \ + '
    +
  1. A
  2. +
  3. B
  4. +
' + +try 'forcing a
' 'this +is' '

this
+is

' + +try 'trimming single spaces' 'this ' '

this

' +try -fnohtml 'markdown
with -fnohtml' 'foo +is' '

foo
+is

' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/pseudo.t b/r2/r2/lib/contrib/discount-1.6.0/tests/pseudo.t new file mode 100644 index 000000000..e2d8b7635 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/pseudo.t @@ -0,0 +1,35 @@ +./echo "pseudo-protocols" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try '[](id:) links' '[foo](id:bar)' '

<p><a id="bar">foo</a></p>

' +try -fnoext '[](id:) links with -fnoext' '[foo](id:bar)' '

<p>[foo](id:bar)</p>

' +try '[](class:) links' '[foo](class:bar)' '

<p><span class="bar">foo</span></p>

' +try -fnoext '[](class:) links with -fnoext' '[foo](class:bar)' '

<p>[foo](class:bar)</p>

' +try '[](raw:) links' '[foo](raw:bar)' '

<p>bar</p>

' +try -fnoext '[](raw:) links with -fnoext' '[foo](raw:bar)' '

<p>[foo](raw:bar)</p>

' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/reparse.t b/r2/r2/lib/contrib/discount-1.6.0/tests/reparse.t new file mode 100644 index 000000000..b6bebd8f1 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/reparse.t @@ -0,0 +1,34 @@ +./echo "footnotes inside reparse sections" + +rc=0 + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + + +try 'footnote inside [] section' \ + '[![foo][]](bar) + +[foo]: bar2' \ + '

foo

' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/schiraldi.t b/r2/r2/lib/contrib/discount-1.6.0/tests/schiraldi.t new file mode 100644 index 000000000..2d90813da --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/schiraldi.t @@ -0,0 +1,104 @@ +./echo "Bugs & misfeatures reported by Mike Schiraldi" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + S=`./echo -n "$1" '..................................' | ./cols 34` + ./echo -n " $S " + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo "ok" + else + ./echo "FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try -fnohtml 'breaks with -fnohtml' 'foo +bar' '

foo
+bar

' + +try 'links with trailing \)' \ + '[foo](http://en.wikipedia.org/wiki/Link_(film\))' \ + '

foo

' + +try -fautolink '(url) with -fautolink' \ + '(http://tsfr.org)' \ + '

(http://tsfr.org)

' + +try 'single #' \ + '#' \ + '

#

' + +try -frelax '* processing with -frelax' \ + '2*4 = 8 * 1 = 2**3' \ + '

2*4 = 8 * 1 = 2**3

' + +try -fnopants '[]() with a single quote mark' \ + '[Poe'"'"'s law](http://rationalwiki.com/wiki/Poe'"'"'s_Law)' \ + '

Poe'"'"'s law

' + +try -fautolink 'autolink url with escaped spaces' \ + 'http://\(here\ I\ am\)' \ + '

http://(here I am)

' + +try -fautolink 'autolink café_racer' \ + 'http://en.wikipedia.org/wiki/café_racer' \ + '

http://en.wikipedia.org/wiki/caf%C3%A9_racer

' + +try -fautolink 'autolink url with arguments' \ + 'http://foo.bar?a&b=c' \ + '

http://foo.bar?a&b=c

' + +try '\( escapes in []()' \ + '[foo](http://a.com/\(foo\))' \ + '

foo

' + +try -fautolink 'autolink url with escaped ()' \ + 'http://a.com/\(foo\)' \ + '

http://a.com/(foo)

' + +try -fautolink 'autolink url with escaped \' \ + 'http://a.com/\\\)' \ + '

http://a.com/\)

' + +try -fautolink 'autolink url with -' \ + 'http://experts-exchange.com' \ + '

http://experts-exchange.com

' + +try -fautolink 'autolink url with +' \ + 'http://www67.wolframalpha.com/input/?i=how+old+was+jfk+jr+when+jfk+died' \ + '

http://www67.wolframalpha.com/input/?i=how+old+was+jfk+jr+when+jfk+died

' + +try -fautolink 'autolink url with &' \ + 'http://foo.bar?a&b=c' \ + '

http://foo.bar?a&b=c

' + + +try -fautolink 'autolink url with ,' \ + 'http://www.spiegel.de/international/europe/0,1518,626171,00.html' \ + '

http://www.spiegel.de/international/europe/0,1518,626171,00.html

' + +try -fautolink 'autolink url with : & ;' \ + 'http://www.biblegateway.com/passage/?search=Matthew%205:29-30;&version=31;' \ + '

http://www.biblegateway.com/passage/?search=Matthew%205:29-30;&version=31;

' + +Q="'" +try -fautolink 'security hole with \" in []()' \ +'[XSS](/ "\"=\"\"onmouseover='$Q'alert(String.fromCharCode(88,83,83))'$Q'")' \ +'

XSS

' + + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/smarty.t b/r2/r2/lib/contrib/discount-1.6.0/tests/smarty.t new file mode 100644 index 000000000..e3e6e3457 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/smarty.t @@ -0,0 +1,50 @@ +./echo "smarty pants" + +rc=0 +MARKDOWN_FLAGS=0x0; export MARKDOWN_FLAGS + +try() { + unset FLAGS + case "$1" in + -*) FLAGS="$1" + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + + +try '(c) -> ©' '(c)' '

<p>&copy;</p>

' +try '(r) -> ®' '(r)' '

<p>&reg;</p>

' +try '(tm) -> ™' '(tm)' '
<p>&trade;</p>
' +try '... -> …' '...' '
<p>&hellip;</p>
' + +try '"--" -> —' '--' '
<p>&mdash;</p>
' + +try '"-" -> –' 'regular -' '

<p>regular &ndash;</p>

' +try 'A-B -> A-B' 'A-B' '

<p>A-B</p>

' +try '"fancy" -> “fancy”' '"fancy"' '

<p>&ldquo;fancy&rdquo;</p>

' +try "'fancy'" "'fancy'" '

<p>&lsquo;fancy&rsquo;</p>

' +try "don't -> don’t" "don't" '

<p>don&rsquo;t</p>

' +try "don't -> don’t" "don't" '

<p>don&rsquo;t</p>

' +try "it's -> it’s" "it's" '

<p>it&rsquo;s</p>

' + +if ./markdown -V | grep SUPERSCRIPT >/dev/null; then + try -frelax 'A^B -> AB (-frelax)' 'A^B' '

<p>A<sup>B</sup></p>

' + try -fstrict 'A^B != AB (-fstrict)' 'A^B' '

<p>A^B</p>

' + try -frelax 'A^B in link title' '[link](here "A^B")' '

<p><a href="here" title="A^B">link</a></p>

' +fi + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/snakepit.t b/r2/r2/lib/contrib/discount-1.6.0/tests/snakepit.t new file mode 100644 index 000000000..886806fea --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/snakepit.t @@ -0,0 +1,45 @@ +./echo "The snakepit of Markdown.pl compatability" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try '[](single quote) text (quote)' \ + "[foo](http://Poe's law) will make this fail ('no, it won't!') here."\ + '

foo here.

' + +try '[](unclosed foo

' + +try ' (1)' \ +'hi' \ +'

hi

' + +try ' (2)' \ +'hi' \ +'

hi

' + +try 'paragraph
oddity' 'EOF ' '

EOF

' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/style.t b/r2/r2/lib/contrib/discount-1.6.0/tests/style.t new file mode 100644 index 000000000..5e2a7352b --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/style.t @@ -0,0 +1,32 @@ +./echo "styles" + +rc=0 +MARKDOWN_FLAGS= + +./echo -n ' ' | ./markdown|wc -c` + +if [ $count -eq 1 ]; then + ./echo "ok" +else + ./echo "FAILED" + rc=1 +fi + +./echo -n ' ' + +count=`./echo "$ASK" | ./markdown | wc -c` + +if [ $count -eq 1 ]; then + ./echo "ok" +else + ./echo "FAILED" + rc=1 +fi + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/syntax.text b/r2/r2/lib/contrib/discount-1.6.0/tests/syntax.text new file mode 100644 index 000000000..df740dd12 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/syntax.text @@ -0,0 +1,897 @@ +Markdown: Syntax +================ + + + + +* [Overview](#overview) + * [Philosophy](#philosophy) + * [Inline HTML](#html) + * [Automatic Escaping for Special Characters](#autoescape) +* [Block Elements](#block) + * [Paragraphs and Line Breaks](#p) + * [Headers](#header) + * [Blockquotes](#blockquote) + * [Lists](#list) + * [Code Blocks](#precode) + * [Horizontal Rules](#hr) +* [Span Elements](#span) + * [Links](#link) + * [Emphasis](#em) + * [Code](#code) + * [Images](#img) +* [Miscellaneous](#misc) + * [Backslash Escapes](#backslash) + * [Automatic Links](#autolink) + + +**Note:** This document is itself written using Markdown; you +can [see the source for it by adding '.text' to the URL][src]. + + [src]: /projects/markdown/syntax.text + +* * * + +

+<h2 id="overview">Overview</h2>

+ +

+<h3 id="philosophy">Philosophy</h3>

+ +Markdown is intended to be as easy-to-read and easy-to-write as is feasible. + +Readability, however, is emphasized above all else. A Markdown-formatted +document should be publishable as-is, as plain text, without looking +like it's been marked up with tags or formatting instructions. While +Markdown's syntax has been influenced by several existing text-to-HTML +filters -- including [Setext] [1], [atx] [2], [Textile] [3], [reStructuredText] [4], +[Grutatext] [5], and [EtText] [6] -- the single biggest source of +inspiration for Markdown's syntax is the format of plain text email. + + [1]: http://docutils.sourceforge.net/mirror/setext.html + [2]: http://www.aaronsw.com/2002/atx/ + [3]: http://textism.com/tools/textile/ + [4]: http://docutils.sourceforge.net/rst.html + [5]: http://www.triptico.com/software/grutatxt.html + [6]: http://ettext.taint.org/doc/ + +To this end, Markdown's syntax is comprised entirely of punctuation +characters, which punctuation characters have been carefully chosen so +as to look like what they mean. E.g., asterisks around a word actually +look like \*emphasis\*. Markdown lists look like, well, lists. Even +blockquotes look like quoted passages of text, assuming you've ever +used email. + + + +

+<h3 id="html">Inline HTML</h3>

+ +Markdown's syntax is intended for one purpose: to be used as a +format for *writing* for the web. + +Markdown is not a replacement for HTML, or even close to it. Its +syntax is very small, corresponding only to a very small subset of +HTML tags. The idea is *not* to create a syntax that makes it easier +to insert HTML tags. In my opinion, HTML tags are already easy to +insert. The idea for Markdown is to make it easy to read, write, and +edit prose. HTML is a *publishing* format; Markdown is a *writing* +format. Thus, Markdown's formatting syntax only addresses issues that +can be conveyed in plain text. + +For any markup that is not covered by Markdown's syntax, you simply +use HTML itself. There's no need to preface it or delimit it to +indicate that you're switching from Markdown to HTML; you just use +the tags. + +The only restrictions are that block-level HTML elements -- e.g. `
<div>`, +`<table>`, `<pre>`, `<p>
`, etc. -- must be separated from surrounding +content by blank lines, and the start and end tags of the block should +not be indented with tabs or spaces. Markdown is smart enough not +to add extra (unwanted) `
<p>
` tags around HTML block-level tags. + +For example, to add an HTML table to a Markdown article: + + This is a regular paragraph. + +

+    <table>
+        <tr>
+            <td>Foo</td>
+        </tr>
+    </table>
+ + This is another regular paragraph. + +Note that Markdown formatting syntax is not processed within block-level +HTML tags. E.g., you can't use Markdown-style `*emphasis*` inside an +HTML block. + +Span-level HTML tags -- e.g. ``, ``, or `` -- can be +used anywhere in a Markdown paragraph, list item, or header. If you +want, you can even use HTML tags instead of Markdown formatting; e.g. if +you'd prefer to use HTML `` or `` tags instead of Markdown's +link or image syntax, go right ahead. + +Unlike block-level HTML tags, Markdown syntax *is* processed within +span-level tags. + + +
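+As a quick sketch of the two rules above (output details vary slightly
+between Markdown implementations), input like this:
+
+    <div>
+    *not emphasized, because it sits inside a block-level tag*
+    </div>
+
+    Some <em>span-level HTML</em> mixed with *Markdown emphasis*.
+
+comes out roughly as:
+
+    <div>
+    *not emphasized, because it sits inside a block-level tag*
+    </div>
+
+    <p>Some <em>span-level HTML</em> mixed with <em>Markdown emphasis</em>.</p>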

+<h3 id="autoescape">Automatic Escaping for Special Characters</h3>

+ +In HTML, there are two characters that demand special treatment: `<` +and `&`. Left angle brackets are used to start tags; ampersands are +used to denote HTML entities. If you want to use them as literal +characters, you must escape them as entities, e.g. `<`, and +`&`. + +Ampersands in particular are bedeviling for web writers. If you want to +write about 'AT&T', you need to write '`AT&T`'. You even need to +escape ampersands within URLs. Thus, if you want to link to: + + http://images.google.com/images?num=30&q=larry+bird + +you need to encode the URL as: + + http://images.google.com/images?num=30&q=larry+bird + +in your anchor tag `href` attribute. Needless to say, this is easy to +forget, and is probably the single most common source of HTML validation +errors in otherwise well-marked-up web sites. + +Markdown allows you to use these characters naturally, taking care of +all the necessary escaping for you. If you use an ampersand as part of +an HTML entity, it remains unchanged; otherwise it will be translated +into `&`. + +So, if you want to include a copyright symbol in your article, you can write: + + © + +and Markdown will leave it alone. But if you write: + + AT&T + +Markdown will translate it to: + + AT&T + +Similarly, because Markdown supports [inline HTML](#html), if you use +angle brackets as delimiters for HTML tags, Markdown will treat them as +such. But if you write: + + 4 < 5 + +Markdown will translate it to: + + 4 < 5 + +However, inside Markdown code spans and blocks, angle brackets and +ampersands are *always* encoded automatically. This makes it easy to use +Markdown to write about HTML code. (As opposed to raw HTML, which is a +terrible format for writing about HTML syntax, because every single `<` +and `&` in your example code needs to be escaped.) + + +* * * + + +

+<h2 id="block">Block Elements</h2>

+ + +

+<h3 id="p">Paragraphs and Line Breaks</h3>

+ +A paragraph is simply one or more consecutive lines of text, separated +by one or more blank lines. (A blank line is any line that looks like a +blank line -- a line containing nothing but spaces or tabs is considered +blank.) Normal paragraphs should not be indented with spaces or tabs. + +The implication of the "one or more consecutive lines of text" rule is +that Markdown supports "hard-wrapped" text paragraphs. This differs +significantly from most other text-to-HTML formatters (including Movable +Type's "Convert Line Breaks" option) which translate every line break +character in a paragraph into a `
<br />` tag. + +When you *do* want to insert a `
<br />` break tag using Markdown, you +end a line with two or more spaces, then type return. + +Yes, this takes a tad more effort to create a `
<br />`, but a simplistic +"every line break is a `<br />
`" rule wouldn't work for Markdown. +Markdown's email-style [blockquoting][bq] and multi-paragraph [list items][l] +work best -- and look better -- when you format them with hard breaks. + + [bq]: #blockquote + [l]: #list + + + + + +Markdown supports two styles of headers, [Setext] [1] and [atx] [2]. + +Setext-style headers are "underlined" using equal signs (for first-level +headers) and dashes (for second-level headers). For example: + + This is an H1 + ============= + + This is an H2 + ------------- + +Any number of underlining `=`'s or `-`'s will work. + +Atx-style headers use 1-6 hash characters at the start of the line, +corresponding to header levels 1-6. For example: + + # This is an H1 + + ## This is an H2 + + ###### This is an H6 + +Optionally, you may "close" atx-style headers. This is purely +cosmetic -- you can use this if you think it looks better. The +closing hashes don't even need to match the number of hashes +used to open the header. (The number of opening hashes +determines the header level.) : + + # This is an H1 # + + ## This is an H2 ## + + ### This is an H3 ###### + + +
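+As a rough sketch (exact whitespace varies between implementations),
+the atx examples above correspond to output like:
+
+    <h1>This is an H1</h1>
+
+    <h2>This is an H2</h2>
+
+    <h6>This is an H6</h6>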

+<h3 id="blockquote">Blockquotes</h3>

+ +Markdown uses email-style `>` characters for blockquoting. If you're +familiar with quoting passages of text in an email message, then you +know how to create a blockquote in Markdown. It looks best if you hard +wrap the text and put a `>` before every line: + + > This is a blockquote with two paragraphs. Lorem ipsum dolor sit amet, + > consectetuer adipiscing elit. Aliquam hendrerit mi posuere lectus. + > Vestibulum enim wisi, viverra nec, fringilla in, laoreet vitae, risus. + > + > Donec sit amet nisl. Aliquam semper ipsum sit amet velit. Suspendisse + > id sem consectetuer libero luctus adipiscing. + +Markdown allows you to be lazy and only put the `>` before the first +line of a hard-wrapped paragraph: + + > This is a blockquote with two paragraphs. Lorem ipsum dolor sit amet, + consectetuer adipiscing elit. Aliquam hendrerit mi posuere lectus. + Vestibulum enim wisi, viverra nec, fringilla in, laoreet vitae, risus. + + > Donec sit amet nisl. Aliquam semper ipsum sit amet velit. Suspendisse + id sem consectetuer libero luctus adipiscing. + +Blockquotes can be nested (i.e. a blockquote-in-a-blockquote) by +adding additional levels of `>`: + + > This is the first level of quoting. + > + > > This is nested blockquote. + > + > Back to the first level. + +Blockquotes can contain other Markdown elements, including headers, lists, +and code blocks: + + > ## This is a header. + > + > 1. This is the first list item. + > 2. This is the second list item. + > + > Here's some example code: + > + > return shell_exec("echo $input | $markdown_script"); + +Any decent text editor should make email-style quoting easy. For +example, with BBEdit, you can make a selection and choose Increase +Quote Level from the Text menu. + + +

+<h3 id="list">Lists</h3>

+ +Markdown supports ordered (numbered) and unordered (bulleted) lists. + +Unordered lists use asterisks, pluses, and hyphens -- interchangably +-- as list markers: + + * Red + * Green + * Blue + +is equivalent to: + + + Red + + Green + + Blue + +and: + + - Red + - Green + - Blue + +Ordered lists use numbers followed by periods: + + 1. Bird + 2. McHale + 3. Parish + +It's important to note that the actual numbers you use to mark the +list have no effect on the HTML output Markdown produces. The HTML +Markdown produces from the above list is: + +
+    <ol>
+    <li>Bird</li>
+    <li>McHale</li>
+    <li>Parish</li>
+    </ol>
+ +If you instead wrote the list in Markdown like this: + + 1. Bird + 1. McHale + 1. Parish + +or even: + + 3. Bird + 1. McHale + 8. Parish + +you'd get the exact same HTML output. The point is, if you want to, +you can use ordinal numbers in your ordered Markdown lists, so that +the numbers in your source match the numbers in your published HTML. +But if you want to be lazy, you don't have to. + +If you do use lazy list numbering, however, you should still start the +list with the number 1. At some point in the future, Markdown may support +starting ordered lists at an arbitrary number. + +List markers typically start at the left margin, but may be indented by +up to three spaces. List markers must be followed by one or more spaces +or a tab. + +To make lists look nice, you can wrap items with hanging indents: + + * Lorem ipsum dolor sit amet, consectetuer adipiscing elit. + Aliquam hendrerit mi posuere lectus. Vestibulum enim wisi, + viverra nec, fringilla in, laoreet vitae, risus. + * Donec sit amet nisl. Aliquam semper ipsum sit amet velit. + Suspendisse id sem consectetuer libero luctus adipiscing. + +But if you want to be lazy, you don't have to: + + * Lorem ipsum dolor sit amet, consectetuer adipiscing elit. + Aliquam hendrerit mi posuere lectus. Vestibulum enim wisi, + viverra nec, fringilla in, laoreet vitae, risus. + * Donec sit amet nisl. Aliquam semper ipsum sit amet velit. + Suspendisse id sem consectetuer libero luctus adipiscing. + +If list items are separated by blank lines, Markdown will wrap the +items in `
<p>
` tags in the HTML output. For example, this input: + + * Bird + * Magic + +will turn into: + +

+    <ul>
+    <li>Bird</li>
+    <li>Magic</li>
+    </ul>
+ +But this: + + * Bird + + * Magic + +will turn into: + +
+    <ul>
+    <li><p>Bird</p></li>
+    <li><p>Magic</p></li>
+    </ul>
+ +List items may consist of multiple paragraphs. Each subsequent +paragraph in a list item must be intended by either 4 spaces +or one tab: + + 1. This is a list item with two paragraphs. Lorem ipsum dolor + sit amet, consectetuer adipiscing elit. Aliquam hendrerit + mi posuere lectus. + + Vestibulum enim wisi, viverra nec, fringilla in, laoreet + vitae, risus. Donec sit amet nisl. Aliquam semper ipsum + sit amet velit. + + 2. Suspendisse id sem consectetuer libero luctus adipiscing. + +It looks nice if you indent every line of the subsequent +paragraphs, but here again, Markdown will allow you to be +lazy: + + * This is a list item with two paragraphs. + + This is the second paragraph in the list item. You're + only required to indent the first line. Lorem ipsum dolor + sit amet, consectetuer adipiscing elit. + + * Another item in the same list. + +To put a blockquote within a list item, the blockquote's `>` +delimiters need to be indented: + + * A list item with a blockquote: + + > This is a blockquote + > inside a list item. + +To put a code block within a list item, the code block needs +to be indented *twice* -- 8 spaces or two tabs: + + * A list item with a code block: + + + + +It's worth noting that it's possible to trigger an ordered list by +accident, by writing something like this: + + 1986. What a great season. + +In other words, a *number-period-space* sequence at the beginning of a +line. To avoid this, you can backslash-escape the period: + + 1986\. What a great season. + + + +

+<h3 id="precode">Code Blocks</h3>

+ +Pre-formatted code blocks are used for writing about programming or +markup source code. Rather than forming normal paragraphs, the lines +of a code block are interpreted literally. Markdown wraps a code block +in both `
<pre>` and `<code>` tags.
+
+To produce a code block in Markdown, simply indent every line of the
+block by at least 4 spaces or 1 tab. For example, given this input:
+
+    This is a normal paragraph:
+
+        This is a code block.
+
+Markdown will generate:
+
+    <p>This is a normal paragraph:</p>
+
+    <pre><code>This is a code block.
+    </code></pre>
+ +One level of indentation -- 4 spaces or 1 tab -- is removed from each +line of the code block. For example, this: + + Here is an example of AppleScript: + + tell application "Foo" + beep + end tell + +will turn into: + +

+    <p>Here is an example of AppleScript:</p>
+
+    <pre><code>tell application "Foo"
+        beep
+    end tell
+    </code></pre>
+ +A code block continues until it reaches a line that is not indented +(or the end of the article). + +Within a code block, ampersands (`&`) and angle brackets (`<` and `>`) +are automatically converted into HTML entities. This makes it very +easy to include example HTML source code using Markdown -- just paste +it and indent it, and Markdown will handle the hassle of encoding the +ampersands and angle brackets. For example, this: + + + +will turn into: + +
+    <pre><code>&lt;div class="footer"&gt;
+        &amp;copy; 2004 Foo Corporation
+    &lt;/div&gt;
+    </code></pre>
+ +Regular Markdown syntax is not processed within code blocks. E.g., +asterisks are just literal asterisks within a code block. This means +it's also easy to use Markdown to write about Markdown's own syntax. + + + +

+<h3 id="hr">Horizontal Rules</h3>

+ +You can produce a horizontal rule tag (`<hr />
`) by placing three or +more hyphens, asterisks, or underscores on a line by themselves. If you +wish, you may use spaces between the hyphens or asterisks. Each of the +following lines will produce a horizontal rule: + + * * * + + *** + + ***** + + - - - + + --------------------------------------- + + +* * * + +

+<h2 id="span">Span Elements</h2>

+ + + +Markdown supports two style of links: *inline* and *reference*. + +In both styles, the link text is delimited by [square brackets]. + +To create an inline link, use a set of regular parentheses immediately +after the link text's closing square bracket. Inside the parentheses, +put the URL where you want the link to point, along with an *optional* +title for the link, surrounded in quotes. For example: + + This is [an example](http://example.com/ "Title") inline link. + + [This link](http://example.net/) has no title attribute. + +Will produce: + +

+    <p>This is <a href="http://example.com/" title="Title">
+    an example</a> inline link.</p>

+ +

+    <p><a href="http://example.net/">This link</a> has no
+    title attribute.</p>

+ +If you're referring to a local resource on the same server, you can +use relative paths: + + See my [About](/about/) page for details. + +Reference-style links use a second set of square brackets, inside +which you place a label of your choosing to identify the link: + + This is [an example][id] reference-style link. + +You can optionally use a space to separate the sets of brackets: + + This is [an example] [id] reference-style link. + +Then, anywhere in the document, you define your link label like this, +on a line by itself: + + [id]: http://example.com/ "Optional Title Here" + +That is: + +* Square brackets containing the link identifier (optionally + indented from the left margin using up to three spaces); +* followed by a colon; +* followed by one or more spaces (or tabs); +* followed by the URL for the link; +* optionally followed by a title attribute for the link, enclosed + in double or single quotes, or enclosed in parentheses. + +The following three link definitions are equivalent: + + [foo]: http://example.com/ "Optional Title Here" + [foo]: http://example.com/ 'Optional Title Here' + [foo]: http://example.com/ (Optional Title Here) + +**Note:** There is a known bug in Markdown.pl 1.0.1 which prevents +single quotes from being used to delimit link titles. + +The link URL may, optionally, be surrounded by angle brackets: + + [id]: "Optional Title Here" + +You can put the title attribute on the next line and use extra spaces +or tabs for padding, which tends to look better with longer URLs: + + [id]: http://example.com/longish/path/to/resource/here + "Optional Title Here" + +Link definitions are only used for creating links during Markdown +processing, and are stripped from your document in the HTML output. + +Link definition names may constist of letters, numbers, spaces, and +punctuation -- but they are *not* case sensitive. E.g. these two +links: + + [link text][a] + [link text][A] + +are equivalent. + +The *implicit link name* shortcut allows you to omit the name of the +link, in which case the link text itself is used as the name. +Just use an empty set of square brackets -- e.g., to link the word +"Google" to the google.com web site, you could simply write: + + [Google][] + +And then define the link: + + [Google]: http://google.com/ + +Because link names may contain spaces, this shortcut even works for +multiple words in the link text: + + Visit [Daring Fireball][] for more information. + +And then define the link: + + [Daring Fireball]: http://daringfireball.net/ + +Link definitions can be placed anywhere in your Markdown document. I +tend to put them immediately after each paragraph in which they're +used, but if you want, you can put them all at the end of your +document, sort of like footnotes. + +Here's an example of reference links in action: + + I get 10 times more traffic from [Google] [1] than from + [Yahoo] [2] or [MSN] [3]. + + [1]: http://google.com/ "Google" + [2]: http://search.yahoo.com/ "Yahoo Search" + [3]: http://search.msn.com/ "MSN Search" + +Using the implicit link name shortcut, you could instead write: + + I get 10 times more traffic from [Google][] than from + [Yahoo][] or [MSN][]. + + [google]: http://google.com/ "Google" + [yahoo]: http://search.yahoo.com/ "Yahoo Search" + [msn]: http://search.msn.com/ "MSN Search" + +Both of the above examples will produce the following HTML output: + +

+    <p>I get 10 times more traffic from <a href="http://google.com/"
+    title="Google">Google</a> than from
+    <a href="http://search.yahoo.com/" title="Yahoo Search">Yahoo</a>
+    or <a href="http://search.msn.com/" title="MSN Search">MSN</a>.</p>

+ +For comparison, here is the same paragraph written using +Markdown's inline link style: + + I get 10 times more traffic from [Google](http://google.com/ "Google") + than from [Yahoo](http://search.yahoo.com/ "Yahoo Search") or + [MSN](http://search.msn.com/ "MSN Search"). + +The point of reference-style links is not that they're easier to +write. The point is that with reference-style links, your document +source is vastly more readable. Compare the above examples: using +reference-style links, the paragraph itself is only 81 characters +long; with inline-style links, it's 176 characters; and as raw HTML, +it's 234 characters. In the raw HTML, there's more markup than there +is text. + +With Markdown's reference-style links, a source document much more +closely resembles the final output, as rendered in a browser. By +allowing you to move the markup-related metadata out of the paragraph, +you can add links without interrupting the narrative flow of your +prose. + + +

+<h3 id="em">Emphasis</h3>

+ +Markdown treats asterisks (`*`) and underscores (`_`) as indicators of +emphasis. Text wrapped with one `*` or `_` will be wrapped with an +HTML `` tag; double `*`'s or `_`'s will be wrapped with an HTML +`` tag. E.g., this input: + + *single asterisks* + + _single underscores_ + + **double asterisks** + + __double underscores__ + +will produce: + + single asterisks + + single underscores + + double asterisks + + double underscores + +You can use whichever style you prefer; the lone restriction is that +the same character must be used to open and close an emphasis span. + +Emphasis can be used in the middle of a word: + + un*fucking*believable + +But if you surround an `*` or `_` with spaces, it'll be treated as a +literal asterisk or underscore. + +To produce a literal asterisk or underscore at a position where it +would otherwise be used as an emphasis delimiter, you can backslash +escape it: + + \*this text is surrounded by literal asterisks\* + + + +

+<h3 id="code">Code</h3>

+ +To indicate a span of code, wrap it with backtick quotes (`` ` ``). +Unlike a pre-formatted code block, a code span indicates code within a +normal paragraph. For example: + + Use the `printf()` function. + +will produce: + +

+    <p>Use the <code>printf()</code> function.</p>

+ +To include a literal backtick character within a code span, you can use +multiple backticks as the opening and closing delimiters: + + ``There is a literal backtick (`) here.`` + +which will produce this: + +

+    <p><code>There is a literal backtick (`) here.</code></p>

+ +The backtick delimiters surrounding a code span may include spaces -- +one after the opening, one before the closing. This allows you to place +literal backtick characters at the beginning or end of a code span: + + A single backtick in a code span: `` ` `` + + A backtick-delimited string in a code span: `` `foo` `` + +will produce: + +

+    <p>A single backtick in a code span: <code>`</code></p>

+ +

+    <p>A backtick-delimited string in a code span: <code>`foo`</code></p>

+ +With a code span, ampersands and angle brackets are encoded as HTML +entities automatically, which makes it easy to include example HTML +tags. Markdown will turn this: + + Please don't use any `` tags. + +into: + +

+    <p>Please don't use any <code>&lt;blink&gt;</code> tags.</p>

+ +You can write this: + + `&#8212;` is the decimal-encoded equivalent of `&mdash;`. + +to produce: + +

+    <p><code>&amp;#8212;</code> is the decimal-encoded
+    equivalent of <code>&amp;mdash;</code>.</p>

+ + + +

+<h3 id="img">Images</h3>

+ +Admittedly, it's fairly difficult to devise a "natural" syntax for +placing images into a plain text document format. + +Markdown uses an image syntax that is intended to resemble the syntax +for links, allowing for two styles: *inline* and *reference*. + +Inline image syntax looks like this: + + ![Alt text](/path/to/img.jpg) + + ![Alt text](/path/to/img.jpg "Optional title") + +That is: + +* An exclamation mark: `!`; +* followed by a set of square brackets, containing the `alt` + attribute text for the image; +* followed by a set of parentheses, containing the URL or path to + the image, and an optional `title` attribute enclosed in double + or single quotes. + +Reference-style image syntax looks like this: + + ![Alt text][id] + +Where "id" is the name of a defined image reference. Image references +are defined using syntax identical to link references: + + [id]: url/to/image "Optional title attribute" + +As of this writing, Markdown has no syntax for specifying the +dimensions of an image; if this is important to you, you can simply +use regular HTML `` tags. + + +* * * + + +

+<h2 id="misc">Miscellaneous</h2>

+ + + +Markdown supports a shortcut style for creating "automatic" links for URLs and email addresses: simply surround the URL or email address with angle brackets. What this means is that if you want to show the actual text of a URL or email address, and also have it be a clickable link, you can do this: + + + +Markdown will turn this into: + + http://example.com/ + +Automatic links for email addresses work similarly, except that +Markdown will also perform a bit of randomized decimal and hex +entity-encoding to help obscure your address from address-harvesting +spambots. For example, Markdown will turn this: + + + +into something like this: + + address@exa + mple.com + +which will render in a browser as a clickable link to "address@example.com". + +(This sort of entity-encoding trick will indeed fool many, if not +most, address-harvesting bots, but it definitely won't fool all of +them. It's better than nothing, but an address published in this way +will probably eventually start receiving spam.) + + + +

+<h3 id="backslash">Backslash Escapes</h3>

+ +Markdown allows you to use backslash escapes to generate literal +characters which would otherwise have special meaning in Markdown's +formatting syntax. For example, if you wanted to surround a word with +literal asterisks (instead of an HTML `` tag), you can backslashes +before the asterisks, like this: + + \*literal asterisks\* + +Markdown provides backslash escapes for the following characters: + + \ backslash + ` backtick + * asterisk + _ underscore + {} curly braces + [] square brackets + () parentheses + # hash mark + + plus sign + - minus sign (hyphen) + . dot + ! exclamation mark + diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/tables.t b/r2/r2/lib/contrib/discount-1.6.0/tests/tables.t new file mode 100644 index 000000000..3ba4c0bae --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/tables.t @@ -0,0 +1,186 @@ +./echo "tables" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + + +try 'single-column table' \ + '|hello +|----- +|sailor' \ + ' + + + + + + + + + + + + +
hello
sailor
' + + +try 'two-column table' \ + ' + a | b +-----|------ +hello|sailor' \ + ' + + + + + + + + + + + + +
a b
hellosailor
' + +try 'three-column table' \ +'a|b|c +-|-|- +hello||sailor'\ + ' + + + + + + + + + + + + + + +
abc
hellosailor
' + +try 'two-column table with empty cells' \ + ' + a | b +-----|------ +hello| + |sailor' \ + ' + + + + + + + + + + + + + + + + +
a b
hello
sailor
' + +try 'two-column table with alignment' \ + ' + a | b +----:|:----- +hello|sailor' \ + ' + + + + + + + + + + + + +
a b
hellosailor
' + +try 'table with extra data column' \ + ' + a | b +-----|------ +hello|sailor|boy' \ + ' + + + + + + + + + + + + +
a b
hellosailor|boy
' + + +try -fnotables 'tables with -fnotables' \ + 'a|b +-|- +hello|sailor' \ + '

a|b +–|– +hello|sailor

' + +try 'deceptive non-table text' \ + 'a | b | c + +text' \ + '

a | b | c

+ +

text

' + +try 'table headers only' \ + 'a|b|c +-|-|-' \ + ' + + + + + + + + + +
abc
' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/tabstop.t b/r2/r2/lib/contrib/discount-1.6.0/tests/tabstop.t new file mode 100644 index 000000000..c577f8b4d --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/tabstop.t @@ -0,0 +1,66 @@ +rc=0 +unset MARKDOWN_FLAGS +unset MKD_TABSTOP + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +eval `./markdown -V | tr ' ' '\n' | grep TAB` + +if [ "${TAB:-4}" -eq 8 ]; then + ./echo "dealing with tabstop derangement" + + LIST=' + * A + * B + * C' + + try 'markdown with TAB=8' \ + "$LIST" \ + '
    +
  • A + +
      +
    • B + +
        +
      • C
      • +
      +
    • +
    +
  • +
' + + try -F0x0200 'markdown with TAB=4' \ + "$LIST" \ + '
    +
  • A + +
      +
    • B
    • +
    • C
    • +
    +
  • +
' + +fi + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/toc.t b/r2/r2/lib/contrib/discount-1.6.0/tests/toc.t new file mode 100644 index 000000000..6408d4cce --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/toc.t @@ -0,0 +1,41 @@ +./echo "table-of-contents support" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + Q=`./echo "$2" | ./markdown $FLAGS` + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + + +try '-T -ftoc' 'table of contents' \ +'#H1 +hi' \ +' + +

H1

+ +

hi

' + + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/tests/xml.t b/r2/r2/lib/contrib/discount-1.6.0/tests/xml.t new file mode 100644 index 000000000..c9a3d2396 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tests/xml.t @@ -0,0 +1,39 @@ +./echo "xml output with MKD_CDATA" + +rc=0 +MARKDOWN_FLAGS= + +try() { + unset FLAGS + case "$1" in + -*) FLAGS=$1 + shift ;; + esac + + ./echo -n " $1" '..................................' | ./cols 36 + + case "$2" in + -t*) Q=`./markdown $FLAGS "$2"` ;; + *) Q=`./echo "$2" | ./markdown $FLAGS` ;; + esac + + if [ "$3" = "$Q" ]; then + ./echo " ok" + else + ./echo " FAILED" + ./echo "wanted: $3" + ./echo "got : $Q" + rc=1 + fi +} + +try -fcdata 'xml output from markdown()' 'hello,sailor' '<p>hello,sailor</p>' +try -fcdata 'from mkd_generateline()' -t'"hello,sailor"' '&ldquo;hello,sailor&rdquo;' +try -fnocdata 'html output from markdown()' '"hello,sailor"' '

<p>&ldquo;hello,sailor&rdquo;</p>

' +try -fnocdata '... from mkd_generateline()' -t'"hello,sailor"' '“hello,sailor”' + +try -fcdata 'xml output with multibyte utf-8' \ + 'tecnología y servicios más confiables' \ + '<p>tecnología y servicios más confiables</p>' + +exit $rc diff --git a/r2/r2/lib/contrib/discount-1.6.0/theme.1 b/r2/r2/lib/contrib/discount-1.6.0/theme.1 new file mode 100644 index 000000000..473b913ee --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/theme.1 @@ -0,0 +1,142 @@ +.\" %A% +.\" +.Dd January 23, 2008 +.Dt THEME 1 +.Os MASTODON +.Sh NAME +.Nm theme +.Nd create a web page from a template file +.Sh SYNOPSIS +.Nm +.Op Fl d Pa root +.Op Fl f +.Op Fl o Pa file +.Op Fl p Pa pagename +.Op Fl t Pa template +.Op Fl V +.Op Pa textfile +.Sh DESCRIPTION +The +.Nm +utility takes a +.Xr markdown 7 Ns -formatted +.Pa textfile +.Pq or stdin if not specified, +compiles it, and combines it with a +.Em template +.Po +.Pa page.theme +by default +.Pc +to produce a web page. If a path to the +template is not specified, +.Nm +looks for +.Pa page.theme +in the current directory, then each parent directory up to the +.Pa "document root" +.Po +set with +.Fl d +or, if unset, the +.Em "root directory" +of the system. +.Pc +If +.Pa page.theme +is found, +.Nm +copies it to the output, looking for +.Em "" +html tags and processing the embedded +.Ar action +as appropriate. +.Pp +.Nm +processes the following actions: +.Bl -tag -width "include(" +.It Ar author +Prints the author name(s) from the +.Xr mkd_doc_author 3 +function. +.It Ar body +Prints the formatted +.Xr markdown 7 +input file. +.It Ar date +Prints the date returned by +.Xr mkd_doc_date 3 +or, if none, the +date the input file was last modified. +.It Ar dir +Prints the directory part of the pagename +.It Ar include Ns Pq Pa file +Prints the contents of +.Pa file . +.Xr Markdown 7 +translation will +.Em NOT +be done on this file. +.It Ar source +The filename part of the pagename. +.It Ar style +Print any stylesheets +.Pq see Xr mkd-extensions 7 +found in the input file. +.It Ar title +Print the title returned by +.Xr mkd_doc_title 3 , +or, if that does not exist, the source filename. +.It Ar version +Print the version of +.Xr discount 7 +that this copy of theme was compiled with. +.El +.Pp +If input is coming from a file and the output was not set with the +.Ar o +option, +.Nm writes the output to +.Pa file-sans-text.html +.Pq if +.Ar file +has a +.Pa .text +suffix, that will be stripped off and replaced with +.Pa .html ; +otherwise a +.Pa .html +will be appended to the end of the filename.) +.Pp +The options are as follows: +.Bl -tag -width "-o file" +.It Fl d Pa root +Set the +.Em "document root" +to +.Ar root +.It Fl f +Forcibly overwrite existing html files. +.It Fl o Pa filename +Write the output to +.Ar filename . +.It Fl p Ar path +Set the pagename to +.Ar path . +.It Fl t Ar filename +Use +.Ar filename +as the template file. +.El +.Sh RETURN VALUES +The +.Nm +utility exits 0 on success, and >0 if an error occurs. +.Sh SEE ALSO +.Xr markdown 1 , +.Xr markdown 3 , +.Xr markdown 7 , +.Xr mkd-extensions 7 . 
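+.Sh EXAMPLES
+A minimal sketch of a template and an invocation; the
+.Pa page.theme
+and
+.Pa index.text
+names are placeholders, and the action tags shown are the
+.Li <?theme ...?>
+form that
+.Nm
+scans for:
+.Bd -literal -offset indent
+<html>
+  <head>
+    <title><?theme title?></title>
+    <?theme style?>
+  </head>
+  <body>
+    <?theme body?>
+  </body>
+</html>
+
+theme -t page.theme -o index.html index.text
+.Ed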
+.Sh AUTHOR +.An David Parsons +.Pq Li orc@pell.chi.il.us diff --git a/r2/r2/lib/contrib/discount-1.6.0/theme.c b/r2/r2/lib/contrib/discount-1.6.0/theme.c new file mode 100644 index 000000000..97f401aae --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/theme.c @@ -0,0 +1,593 @@ +/* + * theme: use a template to create a webpage (markdown-style) + * + * usage: theme [-d root] [-p pagename] [-t template] [-o html] [source] + * + */ +/* + * Copyright (C) 2007 David L Parsons. + * The redistribution terms are provided in the COPYRIGHT file that must + * be distributed with this source code. + */ +#include "config.h" + +#include +#include +#include +#if defined(HAVE_BASENAME) && defined(HAVE_LIBGEN_H) +# include +#endif +#include +#include +#include +#include +#include +#if HAVE_PWD_H +# include +#endif +#include +#include +#include + +#include "mkdio.h" +#include "cstring.h" +#include "amalloc.h" + +char *pgm = "theme"; +char *output = 0; +char *pagename = 0; +char *root = 0; +#if HAVE_PWD_H +struct passwd *me = 0; +#endif +struct stat *infop = 0; + +#ifndef HAVE_BASENAME +char * +basename(char *path) +{ + char *p; + + if (( p = strrchr(path, '/') )) + return 1+p; + return path; +} +#endif + +#ifdef HAVE_FCHDIR +typedef int HERE; +#define NOT_HERE (-1) + +#define pushd(d) open(d, O_RDONLY) + +int +popd(HERE pwd) +{ + int rc = fchdir(pwd); + close(pwd); + return rc; +} + +#else + +typedef char* HERE; +#define NOT_HERE 0 + +HERE +pushd(char *d) +{ + HERE cwd; + int size; + + if ( chdir(d) == -1 ) + return NOT_HERE; + + for (cwd = malloc(size=40); cwd; cwd = realloc(cwd, size *= 2)) + if ( getcwd(cwd, size) ) + return cwd; + + return NOT_HERE; +} + +int +popd(HERE pwd) +{ + if ( pwd ) { + int rc = chdir(pwd); + free(pwd); + + return rc; + } + return -1; +} +#endif + +typedef STRING(int) Istring; + +void +fail(char *why, ...) +{ + va_list ptr; + + va_start(ptr,why); + fprintf(stderr, "%s: ", pgm); + vfprintf(stderr, why, ptr); + fputc('\n', stderr); + va_end(ptr); + exit(1); +} + + +/* open_template() -- start at the current directory and work up, + * looking for the deepest nested template. + * Stop looking when we reach $root or / + */ +FILE * +open_template(char *template) +{ + char *cwd; + int szcwd; + HERE here = pushd("."); + FILE *ret; + + if ( here == NOT_HERE ) + fail("cannot access the current directory"); + + szcwd = root ? 1 + strlen(root) : 2; + + if ( (cwd = malloc(szcwd)) == 0 ) + return 0; + + while ( !(ret = fopen(template, "r")) ) { + if ( getcwd(cwd, szcwd) == 0 ) { + if ( errno == ERANGE ) + goto up; + break; + } + + if ( root && (strcmp(root, cwd) == 0) ) + break; /* ran out of paths to search */ + else if ( (strcmp(cwd, "/") == 0) || (*cwd == 0) ) + break; /* reached / */ + + up: if ( chdir("..") == -1 ) + break; + } + free(cwd); + popd(here); + return ret; +} /* open_template */ + + +static Istring inbuf; +static int psp; + +static int +prepare(FILE *input) +{ + int c; + + CREATE(inbuf); + psp = 0; + while ( (c = getc(input)) != EOF ) + EXPAND(inbuf) = c; + fclose(input); + return 1; +} + +static int +pull() +{ + return psp < S(inbuf) ? 
T(inbuf)[psp++] : EOF; +} + +static int +peek(int offset) +{ + int pos = (psp + offset)-1; + + if ( pos >= 0 && pos < S(inbuf) ) + return T(inbuf)[pos]; + + return EOF; +} + +static int +shift(int shiftwidth) +{ + psp += shiftwidth; + return psp; +} + +static int* +cursor() +{ + return T(inbuf) + psp; +} + + +static int +thesame(int *p, char *pat) +{ + int i; + + for ( i=0; pat[i]; i++ ) { + if ( pat[i] == ' ' ) { + if ( !isspace(peek(i+1)) ) { + return 0; + } + } + else if ( tolower(peek(i+1)) != pat[i] ) { + return 0; + } + } + return 1; +} + + +static int +istag(int *p, char *pat) +{ + int c; + + if ( thesame(p, pat) ) { + c = peek(strlen(pat)+1); + return (c == '>' || isspace(c)); + } + return 0; +} + + +/* finclude() includes some (unformatted) source + */ +static void +finclude(MMIOT *doc, FILE *out, int flags) +{ + int c; + Cstring include; + FILE *f; + + CREATE(include); + + while ( (c = pull()) != '(' ) + ; + + while ( (c=pull()) != ')' && c != EOF ) + EXPAND(include) = c; + + if ( c != EOF ) { + EXPAND(include) = 0; + S(include)--; + + if (( f = fopen(T(include), "r") )) { + while ( (c = getc(f)) != EOF ) + putc(c, out); + fclose(f); + } + } + DELETE(include); +} + + +/* fdirname() prints out the directory part of a path + */ +static void +fdirname(MMIOT *doc, FILE *output, int flags) +{ + char *p; + + if ( pagename && (p = basename(pagename)) ) + fwrite(pagename, strlen(pagename)-strlen(p), 1, output); +} + + +/* fbasename() prints out the file name part of a path + */ +static void +fbasename(MMIOT *doc, FILE *output, int flags) +{ + char *p; + + if ( pagename ) { + p = basename(pagename); + + if ( !p ) + p = pagename; + + if ( p ) + fwrite(p, strlen(p), 1, output); + } +} + + +/* ftitle() prints out the document title + */ +static void +ftitle(MMIOT *doc, FILE* output, int flags) +{ + char *h; + if ( (h = mkd_doc_title(doc)) == 0 && pagename ) + h = pagename; + + if ( h ) + mkd_generateline(h, strlen(h), output, flags); +} + + +/* fdate() prints out the document date + */ +static void +fdate(MMIOT *doc, FILE *output, int flags) +{ + char *h; + + if ( (h = mkd_doc_date(doc)) || ( infop && (h = ctime(&infop->st_mtime)) ) ) + mkd_generateline(h, strlen(h), output, flags|MKD_TAGTEXT); +} + + +/* fauthor() prints out the document author + */ +static void +fauthor(MMIOT *doc, FILE *output, int flags) +{ + char *h = mkd_doc_author(doc); + +#if HAVE_PWD_H + if ( (h == 0) && me ) + h = me->pw_gecos; +#endif + + if ( h ) + mkd_generateline(h, strlen(h), output, flags); +} + + +/* fversion() prints out the document version + */ +static void +fversion(MMIOT *doc, FILE *output, int flags) +{ + fwrite(markdown_version, strlen(markdown_version), 1, output); +} + + +/* fbody() prints out the document + */ +static void +fbody(MMIOT *doc, FILE *output, int flags) +{ + mkd_generatehtml(doc, output); +} + +/* ftoc() prints out the table of contents + */ +static void +ftoc(MMIOT *doc, FILE *output, int flags) +{ + mkd_generatetoc(doc, output); +} + +/* fstyle() prints out the document's style section + */ +static void +fstyle(MMIOT *doc, FILE *output, int flags) +{ + mkd_generatecss(doc, output); +} + + +#define INTAG 0x01 +#define INHEAD 0x02 +#define INBODY 0x04 + +/* + * theme expansions we love: + * -- the document date (file or header date) + * -- the document title (header title or document name) + * -- the document author (header author or document owner) + * -- the version# + * -- the document body + * -- the filename part of the document name + * -- the directory part of the document name 
+ * -- the html file name + * -- document-supplied style blocks + * -- include a file. + */ +static struct _keyword { + char *kw; + int where; + void (*what)(MMIOT*,FILE*,int); +} keyword[] = { + { "author?>", 0xffff, fauthor }, + { "body?>", INBODY, fbody }, + { "toc?>", INBODY, ftoc }, + { "date?>", 0xffff, fdate }, + { "dir?>", 0xffff, fdirname }, + { "include(", 0xffff, finclude }, + { "source?>", 0xffff, fbasename }, + { "style?>", INHEAD, fstyle }, + { "title?>", 0xffff, ftitle }, + { "version?>", 0xffff, fversion }, +}; +#define NR(x) (sizeof x / sizeof x[0]) + + +/* spin() - run through the theme template, looking for ') ); + } + else if ( (peek(1) == '?') && thesame(cursor(), "?theme ") ) { + shift(strlen("?theme ")); + + while ( ((c = pull()) != EOF) && isspace(c) ) + ; + + shift(-1); + p = cursor(); + + if ( where & INTAG ) + flags = MKD_TAGTEXT; + else if ( where & INHEAD ) + flags = MKD_NOIMAGE|MKD_NOLINKS; + else + flags = 0; + + for (i=0; i < NR(keyword); i++) + if ( thesame(p, keyword[i].kw) ) { + if ( keyword[i].where & where ) + (*keyword[i].what)(doc,output,flags); + break; + } + + while ( (c = pull()) != EOF && (c != '?' && peek(1) != '>') ) + ; + shift(1); + } + else + putc(c, output); + + if ( istag(cursor(), "head") ) { + where |= INHEAD; + where &= ~INBODY; + } + else if ( istag(cursor(), "body") ) { + where &= ~INHEAD; + where |= INBODY; + } + where |= INTAG; + continue; + } + else if ( c == '>' ) + where &= ~INTAG; + + putc(c, output); + } +} /* spin */ + + +void +main(argc, argv) +char **argv; +{ + char *template = "page.theme"; + char *source = "stdin"; + FILE *tmplfile; + int opt; + int force = 0; + MMIOT *doc; + struct stat sourceinfo; + + opterr=1; + pgm = basename(argv[0]); + + while ( (opt=getopt(argc, argv, "fd:t:p:o:V")) != EOF ) { + switch (opt) { + case 'd': root = optarg; + break; + case 'p': pagename = optarg; + break; + case 'f': force = 1; + break; + case 't': template = optarg; + break; + case 'o': output = optarg; + break; + case 'V': printf("theme+discount %s\n", markdown_version); + exit(0); + default: fprintf(stderr, "usage: %s [-V] [-d dir] [-p pagename] [-t template] [-o html] [file]\n", pgm); + exit(1); + } + } + + tmplfile = open_template(template); + + argc -= optind; + argv += optind; + + + if ( argc > 0 ) { + int added_text=0; + + if ( (source = malloc(strlen(argv[0]) + strlen("/index.text") + 1)) == 0 ) + fail("out of memory allocating name buffer"); + + strcpy(source,argv[0]); + if ( (stat(source, &sourceinfo) == 0) && S_ISDIR(sourceinfo.st_mode) ) + strcat(source, "/index"); + + if ( !freopen(source, "r", stdin) ) { + strcat(source, ".text"); + added_text = 1; + if ( !freopen(source, "r", stdin) ) + fail("can't open either %s or %s", argv[0], source); + } + + if ( !output ) { + char *p, *q; + output = alloca(strlen(source) + strlen(".html") + 1); + + strcpy(output, source); + + if (( p = strchr(output, '/') )) + q = strrchr(p+1, '.'); + else + q = strrchr(output, '.'); + + if ( q ) + *q = 0; + strcat(q, ".html"); + } + } + if ( output ) { + if ( force ) + unlink(output); + if ( !freopen(output, "w", stdout) ) + fail("can't write to %s", output); + } + + if ( !pagename ) + pagename = source; + + if ( (doc = mkd_in(stdin, 0)) == 0 ) + fail("can't read %s", source ? source : "stdin"); + + if ( fstat(fileno(stdin), &sourceinfo) == 0 ) + infop = &sourceinfo; + +#if HAVE_GETPWUID + me = getpwuid(infop ? 
infop->st_uid : getuid()); + + if ( (root = strdup(me->pw_dir)) == 0 ) + fail("out of memory"); +#endif + + if ( !mkd_compile(doc, MKD_TOC) ) + fail("couldn't compile input"); + + if ( tmplfile ) + spin(tmplfile,doc,stdout); + else + mkd_generatehtml(doc, stdout); + + mkd_cleanup(doc); + exit(0); +} diff --git a/r2/r2/lib/contrib/discount-1.6.0/toc.c b/r2/r2/lib/contrib/discount-1.6.0/toc.c new file mode 100644 index 000000000..cf957c85e --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/toc.c @@ -0,0 +1,90 @@ +/* + * toc -- spit out a table of contents based on header blocks + * + * Copyright (C) 2008 Jjgod Jiang, David L Parsons. + * The redistribution terms are provided in the COPYRIGHT file that must + * be distributed with this source code. + */ +#include "config.h" +#include +#include +#include + +#include "cstring.h" +#include "markdown.h" +#include "amalloc.h" + +/* write an header index + */ +int +mkd_toc(Document *p, char **doc) +{ + Paragraph *tp, *srcp; + int last_hnumber = 0; + Cstring res; + + CREATE(res); + RESERVE(res, 100); + + *doc = 0; + + if ( !(p && p->ctx) ) return -1; + if ( ! (p->ctx->flags & TOC) ) return 0; + + for ( tp = p->code; tp ; tp = tp->next ) { + if ( tp->typ == SOURCE ) { + for ( srcp = tp->down; srcp; srcp = srcp->next ) { + if ( srcp->typ == HDR && srcp->text ) { + + if ( last_hnumber == srcp->hnumber ) + Csprintf(&res, "%*s\n", srcp->hnumber, ""); + else while ( last_hnumber > srcp->hnumber ) { + Csprintf(&res, "%*s\n%*s\n", + last_hnumber, "", + last_hnumber-1,""); + --last_hnumber; + } + + while ( srcp->hnumber > last_hnumber ) { + Csprintf(&res, "\n%*s\n", + last_hnumber, "", last_hnumber, ""); + --last_hnumber; + } + /* HACK ALERT! HACK ALERT! HACK ALERT! */ + *doc = T(res); /* we know that a T(Cstring) is a character pointer */ + /* so we can simply pick it up and carry it away, */ + return S(res); /* leaving the husk of the Ctring on the stack */ + /* END HACK ALERT */ +} + + +/* write an header index + */ +int +mkd_generatetoc(Document *p, FILE *out) +{ + char *buf = 0; + int sz = mkd_toc(p, &buf); + int ret = EOF; + + if ( sz > 0 ) + ret = fwrite(buf, sz, 1, out); + + if ( buf ) free(buf); + + return ret; +} diff --git a/r2/r2/lib/contrib/discount-1.6.0/tools/cols.c b/r2/r2/lib/contrib/discount-1.6.0/tools/cols.c new file mode 100644 index 000000000..68ecc590d --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tools/cols.c @@ -0,0 +1,38 @@ +#include +#include + +main(argc, argv) +char **argv; +{ + register c; + int xp; + int width; + + if ( argc != 2 ) { + fprintf(stderr, "usage: %s width\n", argv[0]); + exit(1); + } + else if ( (width=atoi(argv[1])) < 1 ) { + fprintf(stderr, "%s: please set width to > 0\n", argv[0]); + exit(1); + } + + + for ( xp = 1; (c = getchar()) != EOF; xp++ ) { + while ( c & 0xC0 ) { + /* assume that (1) the output device understands utf-8, and + * (2) the only c & 0x80 input is utf-8. 
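The new toc.c above builds the table of contents by walking the compiled document and opening or closing <ul>/<li> pairs as the header level rises and falls. As a rough illustration of that open/close bookkeeping only (a standalone sketch, not the discount API), here is the same idea in Python, assuming headers arrive as (level, title) pairs and that levels start at 1:

def make_toc(headers):
    """Nest (level, title) header pairs into <ul>/<li> markup."""
    out = []
    last = 0
    for level, title in headers:
        if level > last:
            # deeper heading: open one new list per level gained
            out.append("<ul>" * (level - last))
        else:
            # same or shallower heading: close the previous item,
            # then close one list per level lost
            out.append("</li>")
            out.append("</ul></li>" * (last - level))
        out.append("<li>%s" % title)
        last = level
    # close whatever is still open at the end of the document
    if last:
        out.append("</li>" + "</ul></li>" * (last - 1) + "</ul>")
    return "".join(out)

print make_toc([(1, "Intro"), (2, "Install"), (2, "Usage"), (1, "Credits")])

The real mkd_toc() does the equivalent walk over the compiled paragraph tree and hands the finished buffer back as a Cstring, which mkd_generatetoc() then writes to the output stream.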
+ */ + do { + if ( xp <= width ) + putchar(c); + } while ( (c = getchar()) != EOF && (c & 0x80) && !(c & 0x40) ); + ++xp; + } + if ( c == '\n' ) + xp = 0; + if ( xp <= width ) + putchar(c); + } + exit(0); +} diff --git a/r2/r2/lib/contrib/discount-1.6.0/tools/echo.c b/r2/r2/lib/contrib/discount-1.6.0/tools/echo.c new file mode 100644 index 000000000..5352caf92 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/tools/echo.c @@ -0,0 +1,22 @@ +#include +#include + + +main(argc, argv) +char **argv; +{ + int nl = 1; + int i; + + if ( (argc > 1) && (strcmp(argv[1], "-n") == 0) ) { + ++argv; + --argc; + nl = 0; + } + + for ( i=1; i < argc; i++ ) { + if ( i > 1 ) putchar(' '); + fputs(argv[i], stdout); + } + if (nl) putchar('\n'); +} diff --git a/r2/r2/lib/contrib/discount-1.6.0/version.c b/r2/r2/lib/contrib/discount-1.6.0/version.c new file mode 100644 index 000000000..be99141bb --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/version.c @@ -0,0 +1,28 @@ +#include "config.h" + +char markdown_version[] = VERSION +#if DL_TAG_EXTENSION + " DL_TAG" +#endif +#if PANDOC_HEADER + " HEADER" +#endif +#if 4 != 4 + " TAB=4" +#endif +#if USE_AMALLOC + " DEBUG" +#endif +#if SUPERSCRIPT + " SUPERSCRIPT" +#endif +#if RELAXED_EMPHASIS + " RELAXED" +#endif +#if DIV_QUOTE + " DIV" +#endif +#if ALPHA_LIST + " AL" +#endif + ; diff --git a/r2/r2/lib/contrib/discount-1.6.0/version.c.in b/r2/r2/lib/contrib/discount-1.6.0/version.c.in new file mode 100644 index 000000000..f4875606e --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/version.c.in @@ -0,0 +1,28 @@ +#include "config.h" + +char markdown_version[] = VERSION +#if DL_TAG_EXTENSION + " DL_TAG" +#endif +#if PANDOC_HEADER + " HEADER" +#endif +#if @TABSTOP@ != 4 + " TAB=@TABSTOP@" +#endif +#if USE_AMALLOC + " DEBUG" +#endif +#if SUPERSCRIPT + " SUPERSCRIPT" +#endif +#if RELAXED_EMPHASIS + " RELAXED" +#endif +#if DIV_QUOTE + " DIV" +#endif +#if ALPHA_LIST + " AL" +#endif + ; diff --git a/r2/r2/lib/contrib/discount-1.6.0/xml.c b/r2/r2/lib/contrib/discount-1.6.0/xml.c new file mode 100644 index 000000000..5e5838993 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/xml.c @@ -0,0 +1,82 @@ +/* markdown: a C implementation of John Gruber's Markdown markup language. + * + * Copyright (C) 2007 David L Parsons. + * The redistribution terms are provided in the COPYRIGHT file that must + * be distributed with this source code. + */ +#include +#include +#include +#include +#include +#include + +#include "config.h" + +#include "cstring.h" +#include "markdown.h" +#include "amalloc.h" + +/* return the xml version of a character + */ +static char * +mkd_xmlchar(unsigned char c) +{ + switch (c) { + case '<': return "<"; + case '>': return ">"; + case '&': return "&"; + case '"': return """; + case '\'': return "'"; + default: if ( isascii(c) || (c & 0x80) ) + return 0; + return ""; + } +} + + +/* write output in XML format + */ +int +mkd_generatexml(char *p, int size, FILE *out) +{ + unsigned char c; + char *entity; + + while ( size-- > 0 ) { + c = *p++; + + if ( entity = mkd_xmlchar(c) ) + fputs(entity, out); + else + fputc(c, out); + } + return 0; +} + + +/* build a xml'ed version of a string + */ +int +mkd_xml(char *p, int size, char **res) +{ + unsigned char c; + char *entity; + Cstring f; + + CREATE(f); + RESERVE(f, 100); + + while ( size-- > 0 ) { + c = *p++; + if ( entity = mkd_xmlchar(c) ) + Cswrite(&f, entity, strlen(entity)); + else + Csputc(c, &f); + } + /* HACK ALERT! HACK ALERT! HACK ALERT! 
*/ + *res = T(f); /* we know that a T(Cstring) is a character pointer */ + /* so we can simply pick it up and carry it away, */ + return S(f); /* leaving the husk of the Ctring on the stack */ + /* END HACK ALERT */ +} diff --git a/r2/r2/lib/contrib/discount-1.6.0/xmlpage.c b/r2/r2/lib/contrib/discount-1.6.0/xmlpage.c new file mode 100644 index 000000000..96ed2b758 --- /dev/null +++ b/r2/r2/lib/contrib/discount-1.6.0/xmlpage.c @@ -0,0 +1,48 @@ +/* + * xmlpage -- write a skeletal xhtml page + * + * Copyright (C) 2007 David L Parsons. + * The redistribution terms are provided in the COPYRIGHT file that must + * be distributed with this source code. + */ +#include "config.h" +#include +#include +#include + +#include "cstring.h" +#include "markdown.h" +#include "amalloc.h" + + +int +mkd_xhtmlpage(Document *p, int flags, FILE *out) +{ + char *title; + extern char *mkd_doc_title(Document *); + + if ( mkd_compile(p, flags) ) { + fprintf(out, "\n"); + fprintf(out, "\n"); + + fprintf(out, "\n"); + + fprintf(out, "\n"); + if ( title = mkd_doc_title(p) ) + fprintf(out, "%s\n", title); + mkd_generatecss(p, out); + fprintf(out, "\n"); + + fprintf(out, "\n"); + mkd_generatehtml(p, out); + fprintf(out, "\n"); + fprintf(out, "\n"); + + mkd_cleanup(p); + + return 0; + } + return -1; +} diff --git a/r2/r2/lib/contrib/memcache.py b/r2/r2/lib/contrib/memcache.py index abff3bcb1..30bc52ad0 100755 --- a/r2/r2/lib/contrib/memcache.py +++ b/r2/r2/lib/contrib/memcache.py @@ -256,8 +256,8 @@ class Client(local): # return server, key # serverhash = serverHashFunction(str(serverhash) + str(i)) - print ("Couldn't connect to any of the %d memcache servers" % - len(self.buckets)) + print ("Couldn't connect to any of the %d memcache servers: %r" % + (len(self.buckets), [ (x.ip, x.port) for x in self.buckets])) return None, key def disconnect_all(self): @@ -940,7 +940,7 @@ class _Host: buf += foo if len(foo) == 0: raise _Error, ( 'Read %d bytes, expecting %d, ' - 'read returned 0 length bytes' % ( len(buf), foo )) + 'read returned 0 length bytes' % ( len(buf), rlen )) self.buffer = buf[rlen:] return buf[:rlen] diff --git a/r2/r2/lib/cssfilter.py b/r2/r2/lib/cssfilter.py index ac3102efd..1df364713 100644 --- a/r2/r2/lib/cssfilter.py +++ b/r2/r2/lib/cssfilter.py @@ -28,6 +28,7 @@ from r2.lib.pages.things import wrap_links from pylons import g, c from pylons.i18n import _ +from mako import filters import tempfile from r2.lib import s3cp @@ -170,10 +171,21 @@ def valid_url(prop,value,report): * image labels %%..%% for images uploaded on /about/stylesheet * urls with domains in g.allowed_css_linked_domains """ - url = value.getStringValue() + try: + url = value.getStringValue() + except IndexError: + g.log.error("Problem validating [%r]" % value) + raise # local urls are allowed if local_urls.match(url): - pass + t_url = None + while url != t_url: + t_url, url = url, filters.url_unescape(url) + # disallow path trickery + if "../" in url: + report.append(ValidationError(msgs['broken_url'] + % dict(brokenurl = value.cssText), + value)) # custom urls are allowed, but need to be transformed into a real path elif custom_img_urls.match(url): name = custom_img_urls.match(url).group(1) @@ -329,13 +341,14 @@ def find_preview_links(sr): from r2.lib.normalized_hot import get_hot # try to find a link to use, otherwise give up and return - links = get_hot(c.site, only_fullnames = True) + links = get_hot([c.site], only_fullnames = True)[0] if not links: sr = Subreddit._by_name(g.default_sr) if sr: - links = get_hot(sr, only_fullnames = 
True) + links = get_hot([sr], only_fullnames = True)[0] if links: + links = links[:25] links = Link._by_fullname(links, data=True, return_dict=False) return links diff --git a/r2/r2/lib/db/queries.py b/r2/r2/lib/db/queries.py index c55dc6573..f6d35c527 100644 --- a/r2/r2/lib/db/queries.py +++ b/r2/r2/lib/db/queries.py @@ -1,13 +1,16 @@ from r2.models import Account, Link, Comment, Vote, SaveHide -from r2.models import Message, Inbox, Subreddit +from r2.models import Message, Inbox, Subreddit, ModeratorInbox from r2.lib.db.thing import Thing, Merge from r2.lib.db.operators import asc, desc, timeago from r2.lib.db import query_queue from r2.lib.normalized_hot import expire_hot from r2.lib.db.sorts import epoch_seconds from r2.lib.utils import fetch_things2, tup, UniqueIterator, set_last_modified +from r2.lib import utils from r2.lib.solrsearch import DomainSearchQuery from r2.lib import amqp, sup +from r2.lib.comment_tree import add_comment, link_comments + import cPickle as pickle from datetime import datetime @@ -15,15 +18,15 @@ import itertools from pylons import g query_cache = g.permacache +log = g.log +make_lock = g.make_lock precompute_limit = 1000 db_sorts = dict(hot = (desc, '_hot'), new = (desc, '_date'), top = (desc, '_score'), - controversial = (desc, '_controversy'), - old = (asc, '_date'), - toplinks = (desc, '_hot')) + controversial = (desc, '_controversy')) def db_sort(sort): cls, col = db_sorts[sort] @@ -42,6 +45,29 @@ db_times = dict(all = None, month = Thing.c._date >= timeago('1 month'), year = Thing.c._date >= timeago('1 year')) +# batched_time_sorts/batched_time_times: top and controversial +# listings with a time-component are really expensive, and for the +# ones that span more than a day they don't change much (if at all) +# within that time. So we have some hacks to avoid re-running these +# queries against the precomputer except up to once per day +# * To get the results of the queries, we return the results of the +# (potentially stale) query, merged with the query by 'day' (see +# get_links) +# * When we are adding the special queries to the queue, we add them +# with a preflight check to determine if they are runnable and a +# postflight action to make them not runnable again for 24 hours +# (see new_vote) +# * We have a task called catch_up_batch_queries to be run at least +# once per day (ideally about once per hour) to find subreddits +# where these queries haven't been run in the last 24 hours but that +# have had at least one vote in that time +# TODO: +# * Do we need a filter on merged time-queries to keep items that are +# barely too old from making it into the listing? This probably only +# matters for 'week' +batched_time_times = set(('year', 'month', 'week')) +batched_time_sorts = set(('top', 'controversial')) + #we need to define the filter functions here so cachedresults can be pickled def filter_identity(x): return x @@ -51,6 +77,20 @@ def filter_thing2(x): the object of the relationship.""" return x._thing2 +def make_batched_time_query(sr, sort, time, preflight_check = True): + q = get_links(sr, sort, time, merge_batched=False) + + if (g.use_query_cache + and sort in batched_time_sorts + and time in batched_time_times): + + if not preflight_check: + q.force_run = True + + q.batched_time_srid = sr._id + + return q + class CachedResults(object): """Given a query returns a list-like object that will lazily look up the query from the persistent cache. 
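The long comment above explains how top/controversial listings with a time component are served: the cached, possibly day-old long-window result is merged with the cheap 'day' query, duplicates are dropped, and the union is re-sorted (which is what MergedCachedResults does further down). A standalone sketch of that merge, assuming each cached listing is a list of (fullname, sort_value) tuples sorted descending; the names here are illustrative, not the r2 API:

def merge_listings(stale, fresh, limit=1000):
    """Merge a stale long-window listing with a fresh 'day' listing.
    If an item shows up in both (e.g. it is 'top' for the year *and*
    for today), the first occurrence wins, mirroring the
    UniqueIterator pass in queries.py."""
    seen = set()
    merged = []
    for fullname, sort_value in stale + fresh:
        if fullname not in seen:
            seen.add(fullname)
            merged.append((fullname, sort_value))
    # re-sort on the sort column, descending, and trim to the cache limit
    merged.sort(key=lambda t: t[1], reverse=True)
    return merged[:limit]

print merge_listings([("t3_aaa", 980), ("t3_bbb", 750)],
                     [("t3_ccc", 990), ("t3_aaa", 980)])

Anything hot enough today to beat the stale long-window entries shows up immediately; everything else waits for the next batched recomputation.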
""" @@ -63,11 +103,57 @@ class CachedResults(object): self.data = [] self._fetched = False + self.batched_time_srid = None + + @property + def sort(self): + return self.query._sort + + def preflight_check(self): + if getattr(self, 'force_run', False): + return True + + sr_id = getattr(self, 'batched_time_srid', None) + if not sr_id: + return True + + # this is a special query that tries to run less often, see + # the discussion about batched_time_times + sr = Subreddit._byID(sr_id, data=True) + + if (self.iden in getattr(sr, 'last_batch_query', {}) + and sr.last_batch_query[self.iden] > utils.timeago('1 day')): + # this has been done in the last 24 hours, so we should skip it + return False + + return True + + def postflight(self): + sr_id = getattr(self, 'batched_time_srid', None) + if not sr_id: + return True + + with make_lock('modify_sr_last_batch_query(%s)' % sr_id): + sr = Subreddit._byID(sr_id, data=True) + last_batch_query = getattr(sr, 'last_batch_query', {}).copy() + last_batch_query[self.iden] = datetime.now(g.tz) + sr.last_batch_query = last_batch_query + sr._commit() + def fetch(self): """Loads the query from the cache.""" - if not self._fetched: - self._fetched = True - self.data = query_cache.get(self.iden) or [] + self.fetch_multi([self]) + + @classmethod + def fetch_multi(cls, crs): + unfetched = [cr for cr in crs if not cr._fetched] + if not unfetched: + return + + cached = query_cache.get_multi([cr.iden for cr in unfetched]) + for cr in unfetched: + cr.data = cached.get(cr.iden) or [] + cr._fetched = True def make_item_tuple(self, item): """Given a single 'item' from the result of a query build the tuple @@ -87,15 +173,21 @@ class CachedResults(object): def can_insert(self): """True if a new item can just be inserted rather than - rerunning the query. This is only true in some - circumstances, which includes having no time rules, and - being sorted descending""" + rerunning the query.""" + # This is only true in some circumstances: queries where + # eligibility in the list is determined only by its sort + # value (e.g. hot) and where addition/removal from the list + # incurs an insertion/deletion event called on the query. So + # the top hottest items in X some subreddit where the query + # is notified on every submission/banning/unbanning/deleting + # will work, but for queries with a time-component or some + # other eligibility factor, it cannot be inserted this way. 
if self.query._sort in ([desc('_date')], [desc('_hot'), desc('_date')], [desc('_score'), desc('_date')], [desc('_controversy'), desc('_date')]): - if not any(r.lval.name == '_date' - for r in self.query._rules): + if not any(r for r in self.query._rules + if r.lval.name == '_date'): # if no time-rule is specified, then it's 'all' return True return False @@ -117,9 +209,11 @@ class CachedResults(object): data = UniqueIterator(data, key = lambda x: x[0]) data = sorted(data, key=lambda x: x[1:], reverse=True) data = list(data) + data = data[:precompute_limit] + self.data = data - query_cache.set(self.iden, self.data[:precompute_limit]) + query_cache.set(self.iden, self.data) def delete(self, items): """Deletes an item from the cached data.""" @@ -150,33 +244,47 @@ class CachedResults(object): for x in self.data: yield x[0] -def merge_cached_results(*results): - """Given two CachedResults, merges their lists based on the sorts of - their queries.""" - if len(results) == 1: - return list(results[0]) +class MergedCachedResults(object): + """Given two CachedResults, merges their lists based on the sorts + of their queries.""" + # normally we'd do this by having a superclass of CachedResults, + # but we have legacy pickled CachedResults that we don't want to + # break - #make sure the sorts match - sort = results[0].query._sort - assert all(r.query._sort == sort for r in results[1:]) + def __init__(self, results): + self.cached_results = results + CachedResults.fetch_multi([r for r in results + if isinstance(r, CachedResults)]) + self._fetched = True - def thing_cmp(t1, t2): - for i, s in enumerate(sort): - #t1 and t2 are tuples of (fullname, *sort_cols), so we can - #get the value to compare right out of the tuple + self.sort = results[0].sort + # make sure they're all the same + assert all(r.sort == self.sort for r in results[1:]) + + # if something is 'top' for the year *and* for today, it would + # appear in both listings, so we need to filter duplicates + all_items = UniqueIterator((item for cr in results + for item in cr.data), + key = lambda x: x[0]) + all_items = sorted(all_items, cmp=self._thing_cmp) + self.data = list(all_items) + + def _thing_cmp(self, t1, t2): + for i, s in enumerate(self.sort): + # t1 and t2 are tuples of (fullname, *sort_cols), so we + # can get the value to compare right out of the tuple v1, v2 = t1[i + 1], t2[i + 1] if v1 != v2: return cmp(v1, v2) if isinstance(s, asc) else cmp(v2, v1) #they're equal return 0 - all_items = [] - for r in results: - r.fetch() - all_items.extend(r.data) + def __repr__(self): + return '' % (self.cached_results,) - #all_items = Thing._by_fullname(all_items, return_dict = False) - return [i[0] for i in sorted(all_items, cmp = thing_cmp)] + def __iter__(self): + for x in self.data: + yield x[0] def make_results(query, filter = filter_identity): if g.use_query_cache: @@ -187,24 +295,37 @@ def make_results(query, filter = filter_identity): def merge_results(*results): if g.use_query_cache: - return merge_cached_results(*results) + return MergedCachedResults(results) else: m = Merge(results, sort = results[0]._sort) #assume the prewrap_fn's all match m.prewrap_fn = results[0].prewrap_fn return m -def get_links(sr, sort, time): +def get_links(sr, sort, time, merge_batched=True): """General link query for a subreddit.""" q = Link._query(Link.c.sr_id == sr._id, sort = db_sort(sort)) - if sort == 'toplinks': - q._filter(Link.c.top_link == True) - if time != 'all': q._filter(db_times[time]) - return make_results(q) + + res = make_results(q) + + # 
see the discussion above batched_time_times + if (merge_batched + and g.use_query_cache + and sort in batched_time_sorts + and time in batched_time_times): + + byday = Link._query(Link.c.sr_id == sr._id, + sort = db_sort(sort)) + byday._filter(db_times['day']) + + res = merge_results(res, + make_results(byday)) + + return res def get_spam_links(sr): q_l = Link._query(Link.c.sr_id == sr._id, @@ -297,6 +418,13 @@ def get_hidden(user): def get_saved(user): return user_rel_query(SaveHide, user, 'save') +def get_subreddit_messages(sr): + return user_rel_query(ModeratorInbox, sr, 'inbox') + +def get_unread_subreddit_messages(sr): + return user_rel_query(ModeratorInbox, sr, 'inbox', + filters = [ModeratorInbox.c.new == True]) + inbox_message_rel = Inbox.rel(Account, Message) def get_inbox_messages(user): return user_rel_query(inbox_message_rel, user, 'inbox') @@ -338,15 +466,14 @@ def get_unread_inbox(user): def add_queries(queries, insert_items = None, delete_items = None): """Adds multiple queries to the query queue. If insert_items or - delete_items is specified, the query may not need to be recomputed at - all.""" + delete_items is specified, the query may not need to be + recomputed against the database.""" if not g.write_query_queue: return - log = g.log - make_lock = g.make_lock def _add_queries(): for q in queries: + query_cache.reset() if not isinstance(q, CachedResults): continue @@ -393,27 +520,22 @@ def new_link(link): sr = Subreddit._byID(link.sr_id) author = Account._byID(link.author_id) - results = all_queries(get_links, sr, ('hot', 'new', 'old'), ['all']) + results = [get_links(sr, 'new', 'all')] + # we don't have to do hot/top/controversy because new_vote will do + # that - results.extend(all_queries(get_links, sr, ('top', 'controversial'), - db_times.keys())) results.append(get_submitted(author, 'new', 'all')) - #results.append(get_links(sr, 'toplinks', 'all')) if link._spam: results.append(get_spam_links(sr)) - - if link._deleted: - results.append(get_links(sr, 'new', 'all')) - add_queries(results, delete_items = link) - else: - # only 'new' qualifies for insertion, which will be done in - # run_new_links - add_queries(results, insert_items = link) - amqp.add_item('new_link', link._fullname) + # only 'new' qualifies for insertion, which will be done in + # run_new_links + add_queries(results, insert_items = link) + + amqp.add_item('new_link', link._fullname) -def new_comment(comment, inbox_rel): +def new_comment(comment, inbox_rels): author = Account._byID(comment.author_id) job = [get_comments(author, 'new', 'all')] if comment._deleted: @@ -425,19 +547,23 @@ def new_comment(comment, inbox_rel): # job.append(get_spam_comments(sr)) add_queries(job, insert_items = comment) amqp.add_item('new_comment', comment._fullname) + if not g.amqp_host: + l = Link._byID(comment.link_id,data=True) + add_comment_tree(comment, l) # note that get_all_comments() is updated by the amqp process # r2.lib.db.queries.run_new_comments - if inbox_rel: - inbox_owner = inbox_rel._thing1 - if inbox_rel._name == "inbox": - add_queries([get_inbox_comments(inbox_owner)], - insert_items = inbox_rel) - else: - add_queries([get_inbox_selfreply(inbox_owner)], - insert_items = inbox_rel) - set_unread(comment, True) + if inbox_rels: + for inbox_rel in tup(inbox_rels): + inbox_owner = inbox_rel._thing1 + if inbox_rel._name == "inbox": + add_queries([get_inbox_comments(inbox_owner)], + insert_items = inbox_rel) + else: + add_queries([get_inbox_selfreply(inbox_owner)], + insert_items = inbox_rel) + set_unread(comment, 
inbox_owner, True) @@ -455,10 +581,21 @@ def new_vote(vote): if vote.valid_thing and not item._spam and not item._deleted: sr = item.subreddit_slow + # don't do 'new', because that was done by new_link results = [get_links(sr, 'hot', 'all')] - results.extend(all_queries(get_links, sr, ('top', 'controversial'), db_times.keys())) - #results.append(get_links(sr, 'toplinks', 'all')) + + # for top and controversial we do some magic to recompute + # these less often; see the discussion above + # batched_time_times + for sort in batched_time_sorts: + for time in db_times.keys(): + q = make_batched_time_query(sr, sort, time) + results.append(q) + add_queries(results, insert_items = item) + + sr.last_valid_vote = datetime.now(g.tz) + sr._commit() #must update both because we don't know if it's a changed vote if vote._name == '1': @@ -471,27 +608,39 @@ def new_vote(vote): add_queries([get_liked(user)], delete_items = vote) add_queries([get_disliked(user)], delete_items = vote) -def new_message(message, inbox_rel): +def new_message(message, inbox_rels): from r2.lib.comment_tree import add_message from_user = Account._byID(message.author_id) - to_user = Account._byID(message.to_id) - - add_queries([get_sent(from_user)], insert_items = message) - add_queries([get_inbox_messages(to_user)], insert_items = inbox_rel) + for inbox_rel in tup(inbox_rels): + to = inbox_rel._thing1 + # moderator message + if isinstance(inbox_rel, ModeratorInbox): + add_queries([get_subreddit_messages(to)], + insert_items = inbox_rel) + # personal message + else: + add_queries([get_sent(from_user)], insert_items = message) + add_queries([get_inbox_messages(to)], + insert_items = inbox_rel) + set_unread(message, to, True) add_message(message) - set_unread(message, True) -def set_unread(message, unread): - for i in Inbox.set_unread(message, unread): - kw = dict(insert_items = i) if unread else dict(delete_items = i) - if i._name == 'selfreply': - add_queries([get_unread_selfreply(i._thing1)], **kw) - elif isinstance(message, Comment): - add_queries([get_unread_comments(i._thing1)], **kw) - else: - add_queries([get_unread_messages(i._thing1)], **kw) +def set_unread(message, to, unread): + if isinstance(to, Subreddit): + for i in ModeratorInbox.set_unread(message, unread): + kw = dict(insert_items = i) if unread else dict(delete_items = i) + add_queries([get_unread_subreddit_messages(i._thing1)], **kw) + else: + for i in Inbox.set_unread(message, unread): + kw = dict(insert_items = i) if unread else dict(delete_items = i) + if i._name == 'selfreply': + add_queries([get_unread_selfreply(i._thing1)], **kw) + elif isinstance(message, Comment): + add_queries([get_unread_comments(i._thing1)], **kw) + else: + add_queries([get_unread_messages(i._thing1)], **kw) def new_savehide(rel): user = rel._thing1 @@ -517,8 +666,8 @@ def _by_srid(things): sr_id, in addition to the looked-up subreddits""" ret = {} - for thing in things: - if hasattr(thing, 'sr_id'): + for thing in tup(things): + if getattr(thing, 'sr_id', None) is not None: ret.setdefault(thing.sr_id, []).append(thing) srs = Subreddit._byID(ret.keys(), return_dict=True) if ret else {} @@ -526,6 +675,12 @@ def _by_srid(things): return ret, srs def ban(things): + del_or_ban(things, "ban") + +def delete_links(links): + del_or_ban(links, "del") + +def del_or_ban(things, why): by_srid, srs = _by_srid(things) if not by_srid: return @@ -536,15 +691,19 @@ def ban(things): comments = [x for x in things if isinstance(x, Comment)] if links: - add_queries([get_spam_links(sr)], insert_items = 
links) + if why == "ban": + add_queries([get_spam_links(sr)], insert_items = links) # rip it out of the listings. bam! results = [get_links(sr, 'hot', 'all'), - get_links(sr, 'new', 'all'), - get_links(sr, 'top', 'all'), - get_links(sr, 'controversial', 'all')] - results.extend(all_queries(get_links, sr, - ('top', 'controversial'), - db_times.keys())) + get_links(sr, 'new', 'all')] + + for sort in batched_time_sorts: + for time in db_times.keys(): + # this will go through delete_items, so handling + # of batched_time_times isn't necessary and is + # included only for consistancy + q = make_batched_time_query(sr, sort, time) + add_queries(results, delete_items = links) if comments: @@ -567,12 +726,15 @@ def unban(things): add_queries([get_spam_links(sr)], delete_items = links) # put it back in the listings results = [get_links(sr, 'hot', 'all'), - get_links(sr, 'new', 'all'), - get_links(sr, 'top', 'all'), - get_links(sr, 'controversial', 'all')] - results.extend(all_queries(get_links, sr, - ('top', 'controversial'), - db_times.keys())) + get_links(sr, 'new', 'all')] + for sort in batched_time_sorts: + for time in db_times.keys(): + # skip the preflight check because we need to redo + # this query regardless + q = make_batched_time_query(sr, sort, time, + preflight_check=False) + results.append(q) + add_queries(results, insert_items = links) if comments: @@ -619,10 +781,9 @@ def add_all_srs(): """Adds every listing query for every subreddit to the queue.""" q = Subreddit._query(sort = asc('_date')) for sr in fetch_things2(q): - add_queries(all_queries(get_links, sr, ('hot', 'new', 'old'), ['all'])) + add_queries(all_queries(get_links, sr, ('hot', 'new'), ['all'])) add_queries(all_queries(get_links, sr, ('top', 'controversial'), db_times.keys())) - add_queries([get_links(sr, 'toplinks', 'all'), - get_spam_links(sr), + add_queries([get_spam_links(sr), #get_spam_comments(sr), get_reported_links(sr), #get_reported_comments(sr), @@ -651,19 +812,53 @@ def add_all_users(): for user in fetch_things2(q): update_user(user) +def add_comment_tree(comment, link): + #update the comment cache + add_comment(comment) + #update last modified + set_last_modified(link, 'comments') # amqp queue processing functions def run_new_comments(): + """Add new incoming comments to the /comments page""" + # this is done as a queue because otherwise the contention for the + # lock on the query would be very high def _run_new_comments(msgs, chan): fnames = [msg.body for msg in msgs] - comments = Comment._by_fullname(fnames, return_dict=False) + comments = Comment._by_fullname(fnames, data=True, return_dict=False) + add_queries([get_all_comments()], insert_items = comments) amqp.handle_items('newcomments_q', _run_new_comments, limit=100) +def run_commentstree(): + """Add new incoming comments to their respective comments trees""" + + def _run_commentstree(msgs, chan): + fnames = [msg.body for msg in msgs] + comments = Comment._by_fullname(fnames, data=True, return_dict=False) + + links = Link._byID(set(cm.link_id for cm in comments), + data=True, + return_dict=True) + + # add the comment to the comments-tree + for comment in comments: + l = links[comment.link_id] + try: + add_comment_tree(comment, l) + except KeyError: + # Hackity hack. Try to recover from a corrupted + # comment tree + print "Trying to fix broken comments-tree." 
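run_new_comments() above drains newcomments_q in batches for the reason given in its comment: one add_queries() call per hundred comments keeps contention on the listing's lock far lower than one call per comment. A toy sketch of that consumer shape, with a plain list standing in for both the amqp queue and the cached /comments listing (handle_items here is a stand-in, not r2.lib.amqp):

def handle_items(queue, callback, limit=100):
    """Toy stand-in for amqp.handle_items(): hand the callback up to
    `limit` queued message bodies at a time until the queue is empty."""
    while queue:
        batch = queue[:limit]
        del queue[:limit]
        callback(batch)

def run_new_comments(queue, listing):
    def _run(fullnames):
        # one bulk update of the cached listing per batch,
        # not one update (and one lock acquisition) per comment
        listing.extend(fullnames)
    handle_items(queue, _run, limit=100)

incoming = ["t1_%d" % i for i in range(250)]
all_comments = []
run_new_comments(incoming, all_comments)
print len(all_comments)   # 250 comments applied in three batched updates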
+ link_comments(l._id, _update=True) + add_comment_tree(comment, l) + + amqp.handle_items('commentstree_q', _run_commentstree, limit=1) + #def run_new_links(): # """queue to add new links to the 'new' page. note that this isn't @@ -798,6 +993,32 @@ def process_votes(drain = False, limit = 100): amqp.handle_items('register_vote_q', _handle_votes, limit = limit, drain = drain) +def catch_up_batch_queries(): + # catch up on batched_time_times queries that haven't been run + # that should be, which should only happen to small + # subreddits. This should be cronned to run about once an + # hour. The more often, the more the work of rerunning the actual + # queries is spread out, but every run has a fixed-cost of looking + # at every single subreddit + sr_q = Subreddit._query(sort=desc('_downs'), + data=True) + dayago = utils.timeago('1 day') + for sr in fetch_things2(sr_q): + if hasattr(sr, 'last_valid_vote') and sr.last_valid_vote > dayago: + # if we don't know when the last vote was, it couldn't + # have been today + for sort in batched_time_sorts: + for time in batched_time_times: + q = make_batched_time_query(sr, sort, time) + if q.preflight_check(): + # we haven't run the batched_time_times in the + # last day + add_queries([q]) + + # make sure that all of the jobs have been completed or processed + # by the time we return + amqp.worker.join() + try: from r2admin.lib.admin_queries import * except ImportError: diff --git a/r2/r2/lib/db/query_queue.py b/r2/r2/lib/db/query_queue.py index 21406fc0a..85f89bdb8 100644 --- a/r2/r2/lib/db/query_queue.py +++ b/r2/r2/lib/db/query_queue.py @@ -7,36 +7,34 @@ from pylons import g working_prefix = 'working_' prefix = 'prec_link_' -TIMEOUT = 600 +TIMEOUT = 600 # after TIMEOUT seconds, assume that the process + # calculating a given query has crashed and allow it to + # be rerun as appropriate def add_query(cached_results): amqp.add_item('prec_links', pickle.dumps(cached_results, -1)) -def _skip_key(iden): - return 'skip_precompute_queries-%s' % iden - def run(): def callback(msgs, chan): for msg in msgs: # will be len==1 - # r2.lib.db.queries.CachedResults + # cr is a r2.lib.db.queries.CachedResults cr = pickle.loads(msg.body) iden = cr.query._iden() - if (iden in g.skip_precompute_queries - and g.hardcache.get(_skip_key(iden))): - print 'skipping known query', iden - continue - working_key = working_prefix + iden key = prefix + iden last_time = g.memcache.get(key) # check to see if we've computed this job since it was # added to the queue - if last_time and last_time > msg.timestamp: + if last_time and last_time > msg.timestamp: print 'skipping, already computed ', key return + if not cr.preflight_check(): + print 'skipping, preflight check failed', key + return + # check if someone else is working on this elif not g.memcache.add(working_key, 1, TIMEOUT): print 'skipping, someone else is working', working_key @@ -48,10 +46,7 @@ def run(): cr.update() g.memcache.set(key, datetime.now()) - if iden in g.skip_precompute_queries: - print 'setting to be skipped for 6 hours', iden - g.hardcache.set(_skip_key(iden), start, - 60*60*6) + cr.postflight() finally: g.memcache.delete(working_key) diff --git a/r2/r2/lib/db/stats.py b/r2/r2/lib/db/stats.py index 47fdb76de..4e37e9f0a 100644 --- a/r2/r2/lib/db/stats.py +++ b/r2/r2/lib/db/stats.py @@ -78,24 +78,3 @@ def default_queries(): queries.append(q) return queries - -def run_queries(): - from r2.models import subreddit - from pylons import g - cache = g.cache - queries = cache.get(cache_key) or default_queries() - 
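The query_queue.run() changes above spell out the guards a precompute worker applies before doing any work: skip if the query was recomputed after the message was enqueued, skip if the preflight check says it is not due, and otherwise claim it with a memcache add() whose TTL (TIMEOUT) lets a crashed worker's claim expire. A condensed sketch of that sequence, with a small in-memory class standing in for the memcache client:

import time

TIMEOUT = 600          # a crashed worker's claim expires after this long

class FakeMemcache(object):
    """Tiny stand-in for the memcache client: add() only succeeds if the
    key is absent or its previous value has expired."""
    def __init__(self):
        self.store = {}
    def get(self, key):
        val = self.store.get(key)
        return val[0] if val and val[1] > time.time() else None
    def add(self, key, value, expire):
        if self.get(key) is not None:
            return False
        self.store[key] = (value, time.time() + expire)
        return True
    def set(self, key, value, expire=0):
        # expire=0 means "never expires" for the purpose of this sketch
        self.store[key] = (value, time.time() + (expire or 10 ** 9))
    def delete(self, key):
        self.store.pop(key, None)

def process(mc, iden, enqueued_at, preflight_check, update):
    last_time = mc.get("prec_link_" + iden)
    if last_time and last_time > enqueued_at:
        return "already computed since it was enqueued"
    if not preflight_check():
        return "preflight check failed"
    if not mc.add("working_" + iden, 1, TIMEOUT):
        return "someone else is working on it"
    try:
        update()
        mc.set("prec_link_" + iden, time.time())
    finally:
        mc.delete("working_" + iden)
    return "computed"

mc = FakeMemcache()
print process(mc, "q1", time.time(), lambda: True, lambda: None)

In the real run(), cr.postflight() is also called after a successful update, which is what stamps the once-per-day marker that preflight_check() later consults.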
- for q in queries: - q._read_cache = False - q._write_cache = True - q._cache_time = cache_time - q._list() - - #find top - q = default_queries()[0] - q._limit = 1 - top_link = list(q)[0] - if top_link: - top_link._load() - top_link.top_link = True - top_link._commit() diff --git a/r2/r2/lib/db/thing.py b/r2/r2/lib/db/thing.py index adef809fc..9d00e3cc4 100644 --- a/r2/r2/lib/db/thing.py +++ b/r2/r2/lib/db/thing.py @@ -32,6 +32,7 @@ import sorts from .. utils import iters, Results, tup, to36, Storage from r2.config import cache from r2.lib.cache import sgm +from r2.lib.log import log_text from pylons import g @@ -75,6 +76,7 @@ class DataThing(object): _data_int_props = () _int_prop_suffix = None _defaults = {} + _essentials = () c = operators.Slots() __safe__ = False @@ -120,11 +122,53 @@ class DataThing(object): try: return getattr(self, '_defaults')[attr] except KeyError: + try: + _id = object.__getattribute__(self, "_id") + except AttributeError: + _id = "???" + try: + cl = object.__getattribute__(self, "__class__").__name__ + except AttributeError: + cl = "???" + if self._loaded: - raise AttributeError, '%s not found' % attr + nl = "it IS loaded." else: - raise AttributeError,\ - attr + ' not found. thing is not loaded' + nl = "it is NOT loaded." + + # The %d format is nicer, since it has no "L" at the end, but + # if we can't do that, fall back on %r. + try: + id_str = "%d" % _id + except TypeError: + id_str = "%r" % _id + + desc = '%s(%s).%s' % (cl, id_str, attr) + + try: + essentials = object.__getattribute__(self, "_essentials") + except AttributeError: + print "%s has no _essentials" % desc + essentials = () + + if isinstance(essentials, str): + print "Some dumbass forgot a comma." + essentials = essentials, + + if attr in essentials: + log_text ("essentials-bandaid-reload", + "%s not found; %s Forcing reload." % (desc, nl), + "warning") + self._load() + + try: + return self._t[attr] + except KeyError: + log_text ("essentials-bandaid-failed", + "Reload of %s didn't help. I recommend deletion." + % desc, "error") + + raise AttributeError, '%s not found; %s' % (desc, nl) def _cache_key(self): return thing_prefix(self.__class__.__name__, self._id) @@ -713,8 +757,12 @@ def Relation(type1, type2, denorm1 = None, denorm2 = None): res = sgm(cache, pairs, items_db, prefix) #convert the keys back into objects - #we can assume the rels will be in the cache and just call - #_byID lots + + # populate up the local-cache in batch + cls._byID(filter(None, res.values()), data=data) + + # now we can assume the rels will be in the cache and just + # call _byID lots res_obj = {} for k, rid in res.iteritems(): obj_key = (thing1_dict[k[0]], thing2_dict[k[1]], k[2]) diff --git a/r2/r2/lib/db/userrel.py b/r2/r2/lib/db/userrel.py index ab211b86c..32ef0dce4 100644 --- a/r2/r2/lib/db/userrel.py +++ b/r2/r2/lib/db/userrel.py @@ -19,7 +19,7 @@ # All portions of the code written by CondeNet are Copyright (c) 2006-2010 # CondeNet, Inc. All Rights Reserved. 
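The thing.py change above is a band-aid for attributes that should always be present: if an attribute listed in _essentials is missing when __getattr__ falls through, the object is reloaded from the database, the lookup retried, and a warning pushed to the log queue; only if the reload also fails does the AttributeError escape. A simplified sketch of that fallback on a generic record class (fetch_row, Record and Link below are hypothetical stand-ins, not DataThing):

def fetch_row(row_id):
    """Hypothetical database loader used by the sketch."""
    return {"sr_id": 42, "author_id": 7}

class Record(object):
    _defaults = {}
    _essentials = ()            # attributes that must survive a reload

    def __init__(self, row_id):
        self._id = row_id
        self._t = {}            # attribute store, normally filled by _load()

    def _load(self):
        self._t.update(fetch_row(self._id))

    def __getattr__(self, attr):
        try:
            return self._t[attr]
        except KeyError:
            if attr in self._defaults:
                return self._defaults[attr]
            if attr in self._essentials:
                # an essential attribute went missing: warn and force a reload
                print "warning: %r missing on %r, forcing reload" % (attr, self._id)
                self._load()
                if attr in self._t:
                    return self._t[attr]
            raise AttributeError("%s(%r).%s not found" % (
                self.__class__.__name__, self._id, attr))

class Link(Record):
    _essentials = ("sr_id",)

l = Link(1)
print l.sr_id                   # takes the reload path and prints 42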
################################################################################ -from r2.lib.memoize import memoize, clear_memo +from r2.lib.memoize import memoize def UserRel(name, relation, disable_ids_fn = False, disable_reverse_ids_fn = False): diff --git a/r2/r2/lib/emailer.py b/r2/r2/lib/emailer.py index 284173e4e..192f1908f 100644 --- a/r2/r2/lib/emailer.py +++ b/r2/r2/lib/emailer.py @@ -44,6 +44,13 @@ def _system_email(email, body, kind, reply_to = "", thing = None): kind, body = body, reply_to = reply_to, thing = thing) +def _nerds_email(body, from_name, kind): + """ + For sending email to the nerds who run this joint + """ + Email.handler.add_to_queue(None, g.nerds_email, from_name, g.nerds_email, + kind, body = body) + def verify_email(user, dest): """ For verifying an email address @@ -93,6 +100,10 @@ def i18n_email(email, body, name='', reply_to = ''): return _feedback_email(email, body, Email.Kind.HELP_TRANSLATE, name = name, reply_to = reply_to) +def nerds_email(body, from_name=g.domain): + """Queues a feedback email to the nerds running this site.""" + return _nerds_email(body, from_name, Email.Kind.NERDMAIL) + def share(link, emails, from_name = "", reply_to = "", body = ""): """Queues a 'share link' email.""" now = datetime.datetime.now(g.tz) @@ -138,11 +149,15 @@ def send_queued_mail(test = False): should_queue = email.should_queue() # check only on sharing that the mail is invalid - if email.kind == Email.Kind.SHARE and should_queue: - email.body = Share(username = email.from_name(), - msg_hash = email.msg_hash, - link = email.thing, - body = email.body).render(style = "email") + if email.kind == Email.Kind.SHARE: + if should_queue: + email.body = Share(username = email.from_name(), + msg_hash = email.msg_hash, + link = email.thing, + body =email.body).render(style = "email") + else: + email.set_sent(rejected = True) + continue elif email.kind == Email.Kind.OPTOUT: email.body = Mail_Opt(msg_hash = email.msg_hash, leave = True).render(style = "email") diff --git a/r2/r2/lib/filters.py b/r2/r2/lib/filters.py index 808b681eb..6c32cea54 100644 --- a/r2/r2/lib/filters.py +++ b/r2/r2/lib/filters.py @@ -19,13 +19,17 @@ # All portions of the code written by CondeNet are Copyright (c) 2006-2010 # CondeNet, Inc. All Rights Reserved. ################################################################################ -from BeautifulSoup import BeautifulSoup - -from pylons import c - import cgi import urllib import re +from cStringIO import StringIO + +from xml.sax.handler import ContentHandler +from lxml.sax import saxify +import lxml.etree + +from pylons import g, c + from wrapped import Templated, CacheStub SC_OFF = "" @@ -122,52 +126,63 @@ def edit_comment_filter(text = ''): text = unicode(text) return url_escape(text) +class SouptestSaxHandler(ContentHandler): + def __init__(self, ok_tags): + self.ok_tags = ok_tags + + def startElementNS(self, tagname, qname, attrs): + if qname not in self.ok_tags: + raise ValueError('HAX: Unknown tag: %r' % qname) + + for (ns, name), val in attrs.items(): + if ns is not None: + raise ValueError('HAX: Unknown namespace? Seriously? 
%r' % ns) + + if name not in self.ok_tags[qname]: + raise ValueError('HAX: Unknown attribute-name %r' % name) + + if qname == 'a' and name == 'href': + lv = val.lower() + if not (lv.startswith('http://') + or lv.startswith('https://') + or lv.startswith('ftp://') + or lv.startswith('mailto:') + or lv.startswith('news:') + or lv.startswith('/')): + raise ValueError('HAX: Unsupported link scheme %r' % val) + +markdown_ok_tags = { + 'div': ('class'), + 'a': set(('href', 'title', 'target', 'nofollow')), + 'table': ("align", ), + 'th': ("align", ), + 'td': ("align", ), + } +markdown_boring_tags = ('p', 'em', 'strong', 'br', 'ol', 'ul', 'hr', 'li', + 'pre', 'code', 'blockquote', 'center', + 'tbody', 'thead', "tr", + 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',) +for bt in markdown_boring_tags: + markdown_ok_tags[bt] = () + def markdown_souptest(text, nofollow=False, target=None, lang=None): - ok_tags = { - 'div': ('class'), - 'a': ('href', 'title', 'target', 'nofollow'), - } + if not text: + return text - boring_tags = ( 'p', 'em', 'strong', 'br', 'ol', 'ul', 'hr', 'li', - 'pre', 'code', 'blockquote', - 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', ) + smd = safemarkdown(text, nofollow, target, lang) - for bt in boring_tags: - ok_tags[bt] = () - - smd = safemarkdown (text, nofollow, target, lang) - soup = BeautifulSoup(smd) - - for tag in soup.findAll(): - if not tag.name in ok_tags: - raise ValueError("<%s> tag found in markdown!" % tag.name) - ok_attrs = ok_tags[tag.name] - for k,v in tag.attrs: - if not k in ok_attrs: - raise ValueError("<%s %s='%s'> attr found in markdown!" - % (tag.name, k,v)) - if tag.name == 'a' and k == 'href': - lv = v.lower() - if lv.startswith("http:"): - pass - elif lv.startswith("https:"): - pass - elif lv.startswith("ftp:"): - pass - elif lv.startswith("mailto:"): - pass - elif lv.startswith("/"): - pass - else: - raise ValueError("Link to '%s' found in markdown!" % v) + s = StringIO(smd) + tree = lxml.etree.parse(s) + handler = SouptestSaxHandler(markdown_ok_tags) + saxify(tree, handler) + return smd #TODO markdown should be looked up in batch? #@memoize('markdown') def safemarkdown(text, nofollow=False, target=None, lang=None): from r2.lib.c_markdown import c_markdown from r2.lib.py_markdown import py_markdown - from pylons import g from contrib.markdown import markdown @@ -181,18 +196,14 @@ def safemarkdown(text, nofollow=False, target=None, lang=None): target = "_top" if lang is None: - # TODO: lang should respect g.markdown_backend - lang = "py" + lang = g.markdown_backend - try: - if lang == "c": - text = c_markdown(text, nofollow, target) - elif lang == "py": - text = py_markdown(text, nofollow, target) - else: - raise ValueError("weird lang") - except RuntimeError: - text = "
Comment Broken
" + if lang == "c": + text = c_markdown(text, nofollow, target) + elif lang == "py": + text = py_markdown(text, nofollow, target) + else: + raise ValueError("weird lang [%s]" % lang) return SC_OFF + MD_START + text + MD_END + SC_ON @@ -209,8 +220,6 @@ def unkeep_space(text): def profanity_filter(text): - from pylons import g - def _profane(m): x = m.group(1) return ''.join(u"\u2731" for i in xrange(len(x))) diff --git a/r2/r2/lib/hardcachebackend.py b/r2/r2/lib/hardcachebackend.py index 7fe70a3ba..4e3804af1 100644 --- a/r2/r2/lib/hardcachebackend.py +++ b/r2/r2/lib/hardcachebackend.py @@ -68,6 +68,8 @@ class HardCacheBackend(object): ) def add(self, category, ids, val, time=0): + self.delete_if_expired(category, ids) + expiration = expiration_from_time(time) value, kind = self.tdb.py2db(val, True) @@ -87,6 +89,8 @@ class HardCacheBackend(object): return self.get(category, ids) def incr(self, category, ids, time=0, delta=1): + self.delete_if_expired(category, ids) + expiration = expiration_from_time(time) rp = self.table.update(sa.and_(self.table.c.category==category, @@ -155,7 +159,8 @@ class HardCacheBackend(object): def ids_by_category(self, category, limit=1000): s = sa.select([self.table.c.ids], - self.table.c.category==category, + sa.and_(self.table.c.category==category, + self.table.c.expiration > datetime.now(g.tz)), limit = limit) rows = s.execute().fetchall() return [ r.ids for r in rows ] @@ -179,6 +184,13 @@ class HardCacheBackend(object): rows = s.execute().fetchall() return [ (r.expiration, r.category, r.ids) for r in rows ] + def delete_if_expired(self, category, ids, expiration="now"): + expiration_clause = self.clause_from_expiration(expiration) + self.table.delete(sa.and_(self.table.c.category==category, + self.table.c.ids==ids, + expiration_clause)).execute() + + def delete_expired(expiration="now", limit=5000): hcb = HardCacheBackend(g) diff --git a/r2/r2/lib/jsonresponse.py b/r2/r2/lib/jsonresponse.py index 95a500c15..66566ac15 100644 --- a/r2/r2/lib/jsonresponse.py +++ b/r2/r2/lib/jsonresponse.py @@ -22,7 +22,7 @@ from r2.lib.utils import tup from r2.lib.captcha import get_iden from r2.lib.wrapped import Wrapped, StringTemplate -from r2.lib.filters import websafe_json +from r2.lib.filters import websafe_json, spaceCompress from r2.lib.jsontemplates import get_api_subtype from r2.lib.base import BaseController from r2.lib.pages.things import wrap_links @@ -51,7 +51,7 @@ class JsonResponse(object): self._errors = set() self._new_captcha = False self._data = {} - + def send_failure(self, error): c.errors.add(error) self._clear() @@ -69,7 +69,7 @@ class JsonResponse(object): res['data'] = self._data res['errors'] = [(e[0], c.errors[e].message) for e in self._errors] return {"json": res} - + def set_error(self, error_name, field_name): self._errors.add((error_name, field_name)) @@ -86,6 +86,9 @@ class JsonResponse(object): have_error = True return have_error + def process_rendered(self, res): + return res + def _things(self, things, action, *a, **kw): """ function for inserting/replacing things in listings. 
@@ -94,7 +97,7 @@ class JsonResponse(object): if not all(isinstance(t, Wrapped) for t in things): wrap = kw.pop('wrap', Wrapped) things = wrap_links(things, wrapper = wrap) - data = [t.render() for t in things] + data = [self.process_rendered(t.render()) for t in things] if kw: for d in data: @@ -114,13 +117,13 @@ class JsonResponse(object): def _send_data(self, **kw): self._data.update(kw) - + class JQueryResponse(JsonResponse): """ class which mimics the jQuery in javascript for allowing Dom manipulations on the client side. - + An instantiated JQueryResponse acts just like the "$" function on the JS layer with the exception of the ability to run arbitrary code on the client. Selectors and method functions evaluate to @@ -144,7 +147,13 @@ class JQueryResponse(JsonResponse): self.objs = None self.ops = None JsonResponse._clear(self) - + + def process_rendered(self, res): + if 'data' in res: + if 'content' in res['data']: + res['data']['content'] = spaceCompress(res['data']['content']) + return res + def send_failure(self, error): c.errors.add(error) self._clear() @@ -181,12 +190,11 @@ class JQueryResponse(JsonResponse): selector += ".field-" + field_name message = c.errors[(error_name, field_name)].message form.find(selector).show().html(message).end() - return {"jquery": self.ops} # thing methods #-------------- - + def _things(self, things, action, *a, **kw): data = JsonResponse._things(self, things, action, *a, **kw) new = self.__getattr__(action) diff --git a/r2/r2/lib/jsontemplates.py b/r2/r2/lib/jsontemplates.py index 161d475bd..cb5bf06d8 100644 --- a/r2/r2/lib/jsontemplates.py +++ b/r2/r2/lib/jsontemplates.py @@ -46,7 +46,7 @@ def make_fullname(typ, _id): class ObjectTemplate(StringTemplate): def __init__(self, d): self.d = d - + def update(self, kw): def _update(obj): if isinstance(obj, (str, unicode)): @@ -56,10 +56,7 @@ class ObjectTemplate(StringTemplate): elif isinstance(obj, (list, tuple)): return map(_update, obj) elif isinstance(obj, CacheStub) and kw.has_key(obj.name): - r = kw[obj.name] - if isinstance(r, (str, unicode)): - r = spaceCompress(r) - return r + return kw[obj.name] else: return obj res = _update(self.d) @@ -194,13 +191,23 @@ class AccountJsonTemplate(ThingJsonTemplate): _data_attrs_ = ThingJsonTemplate.data_attrs(name = "name", link_karma = "safe_karma", comment_karma = "comment_karma", - has_mail = "has_mail") + has_mail = "has_mail", + has_mod_mail = "has_mod_mail", + is_mod = "is_mod", + ) def thing_attr(self, thing, attr): + from r2.models import Subreddit if attr == "has_mail": if c.user_is_loggedin and thing._id == c.user._id: return bool(c.have_messages) return None + if attr == "has_mod_mail": + if c.user_is_loggedin and thing._id == c.user._id: + return bool(c.have_mod_messages) + return None + if attr == "is_mod": + return bool(Subreddit.reverse_moderator_ids(thing)) return ThingJsonTemplate.thing_attr(self, thing, attr) class LinkJsonTemplate(ThingJsonTemplate): @@ -328,6 +335,7 @@ class MessageJsonTemplate(ThingJsonTemplate): body_html = "body_html", author = "author", dest = "dest", + subreddit = "subreddit", was_comment = "was_comment", context = "context", created = "created", @@ -341,7 +349,14 @@ class MessageJsonTemplate(ThingJsonTemplate): return ("" if not thing.was_comment else thing.permalink + "?context=3") elif attr == "dest": - return thing.to.name + if thing.to_id: + return thing.to.name + else: + return "#" + thing.subreddit.name + elif attr == "subreddit": + if thing.sr_id: + return thing.subreddit.name + return None elif attr == 
"body_html": return safemarkdown(thing.body) return ThingJsonTemplate.thing_attr(self, thing, attr) diff --git a/r2/r2/lib/lock.py b/r2/r2/lib/lock.py index fd3ee8f8b..d04d50218 100644 --- a/r2/r2/lib/lock.py +++ b/r2/r2/lib/lock.py @@ -40,7 +40,7 @@ class MemcacheLock(object): self.locks = locks.locks = getattr(locks, 'locks', set()) self.key = key - self.cache = cache + self.cache = cache.get_local_client() self.time = time self.timeout = timeout self.have_lock = False diff --git a/r2/r2/lib/log.py b/r2/r2/lib/log.py new file mode 100644 index 000000000..d6087bc76 --- /dev/null +++ b/r2/r2/lib/log.py @@ -0,0 +1,68 @@ +# The contents of this file are subject to the Common Public Attribution +# License Version 1.0. (the "License"); you may not use this file except in +# compliance with the License. You may obtain a copy of the License at +# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public +# License Version 1.1, but Sections 14 and 15 have been added to cover use of +# software over a computer network and provide for limited attribution for the +# Original Developer. In addition, Exhibit A has been modified to be consistent +# with Exhibit B. +# +# Software distributed under the License is distributed on an "AS IS" basis, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for +# the specific language governing rights and limitations under the License. +# +# The Original Code is Reddit. +# +# The Original Developer is the Initial Developer. The Initial Developer of the +# Original Code is CondeNet, Inc. +# +# All portions of the code written by CondeNet are Copyright (c) 2006-2010 +# CondeNet, Inc. All Rights Reserved. +################################################################################ + +from pylons import g +from r2.lib import amqp +from datetime import datetime +import pickle +import traceback + +tz = g.display_tz + +Q = 'log_q' + +def _default_dict(): + return dict(time=datetime.now(tz), + host=g.reddit_host, + port=g.reddit_port, + pid=g.reddit_pid) + +# e_value and e should actually be the same thing. +# e_type is the just the type of e_value +# So e and e_traceback are the interesting ones. +def log_exception(e, e_type, e_value, e_traceback): + d = _default_dict() + + d['type'] = 'exception' + d['traceback'] = traceback.extract_tb(e_traceback) + + d['exception_type'] = e.__class__.__name__ + d['exception_desc'] = str(e) + + amqp.add_item(Q, pickle.dumps(d)) + +def log_text(classification, text=None, level="info"): + from r2.lib.filters import _force_utf8 + if text is None: + text = classification + + if level not in ('debug', 'info', 'warning', 'error'): + print "What kind of loglevel is %s supposed to be?" % level + level = 'error' + + d = _default_dict() + d['type'] = 'text' + d['level'] = level + d['text'] = _force_utf8(text) + d['classification'] = classification + + amqp.add_item(Q, pickle.dumps(d)) diff --git a/r2/r2/lib/memoize.py b/r2/r2/lib/memoize.py index 00d96927d..9267f9ae8 100644 --- a/r2/r2/lib/memoize.py +++ b/r2/r2/lib/memoize.py @@ -19,9 +19,15 @@ # All portions of the code written by CondeNet are Copyright (c) 2006-2010 # CondeNet, Inc. All Rights Reserved. 
################################################################################ +from hashlib import md5 + from r2.config import cache from r2.lib.filters import _force_utf8 -from r2.lib.cache import NoneResult +from r2.lib.cache import NoneResult, make_key +from r2.lib.lock import make_lock_factory +from pylons import g + +make_lock = g.make_lock def memoize(iden, time = 0): def memoize_fn(fn): @@ -35,49 +41,38 @@ def memoize(iden, time = 0): update = kw['_update'] del kw['_update'] - key = _make_key(iden, a, kw) - #print 'CHECKING', key + key = make_key(iden, *a, **kw) res = None if update else cache.get(key) if res is None: - res = fn(*a, **kw) - if res is None: - res = NoneResult - cache.set(key, res, time = time) + # not cached, we should calculate it. + with make_lock('memoize_lock(%s)' % key): + stored = None if update else cache.get(key) + if stored is None: + # okay now go and actually calculate it + res = fn(*a, **kw) + if res is None: + res = NoneResult + cache.set(key, res, time = time) + else: + # it was calculated while we were waiting on + # the lock + res = stored + if res == NoneResult: res = None + return res + return new_fn return memoize_fn -def clear_memo(iden, *a, **kw): - key = _make_key(iden, a, kw) - #print 'CLEARING', key - cache.delete(key) - -def _make_key(iden, a, kw): - """ - Make the cache key. We have to descend into *a and **kw to make - sure that only regular strings are used in the key to keep 'foo' - and u'foo' in an args list from resulting in differing keys - """ - def _conv(s): - if isinstance(s, str): - return s - elif isinstance(s, unicode): - return _force_utf8(s) - else: - return str(s) - - return (_conv(iden) - + str([_conv(x) for x in a]) - + str([(_conv(x),_conv(y)) for (x,y) in sorted(kw.iteritems())])) - @memoize('test') def test(x, y): import time time.sleep(1) + print 'calculating %d + %d' % (x, y) if x + y == 10: return None else: diff --git a/r2/r2/lib/menus.py b/r2/r2/lib/menus.py index 6c313c756..483a9a8f3 100644 --- a/r2/r2/lib/menus.py +++ b/r2/r2/lib/menus.py @@ -98,12 +98,12 @@ menu = MenuHandler(hot = _('hot'), mobile = _("mobile"), store = _("store"), ad_inq = _("inquire about advertising"), - toplinks = _("top links"), random = _('random'), iphone = _("iPhone app"), #preferences options = _('options'), + feeds = _("RSS feeds"), friends = _("friends"), update = _("password/email"), delete = _("delete"), @@ -126,6 +126,7 @@ menu = MenuHandler(hot = _('hot'), about = _("about"), edit = _("edit this reddit"), moderators = _("edit moderators"), + modmail = _("moderator mail"), contributors = _("edit contributors"), banned = _("ban users"), banusers = _("ban users"), @@ -135,7 +136,10 @@ menu = MenuHandler(hot = _('hot'), mine = _("my reddits"), i18n = _("help translate"), + errors = _("errors"), awards = _("awards"), + ads = _("ads"), + usage = _("usage"), promoted = _("promoted"), reporters = _("reporters"), reports = _("reported links"), @@ -160,7 +164,9 @@ menu = MenuHandler(hot = _('hot'), live_promos = _('live'), unpaid_promos = _('unpaid'), pending_promos = _('pending'), - rejected_promos = _('rejected') + rejected_promos = _('rejected'), + + whitelist = _("whitelist") ) def menu_style(type): @@ -287,6 +293,13 @@ class NavButton(Styled): when it is different from self.title)""" return self.title +class ModeratorMailButton(NavButton): + def is_selected(self): + if c.default_sr and not self.sr_path: + return NavButton.is_selected(self) + elif not c.default_sr and self.sr_path: + return NavButton.is_selected(self) + class 
OffsiteButton(NavButton): def build(self, base_path = ''): self.sr_path = False diff --git a/r2/r2/lib/normalized_hot.py b/r2/r2/lib/normalized_hot.py index 7160bb897..9a6e4a385 100644 --- a/r2/r2/lib/normalized_hot.py +++ b/r2/r2/lib/normalized_hot.py @@ -33,7 +33,6 @@ from datetime import datetime, timedelta import random expire_delta = timedelta(minutes = 2) -TOP_CACHE = 1800 max_items = 150 def access_key(sr): @@ -77,43 +76,55 @@ def cached_query(query, sr): return res -def get_hot(sr, only_fullnames = False): +def get_hot(srs, only_fullnames = False): """Get the (fullname, hotness, epoch_seconds) for the hottest links in a subreddit. Use the query-cache to avoid some lookups if we can.""" from r2.lib.db.thing import Query from r2.lib.db.queries import CachedResults - q = sr.get_links('hot', 'all') - if isinstance(q, Query): - links = cached_query(q, sr) - res = [(link._fullname, link._hot, epoch_seconds(link._date)) - for link in links] - elif isinstance(q, CachedResults): - # we're relying on an implementation detail of CachedResults - # here, where it's storing tuples that look exactly like the - # return-type we want, to make our sorting a bit cheaper - q.fetch() - res = list(q.data) + ret = [] + queries = [sr.get_links('hot', 'all') for sr in srs] - age_limit = epoch_seconds(utils.timeago('%d days' % g.HOT_PAGE_AGE)) - return [(fname if only_fullnames else (fname, hot, date)) - for (fname, hot, date) in res - if date > age_limit] + # fetch these all in one go + cachedresults = filter(lambda q: isinstance(q, CachedResults), queries) + CachedResults.fetch_multi(cachedresults) + + for q in queries: + if isinstance(q, Query): + links = cached_query(q, sr) + res = [(link._fullname, link._hot, epoch_seconds(link._date)) + for link in links] + elif isinstance(q, CachedResults): + # we're relying on an implementation detail of + # CachedResults here, where it's storing tuples that look + # exactly like the return-type we want, to make our + # sorting a bit cheaper + res = list(q.data) + + # remove any that are too old + age_limit = epoch_seconds(utils.timeago('%d days' % g.HOT_PAGE_AGE)) + res = [(fname if only_fullnames else (fname, hot, date)) + for (fname, hot, date) in res + if date > age_limit] + ret.append(res) + + return ret @memoize('normalize_hot', time = g.page_cache_time) def normalized_hot_cached(sr_ids): """Fetches the hot lists for each subreddit, normalizes the scores, and interleaves the results.""" results = [] - srs = Subreddit._byID(sr_ids, data = True, return_dict = False) - for sr in srs: - # items =:= (fname, hot, epoch_seconds), ordered desc('_hot') - items = get_hot(sr)[:max_items] - + srs = Subreddit._byID(sr_ids, return_dict = False) + hots = get_hot(srs) + for items in hots: if not items: continue + # items =:= (fname, hot, epoch_seconds), ordered desc('_hot') + items = items[:max_items] + # the hotness of the hottest item in this subreddit top_score = max(items[0][1], 1) diff --git a/r2/r2/lib/organic.py b/r2/r2/lib/organic.py index c2e5f16e1..5fad18996 100644 --- a/r2/r2/lib/organic.py +++ b/r2/r2/lib/organic.py @@ -99,7 +99,7 @@ def cached_organic_links(user_id, langs): #potentially add a up and coming link if random.choice((True, False)) and sr_ids: sr = Subreddit._byID(random.choice(sr_ids)) - fnames = get_hot(sr, True) + fnames = get_hot([sr], True)[0] if fnames: if len(fnames) == 1: new_item = fnames[0] diff --git a/r2/r2/lib/pages/admin_pages.py b/r2/r2/lib/pages/admin_pages.py index 14064da90..640aed6e9 100644 --- a/r2/r2/lib/pages/admin_pages.py +++ 
b/r2/r2/lib/pages/admin_pages.py @@ -41,7 +41,7 @@ class AdminPage(Reddit): submit_box = False extension_handling = False show_sidebar = False - + def __init__(self, nav_menus = None, *a, **kw): #add admin options to the nav_menus if c.user_is_admin: @@ -50,7 +50,10 @@ class AdminPage(Reddit): if g.translator: buttons.append(NavButton(menu.i18n, "i18n")) + buttons.append(NavButton(menu.awards, "ads")) buttons.append(NavButton(menu.awards, "awards")) + buttons.append(NavButton(menu.errors, "error log")) + buttons.append(NavButton(menu.usage, "usage stats")) admin_menu = NavMenu(buttons, title='show', base_path = '/admin', type="lightdrop") diff --git a/r2/r2/lib/pages/pages.py b/r2/r2/lib/pages/pages.py index f296e0223..82c28805f 100644 --- a/r2/r2/lib/pages/pages.py +++ b/r2/r2/lib/pages/pages.py @@ -19,9 +19,9 @@ # All portions of the code written by CondeNet are Copyright (c) 2006-2010 # CondeNet, Inc. All Rights Reserved. ################################################################################ -from r2.lib.wrapped import Wrapped, Templated, NoTemplateFound, CachedTemplate -from r2.models import Account, Default -from r2.models import FakeSubreddit, Subreddit +from r2.lib.wrapped import Wrapped, Templated, CachedTemplate +from r2.models import Account, Default, make_feedurl +from r2.models import FakeSubreddit, Subreddit, Ad, AdSR from r2.models import Friends, All, Sub, NotFound, DomainSR from r2.models import Link, Printable, Trophy, bidding, PromoteDates from r2.config import cache @@ -39,14 +39,15 @@ from r2.lib.contrib.markdown import markdown from r2.lib.filters import spaceCompress, _force_unicode, _force_utf8 from r2.lib.filters import unsafe, websafe, SC_ON, SC_OFF from r2.lib.menus import NavButton, NamedButton, NavMenu, PageNameNav, JsButton -from r2.lib.menus import SubredditButton, SubredditMenu +from r2.lib.menus import SubredditButton, SubredditMenu, ModeratorMailButton from r2.lib.menus import OffsiteButton, menu, JsNavMenu from r2.lib.strings import plurals, rand_strings, strings, Score from r2.lib.utils import title_to_url, query_string, UrlParser, to_js, vote_hash -from r2.lib.utils import link_duplicates, make_offset_date, to_csv +from r2.lib.utils import link_duplicates, make_offset_date, to_csv, median from r2.lib.template_helpers import add_sr, get_domain from r2.lib.subreddit_search import popular_searches from r2.lib.scraper import scrapers +from r2.lib.log import log_text import sys, random, datetime, locale, calendar, simplejson, re import graph, pycountry @@ -61,6 +62,18 @@ def get_captcha(): if not c.user_is_loggedin or c.user.needs_captcha(): return get_iden() +def responsive(res, space_compress = False): + """ + Use in places where the template is returned as the result of the + controller so that it becomes compatible with the page cache. + """ + if is_api(): + res = json_respond(res) + elif space_compress: + res = spaceCompress(res) + c.response.content = res + return c.response + class Reddit(Templated): '''Base class for rendering a page on reddit. Handles toolbar creation, content of the footers, and content of the corner buttons. 
@@ -129,16 +142,22 @@ class Reddit(Templated): self._content = PaneStack([ShareLink(), content]) else: self._content = content - + self.toolbars = self.build_toolbars() def sr_admin_menu(self): buttons = [NamedButton('edit', css_class = 'reddit-edit'), + NamedButton('modmail', dest = "message/inbox", + css_class = 'moderator-mail'), NamedButton('moderators', css_class = 'reddit-moderators')] if c.site.type != 'public': buttons.append(NamedButton('contributors', css_class = 'reddit-contributors')) + elif (c.user_is_loggedin and c.site.use_whitelist and + (c.site.is_moderator(c.user) or c.user_is_admin)): + buttons.append(NavButton(menu.whitelist, "contributors", + css_class = 'reddit-contributors')) buttons.extend([ NamedButton('traffic', css_class = 'reddit-traffic'), @@ -177,7 +196,10 @@ class Reddit(Templated): if total > len(moderators): more_text = "...and %d more" % (total - len(moderators)) mod_href = "http://%s/about/moderators" % get_domain() + helplink = ("/message/compose?to=%%23%s" % c.site.name, + "message the moderators") ps.append(SideContentBox(_('moderators'), moderators, + helplink = helplink, more_href = mod_href, more_text = more_text)) @@ -212,22 +234,8 @@ class Reddit(Templated): In adition, unlike Templated.render, the result is in the form of a pylons Response object with it's content set. """ - try: - res = Templated.render(self, *a, **kw) - if is_api(): - res = json_respond(res) - elif self.space_compress: - res = spaceCompress(res) - c.response.content = res - except NoTemplateFound, e: - # re-raise the error -- development environment - if g.debug: - s = sys.exc_info() - raise s[1], None, s[2] - # die gracefully -- production environment - else: - abort(404, "not found") - return c.response + res = Templated.render(self, *a, **kw) + return responsive(res, self.space_compress) def corner_buttons(self): """set up for buttons in upper right corner of main page.""" @@ -307,8 +315,7 @@ class RedditFooter(CachedTemplate): ('buttons', [[(x.title, x.path) for x in y] for y in self.nav])] def __init__(self): - self.nav = [NavMenu([NamedButton("toplinks", False), - NamedButton("mobile", False, nocname=True), + self.nav = [NavMenu([NamedButton("mobile", False, nocname=True), OffsiteButton("rss", dest = '/.rss'), NamedButton("store", False, nocname=True), NamedButton("awards", False, nocname=True), @@ -478,9 +485,13 @@ class PrefsPage(Reddit): *a, **kw) def build_toolbars(self): - buttons = [NavButton(menu.options, ''), - NamedButton('friends'), - NamedButton('update')] + buttons = [NavButton(menu.options, '')] + + if c.user.pref_private_feeds: + buttons.append(NamedButton('feeds')) + + buttons.extend([NamedButton('friends'), + NamedButton('update')]) #if CustomerID.get_id(user): # buttons += [NamedButton('payment')] buttons += [NamedButton('delete')] @@ -492,6 +503,9 @@ class PrefOptions(Templated): def __init__(self, done = False): Templated.__init__(self, done = done) +class PrefFeeds(Templated): + pass + class PrefUpdate(Templated): """Preference form for updating email address and passwords""" def __init__(self, email = True, password = True, verify = False): @@ -526,11 +540,21 @@ class MessagePage(Reddit): self._content)) def build_toolbars(self): - buttons = [NamedButton('compose'), + buttons = [NamedButton('compose', sr_path = False), NamedButton('inbox', aliases = ["/message/comments", + "/message/uread", "/message/messages", - "/message/selfreply"]), - NamedButton('sent')] + "/message/selfreply"], + sr_path = False), + NamedButton('sent', sr_path = False)] + if 
c.show_mod_mail: + buttons.append(ModeratorMailButton(menu.modmail, "moderator", + sr_path = False)) + if not c.default_sr: + buttons.append(ModeratorMailButton( + _("%(site)s mail") % {'site': c.site.name}, "moderator", + aliases = ["/about/message/inbox", + "/about/message/unread"])) return [PageNameNav('nomenu', title = _("message")), NavMenu(buttons, base_path = "/message", type="tabmenu")] @@ -565,6 +589,8 @@ class HelpPage(BoringPage): return [PageNameNav('help', title = self.pagename)] class FormPage(BoringPage): + create_reddit_box = False + submit_box = False """intended for rendering forms with no rightbox needed or wanted""" def __init__(self, pagename, show_sidebar = False, *a, **kw): BoringPage.__init__(self, pagename, show_sidebar = show_sidebar, @@ -1293,13 +1319,17 @@ class OptIn(Templated): pass -class ButtonEmbed(Templated): +class ButtonEmbed(CachedTemplate): """Generates the JS wrapper around the buttons for embedding.""" def __init__(self, button = None, width = 100, height=100, referer = "", url = "", **kw): + arg = "cnameframe=1&" if c.cname else "" Templated.__init__(self, button = button, width = width, height = height, - referer=referer, url = url, **kw) + referer=referer, url = url, + domain = get_domain(), + arg = arg, + **kw) class Button(Wrapped): cachable = True @@ -1322,9 +1352,13 @@ class Button(Wrapped): if not hasattr(w, '_fullname'): w._fullname = None + def render(self, *a, **kw): + res = Wrapped.render(self, *a, **kw) + return responsive(res, True) + class ButtonLite(Button): - pass - + def render(self, *a, **kw): + return Wrapped.render(self, *a, **kw) class ButtonNoBody(Button): """A button page that just returns the raw button for direct embeding""" @@ -1420,7 +1454,101 @@ class UserAwards(Templated): else: raise NotImplementedError +class AdminErrorLog(Templated): + """The admin page for viewing the error log""" + def __init__(self): + hcb = g.hardcache.backend + date_groupings = {} + hexkeys_seen = {} + + for ids in hcb.ids_by_category("error"): + date, hexkey = ids.split("-") + + hexkeys_seen[hexkey] = True + + d = g.hardcache.get("error-" + ids) + + if d is None: + log_text("error=None", "Why is error-%s None?" % ids, + "warning") + continue + + tpl = (len(d['occurrences']), hexkey, d) + date_groupings.setdefault(date, []).append(tpl) + + self.nicknames = {} + self.statuses = {} + + for hexkey in hexkeys_seen.keys(): + nick = g.hardcache.get("error_nickname-%s" % hexkey, "???") + self.nicknames[hexkey] = nick + status = g.hardcache.get("error_status-%s" % hexkey, "normal") + self.statuses[hexkey] = status + + for ids in hcb.ids_by_category("logtext"): + date, level, classification = ids.split("-", 2) + textoccs = [] + dicts = g.hardcache.get("logtext-" + ids) + if dicts is None: + log_text("logtext=None", "Why is logtext-%s None?" 
% ids, + "warning") + continue + for d in dicts: + textoccs.append( (d['text'], d['occ'] ) ) + + sort_order = { + 'error': -1, + 'warning': -2, + 'info': -3, + 'debug': -4, + }[level] + + tpl = (sort_order, level, classification, textoccs) + date_groupings.setdefault(date, []).append(tpl) + + self.date_summaries = [] + + for date in sorted(date_groupings.keys(), reverse=True): + groupings = sorted(date_groupings[date], reverse=True) + self.date_summaries.append( (date, groupings) ) + + Templated.__init__(self) + +class AdminAds(Templated): + """The admin page for editing ads""" + def __init__(self): + from r2.models import Ad + Templated.__init__(self) + self.ads = Ad._all_ads() + +class AdminAdAssign(Templated): + """The interface for assigning an ad to a community""" + def __init__(self, ad): + self.weight = 100 + Templated.__init__(self, ad = ad) + +class AdminAdSRs(Templated): + """View the communities an ad is running on""" + def __init__(self, ad): + self.adsrs = AdSR.by_ad(ad) + + # Create a dictionary of + # SR => total weight of all its ads + # for all SRs that this ad is running on + self.sr_totals = {} + for adsr in self.adsrs: + sr = adsr._thing2 + + if sr.name not in self.sr_totals: + # We haven't added up this SR yet. + self.sr_totals[sr.name] = 0 + # Get all its ads and total them up. + sr_adsrs = AdSR.by_sr_merged(sr) + for adsr2 in sr_adsrs: + self.sr_totals[sr.name] += adsr2.weight + + Templated.__init__(self, ad = ad) class AdminAwards(Templated): """The admin page for editing awards""" @@ -1447,6 +1575,130 @@ class AdminAwardWinners(Templated): trophies = Trophy.by_award(award) Templated.__init__(self, award = award, trophies = trophies) +class AdminUsage(Templated): + """The admin page for viewing usage stats""" + def __init__(self): + hcb = g.hardcache.backend + + self.actions = {} + triples = set() # sorting key + daily_stats = {} + + for ids in hcb.ids_by_category("profile_count", limit=10000): + time, action = ids.split("-") + + if time.endswith("xx:xx"): + factor = 1.0 + label = time[5:10] # MM/DD + day = True + elif time.endswith(":xx"): + factor = 24.0 + label = time[11:] # HH:xx + else: + factor = 288.0 # number of five-minute periods in a day + label = time[11:] # HH:MM + + # Elapsed in hardcache is in hundredths of a second. + # Multiply it by 100 so from this point forward, we're + # dealing with seconds -- as floats with two decimal + # places of precision. Similarly, round the average + # to two decimal places. + count = g.hardcache.get("profile_count-" + ids) + if count is None or count == 0: + log_text("usage count=None", "For %r, it's %r" % (ids, count), "error") + continue + elapsed = g.hardcache.get("profile_elapsed-" + ids, 0) / 100.0 + average = int(100.0 * elapsed / count) / 100.0 + + triples.add( (factor, time, label) ) + + if factor == 1.0: + daily_stats.setdefault(action, []).append( + (count, elapsed, average) + ) + + self.actions.setdefault(action, {}) + self.actions[action][label] = dict(count=count, elapsed=elapsed, + average=average, + factor=factor, + classes = {}) + + # Figure out what a typical day looks like. For each action, + # look at the daily stats and record the median. 
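AdminUsage reads counters from five-minute, hourly, and daily hardcache buckets; elapsed time is stored in hundredths of a second, and each bucket is scaled by a factor (288 five-minute periods, 24 hours, or 1 day) so everything can be compared on a per-day basis. A small worked sketch of that arithmetic, with invented bucket names and values:

BUCKET_FACTORS = {
    "day":  1.0,     # already a full day
    "hour": 24.0,    # 24 hours per day
    "5min": 288.0,   # 288 five-minute periods per day
}

def summarize(count, elapsed_hundredths, bucket):
    elapsed = elapsed_hundredths / 100.0            # hardcache stores 1/100ths of a second
    average = int(100.0 * elapsed / count) / 100.0  # truncated to two decimal places
    factor = BUCKET_FACTORS[bucket]
    return dict(count=count, elapsed=elapsed, average=average,
                per_day_count=count * factor,       # scale the bucket up to a daily figure
                per_day_elapsed=elapsed * factor)

# 120 requests totalling 90.00s of profiler time in one five-minute bucket
# works out to roughly 7.2 hours of profiler time per day at that rate:
print(summarize(120, 9000, "5min"))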
+ for action in daily_stats.keys(): + med = {} + med["count"] = median([ x[0] for x in daily_stats[action] ]) + med["elapsed"] = median([ x[1] for x in daily_stats[action] ]) + med["average"] = median([ x[2] for x in daily_stats[action] ]) + + for d in self.actions[action].values(): + ice_cold = False + for category in ("elapsed", "count", "average"): + if category == "average": + scaled = d[category] + else: + scaled = d[category] * d["factor"] + + if category == "elapsed" and scaled < 5 * 60: + # If we're spending less than five mins a day + # on this operation, consider it ice cold regardless + # of how much of an outlier it is + ice_cold = True + + if ice_cold: + d["classes"][category] = "load0" + continue + + if med[category] <= 0: + # This shouldn't happen. If it does, + # toggle commenting of the next three lines. + raise ValueError("Huh. I guess this can happen.") +# d["classes"][category] = "load9" +# continue + + ratio = scaled / med[category] + if ratio > 5.0: + d["classes"][category] = "load9" + elif ratio > 3.0: + d["classes"][category] = "load8" + elif ratio > 2.0: + d["classes"][category] = "load7" + elif ratio > 1.5: + d["classes"][category] = "load6" + elif ratio > 1.1: + d["classes"][category] = "load5" + elif ratio > 0.9: + d["classes"][category] = "load4" + elif ratio > 0.75: + d["classes"][category] = "load3" + elif ratio > 0.5: + d["classes"][category] = "load2" + elif ratio > 0.10: + d["classes"][category] = "load1" + else: + d["classes"][category] = "load0" + + # Build a list called labels that gives the template a sorting + # order for the columns. + self.labels = [] + # Keep track of how many times we've seen a granularity (i.e., factor) + # so we can hide any that come after the third + factor_counts = {} + # sort actions by whatever will end up as the first column + action_sorting_column = None + for factor, time, label in sorted(triples, reverse=True): + if action_sorting_column is None: + action_sorting_column = label + factor_counts.setdefault(factor, 0) + factor_counts[factor] += 1 + self.labels.append( (label, factor_counts[factor] > 3) ) + + self.action_order = sorted(self.actions.keys(), reverse=True, + key = lambda x: + self.actions[x].get(action_sorting_column, {"elapsed":0})["elapsed"]) + + Templated.__init__(self) + class Embed(Templated): """wrapper for embedding /help into reddit as if it were not on a separate wiki.""" @@ -1578,6 +1830,8 @@ class ContributorList(UserList): @property def form_title(self): + if c.site.type == "public": + return _("add to whitelist") return _('add contributor') @property @@ -1826,7 +2080,9 @@ class UserText(CachedTemplate): class MediaEmbedBody(CachedTemplate): """What's rendered inside the iframe that contains media objects""" - pass + def render(self, *a, **kw): + res = CachedTemplate.render(self, *a, **kw) + return responsive(res, True) class Traffic(Templated): @staticmethod @@ -2058,6 +2314,28 @@ class RedditTraffic(Traffic): "%5.2f%%" % f)) return res +class RedditAds(Templated): + def __init__(self, **kw): + self.sr_name = c.site.name + self.adsrs = AdSR.by_sr_merged(c.site) + self.total = 0 + + self.adsrs.sort(key=lambda a: a._thing1.codename) + + seen = {} + for adsr in self.adsrs: + seen[adsr._thing1.codename] = True + self.total += adsr.weight + + self.other_ads = [] + all_ads = Ad._all_ads() + all_ads.sort(key=lambda a: a.codename) + for ad in all_ads: + if ad.codename not in seen: + self.other_ads.append(ad) + + Templated.__init__(self, **kw) + class PaymentForm(Templated): def __init__(self, **kw): 
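Each cell in the usage table is then coloured by comparing its per-day figure against the action's median day: the further above the median, the hotter the loadN class, and anything costing under five minutes of elapsed time per day is forced to load0. A condensed sketch of that bucketing (the real code applies the ice-cold check per category; this version just takes the daily elapsed figure as an optional argument):

THRESHOLDS = [(5.0, "load9"), (3.0, "load8"), (2.0, "load7"), (1.5, "load6"),
              (1.1, "load5"), (0.9, "load4"), (0.75, "load3"), (0.5, "load2"),
              (0.10, "load1")]

def load_class(scaled, median_value, daily_elapsed=None):
    # under five minutes of work per day: ice cold, regardless of ratio
    if daily_elapsed is not None and daily_elapsed < 5 * 60:
        return "load0"
    ratio = scaled / median_value
    for cutoff, cls in THRESHOLDS:
        if ratio > cutoff:
            return cls
    return "load0"

assert load_class(35.0, 10.0) == "load8"                        # 3.5x the median day
assert load_class(1.0, 10.0) == "load0"                         # a tenth of the median or less
assert load_class(400.0, 1000.0, daily_elapsed=120.0) == "load0"  # two minutes a day: ice cold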
self.countries = pycountry.countries @@ -2147,7 +2425,6 @@ class Promote_Graph(Templated): (total_sale, total_refund)), multiy = False) - # table is labeled as "last month" history = self.now - datetime.timedelta(30) self.top_promoters = bidding.PromoteDates.top_promoters(history) else: @@ -2239,9 +2516,79 @@ class RawString(Templated): def render(self, *a, **kw): return unsafe(self.s) -class Dart_Ad(Templated): +class Dart_Ad(CachedTemplate): def __init__(self, tag = None): tag = tag or "homepage" tracker_url = AdframeInfo.gen_url(fullname = "dart_" + tag, ip = request.ip) Templated.__init__(self, tag = tag, tracker_url = tracker_url) + + def render(self, *a, **kw): + res = CachedTemplate.render(self, *a, **kw) + return responsive(res, False) + +class HouseAd(CachedTemplate): + def __init__(self, imgurl=None, linkurl=None, submit_link=None): + Templated.__init__(self, imgurl = imgurl, linkurl = linkurl, + submit_link = submit_link) + + def render(self, *a, **kw): + res = CachedTemplate.render(self, *a, **kw) + return responsive(res, False) + +class ComScore(CachedTemplate): + pass + +def render_ad(reddit_name=None, codename=None): + if not reddit_name: + reddit_name = g.default_sr + + if codename: + if codename == "DART": + return Dart_Ad(reddit_name).render() + else: + try: + ad = Ad._by_codename(codename) + except NotFound: + abort(404) + attrs = ad.important_attrs() + return HouseAd(**attrs).render() + + try: + sr = Subreddit._by_name(reddit_name) + except NotFound: + return Dart_Ad(g.default_sr).render() + + ads = {} + + for adsr in AdSR.by_sr_merged(sr): + ad = adsr._thing1 + ads[ad.codename] = (ad, adsr.weight) + + total_weight = sum(t[1] for t in ads.values()) + + if total_weight == 0: + log_text("no ads", "No ads found for %s" % reddit_name, "error") + abort(404) + + lotto = random.randint(0, total_weight - 1) + winner = None + for t in ads.values(): + lotto -= t[1] + if lotto <= 0: + winner = t[0] + + if winner.codename == "DART": + return Dart_Ad(reddit_name).render() + else: + attrs = winner.important_attrs() + return HouseAd(**attrs).render() + + # No winner? 
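render_ad above picks which ad to serve with a weighted lottery: sum the weights configured for the subreddit, draw a random number below the total, and walk the list subtracting weights until the draw is used up. A standalone sketch of that selection; note this version returns at the first ad whose cumulative weight covers the draw, rather than reproducing the shipped loop verbatim:

import random

def pick_weighted(ads):
    """ads is a list of (ad_name, weight) pairs with non-negative weights."""
    total = sum(weight for _, weight in ads)
    if total == 0:
        return None
    draw = random.randint(0, total - 1)
    for name, weight in ads:
        draw -= weight
        if draw < 0:          # this ad's slice of [0, total) contains the draw
            return name
    return None               # unreachable if the weights are consistent

# "house" wins roughly twice as often as "dart":
print(pick_weighted([("house", 200), ("dart", 100)]))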
+ + log_text("no winner", + "No winner found for /r/%s, total_weight=%d" % + (reddit_name, total_weight), + "error") + + return Dart_Ad(reddit_name).render() diff --git a/r2/r2/lib/queues.py b/r2/r2/lib/queues.py index 1075316d0..0f637cf83 100644 --- a/r2/r2/lib/queues.py +++ b/r2/r2/lib/queues.py @@ -68,9 +68,12 @@ class RedditQueueMap(QueueMap): self._q('scraper_q') self._q('searchchanges_q', self_refer=True) self._q('newcomments_q') + self._q('commentstree_q') # this isn't in use until the spam_q plumbing is #self._q('newpage_q') self._q('register_vote_q', self_refer=True) + self._q('log_q', self_refer=True) + self._q('usage_q', self_refer=True) def bindings(self): self.newlink_bindings() @@ -87,6 +90,7 @@ class RedditQueueMap(QueueMap): def newcomment_bindings(self): self._bind('new_comment', 'newcomments_q') + self._bind('new_comment', 'commentstree_q') def newsubreddit_bindings(self): self._bind('new_subreddit', 'searchchanges_q') diff --git a/r2/r2/lib/solrsearch.py b/r2/r2/lib/solrsearch.py index b50cf2c62..b10ed3449 100644 --- a/r2/r2/lib/solrsearch.py +++ b/r2/r2/lib/solrsearch.py @@ -44,6 +44,8 @@ from r2.lib.utils import unicode_safe, tup from r2.lib.cache import SelfEmptyingCache from r2.lib import amqp +solr_cache_time = g.solr_cache_time + ## Changes to the list of searchable languages will require changes to ## Solr's configuration (specifically, the fields that are searched) searchable_langs = set(['dk','nl','en','fi','fr','de','it','no','nn','pt', @@ -485,7 +487,7 @@ class SearchQuery(object): return "<%s(%s)>" % (self.__class__.__name__, ", ".join(attrs)) - def run(self, after = None, num = 100, reverse = False): + def run(self, after = None, num = 1000, reverse = False): if not self.q: return pysolr.Results([],0) @@ -568,71 +570,24 @@ class SearchQuery(object): if reverse: sort = swap_strings(sort,'asc','desc') + after = after._fullname if after else None - if after: - # size of the pre-search to run in the case that we need - # to search more than once. A bigger one can reduce the - # number of searches that need to be run twice, but if - # it's bigger than the default display size, it could - # waste some - PRESEARCH_SIZE = num - - # run a search and get back the number of hits, so that we - # can re-run the search with that max_count. 
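The surrounding solrsearch changes drop the two-pass pre-search: run_search_cached is simply memoized for solr_cache_time seconds, each query asks solr for up to 1000 rows in one go, and "after" paging is applied to the cached fullname list in Python. A toy illustration of that paging step (mirroring the fixed get_after helper):

def get_after_sketch(fullnames, after, num):
    """Return up to num items that follow `after` in an already-fetched list."""
    if not after:
        return fullnames[:num]
    for i, item in enumerate(fullnames):
        if item == after:
            return fullnames[i + 1:i + num + 1]
    return fullnames[:num]    # `after` fell out of the result set; start over

docs = ["t3_a", "t3_b", "t3_c", "t3_d", "t3_e"]
assert get_after_sketch(docs, None, 2) == ["t3_a", "t3_b"]
assert get_after_sketch(docs, "t3_b", 2) == ["t3_c", "t3_d"]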
- pre_search = cls.run_search_cached(q, sort, 0, PRESEARCH_SIZE, - solr_params) - - if (PRESEARCH_SIZE >= pre_search.hits - or pre_search.hits == len(pre_search.docs)): - # don't run a second search if our pre-search found - # all of the elements anyway - search = pre_search - else: - # now that we know how many to request, we can request - # the whole lot - search = cls.run_search_cached(q, sort, 0, - pre_search.hits, - solr_params, max=True) - - search.docs = get_after(search.docs, after._fullname, num) - else: - search = cls.run_search_cached(q, sort, 0, num, solr_params) + search = cls.run_search_cached(q, sort, 0, num, solr_params) + search.docs = get_after(search.docs, after, num) return search @staticmethod - def run_search_cached(q, sort, start, rows, other_params, max=False): - "Run the search, first trying the best available cache" + @memoize('solr_search', solr_cache_time) + def run_search_cached(q, sort, start, rows, other_params): + with SolrConnection() as s: + g.log.debug(("Searching q = %r; sort = %r," + + " start = %r, rows = %r," + + " params = %r") + % (q,sort,start,rows,other_params)) - # first, try to see if we've cached the result for the entire - # dataset for that query, returning the requested slice of it - # if so. If that's not available, try the cache for the - # partial result requested (passing the actual search along to - # solr if both of those fail) - full_key = 'solrsearch_%s' % ','.join(('%r' % r) - for r in (q,sort,other_params)) - part_key = "%s,%d,%d" % (full_key, start, rows) - - full_cached = g.cache.get(full_key) - if full_cached: - res = pysolr.Results(hits = full_cached.hits, - docs = full_cached.docs[start:start+rows]) - else: - part_cached = g.cache.get(part_key) - if part_cached: - res = part_cached - else: - with SolrConnection() as s: - g.log.debug(("Searching q = %r; sort = %r," - + " start = %r, rows = %r," - + " params = %r, max = %r") - % (q,sort,start,rows,other_params,max)) - - res = s.search(q, sort, start = start, rows = rows, - other_params = other_params) - - g.cache.set(full_key if max else part_key, - res, time = g.solr_cache_time) + res = s.search(q, sort, start = start, rows = rows, + other_params = other_params) # extract out the fullname in the 'docs' field, since that's # all we care about @@ -708,11 +663,14 @@ class DomainSearchQuery(SearchQuery): qt='standard') def get_after(fullnames, fullname, num): + if not fullname: + return fullnames[:num] + for i, item in enumerate(fullnames): if item == fullname: return fullnames[i+1:i+num+1] - else: - return fullnames[:num] + + return fullnames[:num] def run_commit(optimize=False): diff --git a/r2/r2/lib/strings.py b/r2/r2/lib/strings.py index f3d11907d..8820a5a04 100644 --- a/r2/r2/lib/strings.py +++ b/r2/r2/lib/strings.py @@ -80,7 +80,7 @@ string_dict = dict( moderator = _("you have been added as a moderator to [%(title)s](%(url)s)."), contributor = _("you have been added as a contributor to [%(title)s](%(url)s)."), banned = _("you have been banned from posting to [%(title)s](%(url)s)."), - traffic = _('you have been added to the list of users able to see [traffic for the sponsoted link "%(title)s"](%(traffic_url)s).') + traffic = _('you have been added to the list of users able to see [traffic for the sponsored link "%(title)s"](%(traffic_url)s).') ), subj_add_friend = dict( @@ -117,12 +117,7 @@ string_dict = dict( permalink_title = _("%(author)s comments on %(title)s"), link_info_title = _("%(title)s : %(site)s"), banned_subreddit = _("""**this reddit has been banned**\n\nmost 
likely this was done automatically by our spam filtering program. the program is still learning, and may even have some bugs, so if you feel the ban was a mistake, please send a message to [our site admins](%(link)s) and be sure to include the **exact name of the reddit**."""), - comments_panel_text = _(""" - The following is a sample of what Reddit users had to say about this - page. The full discussion is available [here](%(fd_link)s); you can - also get there by clicking the link's title - (in the middle of the toolbar, to the right of the comments button). - """), + comments_panel_text = _("""The following is a sample of what Reddit users had to say about this page. The full discussion is available [here](%(fd_link)s); you can also get there by clicking the link's title (in the middle of the toolbar, to the right of the comments button)."""), submit_link = _("""You are submitting a link. The key to a successful submission is interesting content and a descriptive title."""), submit_text = _("""You are submitting a text-based post. Speak your mind. A title is required, but expanding further in the text field is not. Beginning your title with "vote up if" is violation of intergalactic law."""), @@ -130,7 +125,7 @@ string_dict = dict( verify_email = _("we're going to need to verify your email address for you to proceed."), email_verified = _("your email address has been verfied"), email_verify_failed = _("Verification failed. Please try that again"), - search_failed = _("Our search machines are under too much load to handle your request right now. :( Sorry for the inconvenience.\n\n[Try again](%(link)s) in a little bit -- but please don't mash reload; that only makes the problem worse.") + search_failed = _("Our search machines are under too much load to handle your request right now. :( Sorry for the inconvenience. 
[Try again](%(link)s) in a little bit -- but please don't mash reload; that only makes the problem worse.") ) class StringHandler(object): diff --git a/r2/r2/lib/traffic.py b/r2/r2/lib/traffic.py index 816a213ac..6c65d78bf 100644 --- a/r2/r2/lib/traffic.py +++ b/r2/r2/lib/traffic.py @@ -25,7 +25,7 @@ from cPickle import loads from utils import query_string import os, socket, time, datetime from pylons import g -from r2.lib.memoize import memoize, clear_memo +from r2.lib.memoize import memoize def load_traffic_uncached(interval, what, iden, start_time = None, stop_time = None, diff --git a/r2/r2/lib/translation.py b/r2/r2/lib/translation.py index 8a20356db..194a5a5ca 100644 --- a/r2/r2/lib/translation.py +++ b/r2/r2/lib/translation.py @@ -263,8 +263,8 @@ class TranslatedString(Templated): if indx < 0: return all(self.is_valid(i) for i in range(0,len(self.msgstr))) elif indx < len(self.msgstr): - return self.msgid.compatible(self.msgstr[indx]) or \ - self.msgstr.compatible(self.msgstr[indx]) + return self.msgid.compatible(self.msgstr[indx]) #or \ + #self.msgstr.compatible(self.msgstr[indx]) return True else: return self.msgid.compatible(self.msgstr) @@ -655,7 +655,7 @@ class Transliterator(AutoTranslator): def __init__(self, **kw): Translator.__init__(self, **kw) for string in self.strings: - if string.is_translated() \ + if not string.is_translated() \ and not isinstance(string, GettextHeader): if string.plural: string.add(self.translate(string.msgstr[0].unicode()), @@ -767,12 +767,80 @@ class TamilTranslator(Transliterator): t = t.replace(k, v) return t +class SerbianCyrillicTranslator(Transliterator): + letters = \ + (( "A" , u'\u0410'), + ( "B" , u'\u0411'), + ( "V" , u'\u0412'), + ( "G" , u'\u0413'), + ( "D" , u'\u0414'), + ( u'\u0110' , u'\u0402'), + ( "E" , u'\u0415'), + ( u"\u017d" , u'\u0416'), + ( "Z" , u'\u0417'), + ( "I" , u'\u0418'), + ( "J" , u'\u0408'), + ( "K" , u'\u041a'), + ( "L" , u'\u041b'), + ( "Lj" , u'\u0409'), + ( "M" , u'\u041c'), + ( "N" , u'\u041d'), + ( "Nj" , u'\u040a'), + ( "O" , u'\u041e'), + ( "P" , u'\u041f'), + ( "R" , u'\u0420'), + ( "S" , u'\u0421'), + ( "T" , u'\u0422'), + ( u"\u0106" , u'\u040b'), + ( "U" , u'\u0423'), + ( "F" , u'\u0424'), + ( 'H' , u'\u0425'), + ( "C" , u'\u0426'), + ( u"\u010c", u'\u0427'), + ( u"D\u017e", u'\u040f'), + ( u"\u0160", u'\u0428'), - - + ( "a" , u'\u0430'), + ( "b" , u'\u0431'), + ( "v" , u'\u0432'), + ( "g" , u'\u0433'), + ( "d" , u'\u0434'), + ( u'\u0111' , u'\u0452'), + ( "e" , u'\u0435'), + ( u"\u017e" , u'\u0436'), + ( "z" , u'\u0437'), + ( "i" , u'\u0438'), + ( "j" , u'\u0458'), + ( "k" , u'\u043a'), + ( "l" , u'\u043b'), + ( "lj" , u'\u0459'), + ( "m" , u'\u043c'), + ( "n" , u'\u043d'), + ( "nj" , u'\u045a'), + ( "o" , u'\u043e'), + ( "p" , u'\u043f'), + ( "r" , u'\u0440'), + ( "s" , u'\u0441'), + ( "t" , u'\u0442'), + ( u"\u0107" , u'\u045b'), + ( "u" , u'\u0443'), + ( "f" , u'\u0444'), + ( 'h' , u'\u0445'), + ( "c" , u'\u0446'), + ( u"\u010d", u'\u0447'), + ( u"d\u017e", u'\u045f'), + ( u"\u0161", u'\u0448')) + ligatures = [(x,y) for x, y in letters if len(x) == 2] + letters = dict((x, y) for x, y in letters if len(x) == 1) + def trans_rules(self, string): + for x, y in self.ligatures: + string = string.replace(x, y) + return "".join(self.letters.get(s, s) for s in string) + import random class LeetTranslator(AutoTranslator): def trans_rules(self, string): + print string key = dict(a=["4","@"], b=["8"], c=["("], d=[")", "|)"], e=["3"], @@ -786,9 +854,11 @@ class LeetTranslator(AutoTranslator): return ''.join(s) 
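The new SerbianCyrillicTranslator replaces the two-character ligatures (Lj, Nj, Dz-with-caron and their lowercase forms) before mapping single letters, so a digraph like "lj" is never split into two Cyrillic characters. A tiny standalone sketch of that two-pass transliteration with a deliberately abbreviated table:

PAIRS = [(u"Lj", u"\u0409"), (u"lj", u"\u0459"),
         (u"Nj", u"\u040a"), (u"nj", u"\u045a"),
         (u"L", u"\u041b"), (u"l", u"\u043b"),
         (u"N", u"\u041d"), (u"n", u"\u043d"),
         (u"a", u"\u0430"), (u"i", u"\u0438"),
         (u"o", u"\u043e"), (u"v", u"\u0432")]

LIGATURES = [(x, y) for x, y in PAIRS if len(x) == 2]
LETTERS = dict((x, y) for x, y in PAIRS if len(x) == 1)

def to_cyrillic(s):
    for latin, cyr in LIGATURES:          # multi-letter digraphs go first
        s = s.replace(latin, cyr)
    # anything not in the table (including already-converted letters) passes through
    return u"".join(LETTERS.get(ch, ch) for ch in s)

print(to_cyrillic(u"Ljiljana"))   # prints the Cyrillic spelling of "Ljiljana"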
def get_translator(locale): + #if locale == 'sr': + # return SerbianCyrillicTranslator(locale = locale) if locale == 'leet': return LeetTranslator(locale = locale) - elif locale == 'en': + elif locale.startswith('en'): return USEnglishTranslator(locale = locale) elif locale == 'ta': return TamilTranslator(locale = locale) diff --git a/r2/r2/lib/utils/utils.py b/r2/r2/lib/utils/utils.py index 92f8c22f9..e13aec807 100644 --- a/r2/r2/lib/utils/utils.py +++ b/r2/r2/lib/utils/utils.py @@ -19,7 +19,8 @@ # All portions of the code written by CondeNet are Copyright (c) 2006-2010 # CondeNet, Inc. All Rights Reserved. ################################################################################ -from urllib import unquote_plus, urlopen +from urllib import unquote_plus +from urllib2 import urlopen from urlparse import urlparse, urlunparse from threading import local import signal @@ -27,6 +28,8 @@ from copy import deepcopy import cPickle as pickle import re, math, random +from BeautifulSoup import BeautifulSoup + from datetime import datetime, timedelta from pylons.i18n import ungettext, _ from r2.lib.filters import _force_unicode @@ -54,6 +57,9 @@ def randstr(len, reallyrandom = False): return ''.join(random.choice(alphabet) for i in range(len)) +def is_authorized_cname(domain, cnames): + return any((domain == cname or domain.endswith('.' + cname)) + for cname in cnames) class Storage(dict): """ @@ -292,26 +298,31 @@ def path_component(s): res = r_path_component.findall(base_url(s)) return (res and res[0]) or s -r_title = re.compile('(.*?)<\/title>', re.I|re.S) -r_charset = re.compile("<meta.*charset\W*=\W*([\w_-]+)", re.I|re.S) -r_encoding = re.compile("<?xml.*encoding=\W*([\w_-]+)", re.I|re.S) def get_title(url): """Fetches the contents of url and extracts (and utf-8 encodes) - the contents of <title>""" - import chardet - if not url or not url.startswith('http://'): return None + the contents of <title>""" + if not url or not url.startswith('http://'): + return None + try: - content = urlopen(url).read() - t = r_title.findall(content) - if t: - title = t[0].strip() - en = (r_charset.findall(content) or - r_encoding.findall(content)) - encoding = en[0] if en else chardet.detect(content)["encoding"] - if encoding: - title = unicode(title, encoding).encode("utf-8") - return title - except: return None + # if we don't find it in the first kb of the resource, we + # probably won't find it + opener = urlopen(url, timeout=15) + text = opener.read(1024) + opener.close() + bs = BeautifulSoup(text) + if not bs: + return + + title_bs = bs.first('title') + + if not title_bs or title_bs.children: + return + + return title_bs.text.encode('utf-8') + + except: + return None valid_schemes = ('http', 'https', 'ftp', 'mailto') valid_dns = re.compile('^[-a-zA-Z0-9]+$') @@ -348,6 +359,9 @@ def sanitize_url(url, require_scheme = False): #if this succeeds, this portion of the dns is almost #valid and converted to ascii label = label.encode('idna') + except TypeError: + print "label sucks: [%r]" % label + raise except UnicodeError: return else: @@ -456,6 +470,10 @@ def to_base(q, alphabet): def to36(q): return to_base(q, '0123456789abcdefghijklmnopqrstuvwxyz') +def median(l): + if l: + return l[len(l)/2] + def query_string(dict): pairs = [] for k,v in dict.iteritems(): @@ -628,8 +646,9 @@ class UrlParser(object): g.domain, or a subdomain of the provided subreddit's cname. 
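One note on the new median helper in utils above: it returns the middle element of the list exactly as passed, so callers such as AdminUsage only get a true median when the values happen to arrive sorted. For comparison, a sorting variant would look like this (illustrative only, not what the patch ships):

def sorted_median(values):
    """Middle element after sorting; returns None for an empty list."""
    if not values:
        return None
    ordered = sorted(values)
    return ordered[len(ordered) // 2]

assert sorted_median([9, 1, 5]) == 5
assert sorted_median([4, 2]) == 4   # even length: this picks the upper middle value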
""" from pylons import g - return (not self.hostname or + return (not self.hostname or self.hostname.endswith(g.domain) or + is_authorized_cname(self.hostname, g.authorized_cnames) or (subreddit and subreddit.domain and self.hostname.endswith(subreddit.domain))) @@ -1147,3 +1166,20 @@ def in_chunks(it, size=25): except StopIteration: if chunk: yield chunk + +class Hell(object): + def __str__(self): + return "boom!" + +class Bomb(object): + @classmethod + def __getattr__(cls, key): + raise Hell() + + @classmethod + def __setattr__(cls, key, val): + raise Hell() + + @classmethod + def __repr__(cls): + raise Hell() diff --git a/r2/r2/lib/wrapped.py b/r2/r2/lib/wrapped.py index e574aba0f..a5560302b 100644 --- a/r2/r2/lib/wrapped.py +++ b/r2/r2/lib/wrapped.py @@ -23,7 +23,7 @@ from itertools import chain from datetime import datetime import re, types -class NoTemplateFound(Exception): pass +from hashlib import md5 class StringTemplate(object): """ @@ -54,7 +54,7 @@ class StringTemplate(object): self.template = unicode(template) except UnicodeDecodeError: self.template = unicode(template, "utf8") - + def update(self, d): """ Given a dictionary of replacement rules for the Template, @@ -134,20 +134,37 @@ class Templated(object): if not hasattr(self, "render_class"): self.render_class = self.__class__ + def _notfound(self, style): + from pylons import g, request + from pylons.controllers.util import abort + from r2.lib.log import log_text + if g.debug: + raise NotImplementedError (repr(self), style) + else: + if style == 'png': + level = "debug" + else: + level = "warning" + log_text("missing template", + "Couldn't find %s template for %r %s" % + (style, self, request.path), + level) + abort(404) + def template(self, style = 'html'): """ Fetches template from the template manager """ from r2.config.templates import tpm from pylons import g + debug = g.template_debug template = None try: template = tpm.get(self.render_class, style, cache = not debug) except AttributeError: - raise NoTemplateFound, (repr(self), style) - + self._notfound(style) return template def cache_key(self, *a): @@ -165,6 +182,7 @@ class Templated(object): """ from filters import unsafe from pylons import c + # the style has to default to the global render style # fetch template template = self.template(style) @@ -183,7 +201,7 @@ class Templated(object): c.render_style = render_style return res else: - raise NoTemplateFound, repr(self) + self._notfound(style) def _render(self, attr, style, **kwargs): """ @@ -249,7 +267,7 @@ class Templated(object): # in the tuple that is the current dict's values. # This dict cast will generate a new dict of cache_key # to value - cached = g.rendercache.get_multi(dict(current.values())) + cached = self._read_cache(dict(current.values())) # replacements will be a map of key -> rendered content # for updateing the current set of updates replacements = {} @@ -290,10 +308,10 @@ class Templated(object): # that we didn't find in the cache. 
# cache content that was newly rendered - g.rendercache.set_multi(dict((k, v) - for k, (v, kw) in updates.values() - if k in to_cache)) - + self._write_cache(dict((k, v) + for k, (v, kw) in updates.values() + if k in to_cache)) + # edge case: this may be the primary tempalte and cachable if isinstance(res, CacheStub): res = updates[res.name][1][0] @@ -321,8 +339,25 @@ class Templated(object): res = res.finalize(kwargs) return res - - + + def _write_cache(self, keys): + from pylons import g + + toset = dict((md5(key).hexdigest(), val) + for (key, val) + in keys.iteritems()) + g.rendercache.set_multi(toset) + + def _read_cache(self, keys): + from pylons import g + + ekeys = dict((md5(key).hexdigest(), key) + for key in keys) + found = g.rendercache.get_multi(ekeys) + return dict((ekeys[fkey], val) + for (fkey, val) + in found.iteritems()) + def render(self, style = None, **kw): from r2.lib.filters import unsafe res = self._render(None, style, **kw) @@ -380,7 +415,7 @@ class CachedTemplate(Templated): # can make the caching process-local. template_hash = getattr(self.template(style), "hash", id(self.__class__)) - + # these values are needed to render any link on the site, and # a menu is just a set of links, so we best cache against # them. @@ -453,9 +488,9 @@ class Wrapped(CachedTemplate): break except AttributeError: pass - + if not found: - raise AttributeError, attr + raise AttributeError, "%r has no %s" % (self, attr) setattr(self, attr, res) return res diff --git a/r2/r2/models/__init__.py b/r2/r2/models/__init__.py index fdf8db877..2301271f1 100644 --- a/r2/r2/models/__init__.py +++ b/r2/r2/models/__init__.py @@ -27,6 +27,7 @@ from vote import * from report import * from subreddit import * from award import * +from ad import * from bidding import * from mail_queue import Email, has_opted_out, opt_count from admintools import * diff --git a/r2/r2/models/account.py b/r2/r2/models/account.py index d11b626bf..91b73619d 100644 --- a/r2/r2/models/account.py +++ b/r2/r2/models/account.py @@ -23,7 +23,7 @@ from r2.lib.db.thing import Thing, Relation, NotFound from r2.lib.db.operators import lower from r2.lib.db.userrel import UserRel from r2.lib.memoize import memoize -from r2.lib.utils import modhash, valid_hash, randstr, timefromnow +from r2.lib.utils import modhash, valid_hash, randstr, timefromnow, UrlParser from r2.lib.cache import sgm from pylons import g @@ -61,6 +61,7 @@ class Account(Thing): pref_mark_messages_read = True, pref_threaded_messages = True, pref_collapse_read_messages = False, + pref_private_feeds = True, reported = 0, report_made = 0, report_correct = 0, @@ -301,7 +302,6 @@ class Account(Thing): else: g.hardcache.set("cup_info-%d" % self._id, cup_info, cache_lifetime) - def remove_cup(self): g.hardcache.delete("cup_info-%d" % self._id) @@ -313,6 +313,7 @@ class Account(Thing): ids = [ int(i) for i in ids ] return sgm(g.hardcache, ids, miss_fn=None, prefix="cup_info-") + class FakeAccount(Account): _nodb = True pref_no_profanity = True @@ -338,6 +339,29 @@ def valid_cookie(cookie): return (account, True) return (False, False) +def valid_feed(name, feedhash, path): + if name and feedhash and path: + from r2.lib.template_helpers import add_sr + path = add_sr(path) + try: + user = Account._by_name(name) + if (user.pref_private_feeds and + feedhash == make_feedhash(user, path)): + return user + except NotFound: + pass + +def make_feedhash(user, path): + return sha.new("".join([user.name, user.password, g.FEEDSECRET]) + ).hexdigest() + +def make_feedurl(user, path, ext = 
"rss"): + u = UrlParser(path) + u.update_query(user = user.name, + feed = make_feedhash(user, path)) + u.set_extension(ext) + return u.unparse() + def valid_login(name, password): try: a = Account._by_name(name) @@ -358,7 +382,7 @@ def valid_password(a, password): salt = a.password[:3] if a.password == passhash(a.name, password, salt): return a - except AttributeError: + except AttributeError, UnicodeEncodeError: return False def passhash(username, password, salt = ''): @@ -398,8 +422,18 @@ class DeletedUser(FakeAccount): def name(self): return '[deleted]' + @property + def _deleted(self): + return True + def _fullname(self): raise NotImplementedError def _id(self): raise NotImplementedError + + def __setattr__(self, attr, val): + if attr == '_deleted': + pass + else: + object.__setattr__(self, attr, val) diff --git a/r2/r2/models/ad.py b/r2/r2/models/ad.py new file mode 100644 index 000000000..181d953e6 --- /dev/null +++ b/r2/r2/models/ad.py @@ -0,0 +1,142 @@ +# The contents of this file are subject to the Common Public Attribution +# License Version 1.0. (the "License"); you may not use this file except in +# compliance with the License. You may obtain a copy of the License at +# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public +# License Version 1.1, but Sections 14 and 15 have been added to cover use of +# software over a computer network and provide for limited attribution for the +# Original Developer. In addition, Exhibit A has been modified to be consistent +# with Exhibit B. +# +# Software distributed under the License is distributed on an "AS IS" basis, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for +# the specific language governing rights and limitations under the License. +# +# The Original Code is Reddit. +# +# The Original Developer is the Initial Developer. The Initial Developer of the +# Original Code is CondeNet, Inc. +# +# All portions of the code written by CondeNet are Copyright (c) 2006-2008 +# CondeNet, Inc. All Rights Reserved. 
+################################################################################ +from r2.lib.db.thing import Thing, Relation, NotFound +from r2.lib.db.operators import asc, desc, lower +from r2.lib.memoize import memoize +from r2.models import Subreddit +from pylons import c, g, request + +class Ad (Thing): + _defaults = dict( + codename = None, + imgurl = None, + linkurl = None, + ) + + @classmethod + @memoize('ad.all_ads') + def _all_ads_cache(cls): + return [ a._id for a in Ad._query(sort=desc('_date'), limit=1000) ] + + @classmethod + def _all_ads(cls, _update=False): + all = cls._all_ads_cache(_update=_update) + # Can't just return Ad._byID() results because + # the ordering will be lost + d = Ad._byID(all, data=True) + return [ d[id] for id in all ] + + @classmethod + def _new(cls, codename, imgurl, linkurl): + print "Creating new ad codename=%s imgurl=%s linkurl=%s" % ( + codename, imgurl, linkurl) + a = Ad(codename=codename, imgurl=imgurl, linkurl=linkurl) + a._commit() + Ad._all_ads_cache(_update=True) + + @classmethod + def _by_codename(cls, codename): + q = cls._query(lower(Ad.c.codename) == codename.lower()) + q._limit = 1 + ad = list(q) + + if ad: + return cls._byID(ad[0]._id, True) + else: + raise NotFound, 'Ad %s' % codename + + def url(self): + return "%s/ads/%s" % (g.ad_domain, self.codename) + + def submit_link(self): + from r2.lib.template_helpers import get_domain + from mako.filters import url_escape + + d = get_domain(subreddit=False) + u = self.url() + + return "http://%s/r/ads/submit?url=%s" % (d, url_escape(u)) + + def important_attrs(self): + return dict(imgurl=self.imgurl, linkurl=self.linkurl, submit_link=self.submit_link()) + +class AdSR(Relation(Ad, Subreddit)): + @classmethod + def _new(cls, ad, sr, weight=100): + t = AdSR(ad, sr, "adsr") + t.weight = weight + t._commit() + + AdSR.by_ad(ad, _update=True) + AdSR.by_sr(sr, _update=True) + + @classmethod + @memoize('adsr.by_ad') + def by_ad_cache(cls, ad): + q = AdSR._query(AdSR.c._thing1_id == ad._id, + sort = desc('_date')) + q._limit = 500 + return [ t._id for t in q ] + + @classmethod + def by_ad(cls, ad, _update=False): + rel_ids = cls.by_ad_cache(ad, _update=_update) + adsrs = AdSR._byID_rel(rel_ids, data=True, eager_load=True, + thing_data=True, return_dict = False) + return adsrs + + @classmethod + @memoize('adsr.by_sr') + def by_sr_cache(cls, sr): + q = AdSR._query(AdSR.c._thing2_id == sr._id, + sort = desc('_date')) + q._limit = 500 + return [ t._id for t in q ] + + @classmethod + def by_sr(cls, sr, _update=False): + rel_ids = cls.by_sr_cache(sr, _update=_update) + adsrs = AdSR._byID_rel(rel_ids, data=True, eager_load=True, + thing_data=True, return_dict = False) + return adsrs + + @classmethod + def by_sr_merged(cls, sr, _update=False): + if sr.name == g.default_sr: + return cls.by_sr(sr) + + my_adsrs = cls.by_sr(sr) + global_adsrs = cls.by_sr(Subreddit._by_name(g.default_sr)) + + seen = {} + for adsr in my_adsrs: + seen[adsr._thing1.codename] = True + for adsr in global_adsrs: + if adsr._thing1.codename not in seen: + my_adsrs.append(adsr) + + return my_adsrs + + @classmethod + def by_ad_and_sr(cls, ad, sr): + q = cls._fast_query(ad, sr, "adsr") + return q.values()[0] diff --git a/r2/r2/models/admintools.py b/r2/r2/models/admintools.py index 32c98ab3a..a5880c09a 100644 --- a/r2/r2/models/admintools.py +++ b/r2/r2/models/admintools.py @@ -21,6 +21,7 @@ ################################################################################ from r2.lib.utils import tup from r2.lib.filters import websafe 
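AdSR.by_sr_merged above overlays a subreddit's own ad relations on top of the front page's: the subreddit's entries win, and any front-page ad whose codename the subreddit has not already configured is appended. A small sketch of that merge with plain (codename, weight) tuples instead of AdSR relations:

def merge_ads(local_ads, default_ads):
    """Each list holds (codename, weight) pairs; local entries take precedence."""
    merged = list(local_ads)
    seen = set(codename for codename, _ in local_ads)
    for codename, weight in default_ads:
        if codename not in seen:        # only add front-page ads the subreddit lacks
            merged.append((codename, weight))
    return merged

assert merge_ads([("house1", 50)], [("house1", 100), ("dart", 100)]) == \
       [("house1", 50), ("dart", 100)]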
+from r2.lib.log import log_text from r2.models import Report, Account from pylons import g @@ -30,7 +31,8 @@ from copy import copy class AdminTools(object): - def spam(self, things, auto, moderator_banned, banner, date = None, **kw): + def spam(self, things, auto=True, moderator_banned=False, + banner=None, date = None, **kw): from r2.lib.db import queries things = [x for x in tup(things) if not x._spam] @@ -98,7 +100,7 @@ class AdminTools(object): def set_last_sr_ban(self, things): by_srid = {} for thing in things: - if hasattr(thing, 'sr_id'): + if getattr(thing, 'sr_id', None) is not None: by_srid.setdefault(thing.sr_id, []).append(thing) if by_srid: @@ -117,7 +119,7 @@ def is_banned_IP(ip): return False def is_banned_domain(dom): - return False + return None def valid_thing(v, karma): return not v._thing1._spam @@ -125,6 +127,10 @@ def valid_thing(v, karma): def valid_user(v, sr, karma): return True +# Returns whether this person is being suspicious +def login_throttle(username, wrong_password): + return False + def apply_updates(user): pass diff --git a/r2/r2/models/award.py b/r2/r2/models/award.py index 4447fe31c..8b61938ed 100644 --- a/r2/r2/models/award.py +++ b/r2/r2/models/award.py @@ -21,11 +21,10 @@ ################################################################################ from r2.lib.db.thing import Thing, Relation, NotFound from r2.lib.db.userrel import UserRel -from r2.lib.db.operators import desc, lower +from r2.lib.db.operators import asc, desc, lower from r2.lib.memoize import memoize from r2.models import Account from pylons import c, g, request -from r2.lib.db.operators import asc class Award (Thing): _defaults = dict( diff --git a/r2/r2/models/builder.py b/r2/r2/models/builder.py index 4b701b768..82d847666 100644 --- a/r2/r2/models/builder.py +++ b/r2/r2/models/builder.py @@ -34,7 +34,7 @@ from r2.lib.wrapped import Wrapped from r2.lib import utils from r2.lib.db import operators from r2.lib.cache import sgm -from r2.lib.comment_tree import link_comments, user_messages, conversation, tree_sort_fn +from r2.lib.comment_tree import * from copy import deepcopy, copy import time @@ -90,18 +90,12 @@ class Builder(object): wrapped = [] count = 0 - if isinstance(c.site, FakeSubreddit): - mods = [] - else: - mods = c.site.moderators - modlink = '' - if c.cname: - modlink = '/about/moderators' - else: - modlink = '/r/%s/about/moderators' % c.site.name - - modlabel = (_('moderator of /r/%(reddit)s, speaking officially') % - dict(reddit = c.site.name) ) + modlink = {} + modlabel = {} + for s in subreddits.values(): + modlink[s._id] = '/r/%s/about/moderators' % s.name + modlabel[s._id] = (_('moderator of /r/%(reddit)s, speaking officially') % + dict(reddit = s.name) ) for item in items: @@ -142,9 +136,9 @@ class Builder(object): w.author and w.author.name in g.admins): add_attr(w.attribs, 'A') - if (w.distinguished == 'moderator' and - getattr(item, "author_id", None) in mods): - add_attr(w.attribs, 'M', label=modlabel, link=modlink) + if w.distinguished == 'moderator': + add_attr(w.attribs, 'M', label=modlabel[item.sr_id], + link=modlink[item.sr_id]) if w.author and w.author._id in cup_infos and not c.profilepage: cup_info = cup_infos[w.author._id] @@ -154,7 +148,7 @@ class Builder(object): label=label, link = "/user/%s" % w.author.name) - if hasattr(item, "sr_id"): + if hasattr(item, "sr_id") and item.sr_id is not None: w.subreddit = subreddits[item.sr_id] w.likes = likes.get((user, item)) @@ -204,6 +198,8 @@ class Builder(object): w.moderator_banned = 
ban_info.get('moderator_banned', False) w.autobanned = ban_info.get('auto', False) w.banner = ban_info.get('banner') + if hasattr(w, "author") and w.author._spam: + w.show_spam = "author" elif getattr(item, 'reported', 0) > 0: w.show_reports = True @@ -240,7 +236,7 @@ class QueryBuilder(Builder): self.start_count = kw.get('count', 0) or 0 self.after = kw.get('after') self.reverse = kw.get('reverse') - + self.prewrap_fn = None if hasattr(query, 'prewrap_fn'): self.prewrap_fn = query.prewrap_fn @@ -372,18 +368,30 @@ class QueryBuilder(Builder): class IDBuilder(QueryBuilder): def init_query(self): - names = self.names = list(tup(self.query)) + names = list(tup(self.query)) - if self.reverse: + after = self.after._fullname if self.after else None + + self.names = self._get_after(names, + after, + self.reverse) + + @staticmethod + def _get_after(l, after, reverse): + names = list(l) + + if reverse: names.reverse() - if self.after: + if after: try: - i = names.index(self.after._fullname) + i = names.index(after) except ValueError: - self.names = () + names = () else: - self.names = names[i + 1:] + names = names[i + 1:] + + return names def fetch_more(self, last_item, num_have): done = False @@ -405,14 +413,22 @@ class IDBuilder(QueryBuilder): return done, new_items -class SearchBuilder(QueryBuilder): +class SearchBuilder(IDBuilder): def init_query(self): self.skip = True - self.total_num = 0 - self.start_time = time.time() self.start_time = time.time() + search = self.query.run() + names = list(search.docs) + self.total_num = search.hits + + after = self.after._fullname if self.after else None + + self.names = self._get_after(names, + after, + self.reverse) + def keep_item(self,item): # doesn't use the default keep_item because we want to keep # things that were voted on, even if they've chosen to hide @@ -422,31 +438,6 @@ class SearchBuilder(QueryBuilder): else: return True - - def fetch_more(self, last_item, num_have): - from r2.lib import solrsearch - - done = False - limit = None - if self.num: - num_need = self.num - num_have - if num_need <= 0: - return True, None - else: - limit = max(int(num_need * EXTRA_FACTOR), 1) - else: - done = True - - search = self.query.run(after = last_item or self.after, - reverse = self.reverse, - num = limit) - - new_items = Thing._by_fullname(search.docs, data = True, return_dict=False) - - self.total_num = search.hits - - return done, new_items - def empty_listing(*things): parent_name = None for t in things: @@ -484,9 +475,21 @@ class CommentBuilder(Builder): for j in self.item_iter(i.child.things): yield j - def get_items(self, num, starting_depth = 0): + def get_items(self, num): r = link_comments(self.link._id) cids, comment_tree, depth, num_children = r + + if (not isinstance(self.comment, utils.iters) + and self.comment and not self.comment._id in depth): + g.log.error("self.comment (%d) not in depth. Forcing update..." + % self.comment._id) + + r = link_comments(self.link._id, _update=True) + cids, comment_tree, depth, num_children = r + + if not self.comment._id in depth: + g.log.error("Update didn't help. 
This is gonna end in tears.") + if cids: comments = set(Comment._byID(cids, data = True, return_dict = False)) @@ -503,7 +506,11 @@ class CommentBuilder(Builder): extra = {} top = None dont_collapse = [] + ignored_parent_ids = [] #loading a portion of the tree + + start_depth = 0 + if isinstance(self.comment, utils.iters): candidates = [] candidates.extend(self.comment) @@ -514,6 +521,10 @@ class CommentBuilder(Builder): #if hasattr(candidates[0], "parent_id"): # parent = comment_dict[candidates[0].parent_id] # items.append(parent) + if (hasattr(candidates[0], "parent_id") and + candidates[0].parent_id is not None): + ignored_parent_ids.append(candidates[0].parent_id) + start_depth = depth[candidates[0].parent_id] #if permalink elif self.comment: top = self.comment @@ -549,7 +560,7 @@ class CommentBuilder(Builder): comments.remove(to_add) if to_add._deleted and not comment_tree.has_key(to_add._id): pass - elif depth[to_add._id] < self.max_depth: + elif depth[to_add._id] < self.max_depth + start_depth: #add children if comment_tree.has_key(to_add._id): candidates.extend(comment_tree[to_add._id]) @@ -589,6 +600,11 @@ class CommentBuilder(Builder): #put the extras in the tree for p_id, morelink in extra.iteritems(): + if p_id not in cids: + if p_id in ignored_parent_ids: + raise KeyError("%r not in cids because it was ignored" % p_id) + else: + raise KeyError("%r not in cids but it wasn't ignored" % p_id) parent = cids[p_id] parent.child = empty_listing(morelink) parent.child.parent_name = parent._fullname @@ -641,9 +657,9 @@ class CommentBuilder(Builder): return final class MessageBuilder(Builder): - def __init__(self, user, parent = None, focal = None, + def __init__(self, parent = None, focal = None, skip = True, **kw): - self.user = user + self.num = kw.pop('num', None) self.focal = focal self.parent = parent @@ -661,11 +677,11 @@ class MessageBuilder(Builder): for j in i.child.things: yield j + def get_tree(self): + raise NotImplementedError, "get_tree" + def get_items(self): - if self.parent: - tree = conversation(self.user, self.parent) - else: - tree = user_messages(self.user) + tree = self.get_tree() prev = next = None if not self.parent: @@ -747,6 +763,37 @@ class MessageBuilder(Builder): return (final, prev, next, len(final), len(final)) +class SrMessageBuilder(MessageBuilder): + def __init__(self, sr, **kw): + self.sr = sr + MessageBuilder.__init__(self, **kw) + + def get_tree(self): + if self.parent: + return sr_conversation(self.sr, self.parent) + return subreddit_messages(self.sr) + +class UserMessageBuilder(MessageBuilder): + def __init__(self, user, **kw): + self.user = user + MessageBuilder.__init__(self, **kw) + + def get_tree(self): + if self.parent: + return conversation(self.user, self.parent) + return user_messages(self.user) + +class ModeratorMessageBuilder(MessageBuilder): + def __init__(self, user, **kw): + self.user = user + MessageBuilder.__init__(self, **kw) + + def get_tree(self): + if self.parent: + return conversation(self.user, self.parent) + return moderator_messages(self.user) + + def make_wrapper(parent_wrapper = Wrapped, **params): def wrapper_fn(thing): w = parent_wrapper(thing) @@ -765,5 +812,5 @@ class TopCommentBuilder(CommentBuilder): max_depth = 1, wrap = wrap) def get_items(self, num = 10): - final = CommentBuilder.get_items(self, num = num, starting_depth = 0) + final = CommentBuilder.get_items(self, num = num) return [ cm for cm in final if not cm.deleted ] diff --git a/r2/r2/models/link.py b/r2/r2/models/link.py index ff94b8457..35283aa5e 100644 
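CommentBuilder.get_items above now defends against a stale comment tree: if the permalinked comment's id is missing from the cached depth map, it forces link_comments(..., _update=True) and retries before logging that the update did not help. A generic sketch of that fetch, validate, refresh-once pattern:

def get_with_refresh(fetch, expected_id, log):
    """fetch(update) returns a mapping; refresh once if expected_id is missing."""
    data = fetch(update=False)
    if expected_id not in data:
        log("id %r missing from cached tree, forcing update" % expected_id)
        data = fetch(update=True)
        if expected_id not in data:
            log("update didn't help; rendering will be incomplete")
    return data

# toy usage with a dict standing in for the cached comment tree
cache = {"tree": {1: 0, 2: 1}}
def fake_fetch(update=False):
    if update:
        cache["tree"][3] = 1    # pretend the recompute found the new comment
    return cache["tree"]

def log(msg):
    print(msg)

print(get_with_refresh(fake_fetch, 3, log))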
--- a/r2/r2/models/link.py +++ b/r2/r2/models/link.py @@ -53,7 +53,7 @@ class Link(Thing, Printable): disable_comments = False, selftext = '', ip = '0.0.0.0') - + _essentials = ('sr_id',) _nsfw = re.compile(r"\bnsfw\b", re.I) def __init__(self, *a, **kw): @@ -72,7 +72,7 @@ class Link(Thing, Printable): from subreddit import Default if sr == Default: sr = None - + url = cls.by_url_key(url) link_ids = g.permacache.get(url) if link_ids: @@ -131,7 +131,7 @@ class Link(Thing, Printable): l._commit() l.set_url_cache() if author._spam: - admintools.spam(l, True, False, 'banned user') + admintools.spam(l, banner='banned user') return l @classmethod @@ -186,15 +186,15 @@ class Link(Thing, Printable): if self._spam and (not user or (user and self.author_id != user._id)): return False - + #author_karma = wrapped.author.link_karma #if author_karma <= 0 and random.randint(author_karma, 0) != 0: #return False - if user: + if user and not c.ignore_hide_rules: if user.pref_hide_ups and wrapped.likes == True: return False - + if user.pref_hide_downs and wrapped.likes == False: return False @@ -325,9 +325,7 @@ class Link(Thing, Printable): item.score = max(0, item.score) item.domain = (domain(item.url) if not item.is_self - else 'self.' + item.subreddit.name) - if not hasattr(item,'top_link'): - item.top_link = False + else 'self.' + item.subreddit.name) item.urlprefix = '' item.saved = bool(saved.get((user, item, 'save'))) item.hidden = bool(hidden.get((user, item, 'hide'))) @@ -389,8 +387,10 @@ class Link(Thing, Printable): item.link_child = SelfTextChild(item, expand = expand, nofollow = item.nofollow) #draw the edit button if the contents are pre-expanded - item.editable = expand and item.author == c.user - + item.editable = (expand and + item.author == c.user and + not item._deleted) + item.tblink = "http://%s/tb/%s" % ( get_domain(cname = cname, subreddit=False), item._id36) @@ -424,6 +424,11 @@ class Link(Thing, Printable): item.midcolmargin = CachedVariable("midcolmargin") item.comment_label = CachedVariable("numcomments") + item.as_deleted = False + if item.deleted and not c.user_is_admin: + item.author = DeletedUser() + item.as_deleted = True + if user_is_loggedin: incr_counts(wrapped) @@ -468,7 +473,7 @@ class PromotedLink(Link): class Comment(Thing, Printable): _data_int_props = Thing._data_int_props + ('reported',) - _defaults = dict(reported = 0, parent_id = None, + _defaults = dict(reported = 0, parent_id = None, moderator_banned = False, new = False, banned_before_moderator = False) @@ -596,7 +601,7 @@ class Comment(Thing, Printable): if not hasattr(item, 'subreddit'): item.subreddit = item.subreddit_slow - if item.author_id == item.link.author_id: + if item.author_id == item.link.author_id and not item.link._deleted: add_attr(item.attribs, 'S', link = item.link.make_permalink(item.subreddit)) if not hasattr(item, 'target'): @@ -708,6 +713,14 @@ class MoreMessages(Printable): def recipient(self): return self.parent.recipient + @property + def sr_id(self): + return self.parent.sr_id + + @property + def subreddit(self): + return self.parent.subreddit + class MoreComments(Printable): cachable = False @@ -746,47 +759,94 @@ class MoreChildren(MoreComments): class Message(Thing, Printable): _defaults = dict(reported = 0, was_comment = False, parent_id = None, - new = False, first_message = None, - to_collapse = None, author_collapse = None) + new = False, first_message = None, to_id = None, + sr_id = None, to_collapse = None, author_collapse = None) _data_int_props = Thing._data_int_props + 
('reported', ) - cache_ignore = set(["to"]).union(Printable.cache_ignore) + cache_ignore = set(["to", "subreddit"]).union(Printable.cache_ignore) @classmethod - def _new(cls, author, to, subject, body, ip, parent = None): + def _new(cls, author, to, subject, body, ip, parent = None, sr = None): m = Message(subject = subject, body = body, author_id = author._id, new = True, ip = ip) m._spam = author._spam + sr_id = None + # check to see if the recipient is a subreddit and swap args accordingly + if to and isinstance(to, Subreddit): + to, sr = None, to + + if sr: + sr_id = sr._id if parent: m.parent_id = parent._id if parent.first_message: m.first_message = parent.first_message else: m.first_message = parent._id + if parent.sr_id: + sr_id = parent.sr_id + + if not to and not sr_id: + raise CreationError, "Message created with neither to nor sr_id" + + m.to_id = to._id if to else None + if sr_id is not None: + m.sr_id = sr_id - m.to_id = to._id m._commit() - #author = Author(author, m, 'author') - #author._commit() - - # only global admins can be message spammed. inbox_rel = None - if not m._spam or to.name in g.admins: - inbox_rel = Inbox._add(to, m, 'inbox') + if sr_id and not sr: + sr = Subreddit._byID(sr_id) + inbox_rel = [] + # if there is a subreddit id, we have to add it to the moderator inbox + if sr_id: + inbox_rel.append(ModeratorInbox._add(sr, m, 'inbox')) + if author.name in g.admins: + m.distinguished = 'admin' + m._commit() + elif sr.is_moderator(author): + m.distinguished = 'yes' + m._commit() + # if there is a "to" we may have to create an inbox relation as well + # also, only global admins can be message spammed. + if to and (not m._spam or to.name in g.admins): + # if the current "to" is not a sr moderator, + # they need to be notified + if not sr_id or not sr.is_moderator(to): + inbox_rel.append(Inbox._add(to, m, 'inbox')) + # find the message originator + elif sr_id and m.first_message: + first = Message._byID(m.first_message, True) + orig = Account._byID(first.author_id) + # if the originator is not a moderator... 
+ if not sr.is_moderator(orig) and orig._id != author._id: + inbox_rel.append(Inbox._add(orig, m, 'inbox')) return (m, inbox_rel) @property def permalink(self): return "/message/messages/%s" % self._id36 - def can_view(self): - return (c.user_is_loggedin and - (c.user_is_admin or - c.user._id in (self.author_id, self.to_id))) + def can_view_slow(self): + if c.user_is_loggedin: + # simple case from before: + if (c.user_is_admin or + c.user._id in (self.author_id, self.to_id)): + return True + elif self.sr_id: + sr = Subreddit._byID(self.sr_id) + is_moderator = sr.is_moderator(c.user) + # moderators can view messages on subreddits they moderate + if is_moderator: + return True + elif self.first_message: + first = Message._byID(self.first_message, True) + return (first.author_id == c.user._id) + @classmethod def add_props(cls, user, wrapped): @@ -795,19 +855,33 @@ class Message(Thing, Printable): #reset msgtime after this request msgtime = c.have_messages - #load the "to" field if required - to_ids = set(w.to_id for w in wrapped) + # make sure there is a sr_id set: + for w in wrapped: + if not hasattr(w, "sr_id"): + w.sr_id = None + + # load the to fields if one exists + to_ids = set(w.to_id for w in wrapped if w.to_id is not None) tos = Account._byID(to_ids, True) if to_ids else {} + + # load the subreddit field if one exists: + sr_ids = set(w.sr_id for w in wrapped if w.sr_id is not None) + m_subreddits = Subreddit._byID(sr_ids, data = True, return_dict = True) + + # load the links and their subreddits (if comment-as-message) links = Link._byID(set(l.link_id for l in wrapped if l.was_comment), data = True, return_dict = True) - subreddits = Subreddit._byID(set(l.sr_id for l in links.values()), - data = True, return_dict = True) + # subreddits of the links (for comment-as-message) + l_subreddits = Subreddit._byID(set(l.sr_id for l in links.values()), + data = True, return_dict = True) + parents = Comment._byID(set(l.parent_id for l in wrapped if l.parent_id and l.was_comment), data = True, return_dict = True) # load the inbox relations for the messages to determine new-ness + # TODO: query cache? 
inbox = Inbox._fast_query(c.user, [item.lookups[0] for item in wrapped], ['inbox', 'selfreply']) @@ -816,18 +890,36 @@ class Message(Thing, Printable): inbox = dict((m._fullname, v) for (u, m, n), v in inbox.iteritems() if v) - for item in wrapped: - item.to = tos[item.to_id] - item.recipient = (item.to_id == c.user._id) + modinbox = ModeratorInbox._query( + ModeratorInbox.c._thing2_id == [item._id for item in wrapped], + data = True) + + # best to not have to eager_load the things + def make_message_fullname(mid): + return "t%s_%s" % (utils.to36(Message._type_id), utils.to36(mid)) + modinbox = dict((make_message_fullname(v._thing2_id), v) + for v in modinbox) + + for item in wrapped: + item.to = tos.get(item.to_id) + if item.sr_id: + item.recipient = (item.author_id != c.user._id) + else: + item.recipient = (item.to_id == c.user._id) - # don't mark non-recipient messages as new - if not item.recipient: - item.new = False # new-ness is stored on the relation + if item.author_id == c.user._id: + item.new = False elif item._fullname in inbox: item.new = getattr(inbox[item._fullname], "new", False) - if item.new and c.user.pref_mark_messages_read: - queries.set_unread(inbox[item._fullname]._thing2, False) + # wipe new messages if preferences say so, and this isn't a feed + # and it is in the user's personal inbox + if (item.new and c.user.pref_mark_messages_read + and c.extension not in ("rss", "xml", "api", "json")): + queries.set_unread(inbox[item._fullname]._thing2, + c.user, False) + elif item._fullname in modinbox: + item.new = getattr(modinbox[item._fullname], "new", False) else: item.new = False @@ -835,9 +927,10 @@ class Message(Thing, Printable): item.score_fmt = Score.none item.message_style = "" + # comment as message: if item.was_comment: link = links[item.link_id] - sr = subreddits[link.sr_id] + sr = l_subreddits[link.sr_id] item.to_collapse = False item.author_collapse = False item.link_title = link.title @@ -851,6 +944,9 @@ class Message(Thing, Printable): else: item.subject = _('post reply') item.message_style = "post-reply" + elif item.sr_id is not None: + item.subreddit = m_subreddits[item.sr_id] + if c.user.pref_no_profanity: item.subject = profanity_filter(item.subject) @@ -866,12 +962,17 @@ class Message(Thing, Printable): # Run this last Printable.add_props(user, wrapped) + @property + def subreddit_slow(self): + from subreddit import Subreddit + if self.sr_id: + return Subreddit._byID(self.sr_id) + @staticmethod def wrapped_cache_key(wrapped, style): s = Printable.wrapped_cache_key(wrapped, style) - s.extend([c.msg_location, wrapped.new, wrapped.collapsed]) + s.extend([wrapped.new, wrapped.collapsed]) return s - def keep_item(self, wrapped): return True @@ -914,3 +1015,35 @@ class Inbox(MultiRelation('inbox', res.append(i) return res + +class ModeratorInbox(Relation(Subreddit, Message)): + #TODO: shouldn't dupe this + @classmethod + def _add(cls, sr, obj, *a, **kw): + i = ModeratorInbox(sr, obj, *a, **kw) + i.new = True + i._commit() + + if not sr._loaded: + sr._load() + + moderators = Account._byID(sr.moderator_ids(), return_dict = False) + for m in moderators: + if obj.author_id != m._id and not getattr(m, 'modmsgtime', None): + m.modmsgtime = obj._date + m._commit() + + return i + + @classmethod + def set_unread(cls, thing, unread): + inbox = cls._query(cls.c._thing2_id == thing._id, + eager_load = True) + res = [] + for i in inbox: + if i: + i.new = unread + i._commit() + res.append(i) + return res + diff --git a/r2/r2/models/mail_queue.py 
b/r2/r2/models/mail_queue.py index da91ba967..8f25654f6 100644 --- a/r2/r2/models/mail_queue.py +++ b/r2/r2/models/mail_queue.py @@ -300,6 +300,7 @@ class Email(object): "FINISHED_PROMO", "NEW_PROMO", "HELP_TRANSLATE", + "NERDMAIL" ) subjects = { @@ -318,6 +319,7 @@ class Email(object): Kind.FINISHED_PROMO : _("[reddit] your promotion has finished"), Kind.NEW_PROMO : _("[reddit] your promotion has been created"), Kind.HELP_TRANSLATE : _("[i18n] translation offer from '%(user)s'"), + Kind.NERDMAIL : _("[reddit] hey, nerd!"), } def __init__(self, user, thing, email, from_name, date, ip, banned_ip, diff --git a/r2/r2/models/subreddit.py b/r2/r2/models/subreddit.py index ebd69fc84..00a43d2f9 100644 --- a/r2/r2/models/subreddit.py +++ b/r2/r2/models/subreddit.py @@ -50,12 +50,11 @@ class Subreddit(Thing, Printable): allow_top = False, # overridden in "_new" description = '', images = {}, - ad_type = None, - ad_file = os.path.join(g.static_path, 'ad_default.html'), reported = 0, valid_votes = 0, show_media = False, - css_on_cname = True, + css_on_cname = True, + use_whitelist = False, domain = None, over_18 = False, mod_actions = 0, @@ -64,6 +63,7 @@ class Subreddit(Thing, Printable): sponsorship_img = None, sponsorship_name = None, ) + _essentials = ('type', 'name') _data_int_props = ('mod_actions',) sr_limit = 50 @@ -98,7 +98,11 @@ class Subreddit(Thing, Printable): q = cls._query(lower(cls.c.name) == name.lower(), cls.c._spam == (True, False), limit = 1) - l = list(q) + try: + l = list(q) + except UnicodeEncodeError: + print "Error looking up SR %r" % name + raise if l: return l[0]._id @@ -199,8 +203,9 @@ class Subreddit(Thing, Printable): return (user and (c.user_is_admin or self.is_moderator(user) - or (self.type in ('restricted', 'private') - and self.is_contributor(user)))) + or ((self.type in ('restricted', 'private') or + self.use_whitelist) and + self.is_contributor(user)))) def can_give_karma(self, user): return self.is_special(user) @@ -213,8 +218,8 @@ class Subreddit(Thing, Printable): rl_karma = g.MIN_RATE_LIMIT_COMMENT_KARMA else: rl_karma = g.MIN_RATE_LIMIT_KARMA - - return not (self.is_special(user) or + + return not (self.is_special(user) or user.karma(kind, self) >= rl_karma) def can_view(self, user): @@ -231,7 +236,8 @@ class Subreddit(Thing, Printable): def load_subreddits(cls, links, return_dict = True): """returns the subreddits for a list of links. 
it also preloads the permissions for the current user.""" - srids = set(l.sr_id for l in links if hasattr(l, "sr_id")) + srids = set(l.sr_id for l in links + if getattr(l, "sr_id", None) is not None) subreddits = {} if srids: subreddits = cls._byID(srids, True) @@ -312,7 +318,7 @@ class Subreddit(Thing, Printable): data = True, read_cache = True, write_cache = True, - cache_time = g.page_cache_time) + cache_time = 3600) if lang != 'all': pop_reddits._filter(Subreddit.c.lang == lang) @@ -579,15 +585,12 @@ class DefaultSR(FakeSubreddit): srs = Subreddit._byID(sr_ids, return_dict = False) if g.use_query_cache: - results = [] - for sr in srs: - results.append(queries.get_links(sr, sort, time)) - return queries.merge_cached_results(*results) + results = [queries.get_links(sr, sort, time) + for sr in srs] + return queries.merge_results(*results) else: q = Link._query(Link.c.sr_id == sr_ids, sort = queries.db_sort(sort)) - if sort == 'toplinks': - q._filter(Link.c.top_link == True) if time != 'all': q._filter(queries.db_times[time]) return q @@ -652,7 +655,7 @@ class DomainSR(FakeSubreddit): def get_links(self, sort, time): from r2.lib.db import queries return queries.get_domain_links(self.domain, sort, time) - + Sub = SubSR() Friends = FriendsSR() All = AllSR() diff --git a/r2/r2/public/static/comscore.html b/r2/r2/public/static/comscore.html new file mode 120000 index 000000000..939a56851 --- /dev/null +++ b/r2/r2/public/static/comscore.html @@ -0,0 +1 @@ +../../templates/comscore.html \ No newline at end of file diff --git a/r2/r2/public/static/css/reddit.css b/r2/r2/public/static/css/reddit.css index ac7fd2b64..fc365b3b5 100644 --- a/r2/r2/public/static/css/reddit.css +++ b/r2/r2/public/static/css/reddit.css @@ -149,6 +149,11 @@ ul.flat-vert {text-align: left;} } #mail img {position: relative; top: 2px} +#modmail img {position: relative; top: 4px; margin-top: -6px; } +#modmail.nohavemail { + opacity: .7; + filter:alpha(opacity=70); /* IE patch */ +} .user {color: gray;} @@ -692,6 +697,10 @@ ul.flat-vert {text-align: left;} margin: 5px; margin-right: 15px; } +.md td, .md th { border: 1px solid #EEE; padding: 1px 3px; } +.md th { font-weight: bold; } +.md table { margin: 5px 10px; } +.md center { text-align: left; } /*top link*/ a.star { text-decoration: none; color: #ff8b60 } @@ -912,6 +921,12 @@ textarea.gray { color: gray; } margin-left: 12px; } +.message.was-comment .child .message, +.message.was-comment .child .usertext { + margin-top: 0px; + margin-left: 0px; +} + .message .expand { display: none; } @@ -1148,16 +1163,16 @@ textarea.gray { color: gray; } } .server-status td { padding-right: 2px; padding-left: 2px; } .server-status .bar { height: 5px; background-color: blue; } -.server-status .load0 { background-color: #FFFFFF; } -.server-status .load1 { background-color: #f0f5FF; } -.server-status .load2 { background-color: #E2ECFF; } -.server-status .load3 { background-color: #d6f5cb; } -.server-status .load4 { background-color: #CAFF98; } -.server-status .load5 { background-color: #e4f484; } -.server-status .load6 { background-color: #FFEA71; } -.server-status .load7 { background-color: #ffdb81; } -.server-status .load8 { background-color: #FF9191; } -.server-status .load9 { background-color: #FF0000; color: #FFFFFF } +.load0 { background-color: #FFFFFF; } /* white */ +.load1 { background-color: #f0f5FF; } /* pale blue */ +.load2 { background-color: #E2ECFF; } /* blue */ +.load3 { background-color: #d6f5cb; } /* pale green */ +.load4 { background-color: #CAFF98; } /* green */ +.load5 { 
background-color: #e4f484; } /* yellowgreen */ +.load6 { background-color: #FFEA71; } /* orange */ +.load7 { background-color: #ffdb81; } /* orangerose */ +.load8 { background-color: #FF9191; } /* pink */ +.load9 { background-color: #FF0000; color: #FFFFFF } /* red */ .server-status tr.down > * { background-color: #C0C0C0; text-decoration: line-through; @@ -1428,6 +1443,250 @@ textarea.gray { color: gray; } .oldbylink a { background-color: #F0F0F0; margin: 2px; color: gray} +.error-log { + clear: both; +} + +.error-log a:hover { text-decoration: underline } + +.error-log .rest { + display: none; +} + +.error-log:first-child .rest { + display: block; +} + +.error-log, .error-log .exception { + border: solid #aaa 1px; + padding: 3px 5px; + margin-bottom: 10px; +} + +.error-log .exception { + background-color: #f0f0f8; +} + +.error-log .exception.new { + border: dashed #ff6600 2px; +} + +.error-log .exception.severe { + border: solid #ff0000 2px; + background-color: #ffdfdf; +} + +.error-log .exception.interesting { + border: dotted black 2px; + background-color: #e0e0e8; +} + +.error-log .exception.fixed { + border: solid #008800 1px; + background-color: #e8f6e8; +} + +.error-log .exception span { + font-weight: bold; + margin-right: 5px; +} + +.error-log .exception span.normal { + margin-right: 0; + display: none; +} + +.error-log .exception span.new, .error-log .edit-area label.new { + color: #ff6600; +} + +.error-log .exception span.severe, .error-log .edit-area label.severe { + color: #ff0000; +} + +.error-log .exception span.interesting, .error-log .edit-area label.interesting { + font-weight: normal; + font-style: italic; +} + +.error-log .exception span.fixed, .error-log .edit-area label.fixed { + color: #008800; +} + +.error-log .exception-name { + margin-right: 5px; +} + +.error-log .nickname { + color: black; + font-weight: bold; + font-size: larger; +} + +.error-log .exception.fixed .nickname { + text-decoration: line-through; +} + +.error-log a:focus { + -moz-outline-style: none; +} + +.error-log .edit-area { + border: solid black 1px; + background-color: #eee; +} + +.error-log .edit-area label { + margin-right: 25px; +} + +.error-log .edit-area input[type=radio] { + margin-right: 4px; +} + +.error-log .edit-area input[type=text] { + width: 800px; +} + +.error-log .edit-area table td, .error-log .edit-area table th { + padding: 5px 0 0 5px; +} + +.error-log .save-button { + margin: 0 5px 5px 0; + font-size: small; + padding: 0; +} + +.error-log .date { + font-size: 150%; + font-weight: bold; +} + +.error-log .hexkey { + color: #997700; +} + +.error-log .exception-name { + font-size: larger; + color: #000077; +} + +.error-log .frequency { + font-size: larger; + float: right; + color: #886666; +} + +.error-log .occurrences { + border: solid #003300 1px; + margin: 5px 0 2px; + padding: 2px; +} + +.error-log .occurrence { + color: #003300; + font-family: monospace; + margin-right: 3em; + white-space: nowrap; +} + +.error-log table.stacktrace th, .error-log table.stacktrace td { + border: solid 1px #aaa; +} + +.error-log table.stacktrace td { + font-family: monospace; +} + +.error-log table.stacktrace td.col-1 { + text-align: right; + padding-right: 10px; +} + +.error-log .logtext.error { + color: black; + margin: 0 0 10px 0; +} + +.error-log .logtext { + margin-bottom: 10px; + border: solid #555 2px; + background-color: #eeece6; + padding: 5px; + font-size: small; +} + +.error-log .logtext * { + color: black; +} + +.error-log .logtext.error .loglevel { + color: white; + 
background-color: red; +} + +.error-log .logtext.warning .loglevel { + background-color: #ff6600; +} + +.error-log .logtext.info .loglevel { + background-color: #00bbff; +} + +.error-log .logtext.debug .loglevel { + background-color: #00ee00; +} + +.error-log .logtext .loglevel { + padding: 0 5px; + margin-right: 5px; + border: solid black 1px; +} +.error-log .logtext table { + margin: 8px 5px 2px 0; + font-family: monospace; +} + +.error-log .logtext table, +.error-log .logtext table th, +.error-log .logtext table td { + border: solid #aaa 1px; +} +.error-log .logtext table th, .error-log .logtext table td { + border: solid #aaa 1px; +} + +.error-log .logtext table .occ { + text-align: right; +} + +.error-log .logtext table .dotdotdot { + padding: 0; +} +.error-log .logtext table .dotdotdot a { + margin: 0; + display: block; + width: 100%; + height: 100%; + background-color: #e0e0e0; +} +.error-log .logtext table .dotdotdot a:hover { + background-color: #bbb; + text-decoration: none; +} + +.error-log .logtext .classification { + font-size: larger; + font-weight: bold; +} +.error-log .logtext .actual-text { + max-width: 600px; + overflow: hidden; +} +.error-log .logtext .occ { +} + .details { font-size: x-small; margin-bottom: 10px; @@ -1967,6 +2226,15 @@ form input[type=radio] {margin: 2px .5em 0 0; } .reported { background-color: #f6e69f } .suspicious { background-color: #f6e69f } .spam { background-color: #FA8072 } +.banned-user { + overflow: hidden; + opacity: .7; + filter:alpha(opacity=70); /* IE patch */ +} + +.banned-user .title { + text-decoration: line-through; +} .little { font-size: smaller } .gray { color: gray } @@ -2087,7 +2355,31 @@ ul#image-preview-list .description pre { padding: 5px; margin: 5px; float: left; -} +} + +.private-feeds.instructions .prefright { + line-height: 2em; +} + +.private-feeds.instructions .feedlink { + padding: 2px 5px; + font-weight: bold; + margin-right: 5px; + border: 1px solid #0000FF; + color: white; + padding-left: 22px; + background: #336699 none no-repeat scroll top left; +} + +.private-feeds.instructions .feedlink.rss-link { + background-image: url(/static/rss.png); +} + +.private-feeds.instructions .feedlink.json-link { + background-color: #DDDDDD; + background-image: url(/static/json.png); + color: black; +} /* Socialite */ .socialite.instructions ul { @@ -2744,20 +3036,20 @@ ul.tabmenu.formtab { color: #336699; } -.award-table { +.lined-table { margin: 5px; } -table.award-table { +table.lined-table { margin: 5px 3px; } -.award-table th, .award-table td { +.lined-table th, .lined-table td { border: solid #cdcdcd 1px; padding: 3px; } -.award-table th { +.lined-table th { text-align: center; font-weight: bold; } @@ -2782,7 +3074,6 @@ table.award-table { .sidecontentbox a.helplink { float: right; - font-size: x-small; margin-top: 4px; } @@ -3242,6 +3533,9 @@ dd { margin-left: 20px; } .icon-menu .reddit-moderators { background-image: url(/static/star.png); /* SPRITE */ } +.icon-menu .moderator-mail { + background-image: url(/static/mailgray.png); /* SPRITE */ +} .icon-menu .reddit-contributors { background-image: url(/static/pencil.png); /* SPRITE */ } @@ -3278,14 +3572,15 @@ dd { margin-left: 20px; } border: 1px solid gray; } -a.ip { +a.adminbox { border: solid 1px #eeeeee; color: #cdcdcd; font-family: monospace; - text-size: x-small; + text-align: center; + padding-right: 1px; } -a.ip:hover { +a.adminbox:hover { text-decoration: none; color: orangered; border: solid 1px orangered; @@ -3302,3 +3597,83 @@ a.ip:hover { font-weight: bold; } 
+.wide { + width: 100%; +} + +.centered { + text-align: center; + vertical-align: middle; +} + +.sr-ad-table .inherited { + background-color: #ddeeff; +} +.sr-ad-table .overridden { + background-color: #ffeedd; +} +.sr-ad-table .unused { + background-color: #eee; +} +.sr-ad-table .inherited .whence { + font-style: italic; +} +.sr-ad-table .overridden .whence { + font-weight: bold; +} +.sr-ad-table .details { + font-size: 150%; + padding: 10px; + vertical-align: top; +} +.sr-ad-table .details div { +} +.sr-ad-table .details .codename { + font-size: 150%; + margin-bottom: 20px; +} +.sr-ad-table .weight { + width: 4em; +} + +.ad-assign-table .warning { + font-weight: bold; + color: red; +} + +.usage-table .intersection { + color: #888; + font-family: monospace; + text-align: right; + border-left: none; + border-right: none; +} + +.usage-table .intersection span { + padding: 1px 3px 0 2px; +} + +.usage-table .empty.intersection { + text-align: center; + color: #ccc; +} + +.usage-table .elapsed.intersection { + color: black; +} + +.usage-table .count.intersection { + color: black; +} + +.usage-table .average.intersection { + color: black; + border-right: solid #cdcdcd 1px; +} + +.usage-table .empty.intersection, .usage-table .average.intersection { + padding-left: 0; + margin-left: 0; + border-right: solid #cdcdcd 1px; + padding-right: 5px; +} diff --git a/r2/r2/public/static/js/jquery.reddit.js b/r2/r2/public/static/js/jquery.reddit.js index 8f1b1fe24..4f9a37ea2 100644 --- a/r2/r2/public/static/js/jquery.reddit.js +++ b/r2/r2/public/static/js/jquery.reddit.js @@ -66,9 +66,9 @@ $.with_default = function(value, alt) { $.unsafe = function(text) { /* inverts websafe filtering of reddit app. */ if(typeof(text) == "string") { - text = text.replace(/&gt;/g, ">") - .replace(/&lt;/g, "<").replace(/&amp;/g, "&") - .replace(/&quot;/g, '"'); + text = text.replace(/&quot;/g, '"') + .replace(/&gt;/g, ">").replace(/&lt;/g, "<") + .replace(/&amp;/g, "&"); } return (text || ""); }; @@ -121,8 +121,12 @@ function handleResponse(action) { objs[0] = jQuery; $.map(r.jquery, function(q) { var old_i = q[0], new_i = q[1], op = q[2], args = q[3]; - for(var i = 0; args.length && i < args.length; i++) + if (typeof(args) == "string") { + args = $.unsafe(args); + } else { // assume array + for(var i = 0; args.length && i < args.length; i++) args[i] = $.unsafe(args[i]); + } if (op == "call") objs[new_i] = objs[old_i].apply(objs[old_i]._obj, args); else if (op == "attr") { @@ -220,7 +224,8 @@ rate_limit = function() { var default_rate_limit = 333; /* rate limit on a per-action basis (also in ms, 0 = don't rate limit) */ var rate_limits = {"vote": 333, "comment": 5000, - "ignore": 0, "ban": 0, "unban": 0}; + "ignore": 0, "ban": 0, "unban": 0, + "assignad": 0 }; var last_dates = {}; /* paranoia: copy global functions used to avoid tampering.
*/ @@ -483,9 +488,9 @@ $.insert_things = function(things, append) { var midcol = $(".midcol:visible:first").css("width"); var numcol = $(".rank:visible:first").css("width"); var s = $.listing(data.parent); - if(append) + if(append) s = s.append($.unsafe(data.content)).children(".thing:last"); - else + else s = s.prepend($.unsafe(data.content)).children(".thing:first"); s.find(".midcol").css("width", midcol); s.find(".rank").css("width", midcol); diff --git a/r2/r2/public/static/json.png b/r2/r2/public/static/json.png new file mode 100644 index 000000000..6f349fa85 Binary files /dev/null and b/r2/r2/public/static/json.png differ diff --git a/r2/r2/public/static/modmail.png b/r2/r2/public/static/modmail.png new file mode 100644 index 000000000..fd9291131 Binary files /dev/null and b/r2/r2/public/static/modmail.png differ diff --git a/r2/r2/public/static/modmailgray.png b/r2/r2/public/static/modmailgray.png new file mode 100644 index 000000000..5129cae2f Binary files /dev/null and b/r2/r2/public/static/modmailgray.png differ diff --git a/r2/r2/public/static/robots.txt b/r2/r2/public/static/robots.txt index a888ef1f2..cd6dbede2 100644 --- a/r2/r2/public/static/robots.txt +++ b/r2/r2/public/static/robots.txt @@ -1,6 +1,11 @@ +# 80legs +User-agent: 008 +Disallow: / + User-Agent: * Disallow: /goto Disallow: /*after= Disallow: /*before= Disallow: /domain/*t= +Disallow: /login Allow: / diff --git a/r2/r2/public/static/rss.png b/r2/r2/public/static/rss.png new file mode 100644 index 000000000..59c537511 Binary files /dev/null and b/r2/r2/public/static/rss.png differ diff --git a/r2/r2/templates/adminadassign.html b/r2/r2/templates/adminadassign.html new file mode 100644 index 000000000..c9cbf2cb8 --- /dev/null +++ b/r2/r2/templates/adminadassign.html @@ -0,0 +1,70 @@ +## The contents of this file are subject to the Common Public Attribution +## License Version 1.0. (the "License"); you may not use this file except in +## compliance with the License. You may obtain a copy of the License at +## http://code.reddit.com/LICENSE. The License is based on the Mozilla Public +## License Version 1.1, but Sections 14 and 15 have been added to cover use of +## software over a computer network and provide for limited attribution for the +## Original Developer. In addition, Exhibit A has been modified to be consistent +## with Exhibit B. +## +## Software distributed under the License is distributed on an "AS IS" basis, +## WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for +## the specific language governing rights and limitations under the License. +## +## The Original Code is Reddit. +## +## The Original Developer is the Initial Developer. The Initial Developer of +## the Original Code is CondeNet, Inc. +## +## All portions of the code written by CondeNet are Copyright (c) 2006-2010 +## CondeNet, Inc. All Rights Reserved. 
+################################################################################ + +<%namespace file="utils.html" import="error_field"/> + +<form action="/post/assignad" method="post" class="pretty-form medium-text" + onsubmit="return post_form(this, 'assignad');"> + + <input type="hidden" name="fullname" value="${thing.ad._fullname}" /> + + <table class="lined-table borderless"> + <tr> + <td> + <img src="${thing.ad.imgurl}"/> + </td> + <td> + <a href="/admin/ads/#${thing.ad.codename}"> + <h1 class="centered">${thing.ad.codename}</h1> + </a> + </td> + </tr> + <tr> + <td> + community + </td> + <td> + <input type="text" name="community" /><br/> + ${error_field("SUBREDDIT_REQUIRED", "community", "span")} + ${error_field("SUBREDDIT_NOEXIST", "community", "span")} + </td> + </tr> + <tr> + <td> + weight + </td> + <td> + <input type="text" name="weight" value="${thing.weight}" /><br/> + ${error_field("BAD_NUMBER", "weight", "span")} + </td> + </tr> + </table> + + <button class="btn" type="submit">assign</button> + + <span class="status"></span> + + <p> + <a href="/admin/ads">back to ads</a> + </p> +</form> + diff --git a/r2/r2/templates/adminads.html b/r2/r2/templates/adminads.html new file mode 100644 index 000000000..f5ae80d39 --- /dev/null +++ b/r2/r2/templates/adminads.html @@ -0,0 +1,112 @@ +## The contents of this file are subject to the Common Public Attribution +## License Version 1.0. (the "License"); you may not use this file except in +## compliance with the License. You may obtain a copy of the License at +## http://code.reddit.com/LICENSE. The License is based on the Mozilla Public +## License Version 1.1, but Sections 14 and 15 have been added to cover use of +## software over a computer network and provide for limited attribution for the +## Original Developer. In addition, Exhibit A has been modified to be consistent +## with Exhibit B. +## +## Software distributed under the License is distributed on an "AS IS" basis, +## WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for +## the specific language governing rights and limitations under the License. +## +## The Original Code is Reddit. +## +## The Original Developer is the Initial Developer. The Initial Developer of +## the Original Code is CondeNet, Inc. +## +## All portions of the code written by CondeNet are Copyright (c) 2006-2010 +## CondeNet, Inc. All Rights Reserved. 
+################################################################################ + +<%namespace file="utils.html" import="error_field"/> + +<%def name="adbuttons(codename, submit_link)"> + <ul class="flat-list buttons"> + %if codename != 'DART': + <li><a href="#" + onclick="$(this).parents('td').find('form').toggle(); return false;"> + edit</a></li> + <li><a href="${submit_link}">submit</a></li> + %endif + <li><a href="/admin/ads/${codename}/assign">assign_SR</a></li> + <li><a href="/admin/ads/${codename}/srs">view_SRs</a></li> + </ul> +</%def> + +<%def name="adedit(fullname, codename='', imgurl='', linkurl='')"> + <form action="/post/editad" method="post" class="pretty-form medium-text" + style="display:none" + onsubmit="return post_form(this, 'editad');" id="adedit-${fullname}"> + <input type="hidden" name="fullname" value="${fullname}" /> + + <table class="lined-table borderless"> + <tr> + <td>codename</td> + <td> + <input type="text" name="codename" value="${codename}" /> + ${error_field("NO_TEXT", "codename", "span")} + ${error_field("INVALID_OPTION", "codename", "span")} + </td> + </tr> + <tr> + <td>img url</td> + <td> + <input type="text" name="imgurl" value="${imgurl}" /> + ${error_field("NO_TEXT", "imgurl", "span")} + </td> + </tr> + <tr> + <td>link url</td> + <td> + <input type="text" name="linkurl" value="${linkurl}" /> + ${error_field("NO_TEXT", "linkurl", "span")} + </td> + </tr> + </table> + <button class="savebutton" type="submit">save</button> + <span class="status"></span> + </form> +</%def> + +<table class="lined-table"> + <tbody> + <tr> + <th>fn</th> + <th>cn</th> + <th>img</th> + <th>links & buttons</th> + </tr> + %for ad in thing.ads: + <tr> + <td>${ad._fullname}</td> + <td>${ad.codename}</td> + %if ad.codename == "DART": + <td class="centered"> + <img src="${ad.imgurl}"/> + </td> + <td class="entry"> + ${adbuttons(ad.codename, ad.submit_link())} + </td> + %else: + <td> + <a name="${ad.codename}" href="${ad.linkurl}"> + <img src="${ad.imgurl}"/> + </a> + </td> + <td class="entry"> + img: <a href="${ad.imgurl}">${ad.imgurl}</a><br/> + link: <a href="${ad.linkurl}">${ad.linkurl}</a><br/> + <br/> + ${adbuttons(ad.codename, ad.submit_link())} + ${adedit(ad._fullname, ad.codename, ad.imgurl, ad.linkurl)} + </td> + %endif + </tr> + %endfor + </tbody> +</table> +<button onclick="$('#adedit-NEW').show()">new ad</button> + +${adedit("NEW")} diff --git a/r2/r2/templates/adminadsrs.html b/r2/r2/templates/adminadsrs.html new file mode 100644 index 000000000..b7e77bbcc --- /dev/null +++ b/r2/r2/templates/adminadsrs.html @@ -0,0 +1,71 @@ +## The contents of this file are subject to the Common Public Attribution +## License Version 1.0. (the "License"); you may not use this file except in +## compliance with the License. You may obtain a copy of the License at +## http://code.reddit.com/LICENSE. The License is based on the Mozilla Public +## License Version 1.1, but Sections 14 and 15 have been added to cover use of +## software over a computer network and provide for limited attribution for the +## Original Developer. In addition, Exhibit A has been modified to be consistent +## with Exhibit B. +## +## Software distributed under the License is distributed on an "AS IS" basis, +## WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for +## the specific language governing rights and limitations under the License. +## +## The Original Code is Reddit. +## +## The Original Developer is the Initial Developer. 
The Initial Developer of +## the Original Code is CondeNet, Inc. +## +## All portions of the code written by CondeNet are Copyright (c) 2006-2010 +## CondeNet, Inc. All Rights Reserved. +################################################################################ + +<%namespace file="utils.html" import="percentage"/> + +<%def name="adsrline(adsr)"> + <tr> + <td> + <a href="/r/${adsr._thing2.name}/about/ads#${thing.ad.codename}"> + ${adsr._thing2.name} + </a> + </td> + <td style="text-align: right"> + ${adsr.weight} + </td> + <td style="text-align: right"> + ${percentage(adsr.weight, thing.sr_totals[adsr._thing2.name])} + </td> + </tr> +</%def> + +<div style="float: right"> + <a href="${thing.ad.linkurl}"> + <img src="${thing.ad.imgurl}"/> + </a> +</div> + +<a href="/admin/ads/#${thing.ad.codename}"> + <h1>${thing.ad.codename}</h1> +</a> + +<table class="lined-table"> + <tr> + <th> + community + </th> + <th> + wt + </th> + <th> + pct + </th> + </tr> + %for adsr in thing.adsrs: + ${adsrline(adsr)} + %endfor +</table> + +<p> + <a href="/admin/ads">back to ads</a> +</p> + diff --git a/r2/r2/templates/adminawardgive.html b/r2/r2/templates/adminawardgive.html index 7c9c3c059..acffedbc5 100644 --- a/r2/r2/templates/adminawardgive.html +++ b/r2/r2/templates/adminawardgive.html @@ -27,7 +27,7 @@ <input type="hidden" name="fullname" value="${thing.award._fullname}" /> - <table class="award-table borderless"> + <table class="lined-table borderless"> <tr> <td> <img src="${thing.award.imgurl % 40}"/> diff --git a/r2/r2/templates/adminawards.html b/r2/r2/templates/adminawards.html index b7f1c73ca..2a205fb97 100644 --- a/r2/r2/templates/adminawards.html +++ b/r2/r2/templates/adminawards.html @@ -46,7 +46,7 @@ onsubmit="return post_form(this, 'editaward');" id="awardedit-${fullname}"> <input type="hidden" name="fullname" value="${fullname}" /> - <table class="award-table borderless"> + <table class="lined-table borderless"> <tr> <td>codename</td> <td> @@ -84,7 +84,7 @@ </form> </%def> -<table class="award-table"> +<table class="lined-table"> <tbody> <tr> <th>fn</th> diff --git a/r2/r2/templates/adminawardwinners.html b/r2/r2/templates/adminawardwinners.html index 5902c9738..0f93a6766 100644 --- a/r2/r2/templates/adminawardwinners.html +++ b/r2/r2/templates/adminawardwinners.html @@ -39,7 +39,7 @@ </tr> </%def> -<table class="award-table"> +<table class="lined-table"> <tr> <td> <img src="${thing.award.imgurl % 40}"/> diff --git a/r2/r2/templates/adminerrorlog.html b/r2/r2/templates/adminerrorlog.html new file mode 100644 index 000000000..bfe35ee59 --- /dev/null +++ b/r2/r2/templates/adminerrorlog.html @@ -0,0 +1,211 @@ +## The contents of this file are subject to the Common Public Attribution +## License Version 1.0. (the "License"); you may not use this file except in +## compliance with the License. You may obtain a copy of the License at +## http://code.reddit.com/LICENSE. The License is based on the Mozilla Public +## License Version 1.1, but Sections 14 and 15 have been added to cover use of +## software over a computer network and provide for limited attribution for the +## Original Developer. In addition, Exhibit A has been modified to be consistent +## with Exhibit B. +## +## Software distributed under the License is distributed on an "AS IS" basis, +## WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for +## the specific language governing rights and limitations under the License. +## +## The Original Code is Reddit. 
+## +## The Original Developer is the Initial Developer. The Initial Developer of +## the Original Code is CondeNet, Inc. +## +## All portions of the code written by CondeNet are Copyright (c) 2006-2010 +## CondeNet, Inc. All Rights Reserved. +################################################################################ + +<%namespace file="utils.html" import="error_field"/> + +<%def name="status_radio(val, datehex, current)"> + <input id="status-${datehex}-${val}" + class="nomargin" type="radio" value="${val}" name="status" + ${"checked='checked'" if current == val else ''} /> + <label class="${val}" for="status-${datehex}-${val}">${val}</label> +</%def> + +<div class="error-logs"> + %for date, groupings in thing.date_summaries: + + <div class="error-log"> + <a class="date" href="#" + onclick="$(this).parent().find('.rest').toggle();return false"> + ${date} + </a> + + <div class="rest"> + %for g in groupings: + %if g[0] > 0: + ${exception(date, *g)} + %else: + ${text(date, *g)} + %endif + %endfor + </div> + </div> + %endfor +</div> + +<%def name="exception(date, frequency, hexkey, d)"> + <% datehex = "-".join([date.replace("/",""), hexkey]) %> + + <div class="exception ${thing.statuses[hexkey]} rounded"> + <a class="frequency hover" href="#" + onclick="$(this).parent().find('.occurrences').toggle();return false"> + ${frequency} occurrences + </a> + + <span class="${thing.statuses[hexkey]}"> + ${thing.statuses[hexkey]}: + </span> + + <a class="nickname" name="${datehex}" href="#${datehex}" + onclick="$(this).parent().find('.edit-area').toggle();return false"> + ${thing.nicknames[hexkey]} + </a> + + <br/> + + <div class="edit-area" style="display: none"> + <form action="/post/edit_error" method="post" + onsubmit="return post_form(this, 'edit_error');" + id="nickname-${hexkey}"> + + <input type="hidden" name="hexkey" value="${hexkey}" /> + + <table> + <tr> + <th> + nickname: + </th> + <td> + <input type="text" value="${thing.nicknames[hexkey]}" name="nickname"/> + </td> + </tr> + <tr> + <th> + status: + </th> + <td> + ${status_radio("new" , datehex, thing.statuses[hexkey])} + ${status_radio("severe", datehex, thing.statuses[hexkey])} + ${status_radio("interesting", datehex, thing.statuses[hexkey])} + ${status_radio("normal", datehex, thing.statuses[hexkey])} + ${status_radio("fixed" , datehex, thing.statuses[hexkey])} + </td> + </tr> + <tr> + <td> + <button class="save-button" type="submit"> + save + </button> + </td> + <td> + ${error_field("NO_TEXT", "codename", "span")} + <span class="status"></span> + </td> + </tr> + </table> + </form> + </div> + + <a class="hover" href="#" + onclick="$(this).parent().find('.stacktrace').toggle();return false"> + + <span class="exception-name"> + ${d['exception']} + </span> + + <span class="hexkey">(${hexkey})</span> + + </a> + + <div class="occurrences" style="display: none"> + %for o in d['occurrences']: + <span class="occurrence"> + ${o} + </span> + + %endfor + </div> + + <table class="stacktrace lined-table wide" style="display: none"> + <thead> + <tr> + <th>file</th> + <th>line#</th> + <th>func</th> + <th>code</th> + </tr> + </thead> + <tbody> + %for row in d['traceback']: + <tr> + %for i, col in enumerate(row): + <td class="col-${i}"> + %if i == 2: + ${col}() + %else: + ${col} + %endif + </td> + %endfor + </tr> + %endfor + </tbody> + </table> + </div> +</%def> + +<%def name="textocc(text, occ, hide)"> + %if hide: + <tr class="extra-occs" style="display: none"> + %else: + <tr> + %endif + <td class="actual-text"> + ${text} + </td> + <td 
class="occ"> + ${occ} + </td> + </tr> +</%def> + +<%def name="text(date, sort_order, level, classification, textoccs)"> +<div class="logtext ${level}"> + <span class="loglevel rounded"> + ${level}: + </span> + <span class="classification"> + ${classification} + </span> + <table class="lined-table wide"> + %for i, (text, occ) in enumerate (textoccs): + %if i < 3 or i >= len(textoccs) - 3: + ${textocc(text, occ, False)} + %elif i == 3: + <tr class="extra-occs"> + <td colspan="2" class="dotdotdot"> + <a href="#" + onclick="$(this).closest('table').find('.extra-occs').toggle();return false"> + <b>...</b> + + (${len(textoccs) - 6} more lines) + </a> + </td> + </tr> + + ${textocc(text, occ, True)} + %else: + ${textocc(text, occ, True)} + %endif + %endfor + </table> +</div> +</%def> diff --git a/r2/r2/templates/adminusage.html b/r2/r2/templates/adminusage.html new file mode 100644 index 000000000..b7e8827e8 --- /dev/null +++ b/r2/r2/templates/adminusage.html @@ -0,0 +1,78 @@ +## The contents of this file are subject to the Common Public Attribution +## License Version 1.0. (the "License"); you may not use this file except in +## compliance with the License. You may obtain a copy of the License at +## http://code.reddit.com/LICENSE. The License is based on the Mozilla Public +## License Version 1.1, but Sections 14 and 15 have been added to cover use of +## software over a computer network and provide for limited attribution for the +## Original Developer. In addition, Exhibit A has been modified to be consistent +## with Exhibit B. +## +## Software distributed under the License is distributed on an "AS IS" basis, +## WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for +## the specific language governing rights and limitations under the License. +## +## The Original Code is Reddit. +## +## The Original Developer is the Initial Developer. The Initial Developer of +## the Original Code is CondeNet, Inc. +## +## All portions of the code written by CondeNet are Copyright (c) 2006-2010 +## CondeNet, Inc. All Rights Reserved. 
+################################################################################ + +<%def name="intersection(d, hidden)"> + %if d is None: + <td class="empty intersection" colspan="5" + %if hidden: + style="display:none" + %endif + > + <span>—</span> + </td> + %else: + %for cls in ("elapsed", "slash", "count", "equals", "average"): + <td class="${cls} intersection" + %if hidden: + style="display:none" + %endif + > + %if cls == "slash": + / + %elif cls == "equals": + = + %else: + <span class="${d['classes'].get(cls, 'load0')}"> + %if cls == 'count': + ${d[cls]} + %else: + ${"%0.2f" % d[cls]} + %endif + </span> + %endif + </td> + %endfor + %endif +</%def> + +<table class="usage-table lined-table"> + <tr> + <th>action</th> + %for label, hidden in thing.labels: + <th colspan="5" + %if hidden: + style="display:none" + %endif + >${label}</th> + %endfor + </tr> + +%for action in thing.action_order: + <tr> + <td>${action}</td> + %for label, hidden in thing.labels: + ${intersection(thing.actions[action].get(label), hidden)} + %endfor + </tr> +%endfor + +</table> diff --git a/r2/r2/templates/ads.html b/r2/r2/templates/ads.html index 28d3651e3..8b1796ef5 100644 --- a/r2/r2/templates/ads.html +++ b/r2/r2/templates/ads.html @@ -25,20 +25,12 @@ import random %> -%if c.site.ad_type == "custom" or c.site.ad_file != c.site._defaults.get("ad_file"): - <iframe id="ad-frame" frameborder="0" scrolling="no" name="ad-frame" - src="${c.site.ad_file}"> - </iframe> -%elif c.site.ad_type == "basic": - <% name = c.site.name if not c.default_sr else '' %> - <iframe id="ad-frame" frameborder="0" scrolling="no" name="ad-frame" - src="/ads/${name}"> - </iframe> -%else: - <iframe id="ad-frame" frameborder="0" scrolling="no" name="ad-frame" - src="/ads/"> - </iframe> -%endif +<% name = "r/%s/" % c.site.name if not c.default_sr else '' %> + +<iframe id="ad-frame" frameborder="0" scrolling="no" name="ad-frame" + src="${g.ad_domain}/ads/${name}"> +</iframe> + <script type="text/javascript"> <% tracker_url = AdframeInfo.gen_url(fullname = "adblock", ip = request.ip) diff --git a/r2/r2/templates/buttonembed.js b/r2/r2/templates/buttonembed.js index 489c633f1..e45c1ec0a 100644 --- a/r2/r2/templates/buttonembed.js +++ b/r2/r2/templates/buttonembed.js @@ -19,16 +19,8 @@ ## All portions of the code written by CondeNet are Copyright (c) 2006-2010 ## CondeNet, Inc. All Rights Reserved. ################################################################################ -<%! 
- from r2.lib.template_helpers import get_domain - %> - -<% - domain = get_domain() - arg = "cnameframe=1&" if c.cname else "" -%> (function() { -var write_string="<iframe src=\"http://${domain}/button_content?${arg}t=${thing.button}&width=${thing.width}&url=${thing.url or ""}"; +var write_string="<iframe src=\"http://${thing.domain}/button_content?${thing.arg}t=${thing.button}&width=${thing.width}&url=${thing.url or ""}"; %if not thing.url: if (window.reddit_url) { write_string += encodeURIComponent(reddit_url); } else { write_string += encodeURIComponent('${thing.referer}');} diff --git a/r2/r2/templates/commentspanel.xml b/r2/r2/templates/commentspanel.xml new file mode 100644 index 000000000..e69de29bb diff --git a/r2/r2/templates/comscore.html b/r2/r2/templates/comscore.html new file mode 100644 index 000000000..c0237f55b --- /dev/null +++ b/r2/r2/templates/comscore.html @@ -0,0 +1,27 @@ +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> +<html> + <head> + <title> + + + + + + + + diff --git a/r2/r2/templates/createsubreddit.html b/r2/r2/templates/createsubreddit.html index f911d64be..7f13ae92c 100644 --- a/r2/r2/templates/createsubreddit.html +++ b/r2/r2/templates/createsubreddit.html @@ -163,6 +163,14 @@ function update_title(elem) { ${_("show thumbnail images of content")} +
  • + + +
  • http://www.pell.portland.or.us/~orc/Code/discount

    @@ -233,57 +241,6 @@ function update_title(elem) { %if c.user_is_admin: - <% - ad_type = "default" - if thing.site.ad_type == "custom" or thing.site.ad_file != thing.site._defaults.get("ad_file"): - ad_type = "custom" - elif thing.site.ad_type == "basic": - ad_type = "basic" - %> - <%utils:line_field title="${_('ad frame')}"> -
    - - - - - - - - - - - - - -
    - - - - ${_("Use the same ad frame as on the front page.")} - -
    - - - - ${_("Use the default customized (subreddit-specific) DART ad frame")} -
    - - - - ${_("specify the location of the ad frame:")} - - -
    -
    - - <%utils:line_field title="${_('sponsorship')}">
    • diff --git a/r2/r2/templates/dart_ad.html b/r2/r2/templates/dart_ad.html index 800a6de78..dd22bd1a1 100644 --- a/r2/r2/templates/dart_ad.html +++ b/r2/r2/templates/dart_ad.html @@ -31,7 +31,7 @@ diff --git a/r2/r2/templates/errorpage.xml b/r2/r2/templates/errorpage.xml new file mode 100644 index 000000000..e69de29bb diff --git a/r2/r2/templates/feedbackblurb.html b/r2/r2/templates/feedbackblurb.html index 5d10e0cfe..aa5155055 100644 --- a/r2/r2/templates/feedbackblurb.html +++ b/r2/r2/templates/feedbackblurb.html @@ -34,7 +34,7 @@
      Are your submissions not showing up? Subreddit marked as spam? Is the spam filter acting up?
      - Send a private message to an admin. (We really do want to help) + Send a private message to the admins. (We really do want to help)
      Got a feature request?
      Post it to our development community so the admins can see it.
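Before the moderator-mail template changes that follow, it may help to see how the reworked Message._new (earlier in this patch) routes a message sent to a subreddit: a Subreddit passed as the recipient is swapped into sr, the message is stored with an sr_id, and a ModeratorInbox relation is created alongside any regular Inbox relation. A rough sketch, with the import path and the lookups purely illustrative:

    from r2.models import Account, Subreddit, Message   # import path assumed

    author = Account._by_name('some_moderator')          # illustrative lookup
    sr = Subreddit._by_name('example')                   # illustrative lookup

    # The recipient may be a Subreddit; _new detects this and swaps it into
    # `sr`, so the message gets an sr_id instead of (or in addition to) a to_id.
    m, inbox_rels = Message._new(author, sr, 'mod mail subject', 'body text',
                                 ip='127.0.0.1')

    assert m.sr_id == sr._id
    # inbox_rels is now a list; for subreddit messages it holds the
    # ModeratorInbox relation, plus an Inbox relation whenever a specific
    # recipient also needs to be notified.
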
      diff --git a/r2/r2/templates/frame.xml b/r2/r2/templates/frame.xml new file mode 100644 index 000000000..e69de29bb diff --git a/r2/r2/templates/housead.html b/r2/r2/templates/housead.html new file mode 100644 index 000000000..b3d20e4e9 --- /dev/null +++ b/r2/r2/templates/housead.html @@ -0,0 +1,36 @@ + + + + + + + + + + + diff --git a/r2/r2/templates/link.html b/r2/r2/templates/link.html index da1e37fc0..470db3e98 100644 --- a/r2/r2/templates/link.html +++ b/r2/r2/templates/link.html @@ -33,13 +33,7 @@ <%def name="numcol()"> <% num = thing.num %> - %if thing.top_link: - - %endif ${thing.num} - %if thing.top_link: - - %endif @@ -79,7 +73,7 @@ <%def name="entry()">

      <%call expr="make_link('title', 'title')"> - ${thing.title} + ${thing.title} ${self.domain()} @@ -91,7 +85,7 @@ ${thing.link_child.css_style}" onclick="expando_child(this)"> %endif - +

      ${self.tagline()}

      @@ -168,15 +162,19 @@ ${parent.thing_css_class(what)} ${"over18" if thing.over_18 else ""} <%def name="domain()"> (${plain_link(thing.domain, thing.domain_path, _sr_path = False)}) + %if c.user_is_admin: + + d + %endif <%def name="tagline()"> - <% + <% if thing.different_sr: - taglinetext = _("submitted %(when)s ago by %(author)s to %(reddit)s") + taglinetext = _("submitted %(when)s ago by %(author)s to %(reddit)s") else: - taglinetext = _("submitted %(when)s ago by %(author)s") + taglinetext = _("submitted %(when)s ago by %(author)s") taglinetext = taglinetext.replace(" ", " ") %> @@ -188,6 +186,10 @@ ${parent.thing_css_class(what)} ${"over18" if thing.over_18 else ""} ${unsafe(taglinetext % dict(reddit = self.subreddit(), when = thing.timesince, author= WrappedUser(thing.author, thing.attribs, thing).render()))} + + %if c.user_is_admin and thing._deleted: + [link deleted] + %endif <%def name="child()"> diff --git a/r2/r2/templates/link.xml b/r2/r2/templates/link.xml index 9721f88f0..10b790c08 100644 --- a/r2/r2/templates/link.xml +++ b/r2/r2/templates/link.xml @@ -26,13 +26,17 @@ from r2.models import FakeSubreddit %> <% - url = add_sr(thing.permalink, force_hostname = True) + permalink = add_sr(thing.permalink, force_hostname = True) + if thing.mousedown_url: + url = thing.mousedown_url + else: + url = permalink use_thumbs = thing.thumbnail and not request.GET.has_key("nothumbs") %> ${thing.title} ${url} - ${url} + ${permalink} ${thing._date.strftime('%a, %d %b %Y %H:%M:%S %z')} ${thing._date.isoformat()} @@ -57,7 +61,7 @@ [link] ${description()} - <a href="${url}">[${thing.comment_label}]</a> + <a href="${permalink}">[${thing.comment_label}]</a> %if use_thumbs: </td></tr></table> %endif diff --git a/r2/r2/templates/listing.html b/r2/r2/templates/listing.html index 95359d51e..32ee86e69 100644 --- a/r2/r2/templates/listing.html +++ b/r2/r2/templates/listing.html @@ -19,7 +19,9 @@ ## All portions of the code written by CondeNet are Copyright (c) 2006-2010 ## CondeNet, Inc. All Rights Reserved. ################################################################################ - +<%! + from r2.models import Sub + %> <%namespace file="utils.html" import="plain_link" /> <% @@ -35,13 +37,13 @@ %if thing.nextprev and (thing.prev or thing.next):

      ${_("view more:")} %if thing.prev: - ${plain_link(_("prev"), thing.prev, rel="nofollow,prev")} + ${plain_link(_("prev"), thing.prev, _sr_path = (c.site != Sub), rel="nofollow,prev")} %endif %if thing.prev and thing.next: | %endif %if thing.next: - ${plain_link(_("next"), thing.next, rel="nofollow,prev")} + ${plain_link(_("next"), thing.next, _sr_path = (c.site != Sub), rel="nofollow,prev")} %endif

      %endif diff --git a/r2/r2/templates/login.xml b/r2/r2/templates/login.xml new file mode 100644 index 000000000..e69de29bb diff --git a/r2/r2/templates/message.html b/r2/r2/templates/message.html index ab9d14c25..e9471d498 100644 --- a/r2/r2/templates/message.html +++ b/r2/r2/templates/message.html @@ -52,7 +52,7 @@ ${parent.thing_css_class(what)} ${"new" if thing.new else ""} ${"was-comment" if <% taglinetext = '' - if thing.to_id == c.user._id: + if thing.to_id == c.user._id or thing.to_id is None: taglinetext = _("from %(author)s sent %(when)s ago") elif thing.author_id == c.user._id: taglinetext = _("to %(dest)s sent %(when)s ago") @@ -61,10 +61,15 @@ ${parent.thing_css_class(what)} ${"new" if thing.new else ""} ${"was-comment" if taglinetext = taglinetext.replace(' ', ' ') author = WrappedUser(thing.author, thing.attribs, thing).render() + if thing.sr_id: + updated_author = _("%(author)s via %(subreddit)s").replace(' ', ' ') + subreddit = '%s' % (thing.subreddit.path, + thing.subreddit.name) + author = updated_author % dict(author = author, subreddit = subreddit) %> ${unsafe(taglinetext % dict(when = thing.timesince, author= u"%s" % author, - dest = u"%s" % thing.to.name))} + dest = u"%s" % (thing.to.name if thing.to else "")))} %if c.user_is_admin: ${self.admintagline()} @@ -74,12 +79,20 @@ ${parent.thing_css_class(what)} ${"new" if thing.new else ""} ${"was-comment" if <%def name="subject()">

      %if getattr(thing, "is_parent", False): - - <% - corr = thing.author if thing.recipient else thing.to - %> - ${WrappedUser(corr)} - + %if thing.sr_id: + + + #${thing.subreddit.name} + + + %else: + + <% + corr = thing.author if thing.recipient else thing.to + %> + ${WrappedUser(corr)} + + %endif %endif ${thing.subject} %if thing.was_comment: diff --git a/r2/r2/templates/message.xml b/r2/r2/templates/message.xml index 920be6446..ecd6f062b 100644 --- a/r2/r2/templates/message.xml +++ b/r2/r2/templates/message.xml @@ -22,24 +22,50 @@ <%! from r2.lib.filters import safemarkdown + from r2.lib.template_helpers import add_sr %> +<% + permalink = add_sr(thing.permalink, force_hostname = True) + %> - ${thing._fullname} + ${permalink} + ${permalink} <% taglinetext = '' - if c.msg_location == "inbox": + if thing.to_id == c.user._id or thing.to_id is None: taglinetext = _("from %(author)s sent %(when)s ago") - elif c.msg_location == "sent" or not c.msg_location: + elif thing.author_id == c.user._id: taglinetext = _("to %(dest)s sent %(when)s ago") - taglinetext = taglinetext.replace(' ', ' ') - %> - ${unsafe(taglinetext % dict(when = thing.timesince, - author= u"%s" % thing.author.name, - dest = u"%s" % thing.to.name))} + else: + taglinetext = _("to %(dest)s from %(author)s sent %(when)s ago") + author = thing.author.name + if thing.sr_id and not (getattr(thing, "is_child", False) or + getattr(thing, "is_parent", False)): + updated_author = _("%(author)s via %(subreddit)s") + subreddit = thing.subreddit.name + author = updated_author % dict(author = author, subreddit = subreddit) + %> + ${thing.subject} : ${unsafe(taglinetext % dict(when = thing.timesince, + author= u"%s" % author, + dest = u"%s" % thing.to.name if thing.to else ""))} + ${thing._date.strftime('%a, %d %b %Y %H:%M:%S %z')} ${thing._date.isoformat()}-0700 - ${safemarkdown(thing.body)} + + ${description()} + + +<%def name="description()" filter="h"> + ${unsafe(safemarkdown(thing.body))} +

      + %if thing.was_comment: + [context] + %else: + [full conversation] + %endif +
      + diff --git a/r2/r2/templates/messagecompose.html b/r2/r2/templates/messagecompose.html index 846f5cf74..2a5de16e9 100644 --- a/r2/r2/templates/messagecompose.html +++ b/r2/r2/templates/messagecompose.html @@ -48,11 +48,13 @@ function admincheck(elem) {
      - <%utils:round_field title="${_('to')}", description="${_('(username)')}"> + <%utils:round_field title="${_('to')}", + description="${_('(username, or # followed by the reddit name)')}"> ${error_field("NO_USER", "to")} ${error_field("USER_DOESNT_EXIST", "to")} + ${error_field("SUBREDDIT_NOEXIST", "to")}
      @@ -95,7 +97,7 @@ function admincheck(elem) { Also:

        -
      • Please don't send out a mass-message to multiple admins.
      • +
      • If you'd like to message the admins, send your message to the admin message list.
      • If you think your posts are being caught in the spam filter, please write to diff --git a/r2/r2/templates/messagecompose.xml b/r2/r2/templates/messagecompose.xml new file mode 100644 index 000000000..e69de29bb diff --git a/r2/r2/templates/preffeeds.html b/r2/r2/templates/preffeeds.html new file mode 100644 index 000000000..7f42ab9c0 --- /dev/null +++ b/r2/r2/templates/preffeeds.html @@ -0,0 +1,105 @@ +## The contents of this file are subject to the Common Public Attribution +## License Version 1.0. (the "License"); you may not use this file except in +## compliance with the License. You may obtain a copy of the License at +## http://code.reddit.com/LICENSE. The License is based on the Mozilla Public +## License Version 1.1, but Sections 14 and 15 have been added to cover use of +## software over a computer network and provide for limited attribution for the +## Original Developer. In addition, Exhibit A has been modified to be consistent +## with Exhibit B. +## +## Software distributed under the License is distributed on an "AS IS" basis, +## WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for +## the specific language governing rights and limitations under the License. +## +## The Original Code is Reddit. +## +## The Original Developer is the Initial Developer. The Initial Developer of +## the Original Code is CondeNet, Inc. +## +## All portions of the code written by CondeNet are Copyright (c) 2006-2010 +## CondeNet, Inc. All Rights Reserved. +################################################################################ + +<%! + from r2.models import make_feedurl + from r2.lib.template_helpers import get_domain + from r2.lib.filters import safemarkdown +%> +
        +

        ${_("Private RSS feeds")}

        + +${unsafe(safemarkdown(_("On this page are links to private RSS feeds so that you can get listings of your content (personalized front page, message panel, saved listing, etc.) without having to deal with cookies or other auth.")))} +${unsafe(safemarkdown(_("Keep in mind that these urls are intended to be private, so **share at your own risk.**")))} +${unsafe(safemarkdown(_("All feeds are invalidated if you change your password, however.")))} + +<%def name="feedbuttons(path)"> +<% + domain = get_domain(subreddit = False) + %> + + RSS + + + JSON + + + + + + + + + + + + + + + + + %if c.show_mod_mail: + + + + + %endif +
        private listings + <%self:feedbuttons path="/"> + ${_("your front page")} +
        + <%self:feedbuttons path="/saved"> + ${_("your saved links")} +
        private profile pages + <%self:feedbuttons path="/user/${c.user.name}/liked"> + ${_("links you've liked")} +
        + <%self:feedbuttons path="/user/${c.user.name}/disliked"> + ${_("links you've disliked")} +
        + <%self:feedbuttons path="/user/${c.user.name}/hidden"> + ${_("links you've hidden")} +
        your inbox + <%self:feedbuttons path="/message/inbox/"> + ${_("everything")} +
        + <%self:feedbuttons path="/message/unread/"> + ${_("unread messages")} +
        + <%self:feedbuttons path="/message/messages/"> + ${_("messages only")} +
        + <%self:feedbuttons path="/message/comments/"> + ${_("comment replies only")} +
        + <%self:feedbuttons path="/message/selfreply"> + ${_("self-post replies only")} +
        your moderator inbox + <%self:feedbuttons path="/message/moderator/inbox/"> + ${_("everything")} +
        + <%self:feedbuttons path="/message/moderator/unread/"> + ${_("unread messages")} +
        +
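The feed rows above presumably link through make_feedurl, which this template imports from r2.models and which reddit.html later in this patch applies to the page's RSS link when extension_handling is "private": it rewrites a feed URL so the URL itself authenticates the user instead of a session cookie. A small sketch of the idea; the private_feed_url() helper and the exact URL construction are assumptions for illustration, only the make_feedurl(user, url) call shape comes from this patch:

    from pylons import g, c
    from r2.models import make_feedurl   # imported the same way as above

    def private_feed_url(user, path, ext='rss'):
        # illustrative helper: build an absolute listing URL, then let
        # make_feedurl turn it into a per-user private feed URL
        url = 'http://%s%s.%s' % (g.domain, path, ext)
        return make_feedurl(user, url)

    # e.g. private_feed_url(c.user, '/message/moderator/inbox/')
    # The resulting URL keeps working until the user changes their password,
    # which (per the blurb above) invalidates all private feeds.
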
        diff --git a/r2/r2/templates/prefoptions.html b/r2/r2/templates/prefoptions.html index ca059d445..f2f19da3b 100644 --- a/r2/r2/templates/prefoptions.html +++ b/r2/r2/templates/prefoptions.html @@ -218,6 +218,10 @@ ${checkbox(_("make safe(r) for work."), "no_profanity", disabled = not c.user.pref_over_18, disabled_text = "(requires over 18)")}
        ${checkbox(_("label posts that are not safe for work (NSFW)"), "label_nsfw", disabled = c.user.pref_no_profanity, disabled_text = "(requires not 'safer for work' mode)")} +
        + ${checkbox(_("enable private RSS feeds"), "private_feeds")} + + ${_("(available from the 'RSS feed' tab in prefs)")} ${_("privacy options")} diff --git a/r2/r2/templates/printable.html b/r2/r2/templates/printable.html index d56571812..a89e134e7 100644 --- a/r2/r2/templates/printable.html +++ b/r2/r2/templates/printable.html @@ -65,6 +65,8 @@ thing id-${what._fullname} if thing.show_spam: rowclass = thing.rowstyle + " spam" + if thing.show_spam == "author": + rowclass += " banned-user" elif thing.show_reports: rowclass = thing.rowstyle + " reported" else: diff --git a/r2/r2/templates/printablebuttons.html b/r2/r2/templates/printablebuttons.html index d2db806e1..90b734739 100644 --- a/r2/r2/templates/printablebuttons.html +++ b/r2/r2/templates/printablebuttons.html @@ -188,6 +188,11 @@
      • ${self.bylink_button(_("permalink"), thing.permalink)}
      • + %if c.profilepage: +
      • + ${self.bylink_button(_("context"), thing.permalink + "?context=3")} +
      • + %endif %if thing.deleted: %if thing.parent_permalink and not thing.profilepage:
      • diff --git a/r2/r2/templates/reddit.html b/r2/r2/templates/reddit.html index 50b0f4b1b..802eaa991 100644 --- a/r2/r2/templates/reddit.html +++ b/r2/r2/templates/reddit.html @@ -26,6 +26,7 @@ from r2.lib import tracking from pylons import request from r2.lib.strings import strings + from r2.models import make_feedurl, Sub %> <%namespace file="login.html" import="login_panel, login_form"/> <%namespace file="framebuster.html" import="framebuster"/> @@ -74,8 +75,13 @@ %if thing.extension_handling: + <% + rss= add_sr(join_urls(request.path,'.rss')) + if thing.extension_handling == "private": + rss = make_feedurl(c.user, rss) + %> + href="${rss}" /> %endif