diff --git a/CHANGELOG.md b/CHANGELOG.md
index 79556c15..01e9909a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,7 +1,17 @@
# Changelog
+## v0.6.2
+Released 26 May 2024
+
+Highlights:
+* Added soulseek support
+* Added bandcamp support
+* Changes and dependency updates to work with Python >= 3.12
+
+The full list of commits can be found [here](https://github.com/rembo10/headphones/compare/v0.6.1...v0.6.2).
+
## v0.6.1
-Released 26 November 2023
+Released 26 November 2023
Highlights:
* Dependency updates to work with > Python 3.11
diff --git a/data/interfaces/default/config.html b/data/interfaces/default/config.html
index 9da1a1df..77d3f27c 100644
--- a/data/interfaces/default/config.html
+++ b/data/interfaces/default/config.html
@@ -310,6 +310,16 @@
+
+
+
+
@@ -579,6 +620,19 @@
+
diff --git a/data/interfaces/default/history.html b/data/interfaces/default/history.html
index c76875e1..a93c5157 100644
--- a/data/interfaces/default/history.html
+++ b/data/interfaces/default/history.html
@@ -56,6 +56,8 @@
fileid = 'torrent'
if item['URL'].find('codeshy') != -1:
fileid = 'nzb'
+ if item['URL'].find('bandcamp') != -1:
+ fileid = 'bandcamp'
folder = 'Folder: ' + item['FolderName']
diff --git a/headphones/bandcamp.py b/headphones/bandcamp.py
new file mode 100644
index 00000000..ed78276f
--- /dev/null
+++ b/headphones/bandcamp.py
@@ -0,0 +1,166 @@
+# This file is part of Headphones.
+#
+# Headphones is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Headphones is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Headphones. If not, see
+
+import headphones
+import json
+import os
+import re
+
+from headphones import logger, helpers, metadata, request
+from headphones.common import USER_AGENT
+from headphones.types import Result
+
+from mediafile import MediaFile, UnreadableFileError
+from bs4 import BeautifulSoup
+from bs4 import FeatureNotFound
+
+
+def search(album, albumlength=None, page=1, resultlist=None):
+ dic = {'...': '', ' & ': ' ', ' = ': ' ', '?': '', '$': 's', ' + ': ' ',
+ '"': '', ',': '', '*': '', '.': '', ':': ''}
+ if resultlist is None:
+ resultlist = []
+
+ cleanalbum = helpers.latinToAscii(
+ helpers.replace_all(album['AlbumTitle'], dic)
+ ).strip()
+ cleanartist = helpers.latinToAscii(
+ helpers.replace_all(album['ArtistName'], dic)
+ ).strip()
+
+ headers = {'User-Agent': USER_AGENT}
+ params = {
+ "page": page,
+ "q": cleanalbum,
+ }
+ logger.info("Looking up https://bandcamp.com/search with {}".format(
+ params))
+ content = request.request_content(
+ url='https://bandcamp.com/search',
+ params=params,
+ headers=headers
+ ).decode('utf8')
+ try:
+ soup = BeautifulSoup(content, "html5lib")
+ except FeatureNotFound:
+ soup = BeautifulSoup(content, "html.parser")
+
+ for item in soup.find_all("li", class_="searchresult"):
+        item_type = item.find('div', class_='itemtype').text.strip().lower()
+        if item_type == "album":
+ data = parse_album(item)
+
+ cleanartist_found = helpers.latinToAscii(data['artist'])
+ cleanalbum_found = helpers.latinToAscii(data['album'])
+
+ logger.debug(u"{} - {}".format(data['album'], cleanalbum_found))
+
+ logger.debug("Comparing {} to {}".format(
+ cleanalbum, cleanalbum_found))
+ if (cleanartist.lower() == cleanartist_found.lower() and
+ cleanalbum.lower() == cleanalbum_found.lower()):
+ resultlist.append(Result(
+ data['title'], data['size'], data['url'],
+ 'bandcamp', 'bandcamp', True))
+ else:
+ continue
+
+    if soup.find('a', class_='next'):
+ page += 1
+ logger.debug("Calling next page ({})".format(page))
+ search(album, albumlength=albumlength,
+ page=page, resultlist=resultlist)
+
+ return resultlist
+
+
+def download(album, bestqual):
+ html = request.request_content(url=bestqual.url).decode('utf-8')
+ trackinfo = []
+ try:
+        trackinfo = json.loads(
+            re.search(r"trackinfo&quot;:(\[.*?\]),&quot;", html)
+            .group(1)
+            .replace('&quot;', '"'))
+ except ValueError as e:
+ logger.warn("Couldn't load json: {}".format(e))
+
+ directory = os.path.join(
+ headphones.CONFIG.BANDCAMP_DIR,
+ u'{} - {}'.format(
+ album['ArtistName'].replace('/', '_'),
+ album['AlbumTitle'].replace('/', '_')))
+ directory = helpers.latinToAscii(directory)
+
+ if not os.path.exists(directory):
+ try:
+ os.makedirs(directory)
+ except Exception as e:
+ logger.warn("Could not create directory ({})".format(e))
+
+ index = 1
+ for track in trackinfo:
+ filename = helpers.replace_illegal_chars(
+ u'{:02d} - {}.mp3'.format(index, track['title']))
+        fullname = os.path.join(directory, filename)
+        logger.debug("Downloading to {}".format(fullname))
+
+        if 'file' in track and track['file'] is not None and 'mp3-128' in track['file']:
+            content = request.request_content(track['file']['mp3-128'])
+            with open(fullname, 'wb') as mp3_file:
+                mp3_file.write(content)
+ try:
+ f = MediaFile(fullname)
+ date, year = metadata._date_year(album)
+ f.update({
+                    'artist': album['ArtistName'],
+                    'album': album['AlbumTitle'],
+                    'title': track['title'],
+ 'track': track['track_num'],
+ 'tracktotal': len(trackinfo),
+ 'year': year,
+ })
+ f.save()
+ except UnreadableFileError as ex:
+ logger.warn("MediaFile couldn't parse: %s (%s)",
+ fullname,
+ str(ex))
+
+ index += 1
+
+ return directory
+
+
+def parse_album(item):
+ album = item.find('div', class_='heading').text.strip()
+ artist = item.find('div', class_='subhead').text.strip().replace("by ", "")
+ released = item.find('div', class_='released').text.strip().replace(
+ "released ", "")
+ year = re.search(r"(\d{4})", released).group(1)
+
+ url = item.find('div', class_='heading').find('a')['href'].split("?")[0]
+
+ length = item.find('div', class_='length').text.strip()
+ tracks, minutes = length.split(",")
+ tracks = tracks.replace(" tracks", "").replace(" track", "").strip()
+ minutes = minutes.replace(" minutes", "").strip()
+    # bandcamp offers mp3 128b which should be 960KB/minute
+ size = int(minutes) * 983040
+
+ data = {"title": u'{} - {} [{}]'.format(artist, album, year),
+ "artist": artist, "album": album,
+ "url": url, "size": size}
+
+ return data
diff --git a/headphones/common.py b/headphones/common.py
index a311860a..3461a18e 100644
--- a/headphones/common.py
+++ b/headphones/common.py
@@ -102,36 +102,6 @@ class Quality:
return (anyQualities, bestQualities)
- @staticmethod
- def nameQuality(name):
-
- def checkName(list, func):
- return func([re.search(x, name, re.I) for x in list])
-
- name = os.path.basename(name)
-
- # if we have our exact text then assume we put it there
- for x in Quality.qualityStrings:
- if x == Quality.UNKNOWN:
- continue
-
- regex = '\W' + Quality.qualityStrings[x].replace(' ', '\W') + '\W'
- regex_match = re.search(regex, name, re.I)
- if regex_match:
- return x
-
- # TODO: fix quality checking here
- if checkName(["mp3", "192"], any) and not checkName(["flac"], all):
- return Quality.B192
- elif checkName(["mp3", "256"], any) and not checkName(["flac"], all):
- return Quality.B256
- elif checkName(["mp3", "vbr"], any) and not checkName(["flac"], all):
- return Quality.VBR
- elif checkName(["mp3", "320"], any) and not checkName(["flac"], all):
- return Quality.B320
- else:
- return Quality.UNKNOWN
-
@staticmethod
def assumeQuality(name):
if name.lower().endswith(".mp3"):
@@ -158,13 +128,6 @@ class Quality:
return (Quality.NONE, status)
- @staticmethod
- def statusFromName(name, assume=True):
- quality = Quality.nameQuality(name)
- if assume and quality == Quality.UNKNOWN:
- quality = Quality.assumeQuality(name)
- return Quality.compositeStatus(DOWNLOADED, quality)
-
DOWNLOADED = None
SNATCHED = None
SNATCHED_PROPER = None
diff --git a/headphones/config.py b/headphones/config.py
index d7a19f32..e7f579b2 100644
--- a/headphones/config.py
+++ b/headphones/config.py
@@ -80,6 +80,7 @@ _CONFIG_DEFINITIONS = {
'DELUGE_PASSWORD': (str, 'Deluge', ''),
'DELUGE_LABEL': (str, 'Deluge', ''),
'DELUGE_DONE_DIRECTORY': (str, 'Deluge', ''),
+ 'DELUGE_DOWNLOAD_DIRECTORY': (str, 'Deluge', ''),
'DELUGE_PAUSED': (int, 'Deluge', 0),
'DESTINATION_DIR': (str, 'General', ''),
'DETECT_BITRATE': (int, 'General', 0),
@@ -269,6 +270,11 @@ _CONFIG_DEFINITIONS = {
'SONGKICK_ENABLED': (int, 'Songkick', 1),
'SONGKICK_FILTER_ENABLED': (int, 'Songkick', 0),
'SONGKICK_LOCATION': (str, 'Songkick', ''),
+ 'SOULSEEK_API_URL': (str, 'Soulseek', ''),
+ 'SOULSEEK_API_KEY': (str, 'Soulseek', ''),
+ 'SOULSEEK_DOWNLOAD_DIR': (str, 'Soulseek', ''),
+ 'SOULSEEK_INCOMPLETE_DOWNLOAD_DIR': (str, 'Soulseek', ''),
+ 'SOULSEEK': (int, 'Soulseek', 0),
'SUBSONIC_ENABLED': (int, 'Subsonic', 0),
'SUBSONIC_HOST': (str, 'Subsonic', ''),
'SUBSONIC_PASSWORD': (str, 'Subsonic', ''),
@@ -317,7 +323,9 @@ _CONFIG_DEFINITIONS = {
'XBMC_PASSWORD': (str, 'XBMC', ''),
'XBMC_UPDATE': (int, 'XBMC', 0),
'XBMC_USERNAME': (str, 'XBMC', ''),
- 'XLDPROFILE': (str, 'General', '')
+ 'XLDPROFILE': (str, 'General', ''),
+ 'BANDCAMP': (int, 'General', 0),
+ 'BANDCAMP_DIR': (path, 'General', '')
}
diff --git a/headphones/deluge.py b/headphones/deluge.py
index 578adc6d..5130fb96 100644
--- a/headphones/deluge.py
+++ b/headphones/deluge.py
@@ -58,12 +58,12 @@ def _scrubber(text):
if scrub_logs:
try:
# URL parameter values
- text = re.sub('=[0-9a-zA-Z]*', '=REMOVED', text)
+ text = re.sub(r'=[0-9a-zA-Z]*', r'=REMOVED', text)
# Local host with port
# text = re.sub('\:\/\/.*\:', '://REMOVED:', text) # just host
- text = re.sub('\:\/\/.*\:[0-9]*', '://REMOVED:', text)
+ text = re.sub(r'\:\/\/.*\:[0-9]*', r'://REMOVED:', text)
# Session cookie
- text = re.sub("_session_id'\: '.*'", "_session_id': 'REMOVED'", text)
+ text = re.sub(r"_session_id'\: '.*'", r"_session_id': 'REMOVED'", text)
# Local Windows user path
if text.lower().startswith('c:\\users\\'):
k = text.split('\\')
@@ -128,9 +128,9 @@ def addTorrent(link, data=None, name=None):
# Extract torrent name from .torrent
try:
logger.debug('Deluge: Getting torrent name length')
- name_length = int(re.findall('name([0-9]*)\:.*?\:', str(torrentfile))[0])
+ name_length = int(re.findall(r'name([0-9]*)\:.*?\:', str(torrentfile))[0])
logger.debug('Deluge: Getting torrent name')
- name = re.findall('name[0-9]*\:(.*?)\:', str(torrentfile))[0][:name_length]
+ name = re.findall(r'name[0-9]*\:(.*?)\:', str(torrentfile))[0][:name_length]
except Exception as e:
logger.debug('Deluge: Could not get torrent name, getting file name')
# get last part of link/path (name only)
@@ -160,9 +160,9 @@ def addTorrent(link, data=None, name=None):
# Extract torrent name from .torrent
try:
logger.debug('Deluge: Getting torrent name length')
- name_length = int(re.findall('name([0-9]*)\:.*?\:', str(torrentfile))[0])
+ name_length = int(re.findall(r'name([0-9]*)\:.*?\:', str(torrentfile))[0])
logger.debug('Deluge: Getting torrent name')
- name = re.findall('name[0-9]*\:(.*?)\:', str(torrentfile))[0][:name_length]
+ name = re.findall(r'name[0-9]*\:(.*?)\:', str(torrentfile))[0][:name_length]
except Exception as e:
logger.debug('Deluge: Could not get torrent name, getting file name')
# get last part of link/path (name only)
@@ -466,19 +466,56 @@ def _add_torrent_url(result):
def _add_torrent_file(result):
logger.debug('Deluge: Adding file')
+
+ options = {}
+
+ if headphones.CONFIG.DELUGE_DOWNLOAD_DIRECTORY:
+ options['download_location'] = headphones.CONFIG.DELUGE_DOWNLOAD_DIRECTORY
+
+ if headphones.CONFIG.DELUGE_DONE_DIRECTORY or headphones.CONFIG.DOWNLOAD_TORRENT_DIR:
+ options['move_completed'] = 1
+ if headphones.CONFIG.DELUGE_DONE_DIRECTORY:
+ options['move_completed_path'] = headphones.CONFIG.DELUGE_DONE_DIRECTORY
+ else:
+ options['move_completed_path'] = headphones.CONFIG.DOWNLOAD_TORRENT_DIR
+
+ if headphones.CONFIG.DELUGE_PAUSED:
+ options['add_paused'] = headphones.CONFIG.DELUGE_PAUSED
+
if not any(delugeweb_auth):
_get_auth()
try:
# content is torrent file contents that needs to be encoded to base64
post_data = json.dumps({"method": "core.add_torrent_file",
"params": [result['name'] + '.torrent',
- b64encode(result['content']).decode(), {}],
+                                           b64encode(result['content'].encode('utf8')).decode(),
+ options],
"id": 2})
response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
verify=deluge_verify_cert, headers=headers)
result['hash'] = json.loads(response.text)['result']
logger.debug('Deluge: Response was %s' % str(json.loads(response.text)))
return json.loads(response.text)['result']
+ except UnicodeDecodeError:
+ try:
+ # content is torrent file contents that needs to be encoded to base64
+ # this time let's try leaving the encoding as is
+ logger.debug('Deluge: There was a decoding issue, let\'s try again')
+ post_data = json.dumps({"method": "core.add_torrent_file",
+ "params": [result['name'].decode('utf8') + '.torrent',
+ b64encode(result['content']),
+ options],
+ "id": 22})
+ response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
+ verify=deluge_verify_cert, headers=headers)
+ result['hash'] = json.loads(response.text)['result']
+ logger.debug('Deluge: Response was %s' % str(json.loads(response.text)))
+ return json.loads(response.text)['result']
+ except Exception as e:
+ logger.error('Deluge: Adding torrent file failed after decode: %s' % str(e))
+ formatted_lines = traceback.format_exc().splitlines()
+ logger.error('; '.join(formatted_lines))
+ return False
except Exception as e:
logger.error('Deluge: Adding torrent file failed: %s' % str(e))
formatted_lines = traceback.format_exc().splitlines()
@@ -566,61 +603,3 @@ def setSeedRatio(result):
return None
-def setTorrentPath(result):
- logger.debug('Deluge: Setting download path')
- if not any(delugeweb_auth):
- _get_auth()
-
- try:
- if headphones.CONFIG.DELUGE_DONE_DIRECTORY or headphones.CONFIG.DOWNLOAD_TORRENT_DIR:
- post_data = json.dumps({"method": "core.set_torrent_move_completed",
- "params": [result['hash'], True],
- "id": 7})
- response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
- verify=deluge_verify_cert, headers=headers)
-
- if headphones.CONFIG.DELUGE_DONE_DIRECTORY:
- move_to = headphones.CONFIG.DELUGE_DONE_DIRECTORY
- else:
- move_to = headphones.CONFIG.DOWNLOAD_TORRENT_DIR
-
- if not os.path.exists(move_to):
- logger.debug('Deluge: %s directory doesn\'t exist, let\'s create it' % move_to)
- os.makedirs(move_to)
- post_data = json.dumps({"method": "core.set_torrent_move_completed_path",
- "params": [result['hash'], move_to],
- "id": 8})
- response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
- verify=deluge_verify_cert, headers=headers)
-
- return not json.loads(response.text)['error']
-
- return True
- except Exception as e:
- logger.error('Deluge: Setting torrent move-to directory failed: %s' % str(e))
- formatted_lines = traceback.format_exc().splitlines()
- logger.error('; '.join(formatted_lines))
- return None
-
-
-def setTorrentPause(result):
- logger.debug('Deluge: Pausing torrent')
- if not any(delugeweb_auth):
- _get_auth()
-
- try:
- if headphones.CONFIG.DELUGE_PAUSED:
- post_data = json.dumps({"method": "core.pause_torrent",
- "params": [[result['hash']]],
- "id": 9})
- response = requests.post(delugeweb_url, data=post_data.encode('utf-8'), cookies=delugeweb_auth,
- verify=deluge_verify_cert, headers=headers)
-
- return not json.loads(response.text)['error']
-
- return True
- except Exception as e:
- logger.error('Deluge: Setting torrent paused failed: %s' % str(e))
- formatted_lines = traceback.format_exc().splitlines()
- logger.error('; '.join(formatted_lines))
- return None
diff --git a/headphones/helpers.py b/headphones/helpers.py
index c8f53c64..ea8960f2 100644
--- a/headphones/helpers.py
+++ b/headphones/helpers.py
@@ -184,7 +184,7 @@ def bytes_to_mb(bytes):
def mb_to_bytes(mb_str):
- result = re.search('^(\d+(?:\.\d+)?)\s?(?:mb)?', mb_str, flags=re.I)
+ result = re.search(r"^(\d+(?:\.\d+)?)\s?(?:mb)?", mb_str, flags=re.I)
if result:
return int(float(result.group(1)) * 1048576)
@@ -253,9 +253,9 @@ def replace_all(text, dic):
def replace_illegal_chars(string, type="file"):
if type == "file":
- string = re.sub('[\?"*:|<>/]', '_', string)
+ string = re.sub(r"[\?\"*:|<>/]", "_", string)
if type == "folder":
- string = re.sub('[:\?<>"|*]', '_', string)
+ string = re.sub(r"[:\?<>\"|*]", "_", string)
return string
@@ -386,7 +386,7 @@ def clean_musicbrainz_name(s, return_as_string=True):
def cleanTitle(title):
- title = re.sub('[\.\-\/\_]', ' ', title).lower()
+ title = re.sub(r"[\.\-\/\_]", " ", title).lower()
# Strip out extra whitespace
title = ' '.join(title.split())
@@ -1050,3 +1050,10 @@ def have_pct_have_total(db_artist):
have_pct = have_tracks / total_tracks if total_tracks else 0
return (have_pct, total_tracks)
+
+def has_token(title, token):
+    return bool(
+        re.search(rf'(?:\W|^)+{re.escape(token)}(?:\W|$)+',
+                  title,
+                  re.IGNORECASE | re.UNICODE)
+    )
diff --git a/headphones/helpers_test.py b/headphones/helpers_test.py
index 16753f91..09a1783c 100644
--- a/headphones/helpers_test.py
+++ b/headphones/helpers_test.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
from .unittestcompat import TestCase
-from headphones.helpers import clean_name, is_valid_date, age
+from headphones.helpers import clean_name, is_valid_date, age, has_token
class HelpersTest(TestCase):
@@ -56,3 +56,18 @@ class HelpersTest(TestCase):
]
for input, expected, desc in test_cases:
self.assertEqual(is_valid_date(input), expected, desc)
+
+ def test_has_token(self):
+ """helpers: has_token()"""
+ self.assertEqual(
+ has_token("a cat ran", "cat"),
+ True,
+ "return True if token is in string"
+ )
+ self.assertEqual(
+ has_token("acatran", "cat"),
+ False,
+ "return False if token is part of another word"
+ )
+
+
diff --git a/headphones/postprocessor.py b/headphones/postprocessor.py
index dc7ac271..b0b67cc2 100755
--- a/headphones/postprocessor.py
+++ b/headphones/postprocessor.py
@@ -27,7 +27,7 @@ from beets import config as beetsconfig
from beets import logging as beetslogging
from mediafile import MediaFile, FileTypeError, UnreadableFileError
from beetsplug import lyrics as beetslyrics
-from headphones import notifiers, utorrent, transmission, deluge, qbittorrent
+from headphones import notifiers, utorrent, transmission, deluge, qbittorrent, soulseek
from headphones import db, albumart, librarysync
from headphones import logger, helpers, mb, music_encoder
from headphones import metadata
@@ -36,18 +36,44 @@ postprocessor_lock = threading.Lock()
def checkFolder():
- logger.debug("Checking download folder for completed downloads (only snatched ones).")
+ logger.info("Checking download folder for completed downloads (only snatched ones).")
with postprocessor_lock:
myDB = db.DBConnection()
snatched = myDB.select('SELECT * from snatched WHERE Status="Snatched"')
+ # If soulseek is used, this part will get the status from the soulseek api and return completed and errored albums
+ completed_albums, errored_albums = set(), set()
+ if any(album['Kind'] == 'soulseek' for album in snatched):
+ completed_albums, errored_albums = soulseek.download_completed()
+
for album in snatched:
if album['FolderName']:
folder_name = album['FolderName']
single = False
- if album['Kind'] == 'nzb':
- download_dir = headphones.CONFIG.DOWNLOAD_DIR
+ if album['Kind'] == 'soulseek':
+ if folder_name in errored_albums:
+ # If the album had any tracks with errors in it, the whole download is considered faulty. Status will be reset to wanted.
+ logger.info(f"Album with folder '{folder_name}' had errors during download. Setting status to 'Wanted'.")
+ myDB.action('UPDATE albums SET Status="Wanted" WHERE AlbumID=? AND Status="Snatched"', (album['AlbumID'],))
+
+ # Folder will be removed from configured complete and Incomplete directory
+ complete_path = os.path.join(headphones.CONFIG.SOULSEEK_DOWNLOAD_DIR, folder_name)
+ incomplete_path = os.path.join(headphones.CONFIG.SOULSEEK_INCOMPLETE_DOWNLOAD_DIR, folder_name)
+                    for path in [complete_path, incomplete_path]:
+                        try:
+                            shutil.rmtree(path)
+                        except OSError as e:
+                            logger.debug(f"Could not remove '{path}': {e}")
+ continue
+ elif folder_name in completed_albums:
+ download_dir = headphones.CONFIG.SOULSEEK_DOWNLOAD_DIR
+ else:
+ continue
+ elif album['Kind'] == 'nzb':
+ download_dir = headphones.CONFIG.DOWNLOAD_DIR
+ elif album['Kind'] == 'bandcamp':
+ download_dir = headphones.CONFIG.BANDCAMP_DIR
else:
if headphones.CONFIG.DELUGE_DONE_DIRECTORY and headphones.CONFIG.TORRENT_DOWNLOADER == 3:
download_dir = headphones.CONFIG.DELUGE_DONE_DIRECTORY
@@ -289,7 +315,7 @@ def verify(albumid, albumpath, Kind=None, forced=False, keep_original_folder=Fal
logger.debug('Metadata check failed. Verifying filenames...')
for downloaded_track in downloaded_track_list:
track_name = os.path.splitext(downloaded_track)[0]
- split_track_name = re.sub('[\.\-\_]', ' ', track_name).lower()
+ split_track_name = re.sub(r'[\.\-\_]', r' ', track_name).lower()
for track in tracks:
if not track['TrackTitle']:
@@ -1170,8 +1196,14 @@ def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None, keep_orig
download_dirs.append(dir)
if headphones.CONFIG.DOWNLOAD_DIR and not dir:
download_dirs.append(headphones.CONFIG.DOWNLOAD_DIR)
+ if headphones.CONFIG.SOULSEEK_DOWNLOAD_DIR and not dir:
+ download_dirs.append(headphones.CONFIG.SOULSEEK_DOWNLOAD_DIR)
     if headphones.CONFIG.DOWNLOAD_TORRENT_DIR and not dir:
         download_dirs.append(headphones.CONFIG.DOWNLOAD_TORRENT_DIR)
+    if headphones.CONFIG.BANDCAMP and not dir:
+        download_dirs.append(headphones.CONFIG.BANDCAMP_DIR)
# If DOWNLOAD_DIR and DOWNLOAD_TORRENT_DIR are the same, remove the duplicate to prevent us from trying to process the same folder twice.
download_dirs = list(set(download_dirs))
diff --git a/headphones/rutracker.py b/headphones/rutracker.py
index 988e05b1..21d2abd4 100644
--- a/headphones/rutracker.py
+++ b/headphones/rutracker.py
@@ -42,19 +42,22 @@ class Rutracker(object):
'login_password': headphones.CONFIG.RUTRACKER_PASSWORD,
'login': b'\xc2\xf5\xee\xe4' # '%C2%F5%EE%E4'
}
+ headers = {
+ 'User-Agent' : 'Headphones'
+ }
logger.info("Attempting to log in to rutracker...")
try:
- r = self.session.post(loginpage, data=post_params, timeout=self.timeout, allow_redirects=False)
+ r = self.session.post(loginpage, data=post_params, timeout=self.timeout, allow_redirects=False, headers=headers)
# try again
if not self.has_bb_session_cookie(r):
time.sleep(10)
if headphones.CONFIG.RUTRACKER_COOKIE:
logger.info("Attempting to log in using predefined cookie...")
- r = self.session.post(loginpage, data=post_params, timeout=self.timeout, allow_redirects=False, cookies={'bb_session': headphones.CONFIG.RUTRACKER_COOKIE})
+ r = self.session.post(loginpage, data=post_params, timeout=self.timeout, allow_redirects=False, headers=headers, cookies={'bb_session': headphones.CONFIG.RUTRACKER_COOKIE})
else:
- r = self.session.post(loginpage, data=post_params, timeout=self.timeout, allow_redirects=False)
+ r = self.session.post(loginpage, data=post_params, timeout=self.timeout, allow_redirects=False, headers=headers)
if self.has_bb_session_cookie(r):
self.loggedin = True
logger.info("Successfully logged in to rutracker")
@@ -113,7 +116,10 @@ class Rutracker(object):
Parse the search results and return valid torrent list
"""
try:
- headers = {'Referer': self.search_referer}
+ headers = {
+ 'Referer': self.search_referer,
+ 'User-Agent' : 'Headphones'
+ }
r = self.session.get(url=searchurl, headers=headers, timeout=self.timeout)
soup = BeautifulSoup(r.content, 'html.parser')
@@ -183,7 +189,10 @@ class Rutracker(object):
downloadurl = 'https://rutracker.org/forum/dl.php?t=' + torrent_id
cookie = {'bb_dl': torrent_id}
try:
- headers = {'Referer': url}
+ headers = {
+ 'Referer': url,
+ 'User-Agent' : 'Headphones'
+ }
r = self.session.post(url=downloadurl, cookies=cookie, headers=headers,
timeout=self.timeout)
return r.content
diff --git a/headphones/searcher.py b/headphones/searcher.py
index e4762373..d6c8478e 100644
--- a/headphones/searcher.py
+++ b/headphones/searcher.py
@@ -37,10 +37,29 @@ from unidecode import unidecode
import headphones
from headphones.common import USER_AGENT
+from headphones.helpers import (
+ bytes_to_mb,
+ has_token,
+ piratesize,
+ replace_all,
+ replace_illegal_chars,
+ sab_replace_dots,
+ sab_replace_spaces,
+ sab_sanitize_foldername,
+ split_string
+ )
from headphones.types import Result
-from headphones import logger, db, helpers, classes, sab, nzbget, request
-from headphones import utorrent, transmission, notifiers, rutracker, deluge, qbittorrent
-
+from headphones import logger, db, classes, sab, nzbget, request
+from headphones import (
+ bandcamp,
+ deluge,
+ notifiers,
+ qbittorrent,
+ rutracker,
+ soulseek,
+ transmission,
+ utorrent,
+ )
# Magnet to torrent services, for Black hole. Stolen from CouchPotato.
TORRENT_TO_MAGNET_SERVICES = [
@@ -137,7 +156,7 @@ def calculate_torrent_hash(link, data=None):
"""
if link.startswith("magnet:"):
- torrent_hash = re.findall("urn:btih:([\w]{32,40})", link)[0]
+ torrent_hash = re.findall(r"urn:btih:([\w]{32,40})", link)[0]
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash)).lower()
elif data:
@@ -261,6 +280,8 @@ def strptime_musicbrainz(date_str):
 def do_sorted_search(album, new, losslessOnly, choose_specific_download=False):
NZB_PROVIDERS = (headphones.CONFIG.HEADPHONES_INDEXER or
headphones.CONFIG.NEWZNAB or
headphones.CONFIG.NZBSORG or
@@ -284,25 +305,33 @@ def do_sorted_search(album, new, losslessOnly, choose_specific_download=False):
[album['AlbumID']])[0][0]
if headphones.CONFIG.PREFER_TORRENTS == 0 and not choose_specific_download:
-
if NZB_PROVIDERS and NZB_DOWNLOADERS:
results = searchNZB(album, new, losslessOnly, albumlength)
if not results and TORRENT_PROVIDERS:
results = searchTorrent(album, new, losslessOnly, albumlength)
- elif headphones.CONFIG.PREFER_TORRENTS == 1 and not choose_specific_download:
+ if not results and headphones.CONFIG.BANDCAMP:
+ results = searchBandcamp(album, new, albumlength)
+ elif headphones.CONFIG.PREFER_TORRENTS == 1 and not choose_specific_download:
if TORRENT_PROVIDERS:
results = searchTorrent(album, new, losslessOnly, albumlength)
if not results and NZB_PROVIDERS and NZB_DOWNLOADERS:
results = searchNZB(album, new, losslessOnly, albumlength)
+ if not results and headphones.CONFIG.BANDCAMP:
+ results = searchBandcamp(album, new, albumlength)
+
+ elif headphones.CONFIG.PREFER_TORRENTS == 2 and not choose_specific_download:
+ results = searchSoulseek(album, new, losslessOnly, albumlength)
+
else:
nzb_results = None
torrent_results = None
+ bandcamp_results = None
if NZB_PROVIDERS and NZB_DOWNLOADERS:
nzb_results = searchNZB(album, new, losslessOnly, albumlength, choose_specific_download)
@@ -311,13 +340,16 @@ def do_sorted_search(album, new, losslessOnly, choose_specific_download=False):
torrent_results = searchTorrent(album, new, losslessOnly, albumlength,
choose_specific_download)
+ if headphones.CONFIG.BANDCAMP:
+ bandcamp_results = searchBandcamp(album, new, albumlength)
+
if not nzb_results:
nzb_results = []
         if not torrent_results:
             torrent_results = []
+        if not bandcamp_results:
+            bandcamp_results = []

-        results = nzb_results + torrent_results
+        results = nzb_results + torrent_results + bandcamp_results
if choose_specific_download:
return results
@@ -338,6 +370,7 @@ def do_sorted_search(album, new, losslessOnly, choose_specific_download=False):
(data, result) = preprocess(sorted_search_results)
     if data and result:
         send_to_downloader(data, result, album)
@@ -360,7 +393,7 @@ def more_filtering(results, album, albumlength, new):
logger.debug('Target bitrate: %s kbps' % headphones.CONFIG.PREFERRED_BITRATE)
if albumlength:
targetsize = albumlength / 1000 * int(headphones.CONFIG.PREFERRED_BITRATE) * 128
- logger.info('Target size: %s' % helpers.bytes_to_mb(targetsize))
+ logger.info('Target size: %s' % bytes_to_mb(targetsize))
if headphones.CONFIG.PREFERRED_BITRATE_LOW_BUFFER:
low_size_limit = targetsize * int(
headphones.CONFIG.PREFERRED_BITRATE_LOW_BUFFER) / 100
@@ -377,14 +410,14 @@ def more_filtering(results, album, albumlength, new):
if low_size_limit and result.size < low_size_limit:
logger.info(
f"{result.title} from {result.provider} is too small for this album. "
- f"(Size: {result.size}, MinSize: {helpers.bytes_to_mb(low_size_limit)})"
+ f"(Size: {result.size}, MinSize: {bytes_to_mb(low_size_limit)})"
)
continue
if high_size_limit and result.size > high_size_limit:
logger.info(
f"{result.title} from {result.provider} is too large for this album. "
- f"(Size: {result.size}, MaxSize: {helpers.bytes_to_mb(high_size_limit)})"
+ f"(Size: {result.size}, MaxSize: {bytes_to_mb(high_size_limit)})"
)
# Keep lossless results if there are no good lossy matches
if not (allow_lossless and 'flac' in result.title.lower()):
@@ -424,7 +457,7 @@ def sort_search_results(resultlist, album, new, albumlength):
# Add a priority if it has any of the preferred words
results_with_priority = []
- preferred_words = helpers.split_string(headphones.CONFIG.PREFERRED_WORDS)
+ preferred_words = split_string(headphones.CONFIG.PREFERRED_WORDS)
for result in resultlist:
priority = 0
for word in preferred_words:
@@ -502,6 +535,10 @@ def get_year_from_release_date(release_date):
return year
+def searchBandcamp(album, new=False, albumlength=None):
+ return bandcamp.search(album)
+
+
def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
choose_specific_download=False):
reldate = album['ReleaseDate']
@@ -521,8 +558,8 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
':': ''
}
- cleanalbum = unidecode(helpers.replace_all(album['AlbumTitle'], replacements)).strip()
- cleanartist = unidecode(helpers.replace_all(album['ArtistName'], replacements)).strip()
+ cleanalbum = unidecode(replace_all(album['AlbumTitle'], replacements)).strip()
+ cleanartist = unidecode(replace_all(album['ArtistName'], replacements)).strip()
# Use the provided search term if available, otherwise build a search term
if album['SearchTerm']:
@@ -542,8 +579,8 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
term = cleanartist + ' ' + cleanalbum
# Replace bad characters in the term
- term = re.sub('[\.\-\/]', ' ', term)
- artistterm = re.sub('[\.\-\/]', ' ', cleanartist)
+ term = re.sub(r'[\.\-\/]', r' ', term)
+ artistterm = re.sub(r'[\.\-\/]', r' ', cleanartist)
# If Preferred Bitrate and High Limit and Allow Lossless then get both lossy and lossless
if headphones.CONFIG.PREFERRED_QUALITY == 2 and headphones.CONFIG.PREFERRED_BITRATE and headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER and headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS:
@@ -599,7 +636,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
size = int(item.links[1]['length'])
resultlist.append(Result(title, size, url, provider, 'nzb', True))
- logger.info('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
+ logger.info('Found %s. Size: %s' % (title, bytes_to_mb(size)))
except Exception as e:
logger.error("An unknown error occurred trying to parse the feed: %s" % e)
@@ -670,7 +707,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
size = int(item.links[1]['length'])
if all(word.lower() in title.lower() for word in term.split()):
logger.info(
- 'Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
+ 'Found %s. Size: %s' % (title, bytes_to_mb(size)))
resultlist.append(Result(title, size, url, provider, 'nzb', True))
else:
logger.info('Skipping %s, not all search term words found' % title)
@@ -720,7 +757,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
size = int(item.links[1]['length'])
resultlist.append(Result(title, size, url, provider, 'nzb', True))
- logger.info('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
+ logger.info('Found %s. Size: %s' % (title, bytes_to_mb(size)))
except Exception as e:
logger.exception("Unhandled exception while parsing feed")
@@ -767,7 +804,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
size = int(item['sizebytes'])
resultlist.append(Result(title, size, url, provider, 'nzb', True))
- logger.info('Found %s. Size: %s', title, helpers.bytes_to_mb(size))
+ logger.info('Found %s. Size: %s', title, bytes_to_mb(size))
except Exception as e:
logger.exception("Unhandled exception")
@@ -790,7 +827,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
def send_to_downloader(data, result, album):
logger.info(
f"Found best result from {result.provider}: "
- f"{result.title} - {helpers.bytes_to_mb(result.size)}"
+ f"{result.title} - {bytes_to_mb(result.size)}"
)
# Get rid of any dodgy chars here so we can prevent sab from renaming our downloads
kind = result.kind
@@ -798,7 +835,7 @@ def send_to_downloader(data, result, album):
torrentid = None
if kind == 'nzb':
- folder_name = helpers.sab_sanitize_foldername(result.title)
+ folder_name = sab_sanitize_foldername(result.title)
if headphones.CONFIG.NZB_DOWNLOADER == 1:
@@ -820,9 +857,9 @@ def send_to_downloader(data, result, album):
(replace_spaces, replace_dots) = sab.checkConfig()
if replace_dots:
- folder_name = helpers.sab_replace_dots(folder_name)
+ folder_name = sab_replace_dots(folder_name)
if replace_spaces:
- folder_name = helpers.sab_replace_spaces(folder_name)
+ folder_name = sab_replace_spaces(folder_name)
else:
nzb_name = folder_name + '.nzb'
@@ -839,6 +876,15 @@ def send_to_downloader(data, result, album):
except Exception as e:
logger.error('Couldn\'t write NZB file: %s', e)
return
+ elif kind == 'bandcamp':
+ folder_name = bandcamp.download(album, result)
+ logger.info("Setting folder_name to: {}".format(folder_name))
+
+
+ elif kind == 'soulseek':
+ soulseek.download(user=result.user, filelist=result.files)
+ folder_name = result.folder
+
else:
folder_name = '%s - %s [%s]' % (
unidecode(album['ArtistName']).replace('/', '_'),
@@ -849,7 +895,7 @@ def send_to_downloader(data, result, album):
if headphones.CONFIG.TORRENT_DOWNLOADER == 0:
# Get torrent name from .torrent, this is usually used by the torrent client as the folder name
- torrent_name = helpers.replace_illegal_chars(folder_name) + '.torrent'
+ torrent_name = replace_illegal_chars(folder_name) + '.torrent'
download_path = os.path.join(headphones.CONFIG.TORRENTBLACKHOLE_DIR, torrent_name)
if result.url.lower().startswith("magnet:"):
@@ -954,10 +1000,6 @@ def send_to_downloader(data, result, album):
logger.error("Error sending torrent to Deluge. Are you sure it's running? Maybe the torrent already exists?")
return
- # This pauses the torrent right after it is added
- if headphones.CONFIG.DELUGE_PAUSED:
- deluge.setTorrentPause({'hash': torrentid})
-
# Set Label
if headphones.CONFIG.DELUGE_LABEL:
deluge.setTorrentLabel({'hash': torrentid})
@@ -967,10 +1009,6 @@ def send_to_downloader(data, result, album):
if seed_ratio is not None:
deluge.setSeedRatio({'hash': torrentid, 'ratio': seed_ratio})
- # Set move-to directory
- if headphones.CONFIG.DELUGE_DONE_DIRECTORY or headphones.CONFIG.DOWNLOAD_TORRENT_DIR:
- deluge.setTorrentPath({'hash': torrentid})
-
# Get folder name from Deluge, it's usually the torrent name
folder_name = deluge.getTorrentFolder({'hash': torrentid})
if folder_name:
@@ -1156,7 +1194,7 @@ def send_to_downloader(data, result, album):
def verifyresult(title, artistterm, term, lossless):
- title = re.sub('[\.\-\/\_]', ' ', title)
+ title = re.sub(r'[\.\-\/\_]', r' ', title)
# if artistterm != 'Various Artists':
#
@@ -1188,16 +1226,16 @@ def verifyresult(title, artistterm, term, lossless):
return False
if headphones.CONFIG.IGNORED_WORDS:
- for each_word in helpers.split_string(headphones.CONFIG.IGNORED_WORDS):
+ for each_word in split_string(headphones.CONFIG.IGNORED_WORDS):
if each_word.lower() in title.lower():
logger.info("Removed '%s' from results because it contains ignored word: '%s'",
title, each_word)
return False
if headphones.CONFIG.REQUIRED_WORDS:
- for each_word in helpers.split_string(headphones.CONFIG.REQUIRED_WORDS):
+ for each_word in split_string(headphones.CONFIG.REQUIRED_WORDS):
if ' OR ' in each_word:
- or_words = helpers.split_string(each_word, 'OR')
+ or_words = split_string(each_word, 'OR')
if any(word.lower() in title.lower() for word in or_words):
continue
else:
@@ -1219,23 +1257,23 @@ def verifyresult(title, artistterm, term, lossless):
title, each_word)
return False
- tokens = re.split('\W', term, re.IGNORECASE | re.UNICODE)
+ tokens = re.split(r'\W', term, re.IGNORECASE | re.UNICODE)
+
for token in tokens:
if not token:
continue
if token == 'Various' or token == 'Artists' or token == 'VA':
continue
- if not re.search('(?:\W|^)+' + token + '(?:\W|$)+', title, re.IGNORECASE | re.UNICODE):
+ if not has_token(title, token):
cleantoken = ''.join(c for c in token if c not in string.punctuation)
- if not not re.search('(?:\W|^)+' + cleantoken + '(?:\W|$)+', title,
- re.IGNORECASE | re.UNICODE):
+ if not has_token(title, cleantoken):
dic = {'!': 'i', '$': 's'}
- dumbtoken = helpers.replace_all(token, dic)
- if not not re.search('(?:\W|^)+' + dumbtoken + '(?:\W|$)+', title,
- re.IGNORECASE | re.UNICODE):
- logger.info("Removed from results: %s (missing tokens: %s and %s)", title,
- token, cleantoken)
+ dumbtoken = replace_all(token, dic)
+ if not has_token(title, dumbtoken):
+ logger.info(
+ "Removed from results: %s (missing tokens: [%s, %s, %s])",
+ title, token, cleantoken, dumbtoken)
return False
return True
@@ -1264,9 +1302,9 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
'*': ''
}
- semi_cleanalbum = helpers.replace_all(album['AlbumTitle'], replacements)
+ semi_cleanalbum = replace_all(album['AlbumTitle'], replacements)
cleanalbum = unidecode(semi_cleanalbum)
- semi_cleanartist = helpers.replace_all(album['ArtistName'], replacements)
+ semi_cleanartist = replace_all(album['ArtistName'], replacements)
cleanartist = unidecode(semi_cleanartist)
# Use provided term if available, otherwise build our own (this code needs to be cleaned up since a lot
@@ -1293,12 +1331,12 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
else:
usersearchterm = ''
- semi_clean_artist_term = re.sub('[\.\-\/]', ' ', semi_cleanartist)
- semi_clean_album_term = re.sub('[\.\-\/]', ' ', semi_cleanalbum)
+ semi_clean_artist_term = re.sub(r'[\.\-\/]', r' ', semi_cleanartist)
+ semi_clean_album_term = re.sub(r'[\.\-\/]', r' ', semi_cleanalbum)
# Replace bad characters in the term
- term = re.sub('[\.\-\/]', ' ', term)
- artistterm = re.sub('[\.\-\/]', ' ', cleanartist)
- albumterm = re.sub('[\.\-\/]', ' ', cleanalbum)
+ term = re.sub(r'[\.\-\/]', r' ', term)
+ artistterm = re.sub(r'[\.\-\/]', r' ', cleanartist)
+ albumterm = re.sub(r'[\.\-\/]', r' ', cleanalbum)
# If Preferred Bitrate and High Limit and Allow Lossless then get both lossy and lossless
if headphones.CONFIG.PREFERRED_QUALITY == 2 and headphones.CONFIG.PREFERRED_BITRATE and headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER and headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS:
@@ -1401,7 +1439,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
if all(word.lower() in title.lower() for word in term.split()):
if size < maxsize and minimumseeders < seeders:
- logger.info('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
+ logger.info('Found %s. Size: %s' % (title, bytes_to_mb(size)))
resultlist.append(Result(title, size, url, provider, 'torrent', True))
else:
logger.info(
@@ -1477,7 +1515,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
size = int(desc_match.group(1))
url = item.link
resultlist.append(Result(title, size, url, provider, 'torrent', True))
- logger.info('Found %s. Size: %s', title, helpers.bytes_to_mb(size))
+ logger.info('Found %s. Size: %s', title, bytes_to_mb(size))
except Exception as e:
logger.error(
"An error occurred while trying to parse the response from Waffles.ch: %s",
@@ -1761,7 +1799,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
# Pirate Bay
if headphones.CONFIG.PIRATEBAY:
provider = "The Pirate Bay"
- tpb_term = term.replace("!", "").replace("'", " ")
+ tpb_term = term.replace("!", "").replace("'", " ").replace(" ", "%20")
# Use proxy if specified
if headphones.CONFIG.PIRATEBAY_PROXY_URL:
@@ -1793,6 +1831,8 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
# Process content
if data:
rows = data.select('table tbody tr')
+ if not rows:
+ rows = data.select('table tr')
if not rows:
logger.info("No results found from The Pirate Bay using term: %s" % tpb_term)
@@ -1820,7 +1860,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
formatted_size = re.search('Size (.*),', str(item)).group(1).replace(
'\xa0', ' ')
- size = helpers.piratesize(formatted_size)
+ size = piratesize(formatted_size)
if size < maxsize and minimumseeders < seeds and url is not None:
match = True
@@ -1874,7 +1914,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
"href"] # Magnet link. The actual download link is not based on the URL
formatted_size = item.select("td.size-row")[0].text
- size = helpers.piratesize(formatted_size)
+ size = piratesize(formatted_size)
if size < maxsize and minimumseeders < seeds and url is not None:
match = True
@@ -1901,22 +1941,49 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
return results
+def searchSoulseek(album, new=False, losslessOnly=False, albumlength=None):
+    # new and albumlength are accepted for signature parity with searchNZB/searchTorrent but are not used here
+ replacements = {
+ '...': '',
+ ' & ': ' ',
+ ' = ': ' ',
+ '?': '',
+ '$': '',
+ ' + ': ' ',
+ '"': '',
+ ',': '',
+ '*': '',
+ '.': '',
+ ':': ''
+ }
+
+ num_tracks = get_album_track_count(album['AlbumID'])
+ year = get_year_from_release_date(album['ReleaseDate'])
+ cleanalbum = unidecode(helpers.replace_all(album['AlbumTitle'], replacements)).strip()
+ cleanartist = unidecode(helpers.replace_all(album['ArtistName'], replacements)).strip()
+
+ results = soulseek.search(artist=cleanartist, album=cleanalbum, year=year, losslessOnly=losslessOnly, num_tracks=num_tracks)
+
+ return results
+
+
+def get_album_track_count(album_id):
+    # Return the number of tracks stored for the given album in the local database.
+ myDB = db.DBConnection()
+ track_count = myDB.select('SELECT COUNT(*) as count FROM tracks WHERE AlbumID=?', [album_id])[0]['count']
+ return track_count
+
+
# THIS IS KIND OF A MESS AND PROBABLY NEEDS TO BE CLEANED UP
def preprocess(resultlist):
for result in resultlist:
+ headers = {'User-Agent': USER_AGENT}
- if result.provider in ["The Pirate Bay", "Old Pirate Bay"]:
- headers = {
- 'User-Agent':
- 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) \
- AppleWebKit/537.36 (KHTML, like Gecko) \
- Chrome/41.0.2243.2 Safari/537.36'
- }
- else:
- headers = {'User-Agent': USER_AGENT}
-
+ if result.kind == 'soulseek':
+ return True, result
+
if result.kind == 'torrent':
# rutracker always needs the torrent data
@@ -1962,12 +2029,24 @@ def preprocess(resultlist):
return True, result
# Download the torrent file
+
+ if result.provider in ["The Pirate Bay", "Old Pirate Bay"]:
+ headers = {
+ 'User-Agent':
+ 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) \
+ AppleWebKit/537.36 (KHTML, like Gecko) \
+ Chrome/41.0.2243.2 Safari/537.36'
+ }
+
return request.request_content(url=result.url, headers=headers), result
- if result.kind == 'magnet':
+ elif result.kind == 'magnet':
magnet_link = result.url
return "d10:magnet-uri%d:%se" % (len(magnet_link), magnet_link), result
+ elif result.kind == 'bandcamp':
+ return True, result
+
else:
if result.provider == 'headphones':
return request.request_content(
diff --git a/headphones/soulseek.py b/headphones/soulseek.py
new file mode 100644
index 00000000..2802c35c
--- /dev/null
+++ b/headphones/soulseek.py
@@ -0,0 +1,185 @@
+from collections import defaultdict, namedtuple
+import os
+import time
+import slskd_api
+import headphones
+from headphones import logger
+from datetime import datetime, timedelta
+
+Result = namedtuple('Result', ['title', 'size', 'user', 'provider', 'type', 'matches', 'bandwidth', 'hasFreeUploadSlot', 'queueLength', 'files', 'kind', 'url', 'folder'])
+
+def initialize_soulseek_client():
+ host = headphones.CONFIG.SOULSEEK_API_URL
+ api_key = headphones.CONFIG.SOULSEEK_API_KEY
+ return slskd_api.SlskdClient(host=host, api_key=api_key)
+
+ # Search logic, calling search and processing functions
+def search(artist, album, year, num_tracks, losslessOnly):
+ client = initialize_soulseek_client()
+
+ # Stage 1: Search with artist, album, year, and num_tracks
+ results = execute_search(client, artist, album, year, losslessOnly)
+ processed_results = process_results(results, losslessOnly, num_tracks)
+ if processed_results:
+ return processed_results
+
+    # Stage 2: If Stage 1 fails, search with artist and album only (year excluded); num_tracks still filters the results
+ logger.info("Soulseek search stage 1 did not meet criteria. Retrying without year...")
+ results = execute_search(client, artist, album, None, losslessOnly)
+ processed_results = process_results(results, losslessOnly, num_tracks)
+ if processed_results:
+ return processed_results
+
+    # Stage 3: Final attempt — repeat the artist/album search but ignore the track-count match when filtering results
+ logger.info("Soulseek search stage 2 did not meet criteria. Final attempt with only artist and album.")
+ results = execute_search(client, artist, album, None, losslessOnly)
+ processed_results = process_results(results, losslessOnly, num_tracks, ignore_track_count=True)
+
+ return processed_results
+
+def execute_search(client, artist, album, year, losslessOnly):
+ search_text = f"{artist} {album}"
+ if year:
+ search_text += f" {year}"
+ if losslessOnly:
+ search_text += ".flac"
+
+ # Actual search
+ search_response = client.searches.search_text(searchText=search_text, filterResponses=True)
+ search_id = search_response.get('id')
+
+ # Wait for search completion and return response
+ while not client.searches.state(id=search_id).get('isComplete'):
+ time.sleep(2)
+
+ return client.searches.search_responses(id=search_id)
+
+# Processing the search result passed
+def process_results(results, losslessOnly, num_tracks, ignore_track_count=False):
+ valid_extensions = {'.flac'} if losslessOnly else {'.mp3', '.flac'}
+ albums = defaultdict(lambda: {'files': [], 'user': None, 'hasFreeUploadSlot': None, 'queueLength': None, 'uploadSpeed': None})
+
+ # Extract info from the api response and combine files at album level
+ for result in results:
+ user = result.get('username')
+ hasFreeUploadSlot = result.get('hasFreeUploadSlot')
+ queueLength = result.get('queueLength')
+ uploadSpeed = result.get('uploadSpeed')
+
+ # Only handle .mp3 and .flac
+ for file in result.get('files', []):
+ filename = file.get('filename')
+ file_extension = os.path.splitext(filename)[1].lower()
+ if file_extension in valid_extensions:
+ album_directory = os.path.dirname(filename)
+ albums[album_directory]['files'].append(file)
+
+ # Update metadata only once per album_directory
+ if albums[album_directory]['user'] is None:
+ albums[album_directory].update({
+ 'user': user,
+ 'hasFreeUploadSlot': hasFreeUploadSlot,
+ 'queueLength': queueLength,
+ 'uploadSpeed': uploadSpeed,
+ })
+
+ # Filter albums based on num_tracks, add bunch of useful info to the compiled album
+ final_results = []
+ for directory, album_data in albums.items():
+ if ignore_track_count or len(album_data['files']) == num_tracks:
+ album_title = os.path.basename(directory)
+ total_size = sum(file.get('size', 0) for file in album_data['files'])
+ final_results.append(Result(
+ title=album_title,
+ size=int(total_size),
+ user=album_data['user'],
+ provider="soulseek",
+ type="soulseek",
+ matches=True,
+ bandwidth=album_data['uploadSpeed'],
+ hasFreeUploadSlot=album_data['hasFreeUploadSlot'],
+ queueLength=album_data['queueLength'],
+ files=album_data['files'],
+ kind='soulseek',
+ url='http://thisisnot.needed', # URL is needed in other parts of the program.
+ folder=os.path.basename(directory)
+ ))
+
+ return final_results
+
+
+def download(user, filelist):
+ client = initialize_soulseek_client()
+ client.transfers.enqueue(username=user, files=filelist)
+
+
+def download_completed():
+ client = initialize_soulseek_client()
+ all_downloads = client.transfers.get_all_downloads(includeRemoved=False)
+ album_completion_tracker = {} # Tracks completion state of each album's songs
+ album_errored_tracker = {} # Tracks albums with errored downloads
+
+ # Anything older than 24 hours will be canceled
+ cutoff_time = datetime.now() - timedelta(hours=24)
+
+ # Identify errored and completed albums
+ for download in all_downloads:
+ directories = download.get('directories', [])
+ for directory in directories:
+ album_part = directory.get('directory', '').split('\\')[-1]
+ files = directory.get('files', [])
+ for file_data in files:
+ state = file_data.get('state', '')
+ requested_at_str = file_data.get('requestedAt', '1900-01-01 00:00:00')
+ requested_at = parse_datetime(requested_at_str)
+
+ # Initialize or update album entry in trackers
+ if album_part not in album_completion_tracker:
+ album_completion_tracker[album_part] = {'total': 0, 'completed': 0, 'errored': 0}
+ if album_part not in album_errored_tracker:
+ album_errored_tracker[album_part] = False
+
+ album_completion_tracker[album_part]['total'] += 1
+
+ if 'Completed, Succeeded' in state:
+ album_completion_tracker[album_part]['completed'] += 1
+ elif 'Completed, Errored' in state or requested_at < cutoff_time:
+ album_completion_tracker[album_part]['errored'] += 1
+ album_errored_tracker[album_part] = True # Mark album as having errored downloads
+
+ # Identify errored albums
+ errored_albums = {album for album, errored in album_errored_tracker.items() if errored}
+
+ # Cancel downloads for errored albums
+ for download in all_downloads:
+ directories = download.get('directories', [])
+ for directory in directories:
+ album_part = directory.get('directory', '').split('\\')[-1]
+ files = directory.get('files', [])
+ for file_data in files:
+ if album_part in errored_albums:
+ # Extract 'id' and 'username' for each file to cancel the download
+ file_id = file_data.get('id', '')
+ username = file_data.get('username', '')
+ success = client.transfers.cancel_download(username, file_id)
+ if not success:
+ print(f"Failed to cancel download for file ID: {file_id}")
+
+ # Clear completed/canceled/errored stuff from client downloads
+ try:
+ client.transfers.remove_completed_downloads()
+ except Exception as e:
+ print(f"Failed to remove completed downloads: {e}")
+
+ # Identify completed albums
+ completed_albums = {album for album, counts in album_completion_tracker.items() if counts['total'] == counts['completed']}
+
+ # Return both completed and errored albums
+ return completed_albums, errored_albums
+
+
+def parse_datetime(datetime_string):
+    # Parse an ISO-8601 timestamp from the API, truncating fractional seconds to microsecond precision for strptime
+ if '.' in datetime_string:
+ datetime_string = datetime_string[:datetime_string.index('.')+7]
+ return datetime.strptime(datetime_string, '%Y-%m-%dT%H:%M:%S.%f')
\ No newline at end of file
diff --git a/headphones/webserve.py b/headphones/webserve.py
index 758b2267..005f4146 100644
--- a/headphones/webserve.py
+++ b/headphones/webserve.py
@@ -1183,6 +1183,7 @@ class WebInterface(object):
"deluge_password": headphones.CONFIG.DELUGE_PASSWORD,
"deluge_label": headphones.CONFIG.DELUGE_LABEL,
"deluge_done_directory": headphones.CONFIG.DELUGE_DONE_DIRECTORY,
+ "deluge_download_directory": headphones.CONFIG.DELUGE_DOWNLOAD_DIRECTORY,
"deluge_paused": checked(headphones.CONFIG.DELUGE_PAUSED),
"utorrent_host": headphones.CONFIG.UTORRENT_HOST,
"utorrent_username": headphones.CONFIG.UTORRENT_USERNAME,
@@ -1197,6 +1198,8 @@ class WebInterface(object):
"torrent_downloader_deluge": radio(headphones.CONFIG.TORRENT_DOWNLOADER, 3),
"torrent_downloader_qbittorrent": radio(headphones.CONFIG.TORRENT_DOWNLOADER, 4),
"download_dir": headphones.CONFIG.DOWNLOAD_DIR,
+ "soulseek_download_dir": headphones.CONFIG.SOULSEEK_DOWNLOAD_DIR,
+ "soulseek_incomplete_download_dir": headphones.CONFIG.SOULSEEK_INCOMPLETE_DOWNLOAD_DIR,
"use_blackhole": checked(headphones.CONFIG.BLACKHOLE),
"blackhole_dir": headphones.CONFIG.BLACKHOLE_DIR,
"usenet_retention": headphones.CONFIG.USENET_RETENTION,
@@ -1296,6 +1299,7 @@ class WebInterface(object):
"prefer_torrents_0": radio(headphones.CONFIG.PREFER_TORRENTS, 0),
"prefer_torrents_1": radio(headphones.CONFIG.PREFER_TORRENTS, 1),
"prefer_torrents_2": radio(headphones.CONFIG.PREFER_TORRENTS, 2),
+ "prefer_torrents_3": radio(headphones.CONFIG.PREFER_TORRENTS, 3),
"magnet_links_0": radio(headphones.CONFIG.MAGNET_LINKS, 0),
"magnet_links_1": radio(headphones.CONFIG.MAGNET_LINKS, 1),
"magnet_links_2": radio(headphones.CONFIG.MAGNET_LINKS, 2),
@@ -1413,7 +1417,12 @@ class WebInterface(object):
"join_enabled": checked(headphones.CONFIG.JOIN_ENABLED),
"join_onsnatch": checked(headphones.CONFIG.JOIN_ONSNATCH),
"join_apikey": headphones.CONFIG.JOIN_APIKEY,
- "join_deviceid": headphones.CONFIG.JOIN_DEVICEID
+ "join_deviceid": headphones.CONFIG.JOIN_DEVICEID,
+ "use_bandcamp": checked(headphones.CONFIG.BANDCAMP),
+ "bandcamp_dir": headphones.CONFIG.BANDCAMP_DIR,
+ 'soulseek_api_url': headphones.CONFIG.SOULSEEK_API_URL,
+ 'soulseek_api_key': headphones.CONFIG.SOULSEEK_API_KEY,
+ 'use_soulseek': checked(headphones.CONFIG.SOULSEEK)
}
for k, v in config.items():
@@ -1482,7 +1491,7 @@ class WebInterface(object):
"songkick_enabled", "songkick_filter_enabled",
"mpc_enabled", "email_enabled", "email_ssl", "email_tls", "email_onsnatch",
"customauth", "idtag", "deluge_paused",
- "join_enabled", "join_onsnatch"
+ "join_enabled", "join_onsnatch", "use_bandcamp"
]
for checked_config in checked_configs:
if checked_config not in kwargs:
diff --git a/lib/cherrypy/__init__.py b/lib/cherrypy/__init__.py
index 8e27c812..49e955f8 100644
--- a/lib/cherrypy/__init__.py
+++ b/lib/cherrypy/__init__.py
@@ -57,9 +57,11 @@ These API's are described in the `CherryPy specification
"""
try:
- import pkg_resources
+ import importlib.metadata as importlib_metadata
except ImportError:
- pass
+ # fall back for python <= 3.7
+ # This try/except can be removed with py <= 3.7 support
+ import importlib_metadata
from threading import local as _local
@@ -109,7 +111,7 @@ tree = _cptree.Tree()
try:
- __version__ = pkg_resources.require('cherrypy')[0].version
+ __version__ = importlib_metadata.version('cherrypy')
except Exception:
__version__ = 'unknown'
@@ -181,24 +183,28 @@ def quickstart(root=None, script_name='', config=None):
class _Serving(_local):
"""An interface for registering request and response objects.
- Rather than have a separate "thread local" object for the request and
- the response, this class works as a single threadlocal container for
- both objects (and any others which developers wish to define). In this
- way, we can easily dump those objects when we stop/start a new HTTP
- conversation, yet still refer to them as module-level globals in a
- thread-safe way.
+ Rather than have a separate "thread local" object for the request
+ and the response, this class works as a single threadlocal container
+ for both objects (and any others which developers wish to define).
+ In this way, we can easily dump those objects when we stop/start a
+ new HTTP conversation, yet still refer to them as module-level
+ globals in a thread-safe way.
"""
request = _cprequest.Request(_httputil.Host('127.0.0.1', 80),
_httputil.Host('127.0.0.1', 1111))
+ """The request object for the current thread.
+
+ In the main thread, and any threads which are not receiving HTTP
+ requests, this is None.
"""
- The request object for the current thread. In the main thread,
- and any threads which are not receiving HTTP requests, this is None."""
response = _cprequest.Response()
+ """The response object for the current thread.
+
+ In the main thread, and any threads which are not receiving HTTP
+ requests, this is None.
"""
- The response object for the current thread. In the main thread,
- and any threads which are not receiving HTTP requests, this is None."""
def load(self, request, response):
self.request = request
@@ -316,8 +322,8 @@ class _GlobalLogManager(_cplogging.LogManager):
def __call__(self, *args, **kwargs):
"""Log the given message to the app.log or global log.
- Log the given message to the app.log or global
- log as appropriate.
+ Log the given message to the app.log or global log as
+ appropriate.
"""
# Do NOT use try/except here. See
# https://github.com/cherrypy/cherrypy/issues/945
@@ -330,8 +336,8 @@ class _GlobalLogManager(_cplogging.LogManager):
def access(self):
"""Log an access message to the app.log or global log.
- Log the given message to the app.log or global
- log as appropriate.
+ Log the given message to the app.log or global log as
+ appropriate.
"""
try:
return request.app.log.access()
diff --git a/lib/cherrypy/_cpchecker.py b/lib/cherrypy/_cpchecker.py
index f26f319c..096b19c3 100644
--- a/lib/cherrypy/_cpchecker.py
+++ b/lib/cherrypy/_cpchecker.py
@@ -313,7 +313,10 @@ class Checker(object):
# -------------------- Specific config warnings -------------------- #
def check_localhost(self):
- """Warn if any socket_host is 'localhost'. See #711."""
+ """Warn if any socket_host is 'localhost'.
+
+ See #711.
+ """
for k, v in cherrypy.config.items():
if k == 'server.socket_host' and v == 'localhost':
warnings.warn("The use of 'localhost' as a socket host can "
diff --git a/lib/cherrypy/_cpconfig.py b/lib/cherrypy/_cpconfig.py
index 8e3fd612..c22937d3 100644
--- a/lib/cherrypy/_cpconfig.py
+++ b/lib/cherrypy/_cpconfig.py
@@ -1,5 +1,4 @@
-"""
-Configuration system for CherryPy.
+"""Configuration system for CherryPy.
Configuration in CherryPy is implemented via dictionaries. Keys are strings
which name the mapped value, which may be of any type.
@@ -132,8 +131,8 @@ def _if_filename_register_autoreload(ob):
def merge(base, other):
"""Merge one app config (from a dict, file, or filename) into another.
- If the given config is a filename, it will be appended to
- the list of files to monitor for "autoreload" changes.
+ If the given config is a filename, it will be appended to the list
+ of files to monitor for "autoreload" changes.
"""
_if_filename_register_autoreload(other)
diff --git a/lib/cherrypy/_cpdispatch.py b/lib/cherrypy/_cpdispatch.py
index 5c506e99..5a3a8ad6 100644
--- a/lib/cherrypy/_cpdispatch.py
+++ b/lib/cherrypy/_cpdispatch.py
@@ -1,9 +1,10 @@
"""CherryPy dispatchers.
A 'dispatcher' is the object which looks up the 'page handler' callable
-and collects config for the current request based on the path_info, other
-request attributes, and the application architecture. The core calls the
-dispatcher as early as possible, passing it a 'path_info' argument.
+and collects config for the current request based on the path_info,
+other request attributes, and the application architecture. The core
+calls the dispatcher as early as possible, passing it a 'path_info'
+argument.
The default dispatcher discovers the page handler by matching path_info
to a hierarchical arrangement of objects, starting at request.app.root.
@@ -21,7 +22,6 @@ import cherrypy
class PageHandler(object):
-
"""Callable which sets response.body."""
def __init__(self, callable, *args, **kwargs):
@@ -64,8 +64,7 @@ class PageHandler(object):
def test_callable_spec(callable, callable_args, callable_kwargs):
- """
- Inspect callable and test to see if the given args are suitable for it.
+ """Inspect callable and test to see if the given args are suitable for it.
When an error occurs during the handler's invoking stage there are 2
erroneous cases:
@@ -252,16 +251,16 @@ else:
class Dispatcher(object):
-
"""CherryPy Dispatcher which walks a tree of objects to find a handler.
- The tree is rooted at cherrypy.request.app.root, and each hierarchical
- component in the path_info argument is matched to a corresponding nested
- attribute of the root object. Matching handlers must have an 'exposed'
- attribute which evaluates to True. The special method name "index"
- matches a URI which ends in a slash ("/"). The special method name
- "default" may match a portion of the path_info (but only when no longer
- substring of the path_info matches some other object).
+ The tree is rooted at cherrypy.request.app.root, and each
+ hierarchical component in the path_info argument is matched to a
+ corresponding nested attribute of the root object. Matching handlers
+ must have an 'exposed' attribute which evaluates to True. The
+ special method name "index" matches a URI which ends in a slash
+ ("/"). The special method name "default" may match a portion of the
+ path_info (but only when no longer substring of the path_info
+ matches some other object).
This is the default, built-in dispatcher for CherryPy.
"""
@@ -306,9 +305,9 @@ class Dispatcher(object):
The second object returned will be a list of names which are
'virtual path' components: parts of the URL which are dynamic,
- and were not used when looking up the handler.
- These virtual path components are passed to the handler as
- positional arguments.
+ and were not used when looking up the handler. These virtual
+ path components are passed to the handler as positional
+ arguments.
"""
request = cherrypy.serving.request
app = request.app
@@ -448,13 +447,11 @@ class Dispatcher(object):
class MethodDispatcher(Dispatcher):
-
"""Additional dispatch based on cherrypy.request.method.upper().
- Methods named GET, POST, etc will be called on an exposed class.
- The method names must be all caps; the appropriate Allow header
- will be output showing all capitalized method names as allowable
- HTTP verbs.
+ Methods named GET, POST, etc will be called on an exposed class. The
+ method names must be all caps; the appropriate Allow header will be
+ output showing all capitalized method names as allowable HTTP verbs.
Note that the containing class must be exposed, not the methods.
"""
@@ -492,16 +489,14 @@ class MethodDispatcher(Dispatcher):
class RoutesDispatcher(object):
-
"""A Routes based dispatcher for CherryPy."""
def __init__(self, full_result=False, **mapper_options):
- """
- Routes dispatcher
+ """Routes dispatcher.
- Set full_result to True if you wish the controller
- and the action to be passed on to the page handler
- parameters. By default they won't be.
+ Set full_result to True if you wish the controller and the
+ action to be passed on to the page handler parameters. By
+ default they won't be.
"""
import routes
self.full_result = full_result
@@ -617,8 +612,7 @@ def XMLRPCDispatcher(next_dispatcher=Dispatcher()):
def VirtualHost(next_dispatcher=Dispatcher(), use_x_forwarded_host=True,
**domains):
- """
- Select a different handler based on the Host header.
+ """Select a different handler based on the Host header.
This can be useful when running multiple sites within one CP server.
It allows several domains to point to different parts of a single
diff --git a/lib/cherrypy/_cperror.py b/lib/cherrypy/_cperror.py
index f6ff2913..203fabf5 100644
--- a/lib/cherrypy/_cperror.py
+++ b/lib/cherrypy/_cperror.py
@@ -136,19 +136,17 @@ from cherrypy.lib import httputil as _httputil
class CherryPyException(Exception):
-
"""A base class for CherryPy exceptions."""
pass
class InternalRedirect(CherryPyException):
-
"""Exception raised to switch to the handler for a different URL.
- This exception will redirect processing to another path within the site
- (without informing the client). Provide the new path as an argument when
- raising the exception. Provide any params in the querystring for the new
- URL.
+ This exception will redirect processing to another path within the
+ site (without informing the client). Provide the new path as an
+ argument when raising the exception. Provide any params in the
+ querystring for the new URL.
"""
def __init__(self, path, query_string=''):
@@ -173,7 +171,6 @@ class InternalRedirect(CherryPyException):
class HTTPRedirect(CherryPyException):
-
"""Exception raised when the request should be redirected.
This exception will force a HTTP redirect to the URL or URL's you give it.
@@ -202,7 +199,7 @@ class HTTPRedirect(CherryPyException):
"""The list of URL's to emit."""
encoding = 'utf-8'
- """The encoding when passed urls are not native strings"""
+ """The encoding when passed urls are not native strings."""
def __init__(self, urls, status=None, encoding=None):
self.urls = abs_urls = [
@@ -230,8 +227,7 @@ class HTTPRedirect(CherryPyException):
@classproperty
def default_status(cls):
- """
- The default redirect status for the request.
+ """The default redirect status for the request.
RFC 2616 indicates a 301 response code fits our goal; however,
browser support for 301 is quite messy. Use 302/303 instead. See
@@ -249,8 +245,9 @@ class HTTPRedirect(CherryPyException):
"""Modify cherrypy.response status, headers, and body to represent
self.
- CherryPy uses this internally, but you can also use it to create an
- HTTPRedirect object and set its output without *raising* the exception.
+ CherryPy uses this internally, but you can also use it to create
+ an HTTPRedirect object and set its output without *raising* the
+ exception.
"""
response = cherrypy.serving.response
response.status = status = self.status
@@ -339,7 +336,6 @@ def clean_headers(status):
class HTTPError(CherryPyException):
-
"""Exception used to return an HTTP error code (4xx-5xx) to the client.
This exception can be used to automatically send a response using a
@@ -358,7 +354,9 @@ class HTTPError(CherryPyException):
"""
status = None
- """The HTTP status code. May be of type int or str (with a Reason-Phrase).
+ """The HTTP status code.
+
+ May be of type int or str (with a Reason-Phrase).
"""
code = None
@@ -386,8 +384,9 @@ class HTTPError(CherryPyException):
"""Modify cherrypy.response status, headers, and body to represent
self.
- CherryPy uses this internally, but you can also use it to create an
- HTTPError object and set its output without *raising* the exception.
+ CherryPy uses this internally, but you can also use it to create
+ an HTTPError object and set its output without *raising* the
+ exception.
"""
response = cherrypy.serving.response
@@ -426,11 +425,10 @@ class HTTPError(CherryPyException):
class NotFound(HTTPError):
-
"""Exception raised when a URL could not be mapped to any handler (404).
- This is equivalent to raising
- :class:`HTTPError("404 Not Found") `.
+ This is equivalent to raising :class:`HTTPError("404 Not Found")
+ `.
"""
def __init__(self, path=None):
@@ -477,8 +475,8 @@ _HTTPErrorTemplate = ''' cherrypy.server -> HTTPServer.
+ cheroot has been designed to not reference CherryPy in any way, so
+ that it can be used in other frameworks and applications. Therefore,
+ we wrap it here, so we can apply some attributes from config ->
+ cherrypy.server -> HTTPServer.
"""
def __init__(self, server_adapter=cherrypy.server):
diff --git a/lib/cherrypy/_cpreqbody.py b/lib/cherrypy/_cpreqbody.py
index 4d3cefe7..7e0d98be 100644
--- a/lib/cherrypy/_cpreqbody.py
+++ b/lib/cherrypy/_cpreqbody.py
@@ -248,7 +248,10 @@ def process_multipart_form_data(entity):
def _old_process_multipart(entity):
- """The behavior of 3.2 and lower. Deprecated and will be changed in 3.3."""
+ """The behavior of 3.2 and lower.
+
+ Deprecated and will be changed in 3.3.
+ """
process_multipart(entity)
params = entity.params
@@ -277,7 +280,6 @@ def _old_process_multipart(entity):
# -------------------------------- Entities --------------------------------- #
class Entity(object):
-
"""An HTTP request body, or MIME multipart body.
This class collects information about the HTTP request entity. When a
@@ -346,13 +348,15 @@ class Entity(object):
content_type = None
"""The value of the Content-Type request header.
- If the Entity is part of a multipart payload, this will be the Content-Type
- given in the MIME headers for this part.
+ If the Entity is part of a multipart payload, this will be the
+ Content-Type given in the MIME headers for this part.
"""
default_content_type = 'application/x-www-form-urlencoded'
"""This defines a default ``Content-Type`` to use if no Content-Type header
- is given. The empty string is used for RequestBody, which results in the
+ is given.
+
+ The empty string is used for RequestBody, which results in the
request body not being read or parsed at all. This is by design; a missing
``Content-Type`` header in the HTTP request entity is an error at best,
and a security hole at worst. For multipart parts, however, the MIME spec
@@ -402,8 +406,8 @@ class Entity(object):
part_class = None
"""The class used for multipart parts.
- You can replace this with custom subclasses to alter the processing of
- multipart parts.
+ You can replace this with custom subclasses to alter the processing
+ of multipart parts.
"""
def __init__(self, fp, headers, params=None, parts=None):
@@ -509,7 +513,8 @@ class Entity(object):
"""Return a file-like object into which the request body will be read.
By default, this will return a TemporaryFile. Override as needed.
- See also :attr:`cherrypy._cpreqbody.Part.maxrambytes`."""
+ See also :attr:`cherrypy._cpreqbody.Part.maxrambytes`.
+ """
return tempfile.TemporaryFile()
def fullvalue(self):
@@ -525,7 +530,7 @@ class Entity(object):
return value
def decode_entity(self, value):
- """Return a given byte encoded value as a string"""
+ """Return a given byte encoded value as a string."""
for charset in self.attempt_charsets:
try:
value = value.decode(charset)
@@ -569,7 +574,6 @@ class Entity(object):
class Part(Entity):
-
"""A MIME part entity, part of a multipart entity."""
# "The default character set, which must be assumed in the absence of a
@@ -653,8 +657,8 @@ class Part(Entity):
def read_lines_to_boundary(self, fp_out=None):
"""Read bytes from self.fp and return or write them to a file.
- If the 'fp_out' argument is None (the default), all bytes read are
- returned in a single byte string.
+ If the 'fp_out' argument is None (the default), all bytes read
+ are returned in a single byte string.
If the 'fp_out' argument is not None, it must be a file-like
object that supports the 'write' method; all bytes read will be
@@ -755,15 +759,15 @@ class SizedReader:
def read(self, size=None, fp_out=None):
"""Read bytes from the request body and return or write them to a file.
- A number of bytes less than or equal to the 'size' argument are read
- off the socket. The actual number of bytes read are tracked in
- self.bytes_read. The number may be smaller than 'size' when 1) the
- client sends fewer bytes, 2) the 'Content-Length' request header
- specifies fewer bytes than requested, or 3) the number of bytes read
- exceeds self.maxbytes (in which case, 413 is raised).
+ A number of bytes less than or equal to the 'size' argument are
+ read off the socket. The actual number of bytes read are tracked
+ in self.bytes_read. The number may be smaller than 'size' when
+ 1) the client sends fewer bytes, 2) the 'Content-Length' request
+ header specifies fewer bytes than requested, or 3) the number of
+ bytes read exceeds self.maxbytes (in which case, 413 is raised).
- If the 'fp_out' argument is None (the default), all bytes read are
- returned in a single byte string.
+ If the 'fp_out' argument is None (the default), all bytes read
+ are returned in a single byte string.
If the 'fp_out' argument is not None, it must be a file-like
object that supports the 'write' method; all bytes read will be
@@ -918,7 +922,6 @@ class SizedReader:
class RequestBody(Entity):
-
"""The entity of the HTTP request."""
bufsize = 8 * 1024
diff --git a/lib/cherrypy/_cprequest.py b/lib/cherrypy/_cprequest.py
index a661112c..a4ad298b 100644
--- a/lib/cherrypy/_cprequest.py
+++ b/lib/cherrypy/_cprequest.py
@@ -16,7 +16,6 @@ from cherrypy.lib import httputil, reprconf, encoding
class Hook(object):
-
"""A callback and its metadata: failsafe, priority, and kwargs."""
callback = None
@@ -30,10 +29,12 @@ class Hook(object):
from the same call point raise exceptions."""
priority = 50
+ """Defines the order of execution for a list of Hooks.
+
+ Priority numbers should be limited to the closed interval [0, 100],
+ but values outside this range are acceptable, as are fractional
+ values.
"""
- Defines the order of execution for a list of Hooks. Priority numbers
- should be limited to the closed interval [0, 100], but values outside
- this range are acceptable, as are fractional values."""
kwargs = {}
"""
@@ -74,7 +75,6 @@ class Hook(object):
class HookMap(dict):
-
"""A map of call points to lists of callbacks (Hook objects)."""
def __new__(cls, points=None):
@@ -190,23 +190,23 @@ hookpoints = ['on_start_resource', 'before_request_body',
class Request(object):
-
"""An HTTP request.
- This object represents the metadata of an HTTP request message;
- that is, it contains attributes which describe the environment
- in which the request URL, headers, and body were sent (if you
- want tools to interpret the headers and body, those are elsewhere,
- mostly in Tools). This 'metadata' consists of socket data,
- transport characteristics, and the Request-Line. This object
- also contains data regarding the configuration in effect for
- the given URL, and the execution plan for generating a response.
+ This object represents the metadata of an HTTP request message; that
+ is, it contains attributes which describe the environment in which
+ the request URL, headers, and body were sent (if you want tools to
+ interpret the headers and body, those are elsewhere, mostly in
+ Tools). This 'metadata' consists of socket data, transport
+ characteristics, and the Request-Line. This object also contains
+ data regarding the configuration in effect for the given URL, and
+ the execution plan for generating a response.
"""
prev = None
+ """The previous Request object (if any).
+
+ This should be None unless we are processing an InternalRedirect.
"""
- The previous Request object (if any). This should be None
- unless we are processing an InternalRedirect."""
# Conversation/connection attributes
local = httputil.Host('127.0.0.1', 80)
@@ -216,9 +216,10 @@ class Request(object):
'An httputil.Host(ip, port, hostname) object for the client socket.'
scheme = 'http'
+ """The protocol used between client and server.
+
+ In most cases, this will be either 'http' or 'https'.
"""
- The protocol used between client and server. In most cases,
- this will be either 'http' or 'https'."""
server_protocol = 'HTTP/1.1'
"""
@@ -227,25 +228,30 @@ class Request(object):
base = ''
"""The (scheme://host) portion of the requested URL.
+
In some cases (e.g. when proxying via mod_rewrite), this may contain
path segments which cherrypy.url uses when constructing url's, but
- which otherwise are ignored by CherryPy. Regardless, this value
- MUST NOT end in a slash."""
+ which otherwise are ignored by CherryPy. Regardless, this value MUST
+ NOT end in a slash.
+ """
# Request-Line attributes
request_line = ''
+ """The complete Request-Line received from the client.
+
+ This is a single string consisting of the request method, URI, and
+ protocol version (joined by spaces). Any final CRLF is removed.
"""
- The complete Request-Line received from the client. This is a
- single string consisting of the request method, URI, and protocol
- version (joined by spaces). Any final CRLF is removed."""
method = 'GET'
+ """Indicates the HTTP method to be performed on the resource identified by
+ the Request-URI.
+
+ Common methods include GET, HEAD, POST, PUT, and DELETE. CherryPy
+ allows any extension method; however, various HTTP servers and
+ gateways may restrict the set of allowable methods. CherryPy
+ applications SHOULD restrict the set (on a per-URI basis).
"""
- Indicates the HTTP method to be performed on the resource identified
- by the Request-URI. Common methods include GET, HEAD, POST, PUT, and
- DELETE. CherryPy allows any extension method; however, various HTTP
- servers and gateways may restrict the set of allowable methods.
- CherryPy applications SHOULD restrict the set (on a per-URI basis)."""
query_string = ''
"""
@@ -277,22 +283,26 @@ class Request(object):
A dict which combines query string (GET) and request entity (POST)
variables. This is populated in two stages: GET params are added
before the 'on_start_resource' hook, and POST params are added
- between the 'before_request_body' and 'before_handler' hooks."""
+ between the 'before_request_body' and 'before_handler' hooks.
+ """
# Message attributes
header_list = []
+ """A list of the HTTP request headers as (name, value) tuples.
+
+ In general, you should use request.headers (a dict) instead.
"""
- A list of the HTTP request headers as (name, value) tuples.
- In general, you should use request.headers (a dict) instead."""
headers = httputil.HeaderMap()
- """
- A dict-like object containing the request headers. Keys are header
+ """A dict-like object containing the request headers.
+
+ Keys are header
names (in Title-Case format); however, you may get and set them in
a case-insensitive manner. That is, headers['Content-Type'] and
headers['content-type'] refer to the same value. Values are header
values (decoded according to :rfc:`2047` if necessary). See also:
- httputil.HeaderMap, httputil.HeaderElement."""
+ httputil.HeaderMap, httputil.HeaderElement.
+ """
cookie = SimpleCookie()
"""See help(Cookie)."""
@@ -336,7 +346,8 @@ class Request(object):
or multipart, this will be None. Otherwise, this will be an instance
of :class:`RequestBody` (which you
can .read()); this value is set between the 'before_request_body' and
- 'before_handler' hooks (assuming that process_request_body is True)."""
+ 'before_handler' hooks (assuming that process_request_body is True).
+ """
# Dispatch attributes
dispatch = cherrypy.dispatch.Dispatcher()
@@ -347,23 +358,24 @@ class Request(object):
calls the dispatcher as early as possible, passing it a 'path_info'
argument.
- The default dispatcher discovers the page handler by matching path_info
- to a hierarchical arrangement of objects, starting at request.app.root.
- See help(cherrypy.dispatch) for more information."""
+ The default dispatcher discovers the page handler by matching
+ path_info to a hierarchical arrangement of objects, starting at
+ request.app.root. See help(cherrypy.dispatch) for more information.
+ """
script_name = ''
- """
- The 'mount point' of the application which is handling this request.
+ """The 'mount point' of the application which is handling this request.
This attribute MUST NOT end in a slash. If the script_name refers to
the root of the URI, it MUST be an empty string (not "/").
"""
path_info = '/'
+ """The 'relative path' portion of the Request-URI.
+
+ This is relative to the script_name ('mount point') of the
+ application which is handling this request.
"""
- The 'relative path' portion of the Request-URI. This is relative
- to the script_name ('mount point') of the application which is
- handling this request."""
login = None
"""
@@ -391,14 +403,16 @@ class Request(object):
of the form: {Toolbox.namespace: {Tool.name: config dict}}."""
config = None
+ """A flat dict of all configuration entries which apply to the current
+ request.
+
+ These entries are collected from global config, application config
+ (based on request.path_info), and from handler config (exactly how
+ is governed by the request.dispatch object in effect for this
+ request; by default, handler config can be attached anywhere in the
+ tree between request.app.root and the final handler, and inherits
+ downward).
"""
- A flat dict of all configuration entries which apply to the
- current request. These entries are collected from global config,
- application config (based on request.path_info), and from handler
- config (exactly how is governed by the request.dispatch object in
- effect for this request; by default, handler config can be attached
- anywhere in the tree between request.app.root and the final handler,
- and inherits downward)."""
is_index = None
"""
@@ -409,13 +423,14 @@ class Request(object):
the trailing slash. See cherrypy.tools.trailing_slash."""
hooks = HookMap(hookpoints)
- """
- A HookMap (dict-like object) of the form: {hookpoint: [hook, ...]}.
+ """A HookMap (dict-like object) of the form: {hookpoint: [hook, ...]}.
+
Each key is a str naming the hook point, and each value is a list
of hooks which will be called at that hook point during this request.
The list of hooks is generally populated as early as possible (mostly
from Tools specified in config), but may be extended at any time.
- See also: _cprequest.Hook, _cprequest.HookMap, and cherrypy.tools."""
+ See also: _cprequest.Hook, _cprequest.HookMap, and cherrypy.tools.
+ """
error_response = cherrypy.HTTPError(500).set_response
"""
@@ -428,12 +443,11 @@ class Request(object):
error response to the user-agent."""
error_page = {}
- """
- A dict of {error code: response filename or callable} pairs.
+ """A dict of {error code: response filename or callable} pairs.
The error code must be an int representing a given HTTP error code,
- or the string 'default', which will be used if no matching entry
- is found for a given numeric code.
+ or the string 'default', which will be used if no matching entry is
+ found for a given numeric code.
If a filename is provided, the file should contain a Python string-
formatting template, and can expect by default to receive format
@@ -447,8 +461,8 @@ class Request(object):
iterable of strings which will be set to response.body. It may also
override headers or perform any other processing.
- If no entry is given for an error code, and no 'default' entry exists,
- a default template will be used.
+ If no entry is given for an error code, and no 'default' entry
+ exists, a default template will be used.
"""
show_tracebacks = True
@@ -473,9 +487,10 @@ class Request(object):
"""True once the close method has been called, False otherwise."""
stage = None
+ """A string containing the stage reached in the request-handling process.
+
+ This is useful when debugging a live server with hung requests.
"""
- A string containing the stage reached in the request-handling process.
- This is useful when debugging a live server with hung requests."""
unique_id = None
"""A lazy object generating and memorizing UUID4 on ``str()`` render."""
@@ -492,9 +507,10 @@ class Request(object):
server_protocol='HTTP/1.1'):
"""Populate a new Request object.
- local_host should be an httputil.Host object with the server info.
- remote_host should be an httputil.Host object with the client info.
- scheme should be a string, either "http" or "https".
+ local_host should be an httputil.Host object with the server
+ info. remote_host should be an httputil.Host object with the
+ client info. scheme should be a string, either "http" or
+ "https".
"""
self.local = local_host
self.remote = remote_host
@@ -514,7 +530,10 @@ class Request(object):
self.unique_id = LazyUUID4()
def close(self):
- """Run cleanup code. (Core)"""
+ """Run cleanup code.
+
+ (Core)
+ """
if not self.closed:
self.closed = True
self.stage = 'on_end_request'
@@ -551,7 +570,6 @@ class Request(object):
Consumer code (HTTP servers) should then access these response
attributes to build the outbound stream.
-
"""
response = cherrypy.serving.response
self.stage = 'run'
@@ -631,7 +649,10 @@ class Request(object):
return response
def respond(self, path_info):
- """Generate a response for the resource at self.path_info. (Core)"""
+ """Generate a response for the resource at self.path_info.
+
+ (Core)
+ """
try:
try:
try:
@@ -702,7 +723,10 @@ class Request(object):
response.finalize()
def process_query_string(self):
- """Parse the query string into Python structures. (Core)"""
+ """Parse the query string into Python structures.
+
+ (Core)
+ """
try:
p = httputil.parse_query_string(
self.query_string, encoding=self.query_string_encoding)
@@ -715,7 +739,10 @@ class Request(object):
self.params.update(p)
def process_headers(self):
- """Parse HTTP header data into Python structures. (Core)"""
+ """Parse HTTP header data into Python structures.
+
+ (Core)
+ """
# Process the headers into self.headers
headers = self.headers
for name, value in self.header_list:
@@ -751,7 +778,10 @@ class Request(object):
self.base = '%s://%s' % (self.scheme, host)
def get_resource(self, path):
- """Call a dispatcher (which sets self.handler and .config). (Core)"""
+ """Call a dispatcher (which sets self.handler and .config).
+
+ (Core)
+ """
# First, see if there is a custom dispatch at this URI. Custom
# dispatchers can only be specified in app.config, not in _cp_config
# (since custom dispatchers may not even have an app.root).
@@ -762,7 +792,10 @@ class Request(object):
dispatch(path)
def handle_error(self):
- """Handle the last unanticipated exception. (Core)"""
+ """Handle the last unanticipated exception.
+
+ (Core)
+ """
try:
self.hooks.run('before_error_response')
if self.error_response:
@@ -776,7 +809,6 @@ class Request(object):
class ResponseBody(object):
-
"""The body of the HTTP response (the response entity)."""
unicode_err = ('Page handlers MUST return bytes. Use tools.encode '
@@ -802,18 +834,18 @@ class ResponseBody(object):
class Response(object):
-
"""An HTTP Response, including status, headers, and body."""
status = ''
"""The HTTP Status-Code and Reason-Phrase."""
header_list = []
- """
- A list of the HTTP response headers as (name, value) tuples.
+ """A list of the HTTP response headers as (name, value) tuples.
+
In general, you should use response.headers (a dict) instead. This
attribute is generated from response.headers and is not valid until
- after the finalize phase."""
+ after the finalize phase.
+ """
headers = httputil.HeaderMap()
"""
@@ -833,7 +865,10 @@ class Response(object):
"""The body (entity) of the HTTP response."""
time = None
- """The value of time.time() when created. Use in HTTP dates."""
+ """The value of time.time() when created.
+
+ Use in HTTP dates.
+ """
stream = False
"""If False, buffer the response body."""
@@ -861,15 +896,15 @@ class Response(object):
return new_body
def _flush_body(self):
- """
- Discard self.body but consume any generator such that
- any finalization can occur, such as is required by
- caching.tee_output().
- """
+ """Discard self.body but consume any generator such that any
+ finalization can occur, such as is required by caching.tee_output()."""
consume(iter(self.body))
def finalize(self):
- """Transform headers (and cookies) into self.header_list. (Core)"""
+ """Transform headers (and cookies) into self.header_list.
+
+ (Core)
+ """
try:
code, reason, _ = httputil.valid_status(self.status)
except ValueError:
diff --git a/lib/cherrypy/_cpserver.py b/lib/cherrypy/_cpserver.py
index 5f8d98fa..62331673 100644
--- a/lib/cherrypy/_cpserver.py
+++ b/lib/cherrypy/_cpserver.py
@@ -50,7 +50,8 @@ class Server(ServerAdapter):
"""If given, the name of the UNIX socket to use instead of TCP/IP.
When this option is not None, the `socket_host` and `socket_port` options
- are ignored."""
+ are ignored.
+ """
socket_queue_size = 5
"""The 'backlog' argument to socket.listen(); specifies the maximum number
@@ -79,17 +80,24 @@ class Server(ServerAdapter):
"""The number of worker threads to start up in the pool."""
thread_pool_max = -1
- """The maximum size of the worker-thread pool. Use -1 to indicate no limit.
+ """The maximum size of the worker-thread pool.
+
+ Use -1 to indicate no limit.
"""
max_request_header_size = 500 * 1024
"""The maximum number of bytes allowable in the request headers.
- If exceeded, the HTTP server should return "413 Request Entity Too Large".
+
+ If exceeded, the HTTP server should return "413 Request Entity Too
+ Large".
"""
max_request_body_size = 100 * 1024 * 1024
- """The maximum number of bytes allowable in the request body. If exceeded,
- the HTTP server should return "413 Request Entity Too Large"."""
+ """The maximum number of bytes allowable in the request body.
+
+ If exceeded, the HTTP server should return "413 Request Entity Too
+ Large".
+ """
instance = None
"""If not None, this should be an HTTP server instance (such as
@@ -119,7 +127,8 @@ class Server(ServerAdapter):
the builtin WSGI server. Builtin options are: 'builtin' (to
use the SSL library built into recent versions of Python).
You may also register your own classes in the
- cheroot.server.ssl_adapters dict."""
+ cheroot.server.ssl_adapters dict.
+ """
statistics = False
"""Turns statistics-gathering on or off for aware HTTP servers."""
@@ -129,11 +138,13 @@ class Server(ServerAdapter):
wsgi_version = (1, 0)
"""The WSGI version tuple to use with the builtin WSGI server.
- The provided options are (1, 0) [which includes support for PEP 3333,
- which declares it covers WSGI version 1.0.1 but still mandates the
- wsgi.version (1, 0)] and ('u', 0), an experimental unicode version.
- You may create and register your own experimental versions of the WSGI
- protocol by adding custom classes to the cheroot.server.wsgi_gateways dict.
+
+ The provided options are (1, 0) [which includes support for PEP
+ 3333, which declares it covers WSGI version 1.0.1 but still mandates
+ the wsgi.version (1, 0)] and ('u', 0), an experimental unicode
+ version. You may create and register your own experimental versions
+ of the WSGI protocol by adding custom classes to the
+ cheroot.server.wsgi_gateways dict.
"""
peercreds = False
@@ -184,7 +195,8 @@ class Server(ServerAdapter):
def bind_addr(self):
"""Return bind address.
- A (host, port) tuple for TCP sockets or a str for Unix domain sockts.
+ A (host, port) tuple for TCP sockets or a str for Unix domain
+ sockets.
"""
if self.socket_file:
return self.socket_file
diff --git a/lib/cherrypy/_cptools.py b/lib/cherrypy/_cptools.py
index 716f99a4..e47c046e 100644
--- a/lib/cherrypy/_cptools.py
+++ b/lib/cherrypy/_cptools.py
@@ -1,7 +1,7 @@
"""CherryPy tools. A "tool" is any helper, adapted to CP.
-Tools are usually designed to be used in a variety of ways (although some
-may only offer one if they choose):
+Tools are usually designed to be used in a variety of ways (although
+some may only offer one if they choose):
Library calls
All tools are callables that can be used wherever needed.
@@ -48,10 +48,10 @@ _attr_error = (
class Tool(object):
-
"""A registered function for use with CherryPy request-processing hooks.
- help(tool.callable) should give you more information about this Tool.
+ help(tool.callable) should give you more information about this
+ Tool.
"""
namespace = 'tools'
@@ -135,8 +135,8 @@ class Tool(object):
def _setup(self):
"""Hook this tool into cherrypy.request.
- The standard CherryPy request object will automatically call this
- method when the tool is "turned on" in config.
+ The standard CherryPy request object will automatically call
+ this method when the tool is "turned on" in config.
"""
conf = self._merged_args()
p = conf.pop('priority', None)
@@ -147,15 +147,15 @@ class Tool(object):
class HandlerTool(Tool):
-
"""Tool which is called 'before main', that may skip normal handlers.
- If the tool successfully handles the request (by setting response.body),
- if should return True. This will cause CherryPy to skip any 'normal' page
- handler. If the tool did not handle the request, it should return False
- to tell CherryPy to continue on and call the normal page handler. If the
- tool is declared AS a page handler (see the 'handler' method), returning
- False will raise NotFound.
+ If the tool successfully handles the request (by setting
+ response.body), if should return True. This will cause CherryPy to
+ skip any 'normal' page handler. If the tool did not handle the
+ request, it should return False to tell CherryPy to continue on and
+ call the normal page handler. If the tool is declared AS a page
+ handler (see the 'handler' method), returning False will raise
+ NotFound.
"""
def __init__(self, callable, name=None):
@@ -185,8 +185,8 @@ class HandlerTool(Tool):
def _setup(self):
"""Hook this tool into cherrypy.request.
- The standard CherryPy request object will automatically call this
- method when the tool is "turned on" in config.
+ The standard CherryPy request object will automatically call
+ this method when the tool is "turned on" in config.
"""
conf = self._merged_args()
p = conf.pop('priority', None)
@@ -197,7 +197,6 @@ class HandlerTool(Tool):
class HandlerWrapperTool(Tool):
-
"""Tool which wraps request.handler in a provided wrapper function.
The 'newhandler' arg must be a handler wrapper function that takes a
@@ -232,7 +231,6 @@ class HandlerWrapperTool(Tool):
class ErrorTool(Tool):
-
"""Tool which is used to replace the default request.error_response."""
def __init__(self, callable, name=None):
@@ -244,8 +242,8 @@ class ErrorTool(Tool):
def _setup(self):
"""Hook this tool into cherrypy.request.
- The standard CherryPy request object will automatically call this
- method when the tool is "turned on" in config.
+ The standard CherryPy request object will automatically call
+ this method when the tool is "turned on" in config.
"""
cherrypy.serving.request.error_response = self._wrapper
@@ -254,7 +252,6 @@ class ErrorTool(Tool):
class SessionTool(Tool):
-
"""Session Tool for CherryPy.
sessions.locking
@@ -282,8 +279,8 @@ class SessionTool(Tool):
def _setup(self):
"""Hook this tool into cherrypy.request.
- The standard CherryPy request object will automatically call this
- method when the tool is "turned on" in config.
+ The standard CherryPy request object will automatically call
+ this method when the tool is "turned on" in config.
"""
hooks = cherrypy.serving.request.hooks
@@ -325,7 +322,6 @@ class SessionTool(Tool):
class XMLRPCController(object):
-
"""A Controller (page handler collection) for XML-RPC.
To use it, have your controllers subclass this base class (it will
@@ -392,7 +388,6 @@ class SessionAuthTool(HandlerTool):
class CachingTool(Tool):
-
"""Caching Tool for CherryPy."""
def _wrapper(self, **kwargs):
@@ -416,11 +411,11 @@ class CachingTool(Tool):
class Toolbox(object):
-
"""A collection of Tools.
This object also functions as a config namespace handler for itself.
- Custom toolboxes should be added to each Application's toolboxes dict.
+ Custom toolboxes should be added to each Application's toolboxes
+ dict.
"""
def __init__(self, namespace):
diff --git a/lib/cherrypy/_cptree.py b/lib/cherrypy/_cptree.py
index 917c5b1a..3dea1c29 100644
--- a/lib/cherrypy/_cptree.py
+++ b/lib/cherrypy/_cptree.py
@@ -10,19 +10,22 @@ from cherrypy.lib import httputil, reprconf
class Application(object):
"""A CherryPy Application.
- Servers and gateways should not instantiate Request objects directly.
- Instead, they should ask an Application object for a request object.
+ Servers and gateways should not instantiate Request objects
+ directly. Instead, they should ask an Application object for a
+ request object.
- An instance of this class may also be used as a WSGI callable
- (WSGI application object) for itself.
+ An instance of this class may also be used as a WSGI callable (WSGI
+ application object) for itself.
"""
root = None
- """The top-most container of page handlers for this app. Handlers should
- be arranged in a hierarchy of attributes, matching the expected URI
- hierarchy; the default dispatcher then searches this hierarchy for a
- matching handler. When using a dispatcher other than the default,
- this value may be None."""
+ """The top-most container of page handlers for this app.
+
+ Handlers should be arranged in a hierarchy of attributes, matching
+ the expected URI hierarchy; the default dispatcher then searches
+ this hierarchy for a matching handler. When using a dispatcher other
+ than the default, this value may be None.
+ """
config = {}
"""A dict of {path: pathconf} pairs, where 'pathconf' is itself a dict
@@ -32,10 +35,16 @@ class Application(object):
toolboxes = {'tools': cherrypy.tools}
log = None
- """A LogManager instance. See _cplogging."""
+ """A LogManager instance.
+
+ See _cplogging.
+ """
wsgiapp = None
- """A CPWSGIApp instance. See _cpwsgi."""
+ """A CPWSGIApp instance.
+
+ See _cpwsgi.
+ """
request_class = _cprequest.Request
response_class = _cprequest.Response
@@ -82,12 +91,15 @@ class Application(object):
def script_name(self): # noqa: D401; irrelevant for properties
"""The URI "mount point" for this app.
- A mount point is that portion of the URI which is constant for all URIs
- that are serviced by this application; it does not include scheme,
- host, or proxy ("virtual host") portions of the URI.
+ A mount point is that portion of the URI which is constant for
+ all URIs that are serviced by this application; it does not
+ include scheme, host, or proxy ("virtual host") portions of the
+ URI.
- For example, if script_name is "/my/cool/app", then the URL
- "http://www.example.com/my/cool/app/page1" might be handled by a
+ For example, if script_name is "/my/cool/app", then the URL
+
+ "http://www.example.com/my/cool/app/page1"
+ might be handled by a
"page1" method on the root object.
The value of script_name MUST NOT end in a slash. If the script_name
@@ -171,9 +183,9 @@ class Application(object):
class Tree(object):
"""A registry of CherryPy applications, mounted at diverse points.
- An instance of this class may also be used as a WSGI callable
- (WSGI application object), in which case it dispatches to all
- mounted apps.
+ An instance of this class may also be used as a WSGI callable (WSGI
+ application object), in which case it dispatches to all mounted
+ apps.
"""
apps = {}
diff --git a/lib/cherrypy/_cpwsgi.py b/lib/cherrypy/_cpwsgi.py
index b4f55fd6..b2a6da52 100644
--- a/lib/cherrypy/_cpwsgi.py
+++ b/lib/cherrypy/_cpwsgi.py
@@ -1,10 +1,10 @@
"""WSGI interface (see PEP 333 and 3333).
Note that WSGI environ keys and values are 'native strings'; that is,
-whatever the type of "" is. For Python 2, that's a byte string; for Python 3,
-it's a unicode string. But PEP 3333 says: "even if Python's str type is
-actually Unicode "under the hood", the content of native strings must
-still be translatable to bytes via the Latin-1 encoding!"
+whatever the type of "" is. For Python 2, that's a byte string; for
+Python 3, it's a unicode string. But PEP 3333 says: "even if Python's
+str type is actually Unicode "under the hood", the content of native
+strings must still be translatable to bytes via the Latin-1 encoding!"
"""
import sys as _sys
@@ -34,7 +34,6 @@ def downgrade_wsgi_ux_to_1x(environ):
class VirtualHost(object):
-
"""Select a different WSGI application based on the Host header.
This can be useful when running multiple sites within one CP server.
@@ -56,7 +55,10 @@ class VirtualHost(object):
cherrypy.tree.graft(vhost)
"""
default = None
- """Required. The default WSGI application."""
+ """Required.
+
+ The default WSGI application.
+ """
use_x_forwarded_host = True
"""If True (the default), any "X-Forwarded-Host"
@@ -65,11 +67,12 @@ class VirtualHost(object):
domains = {}
"""A dict of {host header value: application} pairs.
- The incoming "Host" request header is looked up in this dict,
- and, if a match is found, the corresponding WSGI application
- will be called instead of the default. Note that you often need
- separate entries for "example.com" and "www.example.com".
- In addition, "Host" headers may contain the port number.
+
+ The incoming "Host" request header is looked up in this dict, and,
+ if a match is found, the corresponding WSGI application will be
+ called instead of the default. Note that you often need separate
+ entries for "example.com" and "www.example.com". In addition, "Host"
+ headers may contain the port number.
"""
def __init__(self, default, domains=None, use_x_forwarded_host=True):
@@ -89,7 +92,6 @@ class VirtualHost(object):
class InternalRedirector(object):
-
"""WSGI middleware that handles raised cherrypy.InternalRedirect."""
def __init__(self, nextapp, recursive=False):
@@ -137,7 +139,6 @@ class InternalRedirector(object):
class ExceptionTrapper(object):
-
"""WSGI middleware that traps exceptions."""
def __init__(self, nextapp, throws=(KeyboardInterrupt, SystemExit)):
@@ -226,7 +227,6 @@ class _TrappedResponse(object):
class AppResponse(object):
-
"""WSGI response iterable for CherryPy applications."""
def __init__(self, environ, start_response, cpapp):
@@ -277,7 +277,10 @@ class AppResponse(object):
return next(self.iter_response)
def close(self):
- """Close and de-reference the current request and response. (Core)"""
+ """Close and de-reference the current request and response.
+
+ (Core)
+ """
streaming = _cherrypy.serving.response.stream
self.cpapp.release_serving()
@@ -380,18 +383,20 @@ class AppResponse(object):
class CPWSGIApp(object):
-
"""A WSGI application object for a CherryPy Application."""
pipeline = [
('ExceptionTrapper', ExceptionTrapper),
('InternalRedirector', InternalRedirector),
]
- """A list of (name, wsgiapp) pairs. Each 'wsgiapp' MUST be a
- constructor that takes an initial, positional 'nextapp' argument,
- plus optional keyword arguments, and returns a WSGI application
- (that takes environ and start_response arguments). The 'name' can
- be any you choose, and will correspond to keys in self.config."""
+ """A list of (name, wsgiapp) pairs.
+
+ Each 'wsgiapp' MUST be a constructor that takes an initial,
+ positional 'nextapp' argument, plus optional keyword arguments, and
+ returns a WSGI application (that takes environ and start_response
+ arguments). The 'name' can be any you choose, and will correspond to
+ keys in self.config.
+ """
head = None
"""Rather than nest all apps in the pipeline on each call, it's only
@@ -399,9 +404,12 @@ class CPWSGIApp(object):
this to None again if you change self.pipeline after calling self."""
config = {}
- """A dict whose keys match names listed in the pipeline. Each
- value is a further dict which will be passed to the corresponding
- named WSGI callable (from the pipeline) as keyword arguments."""
+ """A dict whose keys match names listed in the pipeline.
+
+ Each value is a further dict which will be passed to the
+ corresponding named WSGI callable (from the pipeline) as keyword
+ arguments.
+ """
response_class = AppResponse
"""The class to instantiate and return as the next app in the WSGI chain.
@@ -417,8 +425,8 @@ class CPWSGIApp(object):
def tail(self, environ, start_response):
"""WSGI application callable for the actual CherryPy application.
- You probably shouldn't call this; call self.__call__ instead,
- so that any WSGI middleware in self.pipeline can run first.
+ You probably shouldn't call this; call self.__call__ instead, so
+ that any WSGI middleware in self.pipeline can run first.
"""
return self.response_class(environ, start_response, self.cpapp)
diff --git a/lib/cherrypy/_cpwsgi_server.py b/lib/cherrypy/_cpwsgi_server.py
index 11dd846a..b8e96deb 100644
--- a/lib/cherrypy/_cpwsgi_server.py
+++ b/lib/cherrypy/_cpwsgi_server.py
@@ -1,7 +1,7 @@
-"""
-WSGI server interface (see PEP 333).
+"""WSGI server interface (see PEP 333).
-This adds some CP-specific bits to the framework-agnostic cheroot package.
+This adds some CP-specific bits to the framework-agnostic cheroot
+package.
"""
import sys
@@ -35,10 +35,11 @@ class CPWSGIHTTPRequest(cheroot.server.HTTPRequest):
class CPWSGIServer(cheroot.wsgi.Server):
"""Wrapper for cheroot.wsgi.Server.
- cheroot has been designed to not reference CherryPy in any way,
- so that it can be used in other frameworks and applications. Therefore,
- we wrap it here, so we can set our own mount points from cherrypy.tree
- and apply some attributes from config -> cherrypy.server -> wsgi.Server.
+ cheroot has been designed to not reference CherryPy in any way, so
+ that it can be used in other frameworks and applications. Therefore,
+ we wrap it here, so we can set our own mount points from
+ cherrypy.tree and apply some attributes from config ->
+ cherrypy.server -> wsgi.Server.
"""
fmt = 'CherryPy/{cherrypy.__version__} {cheroot.wsgi.Server.version}'
diff --git a/lib/cherrypy/_helper.py b/lib/cherrypy/_helper.py
index d57cd1f9..497438eb 100644
--- a/lib/cherrypy/_helper.py
+++ b/lib/cherrypy/_helper.py
@@ -137,7 +137,6 @@ def popargs(*args, **kwargs):
class Root:
def index(self):
#...
-
"""
# Since keyword arg comes after *args, we have to process it ourselves
# for lower versions of python.
@@ -201,16 +200,17 @@ def url(path='', qs='', script_name=None, base=None, relative=None):
If it does not start with a slash, this returns
(base + script_name [+ request.path_info] + path + qs).
- If script_name is None, cherrypy.request will be used
- to find a script_name, if available.
+ If script_name is None, cherrypy.request will be used to find a
+ script_name, if available.
If base is None, cherrypy.request.base will be used (if available).
Note that you can use cherrypy.tools.proxy to change this.
- Finally, note that this function can be used to obtain an absolute URL
- for the current request path (minus the querystring) by passing no args.
- If you call url(qs=cherrypy.request.query_string), you should get the
- original browser URL (assuming no internal redirections).
+ Finally, note that this function can be used to obtain an absolute
+ URL for the current request path (minus the querystring) by passing
+ no args. If you call url(qs=cherrypy.request.query_string), you
+ should get the original browser URL (assuming no internal
+ redirections).
If relative is None or not provided, request.app.relative_urls will
be used (if available, else False). If False, the output will be an
@@ -320,8 +320,8 @@ def normalize_path(path):
class _ClassPropertyDescriptor(object):
"""Descript for read-only class-based property.
- Turns a classmethod-decorated func into a read-only property of that class
- type (means the value cannot be set).
+ Turns a classmethod-decorated func into a read-only property of that
+ class type (means the value cannot be set).
"""
def __init__(self, fget, fset=None):
diff --git a/lib/cherrypy/_json.py b/lib/cherrypy/_json.py
index 0c2a0f0e..4ef85580 100644
--- a/lib/cherrypy/_json.py
+++ b/lib/cherrypy/_json.py
@@ -1,5 +1,4 @@
-"""
-JSON support.
+"""JSON support.
Expose preferred json module as json and provide encode/decode
convenience functions.
diff --git a/lib/cherrypy/lib/__init__.py b/lib/cherrypy/lib/__init__.py
index 0edaaf20..9788ffdf 100644
--- a/lib/cherrypy/lib/__init__.py
+++ b/lib/cherrypy/lib/__init__.py
@@ -6,8 +6,8 @@ def is_iterator(obj):
(i.e. like a generator).
- This will return False for objects which are iterable,
- but not iterators themselves.
+ This will return False for objects which are iterable, but not
+ iterators themselves.
"""
from types import GeneratorType
if isinstance(obj, GeneratorType):
diff --git a/lib/cherrypy/lib/auth_basic.py b/lib/cherrypy/lib/auth_basic.py
index ad379a26..b938a560 100644
--- a/lib/cherrypy/lib/auth_basic.py
+++ b/lib/cherrypy/lib/auth_basic.py
@@ -18,7 +18,6 @@ as the credentials store::
'tools.auth_basic.accept_charset': 'UTF-8',
}
app_config = { '/' : basic_auth }
-
"""
import binascii
diff --git a/lib/cherrypy/lib/auth_digest.py b/lib/cherrypy/lib/auth_digest.py
index 981e9a5d..46749268 100644
--- a/lib/cherrypy/lib/auth_digest.py
+++ b/lib/cherrypy/lib/auth_digest.py
@@ -55,7 +55,7 @@ def TRACE(msg):
def get_ha1_dict_plain(user_password_dict):
- """Returns a get_ha1 function which obtains a plaintext password from a
+ """Return a get_ha1 function which obtains a plaintext password from a
dictionary of the form: {username : password}.
If you want a simple dictionary-based authentication scheme, with plaintext
@@ -72,7 +72,7 @@ def get_ha1_dict_plain(user_password_dict):
def get_ha1_dict(user_ha1_dict):
- """Returns a get_ha1 function which obtains a HA1 password hash from a
+ """Return a get_ha1 function which obtains a HA1 password hash from a
dictionary of the form: {username : HA1}.
If you want a dictionary-based authentication scheme, but with
@@ -87,7 +87,7 @@ def get_ha1_dict(user_ha1_dict):
def get_ha1_file_htdigest(filename):
- """Returns a get_ha1 function which obtains a HA1 password hash from a
+ """Return a get_ha1 function which obtains a HA1 password hash from a
flat file with lines of the same format as that produced by the Apache
htdigest utility. For example, for realm 'wonderland', username 'alice',
and password '4x5istwelve', the htdigest line would be::
@@ -135,7 +135,7 @@ def synthesize_nonce(s, key, timestamp=None):
def H(s):
- """The hash function H"""
+ """The hash function H."""
return md5_hex(s)
@@ -259,10 +259,11 @@ class HttpDigestAuthorization(object):
return False
def is_nonce_stale(self, max_age_seconds=600):
- """Returns True if a validated nonce is stale. The nonce contains a
- timestamp in plaintext and also a secure hash of the timestamp.
- You should first validate the nonce to ensure the plaintext
- timestamp is not spoofed.
+ """Return True if a validated nonce is stale.
+
+ The nonce contains a timestamp in plaintext and also a secure
+ hash of the timestamp. You should first validate the nonce to
+ ensure the plaintext timestamp is not spoofed.
"""
try:
timestamp, hashpart = self.nonce.split(':', 1)
@@ -275,7 +276,10 @@ class HttpDigestAuthorization(object):
return True
def HA2(self, entity_body=''):
- """Returns the H(A2) string. See :rfc:`2617` section 3.2.2.3."""
+ """Return the H(A2) string.
+
+ See :rfc:`2617` section 3.2.2.3.
+ """
# RFC 2617 3.2.2.3
# If the "qop" directive's value is "auth" or is unspecified,
# then A2 is:
@@ -306,7 +310,6 @@ class HttpDigestAuthorization(object):
4.3. This refers to the entity the user agent sent in the
request which has the Authorization header. Typically GET
requests don't have an entity, and POST requests do.
-
"""
ha2 = self.HA2(entity_body)
# Request-Digest -- RFC 2617 3.2.2.1
@@ -395,7 +398,6 @@ def digest_auth(realm, get_ha1, key, debug=False, accept_charset='utf-8'):
key
A secret string known only to the server, used in the synthesis
of nonces.
-
"""
request = cherrypy.serving.request
@@ -447,9 +449,7 @@ def digest_auth(realm, get_ha1, key, debug=False, accept_charset='utf-8'):
def _respond_401(realm, key, accept_charset, debug, **kwargs):
- """
- Respond with 401 status and a WWW-Authenticate header
- """
+ """Respond with 401 status and a WWW-Authenticate header."""
header = www_authenticate(
realm, key,
accept_charset=accept_charset,
diff --git a/lib/cherrypy/lib/caching.py b/lib/cherrypy/lib/caching.py
index 08d2d8e4..89cb0442 100644
--- a/lib/cherrypy/lib/caching.py
+++ b/lib/cherrypy/lib/caching.py
@@ -42,7 +42,6 @@ from cherrypy.lib import cptools, httputil
class Cache(object):
-
"""Base class for Cache implementations."""
def get(self):
@@ -64,17 +63,16 @@ class Cache(object):
# ------------------------------ Memory Cache ------------------------------- #
class AntiStampedeCache(dict):
-
"""A storage system for cached items which reduces stampede collisions."""
def wait(self, key, timeout=5, debug=False):
"""Return the cached value for the given key, or None.
- If timeout is not None, and the value is already
- being calculated by another thread, wait until the given timeout has
- elapsed. If the value is available before the timeout expires, it is
- returned. If not, None is returned, and a sentinel placed in the cache
- to signal other threads to wait.
+ If timeout is not None, and the value is already being
+ calculated by another thread, wait until the given timeout has
+ elapsed. If the value is available before the timeout expires,
+ it is returned. If not, None is returned, and a sentinel placed
+ in the cache to signal other threads to wait.
If timeout is None, no waiting is performed nor sentinels used.
"""
@@ -127,7 +125,6 @@ class AntiStampedeCache(dict):
class MemoryCache(Cache):
-
"""An in-memory cache for varying response content.
Each key in self.store is a URI, and each value is an AntiStampedeCache.
@@ -381,7 +378,10 @@ def get(invalid_methods=('POST', 'PUT', 'DELETE'), debug=False, **kwargs):
def tee_output():
- """Tee response output to cache storage. Internal."""
+ """Tee response output to cache storage.
+
+ Internal.
+ """
# Used by CachingTool by attaching to request.hooks
request = cherrypy.serving.request
@@ -441,7 +441,6 @@ def expires(secs=0, force=False, debug=False):
* Expires
If any are already present, none of the above response headers are set.
-
"""
response = cherrypy.serving.response
diff --git a/lib/cherrypy/lib/cpstats.py b/lib/cherrypy/lib/cpstats.py
index 111af063..5dff319b 100644
--- a/lib/cherrypy/lib/cpstats.py
+++ b/lib/cherrypy/lib/cpstats.py
@@ -184,7 +184,6 @@ To report statistics::
To format statistics reports::
See 'Reporting', above.
-
"""
import logging
@@ -254,7 +253,6 @@ def proc_time(s):
class ByteCountWrapper(object):
-
"""Wraps a file-like object, counting the number of bytes read."""
def __init__(self, rfile):
@@ -307,7 +305,6 @@ def _get_threading_ident():
class StatsTool(cherrypy.Tool):
-
"""Record various information about the current request."""
def __init__(self):
@@ -316,8 +313,8 @@ class StatsTool(cherrypy.Tool):
def _setup(self):
"""Hook this tool into cherrypy.request.
- The standard CherryPy request object will automatically call this
- method when the tool is "turned on" in config.
+ The standard CherryPy request object will automatically call
+ this method when the tool is "turned on" in config.
"""
if appstats.get('Enabled', False):
cherrypy.Tool._setup(self)
diff --git a/lib/cherrypy/lib/cptools.py b/lib/cherrypy/lib/cptools.py
index 613a8995..61d4d36b 100644
--- a/lib/cherrypy/lib/cptools.py
+++ b/lib/cherrypy/lib/cptools.py
@@ -94,8 +94,8 @@ def validate_etags(autotags=False, debug=False):
def validate_since():
"""Validate the current Last-Modified against If-Modified-Since headers.
- If no code has set the Last-Modified response header, then no validation
- will be performed.
+ If no code has set the Last-Modified response header, then no
+ validation will be performed.
"""
response = cherrypy.serving.response
lastmod = response.headers.get('Last-Modified')
@@ -123,9 +123,9 @@ def validate_since():
def allow(methods=None, debug=False):
"""Raise 405 if request.method not in methods (default ['GET', 'HEAD']).
- The given methods are case-insensitive, and may be in any order.
- If only one method is allowed, you may supply a single string;
- if more than one, supply a list of strings.
+ The given methods are case-insensitive, and may be in any order. If
+ only one method is allowed, you may supply a single string; if more
+ than one, supply a list of strings.
Regardless of whether the current method is allowed or not, this
also emits an 'Allow' response header, containing the given methods.
@@ -154,22 +154,23 @@ def proxy(base=None, local='X-Forwarded-Host', remote='X-Forwarded-For',
scheme='X-Forwarded-Proto', debug=False):
"""Change the base URL (scheme://host[:port][/path]).
- For running a CP server behind Apache, lighttpd, or other HTTP server.
+ For running a CP server behind Apache, lighttpd, or other HTTP
+ server.
- For Apache and lighttpd, you should leave the 'local' argument at the
- default value of 'X-Forwarded-Host'. For Squid, you probably want to set
- tools.proxy.local = 'Origin'.
+ For Apache and lighttpd, you should leave the 'local' argument at
+ the default value of 'X-Forwarded-Host'. For Squid, you probably
+ want to set tools.proxy.local = 'Origin'.
- If you want the new request.base to include path info (not just the host),
- you must explicitly set base to the full base path, and ALSO set 'local'
- to '', so that the X-Forwarded-Host request header (which never includes
- path info) does not override it. Regardless, the value for 'base' MUST
- NOT end in a slash.
+ If you want the new request.base to include path info (not just the
+ host), you must explicitly set base to the full base path, and ALSO
+ set 'local' to '', so that the X-Forwarded-Host request header
+ (which never includes path info) does not override it. Regardless,
+ the value for 'base' MUST NOT end in a slash.
cherrypy.request.remote.ip (the IP address of the client) will be
- rewritten if the header specified by the 'remote' arg is valid.
- By default, 'remote' is set to 'X-Forwarded-For'. If you do not
- want to rewrite remote.ip, set the 'remote' arg to an empty string.
+ rewritten if the header specified by the 'remote' arg is valid. By
+ default, 'remote' is set to 'X-Forwarded-For'. If you do not want to
+ rewrite remote.ip, set the 'remote' arg to an empty string.
"""
request = cherrypy.serving.request
@@ -217,8 +218,8 @@ def proxy(base=None, local='X-Forwarded-Host', remote='X-Forwarded-For',
def ignore_headers(headers=('Range',), debug=False):
"""Delete request headers whose field names are included in 'headers'.
- This is a useful tool for working behind certain HTTP servers;
- for example, Apache duplicates the work that CP does for 'Range'
+ This is a useful tool for working behind certain HTTP servers; for
+ example, Apache duplicates the work that CP does for 'Range'
headers, and will doubly-truncate the response.
"""
request = cherrypy.serving.request
@@ -281,7 +282,6 @@ def referer(pattern, accept=True, accept_missing=False, error=403,
class SessionAuth(object):
-
"""Assert that the user is logged in."""
session_key = 'username'
@@ -319,7 +319,10 @@ Message: %(error_msg)s