Mirror of https://github.com/rembo10/headphones.git (synced 2026-01-09 14:48:07 -05:00)
Initial python3 changes
Mostly just updating libraries, removing string encoding/decoding, fixing some edge cases. No new functionality was added in this commit.
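For reference, a minimal sketch of the Python 3 idioms this commit standardizes on (the function below is illustrative, not part of the codebase):

import urllib.parse  # was: import urlparse / import urllib2

def show_patterns(url):
    # "except E as e" replaces the removed "except E, e" syntax
    try:
        parts = urllib.parse.urlparse(url)
    except ValueError as e:
        print(f"parse failed: {e}")  # f-strings replace u"..." literals and "%s" % ... formatting
        return None
    d = {"scheme": parts.scheme}
    # dict.keys()/items()/values() return views, so wrap in list() where a list is needed
    keys = list(d.keys())
    # basestring/unicode are gone; str is the only text type
    assert isinstance(parts.scheme, str)
    return keys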
@@ -17,6 +17,10 @@
import os
import sys

if sys.version_info <= (3, 5):
    sys.stdout.write("Headphones requires Python >= 3.5\n")
    sys.exit(1)

# Ensure lib added to path, before any other imports
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'lib/'))

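A note on the guard above: sys.version_info is a five-element tuple, so (3, 5, 0, 'final', 0) <= (3, 5) is False and Python 3.5.0 itself passes. Writing the comparison with < states the ">= 3.5" requirement more directly:

import sys

if sys.version_info < (3, 5):  # equivalent in practice, clearer in intent
    sys.stdout.write("Headphones requires Python >= 3.5\n")
    sys.exit(1)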
@@ -6,7 +6,7 @@ import sys
# Ensure that we use the Headphones provided libraries.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../lib"))

import urlparse
import urllib.parse


def can_import(module):
@@ -89,7 +89,7 @@ def main():
    url = sys.argv[1]

    # Check if it is a HTTPS website.
    parts = urlparse.urlparse(url)
    parts = urllib.parse.urlparse(url)

    if parts.scheme.lower() != "https":
        sys.stderr.write(
@@ -1274,7 +1274,7 @@
<input type="checkbox" name="synoindex_enabled" id="synoindex" value="1" ${config['synoindex_enabled']} /><label for="synoindex"><span class="option">Synology NAS</span></label>
</div>
</fieldset>

<!--
<fieldset>
    <div class="row checkbox left">
        <input type="checkbox" class="bigcheck" name="twitter_enabled" id="twitter" value="1" ${config['twitter_enabled']} /><label for="twitter"><span class="option">Twitter</span></label>
@@ -1295,7 +1295,7 @@
    </div>
</div>
</fieldset>

-->
<fieldset>
    <div class="row checkbox left">
        <input type="checkbox" class="bigcheck" name="slack_enabled" id="slack" value="1" ${config['slack_enabled']} /><label for="slack"><span class="option">Slack</span></label>

@@ -1,7 +1,7 @@
<%inherit file="base.html"/>
<%!
    from headphones import helpers
    import cgi
    from html import escape as html_escape
%>

<%def name="headerIncludes()">
@@ -62,11 +62,11 @@
    %>
    <tr class="grade${grade}">
        <td id="dateadded">${item['DateAdded']}</td>
        <td id="filename">${cgi.escape(item['Title'], quote=True)} [<a href="${item['URL']}">${fileid}</a>]<a href="albumPage?AlbumID=${item['AlbumID']}">[album page]</a></td>
        <td id="filename">${html_escape(item['Title'], quote=True)} [<a href="${item['URL']}">${fileid}</a>]<a href="albumPage?AlbumID=${item['AlbumID']}">[album page]</a></td>
        <td id="size">${helpers.bytes_to_mb(item['Size'])}</td>
        <td title="${folder}" id="status">${item['Status']}</td>
        <td id="action">[<a href="javascript:void(0)" onclick="doAjaxCall('queueAlbum?AlbumID=${item['AlbumID']}&redirect=history', $(this),'table')" data-success="Retrying download of '${cgi.escape(item['Title'], quote=True)}'">retry</a>][<a href="javascript:void(0)" onclick="doAjaxCall('queueAlbum?AlbumID=${item['AlbumID']}&new=True&redirect=history',$(this),'table')" data-success="Looking for a new version of '${cgi.escape(item['Title'], quote=True)}'">new</a>]</td>
        <td id="delete"><a href="javascript:void(0)" onclick="doAjaxCall('clearhistory?date_added=${item['DateAdded']}&title=${cgi.escape(item['Title'], quote=True)}',$(this),'table')" data-success="${cgi.escape(item['Title'], quote=True)} cleared from history"><img src="interfaces/default/images/trashcan.png" height="18" width="18" id="trashcan" title="Clear this item from the history"></a>
        <td id="action">[<a href="javascript:void(0)" onclick="doAjaxCall('queueAlbum?AlbumID=${item['AlbumID']}&redirect=history', $(this),'table')" data-success="Retrying download of '${html_escape(item['Title'], quote=True)}'">retry</a>][<a href="javascript:void(0)" onclick="doAjaxCall('queueAlbum?AlbumID=${item['AlbumID']}&new=True&redirect=history',$(this),'table')" data-success="Looking for a new version of '${html_escape(item['Title'], quote=True)}'">new</a>]</td>
        <td id="delete"><a href="javascript:void(0)" onclick="doAjaxCall('clearhistory?date_added=${item['DateAdded']}&title=${html_escape(item['Title'], quote=True)}',$(this),'table')" data-success="${html_escape(item['Title'], quote=True)} cleared from history"><img src="interfaces/default/images/trashcan.png" height="18" width="18" id="trashcan" title="Clear this item from the history"></a>
    </tr>
    %endfor
</tbody>

@@ -218,7 +218,7 @@ def daemonize():
        pid = os.fork()  # @UndefinedVariable - only available in UNIX
        if pid != 0:
            sys.exit(0)
    except OSError, e:
    except OSError as e:
        raise RuntimeError("1st fork failed: %s [%d]", e.strerror, e.errno)

    os.setsid()
@@ -232,7 +232,7 @@ def daemonize():
        pid = os.fork()  # @UndefinedVariable - only available in UNIX
        if pid != 0:
            sys.exit(0)
    except OSError, e:
    except OSError as e:
        raise RuntimeError("2nd fork failed: %s [%d]", e.strerror, e.errno)

    dev_null = file('/dev/null', 'r')

@@ -86,7 +86,7 @@ class Api(object):
        methodToCall = getattr(self, "_" + self.cmd)
        methodToCall(**self.kwargs)
        if 'callback' not in self.kwargs:
            if isinstance(self.data, basestring):
            if isinstance(self.data, str):
                return self.data
            else:
                return json.dumps(self.data)
@@ -106,7 +106,7 @@ class Api(object):
        rows_as_dic = []

        for row in rows:
            row_as_dic = dict(zip(row.keys(), row))
            row_as_dic = dict(list(zip(list(row.keys()), row)))
            rows_as_dic.append(row_as_dic)

        return rows_as_dic
@@ -474,17 +474,17 @@ class Api(object):
        # Handle situations where the torrent url contains arguments that are
        # parsed
        if kwargs:
            import urllib
            import urllib2
            url = urllib2.quote(
                url, safe=":?/=&") + '&' + urllib.urlencode(kwargs)
            import urllib.request, urllib.parse, urllib.error
            import urllib.request, urllib.error, urllib.parse
            url = urllib.parse.quote(
                url, safe=":?/=&") + '&' + urllib.parse.urlencode(kwargs)

        try:
            result = [(title, int(size), url, provider, kind)]
        except ValueError:
            result = [(title, float(size), url, provider, kind)]

        logger.info(u"Making sure we can download the chosen result")
        logger.info("Making sure we can download the chosen result")
        (data, bestqual) = searcher.preprocess(result)

        if data and bestqual:

@@ -240,7 +240,7 @@ class Cache(object):

        # fallback to 1st album cover if none of the above
        elif 'albums' in data:
            for mbid, art in data.get('albums', dict()).items():
            for mbid, art in list(data.get('albums', dict()).items()):
                if 'albumcover' in art:
                    image_url = art['albumcover'][0]['url']
                    break
@@ -352,7 +352,7 @@ class Cache(object):

        # fallback to 1st album cover if none of the above
        elif 'albums' in data:
            for mbid, art in data.get('albums', dict()).items():
            for mbid, art in list(data.get('albums', dict()).items()):
                if 'albumcover' in art:
                    image_url = art['albumcover'][0]['url']
                    break

@@ -18,12 +18,12 @@
#######################################


import urllib
import urllib.request, urllib.parse, urllib.error

from common import USER_AGENT
from .common import USER_AGENT


class HeadphonesURLopener(urllib.FancyURLopener):
class HeadphonesURLopener(urllib.request.FancyURLopener):
    version = USER_AGENT


@@ -44,7 +44,7 @@ class AuthURLOpener(HeadphonesURLopener):
        self.numTries = 0

        # call the base class
        urllib.FancyURLopener.__init__(self)
        urllib.request.FancyURLopener.__init__(self)

    def prompt_user_passwd(self, host, realm):
        """

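For readers tracking the urllib changes in this and later hunks, Python 3 split the old urllib/urllib2 modules roughly as follows (a quick sanity check, not an exhaustive mapping):

from urllib.parse import quote, urlencode, urlparse  # was urllib.quote / urllib2.quote, urllib.urlencode, urlparse.urlparse
from urllib.request import FancyURLopener            # was urllib.FancyURLopener (kept here, though deprecated in Python 3)

print(quote("a b", safe=""))    # 'a%20b'
print(urlencode({"q": "a b"}))  # 'q=a+b'
print(urlparse("https://example.com/x").scheme)  # 'https'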
@@ -24,6 +24,7 @@ import operator
import os
import re
from headphones import version
from functools import reduce


# Identify Our Application
@@ -74,7 +75,7 @@ class Quality:
    @staticmethod
    def _getStatusStrings(status):
        toReturn = {}
        for x in Quality.qualityStrings.keys():
        for x in list(Quality.qualityStrings.keys()):
            toReturn[Quality.compositeStatus(status, x)] = Quality.statusPrefixes[status] + " (" + \
                Quality.qualityStrings[x] + ")"
        return toReturn
@@ -93,7 +94,7 @@ class Quality:
    def splitQuality(quality):
        anyQualities = []
        bestQualities = []
        for curQual in Quality.qualityStrings.keys():
        for curQual in list(Quality.qualityStrings.keys()):
            if curQual & quality:
                anyQualities.append(curQual)
            if curQual << 16 & quality:
@@ -151,7 +152,7 @@ class Quality:
    @staticmethod
    def splitCompositeStatus(status):
        """Returns a tuple containing (status, quality)"""
        for x in sorted(Quality.qualityStrings.keys(), reverse=True):
        for x in sorted(list(Quality.qualityStrings.keys()), reverse=True):
            if status > x * 100:
                return (status - x * 100, x)

@@ -169,10 +170,10 @@ class Quality:
    SNATCHED_PROPER = None


Quality.DOWNLOADED = [Quality.compositeStatus(DOWNLOADED, x) for x in Quality.qualityStrings.keys()]
Quality.SNATCHED = [Quality.compositeStatus(SNATCHED, x) for x in Quality.qualityStrings.keys()]
Quality.DOWNLOADED = [Quality.compositeStatus(DOWNLOADED, x) for x in list(Quality.qualityStrings.keys())]
Quality.SNATCHED = [Quality.compositeStatus(SNATCHED, x) for x in list(Quality.qualityStrings.keys())]
Quality.SNATCHED_PROPER = [Quality.compositeStatus(SNATCHED_PROPER, x) for x in
                           Quality.qualityStrings.keys()]
                           list(Quality.qualityStrings.keys())]

MP3 = Quality.combineQualities([Quality.B192, Quality.B256, Quality.B320, Quality.VBR], [])
LOSSLESS = Quality.combineQualities([Quality.FLAC], [])

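Most of the list(...keys()) wrappers above are 2to3-style conservatism: a Python 3 dict.keys() view is directly iterable, so the copy only matters when the dict is mutated during iteration or a real list is required. A small demonstration:

qualityStrings = {1: "192", 2: "256", 4: "320"}  # stand-in values for illustration

for x in qualityStrings.keys():  # fine in Python 3: views are iterable
    pass

for x in list(qualityStrings.keys()):  # snapshot needed only if the dict may change
    if x == 2:
        del qualityStrings[x]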
@@ -2,15 +2,16 @@ import itertools

import os
import re
import ast
from configparser import ConfigParser
import headphones.logger
from configobj import ConfigObj


def bool_int(value):
    """
    Casts a config value into a 0 or 1
    """
    if isinstance(value, basestring):
    if isinstance(value, str):
        if value.lower() in ('', '0', 'false', 'f', 'no', 'n', 'off'):
            value = 0
    return int(bool(value))
@@ -326,8 +327,9 @@ class Config(object):
    def __init__(self, config_file):
        """ Initialize the config with values from a file """
        self._config_file = config_file
        self._config = ConfigObj(self._config_file, encoding='utf-8')
        for key in _CONFIG_DEFINITIONS.keys():
        self._config = ConfigParser()
        self._config.read(self._config_file)
        for key in list(_CONFIG_DEFINITIONS.keys()):
            self.check_setting(key)
        self.ENCODER_MULTICORE_COUNT = max(0, self.ENCODER_MULTICORE_COUNT)
        self._upgrade()
@@ -344,7 +346,7 @@ class Config(object):

    def check_section(self, section):
        """ Check if INI section exists, if not create it """
        if section not in self._config:
        if not self._config.has_section(section):
            self._config[section] = {}
            return True
        else:
@@ -354,28 +356,38 @@ class Config(object):
        """ Cast any value in the config to the right type or use the default """
        key, definition_type, section, ini_key, default = self._define(key)
        self.check_section(section)

        # ConfigParser values are strings, so need to convert to actual list
        if definition_type == list:
            definition_type = ast.literal_eval

        try:
            my_val = definition_type(self._config[section][ini_key])
            # ConfigParser interprets empty strings in the config
            # literally, so we need to sanitize it. It's not really
            # a config upgrade, since a user can at any time put
            # some_key = ''
            if my_val == '""' or my_val == "''":
                my_val = ''
        except Exception:
            my_val = definition_type(default)
            self._config[section][ini_key] = my_val
            my_val = default
            self._config[section][ini_key] = str(my_val)
        return my_val

    def write(self):
        """ Make a copy of the stored config and write it to the configured file """
        new_config = ConfigObj(encoding="UTF-8")
        new_config.filename = self._config_file
        new_config = ConfigParser()

        # first copy over everything from the old config, even if it is not
        # correctly defined to keep from losing data
        for key, subkeys in self._config.items():
        for key, subkeys in list(self._config.items()):
            if key not in new_config:
                new_config[key] = {}
            for subkey, value in subkeys.items():
            for subkey, value in list(subkeys.items()):
                new_config[key][subkey] = value

        # next make sure that everything we expect to have defined is so
        for key in _CONFIG_DEFINITIONS.keys():
        for key in list(_CONFIG_DEFINITIONS.keys()):
            key, definition_type, section, ini_key, default = self._define(key)
            self.check_setting(key)
            if section not in new_config:
@@ -386,14 +398,15 @@ class Config(object):
        headphones.logger.info("Writing configuration to file")

        try:
            new_config.write()
            with open(self._config_file, 'w') as configfile:
                new_config.write(configfile)
        except IOError as e:
            headphones.logger.error("Error writing configuration file: %s", e)

    def get_extra_newznabs(self):
        """ Return the extra newznab tuples """
        extra_newznabs = list(
            itertools.izip(*[itertools.islice(self.EXTRA_NEWZNABS, i, None, 3)
            zip(*[itertools.islice(self.EXTRA_NEWZNABS, i, None, 3)
                  for i in range(3)])
        )
        return extra_newznabs
@@ -412,7 +425,7 @@ class Config(object):
    def get_extra_torznabs(self):
        """ Return the extra torznab tuples """
        extra_torznabs = list(
            itertools.izip(*[itertools.islice(self.EXTRA_TORZNABS, i, None, 4)
            zip(*[itertools.islice(self.EXTRA_TORZNABS, i, None, 4)
                  for i in range(4)])
        )
        return extra_torznabs
@@ -448,20 +461,21 @@ class Config(object):
            return value
        else:
            key, definition_type, section, ini_key, default = self._define(name)
            self._config[section][ini_key] = definition_type(value)
            self._config[section][ini_key] = str(value)
            return self._config[section][ini_key]

    def process_kwargs(self, kwargs):
        """
        Given a big bunch of key value pairs, apply them to the ini.
        """
        for name, value in kwargs.items():
        for name, value in list(kwargs.items()):
            key, definition_type, section, ini_key, default = self._define(name)
            self._config[section][ini_key] = definition_type(value)
            self._config[section][ini_key] = str(value)

    def _upgrade(self):
        """
        Bring old configs up to date
        Bring old configs up to date. Although this is kind of a dumb
        way to do it because it doesn't handle multi-step upgrades
        """
        if self.CONFIG_VERSION == '2':
            # Update the config to use direct path to the encoder rather than the encoder folder
@@ -488,12 +502,12 @@ class Config(object):
        # Add Seed Ratio to Torznabs
        if self.EXTRA_TORZNABS:
            extra_torznabs = list(
                itertools.izip(*[itertools.islice(self.EXTRA_TORZNABS, i, None, 3)
                zip(*[itertools.islice(self.EXTRA_TORZNABS, i, None, 3)
                      for i in range(3)])
            )
            new_torznabs = []
            for torznab in extra_torznabs:
                new_torznabs.extend([torznab[0], torznab[1], u'', torznab[2]])
                new_torznabs.extend([torznab[0], torznab[1], '', torznab[2]])
            if new_torznabs:
                self.EXTRA_TORZNABS = new_torznabs

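A hedged sketch of the ConfigObj-to-ConfigParser translation applied above. ConfigParser stores every value as a string, which is why the setters now call str(value) and reads are cast back through the definition type, and its write() needs an open file object:

from configparser import ConfigParser

config = ConfigParser()
config.read("config.ini")  # tolerates a missing file

if not config.has_section("General"):  # replaces "if section not in self._config"
    config["General"] = {}

config["General"]["http_port"] = str(8181)  # values must be strings
port = int(config["General"]["http_port"])  # cast back on read

with open("config.ini", "w") as configfile:  # ConfigObj wrote itself; ConfigParser takes a file
    config.write(configfile)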
@@ -2,8 +2,8 @@ import mock
from mock import MagicMock
import headphones.config
import re
import unittestcompat
from unittestcompat import TestCase, TestArgs
from . import unittestcompat
from .unittestcompat import TestCase, TestArgs


class ConfigApiTest(TestCase):
@@ -101,7 +101,7 @@ class ConfigApiTest(TestCase):
        # call methods
        c = headphones.config.Config(path)
        # assertions:
        with self.assertRaisesRegexp(KeyError, exc_regex):
        with self.assertRaisesRegex(KeyError, exc_regex):
            c.check_setting(setting_name)
            pass

@@ -21,7 +21,7 @@ def cry():
        main_thread = t

    # Loop over each thread's current frame, writing info about it
    for tid, frame in sys._current_frames().iteritems():
    for tid, frame in sys._current_frames().items():
        thread = tmap.get(tid, main_thread)

        lines = []

@@ -87,7 +87,7 @@ def check_splitter(command):

def split_baby(split_file, split_cmd):
    '''Let's split baby'''
    logger.info('Splitting %s...', split_file.decode(headphones.SYS_ENCODING, 'replace'))
    logger.info(f"Splitting {split_file}...")
    logger.debug(subprocess.list2cmdline(split_cmd))

    # Prevent Windows from opening a terminal window
@@ -108,16 +108,16 @@ def split_baby(split_file, split_cmd):

    process = subprocess.Popen(split_cmd, startupinfo=startupinfo,
                               stdin=open(os.devnull, 'rb'), stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, env=env)
                               stderr=subprocess.PIPE, env=env, text=True)
    stdout, stderr = process.communicate()

    if process.returncode:
        logger.error('Split failed for %s', split_file.decode(headphones.SYS_ENCODING, 'replace'))
        out = stdout if stdout else stderr
        logger.error('Error details: %s', out.decode(headphones.SYS_ENCODING, 'replace'))
        logger.error(f"Split failed for {split_file}")
        out = stdout or stderr
        logger.error(f"Error details: {out}")
        return False
    else:
        logger.info('Split success %s', split_file.decode(headphones.SYS_ENCODING, 'replace'))
        logger.info(f"Split succeeded for {split_file}")
        return True


@@ -232,7 +232,7 @@ class Directory:
        for i in list_dir:
            if not check_match(i):
                # music file
                if os.path.splitext(i)[-1] in WAVE_FILE_TYPE_BY_EXTENSION.keys():
                if os.path.splitext(i)[-1] in list(WAVE_FILE_TYPE_BY_EXTENSION.keys()):
                    track_nr = identify_track_number(i)
                    if track_nr:
                        self.content.append(WaveFile(self.path + os.sep + i, track_nr=track_nr))
@@ -378,7 +378,7 @@ class CueFile(File):
        except:
            raise ValueError('Cant encode CUE Sheet.')

        if self.content[0] == u'\ufeff':
        if self.content[0] == '\ufeff':
            self.content = self.content[1:]

        header = header_parser()
@@ -581,7 +581,7 @@ def split(albumpath):

    # use xld profile to split cue
    if headphones.CONFIG.ENCODER == 'xld' and headphones.CONFIG.MUSIC_ENCODER and headphones.CONFIG.XLDPROFILE:
        import getXldProfile
        from . import getXldProfile
        xldprofile, xldformat, _ = getXldProfile.getXldProfile(headphones.CONFIG.XLDPROFILE)
        if not xldformat:
            raise ValueError(
@@ -601,7 +601,7 @@ def split(albumpath):
        raise ValueError('Command not found, ensure shntool or xld installed')

    # Determine if file can be split
    if wave.name_ext not in WAVE_FILE_TYPE_BY_EXTENSION.keys():
    if wave.name_ext not in list(WAVE_FILE_TYPE_BY_EXTENSION.keys()):
        raise ValueError('Cannot split, audio file has unsupported extension')

    # Split with xld

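The text=True addition is what lets the new log lines drop the manual .decode() calls: with it, the Popen pipes yield str instead of bytes. Note that the text= alias only appeared in Python 3.7; on 3.5/3.6, which the version guard above still admits, the spelling is universal_newlines=True. A minimal check:

import subprocess

process = subprocess.Popen(["echo", "hello"],
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           universal_newlines=True)  # same effect as text=True on 3.7+
stdout, stderr = process.communicate()
out = stdout or stderr  # "or" picks stderr when stdout is empty, like the code above
print(f"Error details: {out.strip()}")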
@@ -17,7 +17,7 @@
# Stolen from Sick-Beard's db.py #
###################################

from __future__ import with_statement


import time

@@ -116,7 +116,7 @@ class DBConnection:

            break

        except sqlite3.OperationalError, e:
        except sqlite3.OperationalError as e:
            if "unable to open database file" in e.message or "database is locked" in e.message:
                dberror = e
                if args is None:
@@ -128,7 +128,7 @@ class DBConnection:
            else:
                logger.error('Database error: %s', e)
                raise
        except sqlite3.DatabaseError, e:
        except sqlite3.DatabaseError as e:
            logger.error('Fatal Error executing %s :: %s', query, e)
            raise

@@ -156,14 +156,14 @@ class DBConnection:
        If the table is not updated then the 'WHERE changes' will be 0 and the table inserted
        """
        def genParams(myDict):
            return [x + " = ?" for x in myDict.keys()]
            return [x + " = ?" for x in list(myDict.keys())]

        update_query = "UPDATE " + tableName + " SET " + ", ".join(genParams(valueDict)) + " WHERE " + " AND ".join(genParams(keyDict))

        insert_query = ("INSERT INTO " + tableName + " (" + ", ".join(valueDict.keys() + keyDict.keys()) + ")" + " SELECT " + ", ".join(
            ["?"] * len(valueDict.keys() + keyDict.keys())) + " WHERE changes()=0")
        insert_query = ("INSERT INTO " + tableName + " (" + ", ".join(list(valueDict.keys()) + list(keyDict.keys())) + ")" + " SELECT " + ", ".join(
            ["?"] * len(list(valueDict.keys()) + list(keyDict.keys()))) + " WHERE changes()=0")

        try:
            self.action(update_query, valueDict.values() + keyDict.values(), upsert_insert_qry=insert_query)
            self.action(update_query, list(valueDict.values()) + list(keyDict.values()), upsert_insert_qry=insert_query)
        except sqlite3.IntegrityError:
            logger.info('Queries failed: %s and %s', update_query, insert_query)

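Context for the extra list() calls in upsert(): Python 3 dict views cannot be concatenated with +, so the old valueDict.keys() + keyDict.keys() raises a TypeError until both sides are materialized:

valueDict = {"Status": "Snatched"}   # illustrative values only
keyDict = {"AlbumID": "abc123"}

# valueDict.keys() + keyDict.keys() -> TypeError in Python 3
columns = list(valueDict.keys()) + list(keyDict.keys())
params = list(valueDict.values()) + list(keyDict.values())
print(columns, params)  # ['Status', 'AlbumID'] ['Snatched', 'abc123']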
@@ -34,7 +34,7 @@
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.

from __future__ import unicode_literals


from headphones import logger


@@ -15,7 +15,7 @@ def getXldProfile(xldProfile):
    # Get xld preferences plist
    try:
        preferences = biplist.readPlist(expanded)
    except (biplist.InvalidPlistException, biplist.NotBinaryPlistException), e:
    except (biplist.InvalidPlistException, biplist.NotBinaryPlistException) as e:
        logger.error("Error reading xld preferences plist: %s", e)
        return (xldProfileNotFound, None, None)

@@ -28,9 +28,10 @@ import six
from contextlib import contextmanager

import fnmatch
import functools
import re
import os
from beets.mediafile import MediaFile, FileTypeError, UnreadableFileError
from mediafile import MediaFile, FileTypeError, UnreadableFileError
import headphones


@@ -40,6 +41,17 @@ RE_FEATURING = re.compile(r"[fF]t\.|[fF]eaturing|[fF]eat\.|\b[wW]ith\b|&|vs\.")
RE_CD_ALBUM = re.compile(r"\(?((CD|disc)\s*[0-9]+)\)?", re.I)
RE_CD = re.compile(r"^(CD|dics)\s*[0-9]+$", re.I)

def cmp(x, y):
    """
    Replacement for built-in function cmp that was removed in Python 3

    Compare the two objects x and y and return an integer according to
    the outcome. The return value is negative if x < y, zero if x == y
    and strictly positive if x > y.

    https://portingguide.readthedocs.io/en/latest/comparisons.html#the-cmp-function
    """
    return (x > y) - (x < y)

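The cmp() shim above pairs with the functools.cmp_to_key() change a few lines below; a quick sanity check of the intended semantics:

import functools

def cmp(x, y):
    return (x > y) - (x < y)

assert cmp(1, 2) == -1 and cmp(2, 2) == 0 and cmp(3, 2) == 1
# sorted() lost its cmp= argument in Python 3; cmp_to_key adapts a comparator
assert sorted([3, 1, 2], key=functools.cmp_to_key(cmp)) == [1, 2, 3]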
def multikeysort(items, columns):
    comparers = [
@@ -54,7 +66,7 @@ def multikeysort(items, columns):
        else:
            return 0

    return sorted(items, cmp=comparer)
    return sorted(items, key=functools.cmp_to_key(comparer))


def checked(variable):
@@ -210,7 +222,7 @@ def pattern_substitute(pattern, dic, normalize=False):

    if normalize:
        new_dic = {}
        for i, j in dic.iteritems():
        for i, j in dic.items():
            if j is not None:
                try:
                    if sys.platform == 'darwin':
@@ -229,7 +241,7 @@ def replace_all(text, dic):
    if not text:
        return ''

    for i, j in dic.iteritems():
    for i, j in dic.items():
        text = text.replace(i, j)
    return text

@@ -242,8 +254,8 @@ def replace_illegal_chars(string, type="file"):
    return string


_CN_RE1 = re.compile(ur'[^\w]+', re.UNICODE)
_CN_RE2 = re.compile(ur'[\s_]+', re.UNICODE)
_CN_RE1 = re.compile(r'[^\w]+', re.UNICODE)
_CN_RE2 = re.compile(r'[\s_]+', re.UNICODE)


_XLATE_GRAPHICAL_AND_DIACRITICAL = {
@@ -253,33 +265,33 @@ _XLATE_GRAPHICAL_AND_DIACRITICAL = {
    # ©ª«®²³¹»¼½¾ÆÐØÞßæðøþĐđĦħıIJijĸĿŀŁłŒœŦŧDŽDždžLJLjljNJNjnjǤǥDZDzdzȤȥ. This
    # includes also some graphical symbols which can be easily replaced and
    # usually are written by people who don't have appropriate keyboard layout.
    u'©': '(C)', u'ª': 'a.', u'«': '<<', u'®': '(R)', u'²': '2', u'³': '3',
    u'¹': '1', u'»': '>>', u'¼': ' 1/4 ', u'½': ' 1/2 ', u'¾': ' 3/4 ',
    u'Æ': 'AE', u'Ð': 'D', u'Ø': 'O', u'Þ': 'Th', u'ß': 'ss', u'æ': 'ae',
    u'ð': 'd', u'ø': 'o', u'þ': 'th', u'Đ': 'D', u'đ': 'd', u'Ħ': 'H',
    u'ħ': 'h', u'ı': 'i', u'IJ': 'IJ', u'ij': 'ij', u'ĸ': 'q', u'Ŀ': 'L',
    u'ŀ': 'l', u'Ł': 'L', u'ł': 'l', u'Œ': 'OE', u'œ': 'oe', u'Ŧ': 'T',
    u'ŧ': 't', u'DŽ': 'DZ', u'Dž': 'Dz', u'LJ': 'LJ', u'Lj': 'Lj',
    u'lj': 'lj', u'NJ': 'NJ', u'Nj': 'Nj', u'nj': 'nj',
    u'Ǥ': 'G', u'ǥ': 'g', u'DZ': 'DZ', u'Dz': 'Dz', u'dz': 'dz',
    u'Ȥ': 'Z', u'ȥ': 'z', u'№': 'No.',
    u'º': 'o.', # normalize Nº abbrev (popular w/ classical music),
    '©': '(C)', 'ª': 'a.', '«': '<<', '®': '(R)', '²': '2', '³': '3',
    '¹': '1', '»': '>>', '¼': ' 1/4 ', '½': ' 1/2 ', '¾': ' 3/4 ',
    'Æ': 'AE', 'Ð': 'D', 'Ø': 'O', 'Þ': 'Th', 'ß': 'ss', 'æ': 'ae',
    'ð': 'd', 'ø': 'o', 'þ': 'th', 'Đ': 'D', 'đ': 'd', 'Ħ': 'H',
    'ħ': 'h', 'ı': 'i', 'IJ': 'IJ', 'ij': 'ij', 'ĸ': 'q', 'Ŀ': 'L',
    'ŀ': 'l', 'Ł': 'L', 'ł': 'l', 'Œ': 'OE', 'œ': 'oe', 'Ŧ': 'T',
    'ŧ': 't', 'DŽ': 'DZ', 'Dž': 'Dz', 'LJ': 'LJ', 'Lj': 'Lj',
    'lj': 'lj', 'NJ': 'NJ', 'Nj': 'Nj', 'nj': 'nj',
    'Ǥ': 'G', 'ǥ': 'g', 'DZ': 'DZ', 'Dz': 'Dz', 'dz': 'dz',
    'Ȥ': 'Z', 'ȥ': 'z', '№': 'No.',
    'º': 'o.', # normalize Nº abbrev (popular w/ classical music),
    # this is 'masculine ordering indicator', not degree
}

_XLATE_SPECIAL = {
    # Translation table.
    # Cover additional special characters processing normalization.
    u"'": '', # replace apostrophe with nothing
    u"’": '', # replace musicbrainz style apostrophe with nothing
    u'&': ' and ', # expand & to ' and '
    "'": '', # replace apostrophe with nothing
    "’": '', # replace musicbrainz style apostrophe with nothing
    '&': ' and ', # expand & to ' and '
}

_XLATE_MUSICBRAINZ = {
    # Translation table for Musicbrainz.
    u"…": '...', # HORIZONTAL ELLIPSIS (U+2026)
    u"’": "'", # APOSTROPHE (U+0027)
    u"‐": "-", # EN DASH (U+2013)
    "…": '...', # HORIZONTAL ELLIPSIS (U+2026)
    "’": "'", # APOSTROPHE (U+0027)
    "‐": "-", # EN DASH (U+2013)
}

@@ -314,10 +326,10 @@ def _transliterate(u, xlate):
    Perform transliteration using the specified dictionary
    """
    u = unicodedata.normalize('NFD', u)
    u = u''.join([u'' if _is_unicode_combining(x) else x for x in u])
    u = ''.join(['' if _is_unicode_combining(x) else x for x in u])
    u = _translate(u, xlate)
    # at this point output is either unicode, or plain ascii
    return unicode(u)
    return str(u)


def clean_name(s):
@@ -327,10 +339,10 @@ def clean_name(s):
    :param s: string to clean up, possibly unicode one.
    :return: cleaned-up version of input string.
    """
    if not isinstance(s, unicode):
    if not isinstance(s, str):
        # ignore extended chars if someone was dumb enough to pass non-ascii
        # narrow string here, use only unicode for meaningful texts
        u = unicode(s, 'ascii', 'replace')
        u = str(s, 'ascii', 'replace')
    else:
        u = s
    # 1. don't bother doing normalization NFKC, rather transliterate
@@ -341,9 +353,9 @@ def clean_name(s):
    # 3. translate spacials
    u = _translate(u, _XLATE_SPECIAL)
    # 4. replace any non-alphanumeric character sequences by spaces
    u = _CN_RE1.sub(u' ', u)
    u = _CN_RE1.sub(' ', u)
    # 5. coalesce interleaved space/underscore sequences
    u = _CN_RE2.sub(u' ', u)
    u = _CN_RE2.sub(' ', u)
    # 6. trim
    u = u.strip()
    # 7. lowercase
@@ -357,8 +369,8 @@ def clean_musicbrainz_name(s, return_as_string=True):
    :param s: string to clean up, probably unicode.
    :return: cleaned-up version of input string.
    """
    if not isinstance(s, unicode):
        u = unicode(s, 'ascii', 'replace')
    if not isinstance(s, str):
        u = str(s, 'ascii', 'replace')
    else:
        u = s
    u = _translate(u, _XLATE_MUSICBRAINZ)
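The _transliterate() path above (NFD normalization, then dropping combining marks) is the usual ASCII-folding trick; a self-contained approximation, with unicodedata.combining() standing in for the module's _is_unicode_combining() helper:

import unicodedata

def strip_diacritics(s):
    # NFD splits 'ö' into 'o' plus a combining diaeresis; dropping the
    # combining marks leaves the base letters.
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if not unicodedata.combining(ch))

assert strip_diacritics('Björk') == 'Bjork'
# Letters like 'ð' or 'ø' have no decomposition, which is why the
# _XLATE_GRAPHICAL_AND_DIACRITICAL table maps them explicitly.
print(strip_diacritics('Guðmundsdóttir'))  # 'Guðmundsdottir'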
@@ -452,8 +464,7 @@ def expand_subfolders(f):

    if difference > 0:
        logger.info(
            "Found %d media folders, but depth difference between lowest and deepest media folder is %d (expected zero). If this is a discography or a collection of albums, make sure albums are per folder.",
            len(media_folders), difference)
            f"Found {len(media_folders)} media folders, but depth difference between lowest and deepest media folder is {difference} (expected zero). If this is a discography or a collection of albums, make sure albums are per folder.")

    # While already failed, advice the user what he could try. We assume the
    # directory may contain separate CD's and maybe some extra's. The
@@ -465,8 +476,7 @@ def expand_subfolders(f):
            set([os.path.join(*media_folder) for media_folder in extra_media_folders]))

        logger.info(
            "Please look at the following folder(s), since they cause the depth difference: %s",
            extra_media_folders)
            f"Please look at the following folder(s), since they cause the depth difference: {extra_media_folders}")
        return

    # Convert back to paths and remove duplicates, which may be there after
@@ -480,7 +490,7 @@ def expand_subfolders(f):
        logger.debug("Did not expand subfolder, as it resulted in one folder.")
        return

    logger.debug("Expanded subfolders in folder: %s", media_folders)
    logger.debug(f"Expanded subfolders in folder: {media_folders}")
    return media_folders

@@ -498,7 +508,7 @@ def path_match_patterns(path, patterns):
    return False


def path_filter_patterns(paths, patterns, root=None):
def path_filter_patterns(paths, patterns, root=''):
    """
    Scan for ignored paths based on glob patterns. Note that the whole path
    will be matched, therefore paths should only contain the relative paths.
@@ -512,8 +522,7 @@ def path_filter_patterns(paths, patterns, root=None):

    for path in paths[:]:
        if path_match_patterns(path, patterns):
            logger.debug("Path ignored by pattern: %s",
                         os.path.join(root or "", path))
            logger.debug(f"Path ignored by pattern: {os.path.join(root, path)}")

            ignored += 1
            paths.remove(path)
@@ -595,7 +604,7 @@ def extract_metadata(f):
    count_ratio = 0.75

    if count < (count_ratio * len(results)):
        logger.info("Counted %d media files, but only %d have tags, ignoring.", count, len(results))
        logger.info(f"Counted {count} media files, but only {len(results)} have tags, ignoring.")
        return (None, None, None)

    # Count distinct values
@@ -613,8 +622,7 @@ def extract_metadata(f):
            old_album = new_albums[index]
            new_albums[index] = RE_CD_ALBUM.sub("", album).strip()

            logger.debug("Stripped albumd number identifier: %s -> %s", old_album,
                         new_albums[index])
            logger.debug(f"Stripped album number identifier: {old_album} -> {new_albums[index]}")

    # Remove duplicates
    new_albums = list(set(new_albums))
@@ -632,7 +640,7 @@ def extract_metadata(f):
    if len(artists) > 1 and len(albums) == 1:
        split_artists = [RE_FEATURING.split(x) for x in artists]
        featurings = [len(split_artist) - 1 for split_artist in split_artists]
        logger.info("Album seem to feature %d different artists", sum(featurings))
logger.info("Album seem to feature {sum(featurings)} different artists")

        if sum(featurings) > 0:
            # Find the artist of which the least splits have been generated.
@@ -644,9 +652,11 @@ def extract_metadata(f):
            return (artist, albums[0], years[0])

    # Not sure what to do here.
    logger.info("Found %d artists, %d albums and %d years in metadata, so ignoring", len(artists),
                len(albums), len(years))
    logger.debug("Artists: %s, Albums: %s, Years: %s", artists, albums, years)
    logger.info(
        f"Found {len(artists)} artists, {len(albums)} albums and "
        f"{len(years)} years in metadata, so ignoring"
    )
logger.debug("Artists: {artists}, Albums: {albums}, Years: {years}")

    return (None, None, None)

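A pitfall worth flagging in these f-string conversions: a format string that loses its f prefix is still valid Python, so the braces are logged verbatim and the mistake passes silently:

artists = ["A", "B"]
print("Found {len(artists)} artists")   # prints the braces literally
print(f"Found {len(artists)} artists")  # prints: Found 2 artists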
@@ -678,8 +688,7 @@ def preserve_torrent_directory(albumpath, forced=False, single=False):
    else:
        tempdir = tempfile.gettempdir()

    logger.info("Preparing to copy to a temporary directory for post processing: " + albumpath.decode(
        headphones.SYS_ENCODING, 'replace'))
    logger.info(f"Preparing to copy to a temporary directory for post processing: {albumpath}")

    try:
        file_name = os.path.basename(os.path.normpath(albumpath))
@@ -689,8 +698,7 @@ def preserve_torrent_directory(albumpath, forced=False, single=False):
        prefix = "headphones_" + os.path.splitext(file_name)[0] + "_@hp@_"
        new_folder = tempfile.mkdtemp(prefix=prefix, dir=tempdir)
    except Exception as e:
        logger.error("Cannot create temp directory: " + tempdir.decode(
            headphones.SYS_ENCODING, 'replace') + ". Error: " + str(e))
        logger.error(f"Cannot create temp directory: {tempdir}. Error: {e}")
        return None

    # Attempt to stop multiple temp dirs being created for the same albumpath
@@ -701,17 +709,21 @@ def preserve_torrent_directory(albumpath, forced=False, single=False):
        workdir = re.sub(r'(?<!\[)\]', '[]]', workdir)
        if len(glob.glob(workdir + '*/')) >= 3:
            logger.error(
                "Looks like a temp directory has previously been created for this albumpath, not continuing " + workdir.decode(
                    headphones.SYS_ENCODING, 'replace'))
                "Looks like a temp directory has previously been created "
                "for this albumpath, not continuing "
            )
            shutil.rmtree(new_folder)
            return None
    except Exception as e:
        logger.warn("Cannot determine if already copied/processed, will copy anyway: Warning: " + str(e))
        logger.warn(
            "Cannot determine if already copied/processed, will copy anyway. "
            f"Warning: {e}"
        )

    # Copy to temp dir
    try:
        subdir = os.path.join(new_folder, "headphones")
        logger.info("Copying files to " + subdir.decode(headphones.SYS_ENCODING, 'replace'))
        logger.info(f"Copying files to {subdir}")
        if not single:
            shutil.copytree(albumpath, subdir)
        else:
@@ -720,9 +732,10 @@ def preserve_torrent_directory(albumpath, forced=False, single=False):
        # Update the album path with the new location
        return subdir
    except Exception as e:
        logger.warn("Cannot copy/move files to temp directory: " + new_folder.decode(headphones.SYS_ENCODING,
                    'replace') + ". Not continuing. Error: " + str(
            e))
        logger.warn(
            f"Cannot copy/move files to temp directory: {new_folder}. "
            f"Not continuing. Error: {e}"
        )
        shutil.rmtree(new_folder)
        return None

@@ -767,7 +780,7 @@ def cue_split(albumpath, keep_original_folder=False):
        cuesplit.split(cue_dir)
    except Exception as e:
        os.chdir(cwd)
        logger.warn("Cue not split: " + str(e))
        logger.warn(f"Cue not split. Error: {e}")
        return None

    os.chdir(cwd)
@@ -805,7 +818,7 @@ def extract_song_data(s):
        year = match.group("year")
        return (name, album, year)
    else:
        logger.info("Couldn't parse %s into a valid default format", s)
        logger.info(f"Couldn't parse {s} into a valid default format")

    # newzbin default format
    pattern = re.compile(r'(?P<name>.*?)\s\-\s(?P<album>.*?)\s\((?P<year>\d+?\))', re.VERBOSE)
@@ -816,7 +829,7 @@ def extract_song_data(s):
        year = match.group("year")
        return (name, album, year)
    else:
        logger.info("Couldn't parse %s into a valid Newbin format", s)
logger.info(f"Couldn't parse {s} into a valid Newbin format")
        return (name, album, year)

@@ -829,7 +842,7 @@ def smartMove(src, dest, delete=True):
    dest_path = os.path.join(dest, filename)

    if os.path.isfile(dest_path):
        logger.info('Destination file exists: %s', dest_path)
        logger.info(f"Destination file exists: {dest_path}")
        title = os.path.splitext(filename)[0]
        ext = os.path.splitext(filename)[1]
        i = 1
@@ -838,13 +851,12 @@ def smartMove(src, dest, delete=True):
        if os.path.isfile(os.path.join(dest, newfile)):
            i += 1
        else:
            logger.info('Renaming to %s', newfile)
            logger.info(f"Renaming to {newfile}")
            try:
                os.rename(src, os.path.join(source_dir, newfile))
                filename = newfile
            except Exception as e:
                logger.warn('Error renaming %s: %s',
                            src.decode(headphones.SYS_ENCODING, 'replace'), e)
                logger.warn(f"Error renaming {src}: {e}")
            break

    if delete:
@@ -854,8 +866,9 @@ def smartMove(src, dest, delete=True):
        except Exception as e:
            exists = os.path.exists(dest_path)
            if exists and os.path.getsize(source_path) == os.path.getsize(dest_path):
                logger.warn('Successfully moved file "%s", but something went wrong: %s',
                            filename.decode(headphones.SYS_ENCODING, 'replace'), e)
                logger.warn(
                    f"Successfully moved {filename}, but something went wrong: {e}"
                )
                os.unlink(source_path)
            else:
                # remove faultly copied file
@@ -864,12 +877,11 @@ def smartMove(src, dest, delete=True):
            raise
    else:
        try:
            logger.info('Copying "%s" to "%s"', source_path, dest_path)
            logger.info(f"Copying {source_path} to {dest_path}")
            shutil.copy(source_path, dest_path)
            return True
        except Exception as e:
            logger.warn('Error copying file %s: %s', filename.decode(headphones.SYS_ENCODING, 'replace'),
                        e)
            logger.warn(f"Error copying {filename}: {e}")


def walk_directory(basedir, followlinks=True):
@@ -878,7 +890,7 @@ def walk_directory(basedir, followlinks=True):
    with care. In case a folder is already processed, don't traverse it again.
    """

    import logger
    from . import logger

    # Add the base path, because symlinks poiting to the basedir should not be
    # traversed again.
@@ -892,8 +904,10 @@ def walk_directory(basedir, followlinks=True):
            real_path = os.path.abspath(os.readlink(path))

            if real_path in traversed:
                logger.debug("Skipping '%s' since it is a symlink to "
                             "'%s', which is already visited.", path, real_path)
                logger.debug(
                    f"Skipping {path} since it is a symlink to "
                    f"{real_path}, which is already visited."
                )
            else:
                traversed.append(real_path)

@@ -935,22 +949,13 @@ def sab_sanitize_foldername(name):
    FL_ILLEGAL = CH_ILLEGAL + ':\x92"'
    FL_LEGAL = CH_LEGAL + "-''"

    uFL_ILLEGAL = FL_ILLEGAL.decode('latin-1')
    uFL_LEGAL = FL_LEGAL.decode('latin-1')

    if not name:
        return name
    if isinstance(name, unicode):
        illegal = uFL_ILLEGAL
        legal = uFL_LEGAL
    else:
        illegal = FL_ILLEGAL
        legal = FL_LEGAL
        return

    lst = []
    for ch in name.strip():
        if ch in illegal:
            ch = legal[illegal.find(ch)]
        if ch in FL_ILLEGAL:
            ch = FL_LEGAL[FL_ILLEGAL.find(ch)]
            lst.append(ch)
        else:
            lst.append(ch)
@@ -1006,7 +1011,7 @@ def create_https_certificates(ssl_cert, ssl_key):
        with open(ssl_cert, "w") as fp:
            fp.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    except IOError as e:
        logger.error("Error creating SSL key and certificate: %s", e)
logger.error(f"Error creating SSL key and certificate: e")
        return False

    return True

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
from unittestcompat import TestCase
from .unittestcompat import TestCase
from headphones.helpers import clean_name


@@ -8,28 +8,28 @@ class HelpersTest(TestCase):
    def test_clean_name(self):
        """helpers: check correctness of clean_name() function"""
        cases = {
            u' Weiße & rose ': 'Weisse and rose',
            u'Multiple / spaces': 'Multiple spaces',
            u'Kevin\'s m²': 'Kevins m2',
            u'Symphonęy Nº9': 'Symphoney No.9',
            u'ÆæßðÞIJij': u'AeaessdThIJıj',
            u'Obsessió (Cerebral Apoplexy remix)': 'obsessio cerebral '
            ' Weiße & rose ': 'Weisse and rose',
            'Multiple / spaces': 'Multiple spaces',
            'Kevin\'s m²': 'Kevins m2',
            'Symphonęy Nº9': 'Symphoney No.9',
            'ÆæßðÞIJij': 'AeaessdThIJıj',
            'Obsessió (Cerebral Apoplexy remix)': 'obsessio cerebral '
                                                  'apoplexy remix',
            u'Doktór Hałabała i siedmiu zbojów': 'doktor halabala i siedmiu '
            'Doktór Hałabała i siedmiu zbojów': 'doktor halabala i siedmiu '
                                                'zbojow',
            u'Arbetets Söner och Döttrar': 'arbetets soner och dottrar',
            u'Björk Guðmundsdóttir': 'bjork gudmundsdottir',
            u'L\'Arc~en~Ciel': 'larc en ciel',
            u'Orquesta de la Luz (オルケスタ・デ・ラ・ルス)':
                u'Orquesta de la Luz オルケスタ デ ラ ルス'
            'Arbetets Söner och Döttrar': 'arbetets soner och dottrar',
            'Björk Guðmundsdóttir': 'bjork gudmundsdottir',
            'L\'Arc~en~Ciel': 'larc en ciel',
            'Orquesta de la Luz (オルケスタ・デ・ラ・ルス)':
                'Orquesta de la Luz オルケスタ デ ラ ルス'

        }
        for first, second in cases.iteritems():
        for first, second in cases.items():
            nf = clean_name(first).lower()
            ns = clean_name(second).lower()
            self.assertEqual(
                nf, ns, u"check cleaning of case (%s,"
                        u"%s)" % (nf, ns)
                nf, ns, "check cleaning of case (%s,"
                        "%s)" % (nf, ns)
            )

    def test_clean_name_nonunicode(self):

@@ -16,7 +16,7 @@
import time

from headphones import logger, helpers, db, mb, lastfm, metacritic
from beets.mediafile import MediaFile
from mediafile import MediaFile
import headphones

blacklisted_special_artist_names = ['[anonymous]', '[data]', '[no artist]',
@@ -39,7 +39,7 @@ def is_exists(artistid):

    if any(artistid in x for x in artistlist):
        logger.info(artistlist[0][
            1] + u" is already in the database. Updating 'have tracks', but not artist information")
            1] + " is already in the database. Updating 'have tracks', but not artist information")
        return True
    else:
        return False
@@ -53,7 +53,7 @@ def artistlist_to_mbids(artistlist, forced=False):

    # If adding artists through Manage New Artists, they're coming through as non-unicode (utf-8?)
    # and screwing everything up
    if not isinstance(artist, unicode):
    if not isinstance(artist, str):
        try:
            artist = artist.decode('utf-8', 'replace')
        except Exception:
@@ -184,7 +184,7 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False, type="artist"):
    else:
        sortname = artist['artist_name']

    logger.info(u"Now adding/updating: " + artist['artist_name'])
    logger.info("Now adding/updating: " + artist['artist_name'])
    controlValueDict = {"ArtistID": artistid}
    newValueDict = {"ArtistName": artist['artist_name'],
                    "ArtistSortName": sortname,
@@ -263,8 +263,8 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False, type="artist"):
            new_releases = mb.get_new_releases(rgid, includeExtras)

        else:
            if check_release_date is None or check_release_date == u"None":
                if headphones.CONFIG.MB_IGNORE_AGE_MISSING is not 1:
            if check_release_date is None or check_release_date == "None":
                if not headphones.CONFIG.MB_IGNORE_AGE_MISSING:
                    logger.info("[%s] Now updating: %s (No Release Date)" % (artist['artist_name'], rg['title']))
                    new_releases = mb.get_new_releases(rgid, includeExtras, True)
                else:
@@ -517,7 +517,7 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False, type="artist"):
                    marked_as_downloaded = True

                logger.info(
                    u"[%s] Seeing if we need album art for %s" % (artist['artist_name'], rg['title']))
                    "[%s] Seeing if we need album art for %s" % (artist['artist_name'], rg['title']))
                try:
                    cache.getThumb(AlbumID=rg['id'])
                except Exception as e:
@@ -530,19 +530,19 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False, type="artist"):
                    album_searches.append(rg['id'])
            else:
                if skip_log == 0:
                    logger.info(u"[%s] No new releases, so no changes made to %s" % (
                    logger.info("[%s] No new releases, so no changes made to %s" % (
                        artist['artist_name'], rg['title']))

    time.sleep(3)
    finalize_update(artistid, artist['artist_name'], errors)

    logger.info(u"Seeing if we need album art for: %s" % artist['artist_name'])
    logger.info("Seeing if we need album art for: %s" % artist['artist_name'])
    try:
        cache.getThumb(ArtistID=artistid)
    except Exception as e:
        logger.error("Error getting album art: %s", e)

    logger.info(u"Fetching Metacritic reviews for: %s" % artist['artist_name'])
    logger.info("Fetching Metacritic reviews for: %s" % artist['artist_name'])
    try:
        metacritic.update(artistid, artist['artist_name'], artist['releasegroups'])
    except Exception as e:
@@ -554,7 +554,7 @@ def addArtisttoDB(artistid, extrasonly=False, forcefull=False, type="artist"):
            artist['artist_name'], artist['artist_name']))
    else:
        myDB.action('DELETE FROM newartists WHERE ArtistName = ?', [artist['artist_name']])
        logger.info(u"Updating complete for: %s" % artist['artist_name'])
        logger.info("Updating complete for: %s" % artist['artist_name'])

    # Start searching for newly added albums
    if album_searches:
@@ -663,7 +663,7 @@ def addReleaseById(rid, rgid=None):
        sortname = release_dict['artist_name']

    logger.info(
        u"Now manually adding: " + release_dict['artist_name'] + " - with status Paused")
        "Now manually adding: " + release_dict['artist_name'] + " - with status Paused")
    controlValueDict = {"ArtistID": release_dict['artist_id']}
    newValueDict = {"ArtistName": release_dict['artist_name'],
                    "ArtistSortName": sortname,
@@ -696,7 +696,7 @@ def addReleaseById(rid, rgid=None):

    if not rg_exists and release_dict or status == 'Loading' and release_dict:  # it should never be the case that we have an rg and not the artist
        # but if it is this will fail
        logger.info(u"Now adding-by-id album (" + release_dict['title'] + ") from id: " + rgid)
        logger.info("Now adding-by-id album (" + release_dict['title'] + ") from id: " + rgid)
        controlValueDict = {"AlbumID": rgid}
        if status != 'Loading':
            status = 'Wanted'
@@ -772,7 +772,7 @@ def addReleaseById(rid, rgid=None):

    # Start a search for the album
    if headphones.CONFIG.AUTOWANT_MANUALLY_ADDED:
        import searcher
        from . import searcher
        searcher.searchforalbum(rgid, False)

    elif not rg_exists and not release_dict:

@@ -91,7 +91,7 @@ def getSimilar():
    for artist, mbid in artistlist:
        count[artist, mbid] += 1

    items = count.items()
    items = list(count.items())
    top_list = sorted(items, key=lambda x: x[1], reverse=True)[:25]

    random.shuffle(top_list)

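Minor aside: sorted() accepts any iterable, so the list() around count.items() is not strictly required for the sort itself; it matters only if the result is reused after the dict changes. For example:

from collections import Counter

count = Counter({("artist a", "mbid-1"): 3, ("artist b", "mbid-2"): 5})
top_list = sorted(count.items(), key=lambda x: x[1], reverse=True)[:25]
print(top_list[0])  # (('artist b', 'mbid-2'), 5)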
@@ -17,7 +17,7 @@ import os
import math

import headphones
from beets.mediafile import MediaFile, FileTypeError, UnreadableFileError
from mediafile import MediaFile, FileTypeError, UnreadableFileError
from headphones import db, logger, helpers, importer, lastfm


@@ -30,72 +30,64 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None,

    if not dir:
        if not headphones.CONFIG.MUSIC_DIR:
            logger.info(
                "No music directory configured. Add it under "
                "Manage -> Scan Music Library"
            )
            return
        else:
            dir = headphones.CONFIG.MUSIC_DIR

    # If we're appending a dir, it's coming from the post processor which is
    # already bytestring
    if not append or artistScan:
        dir = dir.encode(headphones.SYS_ENCODING)

    if not os.path.isdir(dir):
        logger.warn('Cannot find directory: %s. Not scanning' % dir.decode(headphones.SYS_ENCODING,
                                                                           'replace'))
        logger.warn(f"Cannot find music directory: {dir}")
        return

    myDB = db.DBConnection()
    new_artists = []

    logger.info('Scanning music directory: %s' % dir.decode(headphones.SYS_ENCODING, 'replace'))
    logger.info(f"Scanning music directory: {dir}")

    if not append:

        # Clean up bad filepaths. Queries can take some time, ensure all results are loaded before processing
        if ArtistID:
            tracks = myDB.action(
            dbtracks = myDB.action(
                'SELECT Location FROM alltracks WHERE ArtistID = ? AND Location IS NOT NULL UNION SELECT Location FROM tracks WHERE ArtistID = ? AND Location '
                'IS NOT NULL',
                [ArtistID, ArtistID])
        else:
            tracks = myDB.action(
            dbtracks = myDB.action(
                'SELECT Location FROM alltracks WHERE Location IS NOT NULL UNION SELECT Location FROM tracks WHERE Location IS NOT NULL')

        locations = []
        for track in tracks:
            locations.append(track['Location'])
        for location in locations:
            encoded_track_string = location.encode(headphones.SYS_ENCODING, 'replace')
            if not os.path.isfile(encoded_track_string):
        for track in dbtracks:
            track_location = track['Location']
            if not os.path.isfile(track_location):
                myDB.action('UPDATE tracks SET Location=?, BitRate=?, Format=? WHERE Location=?',
                            [None, None, None, location])
                            [None, None, None, track_location])
                myDB.action('UPDATE alltracks SET Location=?, BitRate=?, Format=? WHERE Location=?',
                            [None, None, None, location])
                            [None, None, None, track_location])

        if ArtistName:
            del_have_tracks = myDB.select('SELECT Location, Matched, ArtistName FROM have WHERE ArtistName = ? COLLATE NOCASE', [ArtistName])
        else:
            del_have_tracks = myDB.select('SELECT Location, Matched, ArtistName FROM have')

        locations = []
        for track in del_have_tracks:
            locations.append([track['Location'], track['ArtistName']])
        for location in locations:
            encoded_track_string = location[0].encode(headphones.SYS_ENCODING, 'replace')
            if not os.path.isfile(encoded_track_string):
                if location[1]:
            if not os.path.isfile(track['Location']):
                if track['ArtistName']:
                    # Make sure deleted files get accounted for when updating artist track counts
                    new_artists.append(location[1])
                    myDB.action('DELETE FROM have WHERE Location=?', [location[0]])
                    new_artists.append(track['ArtistName'])
                    myDB.action('DELETE FROM have WHERE Location=?', [track['Location']])
                logger.info(
                    'File %s removed from Headphones, as it is no longer on disk' % encoded_track_string.decode(
                        headphones.SYS_ENCODING, 'replace'))
f"{Track['Location']} removed from Headphones, as it "
                    f"is no longer on disk"
                )

    bitrates = []
    song_list = []
    track_list = []
    latest_subdirectory = []

    new_song_count = 0
    new_track_count = 0
    file_count = 0

    for r, d, f in helpers.walk_directory(dir):
@@ -110,32 +102,16 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None,
            subdirectory = r.replace(dir, '')
            latest_subdirectory.append(subdirectory)

            if file_count == 0 and r.replace(dir, '') != '':
                logger.info("[%s] Now scanning subdirectory %s" % (
                    dir.decode(headphones.SYS_ENCODING, 'replace'),
                    subdirectory.decode(headphones.SYS_ENCODING, 'replace')))
            elif latest_subdirectory[file_count] != latest_subdirectory[
                    file_count - 1] and file_count != 0:
                logger.info("[%s] Now scanning subdirectory %s" % (
                    dir.decode(headphones.SYS_ENCODING, 'replace'),
                    subdirectory.decode(headphones.SYS_ENCODING, 'replace')))

            song = os.path.join(r, files)

            # We need the unicode path to use for logging, inserting into database
            unicode_song_path = song.decode(headphones.SYS_ENCODING, 'replace')
            track_path = os.path.join(r, files)

            # Try to read the metadata
            try:
                f = MediaFile(song)
                f = MediaFile(track_path)
            except (FileTypeError, UnreadableFileError):
                logger.warning(
                    "Cannot read media file '%s', skipping. It may be corrupted or not a media file.",
                    unicode_song_path)
                logger.warning(f"Cannot read `{track_path}`. It may be corrupted or not a media file.")
                continue
            except IOError:
                logger.warning("Cannnot read media file '%s', skipping. Does the file exists?",
                               unicode_song_path)
logger.warning(f"Cannnot read `{track_path}`. Does the file exists?")
|
||||
continue

# Grab the bitrates for the auto detect bit rate option
@@ -150,15 +126,15 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None,
else:
f_artist = None

# Add the song to our song list -
# TODO: skip adding songs without the minimum requisite information (just a matter of putting together the right if statements)
# Add the track to our track list -
# TODO: skip adding tracks without the minimum requisite information (just a matter of putting together the right if statements)

if f_artist and f.album and f.title:
CleanName = helpers.clean_name(f_artist + ' ' + f.album + ' ' + f.title)
else:
CleanName = None

controlValueDict = {'Location': unicode_song_path}
controlValueDict = {'Location': track_path}

newValueDict = {'TrackID': f.mb_trackid,
# 'ReleaseID' : f.mb_albumid,
@@ -174,24 +150,24 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None,
'CleanName': CleanName
}

# song_list.append(song_dict)
check_exist_song = myDB.action("SELECT * FROM have WHERE Location=?",
[unicode_song_path]).fetchone()
# Only attempt to match songs that are new, haven't yet been matched, or metadata has changed.
if not check_exist_song:
# track_list.append(track_dict)
check_exist_track = myDB.action("SELECT * FROM have WHERE Location=?",
[track_path]).fetchone()
# Only attempt to match tracks that are new, haven't yet been matched, or metadata has changed.
if not check_exist_track:
# This is a new track
if f_artist:
new_artists.append(f_artist)
myDB.upsert("have", newValueDict, controlValueDict)
new_song_count += 1
new_track_count += 1
else:
if check_exist_song['ArtistName'] != f_artist or check_exist_song[
'AlbumTitle'] != f.album or check_exist_song['TrackTitle'] != f.title:
if check_exist_track['ArtistName'] != f_artist or check_exist_track[
'AlbumTitle'] != f.album or check_exist_track['TrackTitle'] != f.title:
# Important track metadata has been modified, need to run matcher again
if f_artist and f_artist != check_exist_song['ArtistName']:
if f_artist and f_artist != check_exist_track['ArtistName']:
new_artists.append(f_artist)
elif f_artist and f_artist == check_exist_song['ArtistName'] and \
check_exist_song['Matched'] != "Ignored":
elif f_artist and f_artist == check_exist_track['ArtistName'] and \
check_exist_track['Matched'] != "Ignored":
new_artists.append(f_artist)
else:
continue
@@ -200,51 +176,59 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None,
myDB.upsert("have", newValueDict, controlValueDict)
myDB.action(
'UPDATE tracks SET Location=?, BitRate=?, Format=? WHERE Location=?',
[None, None, None, unicode_song_path])
[None, None, None, track_path])
myDB.action(
'UPDATE alltracks SET Location=?, BitRate=?, Format=? WHERE Location=?',
[None, None, None, unicode_song_path])
new_song_count += 1
[None, None, None, track_path])
new_track_count += 1
else:
# This track information hasn't changed
if f_artist and check_exist_song['Matched'] != "Ignored":
if f_artist and check_exist_track['Matched'] != "Ignored":
new_artists.append(f_artist)

file_count += 1

# Now we start track matching
logger.info("%s new/modified songs found and added to the database" % new_song_count)
song_list = myDB.action("SELECT * FROM have WHERE Matched IS NULL AND LOCATION LIKE ?",
[dir.decode(headphones.SYS_ENCODING, 'replace') + "%"])
total_number_of_songs = \
myDB.action("SELECT COUNT(*) FROM have WHERE Matched IS NULL AND LOCATION LIKE ?",
[dir.decode(headphones.SYS_ENCODING, 'replace') + "%"]).fetchone()[0]
logger.info("Found " + str(total_number_of_songs) + " new/modified tracks in: '" + dir.decode(
headphones.SYS_ENCODING, 'replace') + "'. Matching tracks to the appropriate releases....")
logger.info(f"{new_track_count} new/modified tracks found and added to the database")
dbtracks = myDB.action(
"SELECT * FROM have WHERE Matched IS NULL AND LOCATION LIKE ?",
[f"{dir}%"]
)
dbtracks_count = myDB.action(
"SELECT COUNT(*) FROM have WHERE Matched IS NULL AND LOCATION LIKE ?",
[f"{dir}%"]
).fetchone()[0]
logger.info(f"Found {dbtracks_count} new/modified tracks in `{dir}`")
logger.info("Matching tracks to the appropriate releases....")

# Sort the song_list by most vague (e.g. no trackid or releaseid) to most specific (both trackid & releaseid)
# When we insert into the database, the tracks with the most specific information will overwrite the more general matches

# song_list = helpers.multikeysort(song_list, ['ReleaseID', 'TrackID'])
song_list = helpers.multikeysort(song_list, ['ArtistName', 'AlbumTitle'])

# We'll use this to give a % completion, just because the track matching might take a while
song_count = 0
latest_artist = []
# Sort the track_list by most vague (e.g. no trackid or releaseid)
# to most specific (both trackid & releaseid)
# When we insert into the database, the tracks with the most
# specific information will overwrite the more general matches

sorted_dbtracks = helpers.multikeysort(dbtracks, ['ArtistName', 'AlbumTitle'])

# We'll use this to give a % completion, just because the
# track matching might take a while
tracks_completed = 0
latest_artist = None
last_completion_percentage = 0
prev_artist_name = None
artistid = None

for song in song_list:
for track in sorted_dbtracks:

latest_artist.append(song['ArtistName'])
if song_count == 0:
logger.info("Now matching songs by %s" % song['ArtistName'])
elif latest_artist[song_count] != latest_artist[song_count - 1] and song_count != 0:
logger.info("Now matching songs by %s" % song['ArtistName'])
if latest_artist != track['ArtistName']:
logger.info(f"Now matching tracks by {track['ArtistName']}")
latest_artist = track['ArtistName']

song_count += 1
completion_percentage = math.floor(float(song_count) / total_number_of_songs * 1000) / 10
tracks_completed += 1
completion_percentage = math.floor(
float(tracks_completed) / dbtracks_count * 1000
) / 10

if completion_percentage >= (last_completion_percentage + 10):
logger.info("Track matching is " + str(completion_percentage) + "% complete")
@@ -257,9 +241,9 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None,

albumid = None

if song['ArtistName'] and song['CleanName']:
artist_name = song['ArtistName']
clean_name = song['CleanName']
if track['ArtistName'] and track['CleanName']:
artist_name = track['ArtistName']
clean_name = track['CleanName']

# Only update if artist is in the db
if artist_name != prev_artist_name:
@@ -297,12 +281,12 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None,
# matching on CleanName should be enough, ensure it's the same artist just in case

# Update tracks
track = myDB.action('SELECT AlbumID, ArtistName FROM tracks WHERE CleanName = ? AND ArtistID = ?', [clean_name, artistid]).fetchone()
if track:
albumid = track['AlbumID']
dbtrack = myDB.action('SELECT AlbumID, ArtistName FROM tracks WHERE CleanName = ? AND ArtistID = ?', [clean_name, artistid]).fetchone()
if dbtrack:
albumid = dbtrack['AlbumID']
myDB.action(
'UPDATE tracks SET Location = ?, BitRate = ?, Format = ? WHERE CleanName = ? AND ArtistID = ?',
[song['Location'], song['BitRate'], song['Format'], clean_name, artistid])
[track['Location'], track['BitRate'], track['Format'], clean_name, artistid])

# Update alltracks
alltrack = myDB.action('SELECT AlbumID, ArtistName FROM alltracks WHERE CleanName = ? AND ArtistID = ?', [clean_name, artistid]).fetchone()
@@ -310,26 +294,25 @@ def libraryScan(dir=None, append=False, ArtistID=None, ArtistName=None,
albumid = alltrack['AlbumID']
myDB.action(
'UPDATE alltracks SET Location = ?, BitRate = ?, Format = ? WHERE CleanName = ? AND ArtistID = ?',
[song['Location'], song['BitRate'], song['Format'], clean_name, artistid])
[track['Location'], track['BitRate'], track['Format'], clean_name, artistid])

# Update have
controlValueDict2 = {'Location': song['Location']}
controlValueDict2 = {'Location': track['Location']}
if albumid:
newValueDict2 = {'Matched': albumid}
else:
newValueDict2 = {'Matched': "Failed"}
myDB.upsert("have", newValueDict2, controlValueDict2)

# myDB.action('INSERT INTO have (ArtistName, AlbumTitle, TrackNumber, TrackTitle, TrackLength, BitRate, Genre, Date, TrackID, Location, CleanName, Format) VALUES( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', [song['ArtistName'], song['AlbumTitle'], song['TrackNumber'], song['TrackTitle'], song['TrackLength'], song['BitRate'], song['Genre'], song['Date'], song['TrackID'], song['Location'], CleanName, song['Format']])
# myDB.action('INSERT INTO have (ArtistName, AlbumTitle, TrackNumber, TrackTitle, TrackLength, BitRate, Genre, Date, TrackID, Location, CleanName, Format) VALUES( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', [track['ArtistName'], track['AlbumTitle'], track['TrackNumber'], track['TrackTitle'], track['TrackLength'], track['BitRate'], track['Genre'], track['Date'], track['TrackID'], track['Location'], CleanName, track['Format']])

logger.info('Completed matching tracks from directory: %s' % dir.decode(headphones.SYS_ENCODING,
'replace'))
logger.info(f"Completed matching tracks from `{dir}`")

if not append or artistScan:
logger.info('Updating scanned artist track counts')

# Clean up the new artist list
unique_artists = {}.fromkeys(new_artists).keys()
unique_artists = list({}.fromkeys(new_artists).keys())

# # Don't think we need to do this, check the db instead below
#
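Wrapping the fromkeys() result in list() is needed because dict.keys() is a view object in Python 3. As a side effect the deduplication is order-preserving on Python 3.7+, where dicts keep insertion order; dict.fromkeys is the usual spelling of the {}.fromkeys idiom:

new_artists = ["Low", "Nick Cave", "Low", "Bowie"]

# Duplicates collapse onto dict keys; the first occurrence wins,
# and insertion order is guaranteed from Python 3.7 on.
unique_artists = list(dict.fromkeys(new_artists))
assert unique_artists == ["Low", "Nick Cave", "Bowie"]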

@@ -4,7 +4,7 @@ Locking-related classes

import time
import threading
import Queue
import queue

import headphones.logger

@@ -29,7 +29,7 @@ class TimedLock(object):
self.lock = threading.Lock()
self.last_used = 0
self.minimum_delta = minimum_delta
self.queue = Queue.Queue()
self.queue = queue.Queue()

def __enter__(self):
"""
@@ -47,7 +47,7 @@ class TimedLock(object):
seconds = self.queue.get(False)
headphones.logger.debug('Sleeping %s (queued)', seconds)
time.sleep(seconds)
except Queue.Empty:
except queue.Empty:
continue
self.queue.task_done()

@@ -153,7 +153,8 @@ def initLogger(console=False, log_dir=False, verbose=False):
file_formatter = logging.Formatter(
'%(asctime)s - %(levelname)-7s :: %(threadName)s : %(message)s', '%d-%b-%Y %H:%M:%S')
file_handler = handlers.RotatingFileHandler(filename, maxBytes=MAX_SIZE,
backupCount=MAX_FILES)
backupCount=MAX_FILES,
encoding='utf8')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(file_formatter)
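Passing an explicit encoding to the rotating file handler keeps non-ASCII artist names readable in the log regardless of the system locale. A minimal sketch (path, size and count are illustrative; the real values come from filename, MAX_SIZE and MAX_FILES):

import logging
from logging import handlers

log = logging.getLogger("headphones-demo")
log.setLevel(logging.DEBUG)

file_handler = handlers.RotatingFileHandler(
    "headphones-demo.log",  # illustrative path
    maxBytes=1_000_000,
    backupCount=5,
    encoding="utf8",        # log file is UTF-8 regardless of locale
)
file_handler.setLevel(logging.DEBUG)
log.addHandler(file_handler)
log.debug("Sigur Rós - Ágætis byrjun")  # round-trips cleanly as UTF-8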

@@ -13,7 +13,7 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.

import htmlentitydefs
import html.entities

import re
from headphones import logger, request
@@ -53,7 +53,7 @@ def getLyrics(artist, song):
'''<div class='lyricbox'><span style="padding:1em"><a href="/Category:Instrumental" title="Instrumental">''').search(
lyricspage)
if m:
return u'(Instrumental)'
return '(Instrumental)'
else:
logger.warn('Cannot find lyrics on: %s' % lyricsurl)
return
@@ -72,7 +72,7 @@ def convert_html_entities(s):
name = hit[2:-1]
try:
entnum = int(name)
s = s.replace(hit, unichr(entnum))
s = s.replace(hit, chr(entnum))
except ValueError:
pass

@@ -83,7 +83,7 @@ def convert_html_entities(s):
hits.remove(amp)
for hit in hits:
name = hit[1:-1]
if name in htmlentitydefs.name2codepoint:
s = s.replace(hit, unichr(htmlentitydefs.name2codepoint[name]))
if name in html.entities.name2codepoint:
s = s.replace(hit, chr(html.entities.name2codepoint[name]))
s = s.replace(amp, "&")
return s
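Since Python 3.4 the standard library can do this whole conversion in one call: html.unescape() resolves named, decimal and hexadecimal entities, which is what convert_html_entities() does by hand with chr() and html.entities.name2codepoint. A possible later simplification rather than part of this commit:

import html

assert html.unescape("AC&#47;DC &amp; Mot&ouml;rhead") == "AC/DC & Motörhead"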

headphones/mb.py
@@ -112,9 +112,9 @@ def findArtist(name, limit=1):
return False
for result in artistResults:
if 'disambiguation' in result:
uniquename = unicode(result['sort-name'] + " (" + result['disambiguation'] + ")")
uniquename = str(result['sort-name'] + " (" + result['disambiguation'] + ")")
else:
uniquename = unicode(result['sort-name'])
uniquename = str(result['sort-name'])
if result['name'] != uniquename and limit == 1:
logger.info(
'Found an artist with a disambiguation: %s - doing an album based search' % name)
@@ -126,7 +126,7 @@ def findArtist(name, limit=1):
# Just need the artist id if the limit is 1
# 'name': unicode(result['sort-name']),
# 'uniquename': uniquename,
'id': unicode(result['id']),
'id': str(result['id']),
# 'url': unicode("http://musicbrainz.org/artist/" + result['id']),#probably needs to be changed
# 'score': int(result['ext:score'])
})
@@ -134,10 +134,10 @@ def findArtist(name, limit=1):
artistlist.append(artistdict)
else:
artistlist.append({
'name': unicode(result['sort-name']),
'name': str(result['sort-name']),
'uniquename': uniquename,
'id': unicode(result['id']),
'url': unicode("http://musicbrainz.org/artist/" + result['id']),
'id': str(result['id']),
'url': str("http://musicbrainz.org/artist/" + result['id']),
# probably needs to be changed
'score': int(result['ext:score'])
})
@@ -187,7 +187,7 @@ def findRelease(name, limit=1, artist=None):
if tracks:
tracks += ' + '
tracks += str(medium['track-count'])
for format, count in format_dict.items():
for format, count in list(format_dict.items()):
if formats:
formats += ' + '
if count > 1:
@@ -203,22 +203,22 @@ def findRelease(name, limit=1, artist=None):
rg_type = secondary_type

releaselist.append({
'uniquename': unicode(result['artist-credit'][0]['artist']['name']),
'title': unicode(title),
'id': unicode(result['artist-credit'][0]['artist']['id']),
'albumid': unicode(result['id']),
'url': unicode(
'uniquename': str(result['artist-credit'][0]['artist']['name']),
'title': str(title),
'id': str(result['artist-credit'][0]['artist']['id']),
'albumid': str(result['id']),
'url': str(
"http://musicbrainz.org/artist/" + result['artist-credit'][0]['artist']['id']),
# probably needs to be changed
'albumurl': unicode("http://musicbrainz.org/release/" + result['id']),
'albumurl': str("http://musicbrainz.org/release/" + result['id']),
# probably needs to be changed
'score': int(result['ext:score']),
'date': unicode(result['date']) if 'date' in result else '',
'country': unicode(result['country']) if 'country' in result else '',
'formats': unicode(formats),
'tracks': unicode(tracks),
'rgid': unicode(result['release-group']['id']),
'rgtype': unicode(rg_type)
'date': str(result['date']) if 'date' in result else '',
'country': str(result['country']) if 'country' in result else '',
'formats': str(formats),
'tracks': str(tracks),
'rgid': str(result['release-group']['id']),
'rgtype': str(rg_type)
})
return releaselist
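The bulk of the mb.py changes are this mechanical mapping: in Python 3, str is the Unicode text type, so unicode(x) becomes str(x) and u"..." literals lose their prefix. Calling str() on a value that is already a str is a no-op:

mbid = "illustrative-mbid"   # hypothetical value, not a real MusicBrainz ID
assert str(mbid) == mbid     # str() of a str returns equal text unchanged
assert str(1999) == "1999"   # non-strings are converted as before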

@@ -240,15 +240,15 @@ def findSeries(name, limit=1):
return False
for result in seriesResults:
if 'disambiguation' in result:
uniquename = unicode(result['name'] + " (" + result['disambiguation'] + ")")
uniquename = str(result['name'] + " (" + result['disambiguation'] + ")")
else:
uniquename = unicode(result['name'])
uniquename = str(result['name'])
serieslist.append({
'uniquename': uniquename,
'name': unicode(result['name']),
'type': unicode(result['type']),
'id': unicode(result['id']),
'url': unicode("http://musicbrainz.org/series/" + result['id']),
'name': str(result['name']),
'type': str(result['type']),
'id': str(result['id']),
'url': str("http://musicbrainz.org/series/" + result['id']),
# probably needs to be changed
'score': int(result['ext:score'])
})
@@ -284,19 +284,19 @@ def getArtist(artistid, extrasonly=False):
if not artist:
return False

artist_dict['artist_name'] = unicode(artist['name'])
artist_dict['artist_name'] = str(artist['name'])

releasegroups = []

if not extrasonly:
for rg in artist['release-group-list']:
if "secondary-type-list" in rg.keys(): # only add releases without a secondary type
if "secondary-type-list" in list(rg.keys()): # only add releases without a secondary type
continue
releasegroups.append({
'title': unicode(rg['title']),
'id': unicode(rg['id']),
'url': u"http://musicbrainz.org/release-group/" + rg['id'],
'type': unicode(rg['type'])
'title': str(rg['title']),
'id': str(rg['id']),
'url': "http://musicbrainz.org/release-group/" + rg['id'],
'type': str(rg['type'])
})

# See if we need to grab extras. Artist specific extras take precedence over global option
@@ -314,7 +314,7 @@ def getArtist(artistid, extrasonly=False):

# Need to convert extras string from something like '2,5.6' to ['ep','live','remix'] (append new extras to end)
if db_artist['Extras']:
extras = map(int, db_artist['Extras'].split(','))
extras = list(map(int, db_artist['Extras'].split(',')))
else:
extras = []
extras_list = headphones.POSSIBLE_EXTRAS
@@ -354,10 +354,10 @@ def getArtist(artistid, extrasonly=False):
rg_type = secondary_type

releasegroups.append({
'title': unicode(rg['title']),
'id': unicode(rg['id']),
'url': u"http://musicbrainz.org/release-group/" + rg['id'],
'type': unicode(rg_type)
'title': str(rg['title']),
'id': str(rg['id']),
'url': "http://musicbrainz.org/release-group/" + rg['id'],
'type': str(rg_type)
})
artist_dict['releasegroups'] = releasegroups
return artist_dict
@@ -382,10 +382,10 @@ def getSeries(seriesid):
return False

if 'disambiguation' in series:
series_dict['artist_name'] = unicode(
series['name'] + " (" + unicode(series['disambiguation']) + ")")
series_dict['artist_name'] = str(
series['name'] + " (" + str(series['disambiguation']) + ")")
else:
series_dict['artist_name'] = unicode(series['name'])
series_dict['artist_name'] = str(series['name'])

releasegroups = []

@@ -448,42 +448,42 @@ def getRelease(releaseid, include_artist_info=True):
if not results:
return False

release['title'] = unicode(results['title'])
release['id'] = unicode(results['id'])
release['asin'] = unicode(results['asin']) if 'asin' in results else None
release['date'] = unicode(results['date']) if 'date' in results else None
release['title'] = str(results['title'])
release['id'] = str(results['id'])
release['asin'] = str(results['asin']) if 'asin' in results else None
release['date'] = str(results['date']) if 'date' in results else None
try:
release['format'] = unicode(results['medium-list'][0]['format'])
release['format'] = str(results['medium-list'][0]['format'])
except:
release['format'] = u'Unknown'
release['format'] = 'Unknown'

try:
release['country'] = unicode(results['country'])
release['country'] = str(results['country'])
except:
release['country'] = u'Unknown'
release['country'] = 'Unknown'

if include_artist_info:

if 'release-group' in results:
release['rgid'] = unicode(results['release-group']['id'])
release['rg_title'] = unicode(results['release-group']['title'])
release['rgid'] = str(results['release-group']['id'])
release['rg_title'] = str(results['release-group']['title'])
try:
release['rg_type'] = unicode(results['release-group']['type'])
release['rg_type'] = str(results['release-group']['type'])

if release['rg_type'] == 'Album' and 'secondary-type-list' in results[
'release-group']:
secondary_type = unicode(results['release-group']['secondary-type-list'][0])
secondary_type = str(results['release-group']['secondary-type-list'][0])
if secondary_type != release['rg_type']:
release['rg_type'] = secondary_type

except KeyError:
release['rg_type'] = u'Unknown'
release['rg_type'] = 'Unknown'

else:
logger.warn("Release " + releaseid + "had no ReleaseGroup associated")

release['artist_name'] = unicode(results['artist-credit'][0]['artist']['name'])
release['artist_id'] = unicode(results['artist-credit'][0]['artist']['id'])
release['artist_name'] = str(results['artist-credit'][0]['artist']['name'])
release['artist_id'] = str(results['artist-credit'][0]['artist']['id'])

release['tracks'] = getTracksFromRelease(results)

@@ -529,7 +529,7 @@ def get_new_releases(rgid, includeExtras=False, forcefull=False):
force_repackage1 = 0
if len(results) != 0:
for release_mark in results:
release_list.append(unicode(release_mark['id']))
release_list.append(str(release_mark['id']))
release_title = release_mark['title']
remove_missing_releases = myDB.action("SELECT ReleaseID FROM allalbums WHERE AlbumID=?",
[rgid])
@@ -561,31 +561,31 @@ def get_new_releases(rgid, includeExtras=False, forcefull=False):
# DELETE all references to this release since we're updating it anyway.
myDB.action('DELETE from allalbums WHERE ReleaseID=?', [rel_id_check])
myDB.action('DELETE from alltracks WHERE ReleaseID=?', [rel_id_check])
release['AlbumTitle'] = unicode(releasedata['title'])
release['AlbumID'] = unicode(rgid)
release['AlbumASIN'] = unicode(releasedata['asin']) if 'asin' in releasedata else None
release['ReleaseDate'] = unicode(releasedata['date']) if 'date' in releasedata else None
release['AlbumTitle'] = str(releasedata['title'])
release['AlbumID'] = str(rgid)
release['AlbumASIN'] = str(releasedata['asin']) if 'asin' in releasedata else None
release['ReleaseDate'] = str(releasedata['date']) if 'date' in releasedata else None
release['ReleaseID'] = releasedata['id']
if 'release-group' not in releasedata:
raise Exception('No release group associated with release id ' + releasedata[
'id'] + ' album id' + rgid)
release['Type'] = unicode(releasedata['release-group']['type'])
release['Type'] = str(releasedata['release-group']['type'])

if release['Type'] == 'Album' and 'secondary-type-list' in releasedata['release-group']:
secondary_type = unicode(releasedata['release-group']['secondary-type-list'][0])
secondary_type = str(releasedata['release-group']['secondary-type-list'][0])
if secondary_type != release['Type']:
release['Type'] = secondary_type

# making the assumption that the most important artist will be first in the list
if 'artist-credit' in releasedata:
release['ArtistID'] = unicode(releasedata['artist-credit'][0]['artist']['id'])
release['ArtistName'] = unicode(releasedata['artist-credit-phrase'])
release['ArtistID'] = str(releasedata['artist-credit'][0]['artist']['id'])
release['ArtistName'] = str(releasedata['artist-credit-phrase'])
else:
logger.warn('Release ' + releasedata['id'] + ' has no Artists associated.')
return False

release['ReleaseCountry'] = unicode(
releasedata['country']) if 'country' in releasedata else u'Unknown'
release['ReleaseCountry'] = str(
releasedata['country']) if 'country' in releasedata else 'Unknown'
# assuming that the list will contain media and that the format will be consistent
try:
additional_medium = ''
@@ -600,9 +600,9 @@ def get_new_releases(rgid, includeExtras=False, forcefull=False):
disc_number = str(medium_count) + 'x'
packaged_medium = disc_number + releasedata['medium-list'][0][
'format'] + additional_medium
release['ReleaseFormat'] = unicode(packaged_medium)
release['ReleaseFormat'] = str(packaged_medium)
except:
release['ReleaseFormat'] = u'Unknown'
release['ReleaseFormat'] = 'Unknown'

release['Tracks'] = getTracksFromRelease(releasedata)

@@ -684,14 +684,14 @@ def getTracksFromRelease(release):
for medium in release['medium-list']:
for track in medium['track-list']:
try:
track_title = unicode(track['title'])
track_title = str(track['title'])
except:
track_title = unicode(track['recording']['title'])
track_title = str(track['recording']['title'])
tracks.append({
'number': totalTracks,
'title': track_title,
'id': unicode(track['recording']['id']),
'url': u"http://musicbrainz.org/track/" + track['recording']['id'],
'id': str(track['recording']['id']),
'url': "http://musicbrainz.org/track/" + track['recording']['id'],
'duration': int(track['length']) if 'length' in track else 0
})
totalTracks += 1
@@ -739,7 +739,7 @@ def findArtistbyAlbum(name):
# uniquename = unicode(newArtist['sort-name'])
# artist_dict['name'] = unicode(newArtist['sort-name'])
# artist_dict['uniquename'] = uniquename
artist_dict['id'] = unicode(newArtist['id'])
artist_dict['id'] = str(newArtist['id'])
# artist_dict['url'] = u'http://musicbrainz.org/artist/' + newArtist['id']
# artist_dict['score'] = int(releaseGroup['ext:score'])

@@ -768,7 +768,7 @@ def findAlbumID(artist=None, album=None):

if len(results) < 1:
return False
rgid = unicode(results[0]['id'])
rgid = str(results[0]['id'])
return rgid

@@ -35,7 +35,7 @@ def update(artistid, artist_name, release_groups):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2243.2 Safari/537.36'}

url = "http://www.metacritic.com/person/" + mc_artist_name + "?filter-options=music&sort_options=date&num_items=100"
url = "https://www.metacritic.com/person/" + mc_artist_name + "?filter-options=music&sort_options=date&num_items=100"

res = request.request_soup(url, headers=headers, whitelist_status_code=404)

@@ -17,8 +17,8 @@
Track/album metadata handling routines.
"""

from __future__ import print_function
from beets.mediafile import MediaFile, UnreadableFileError

from mediafile import MediaFile, UnreadableFileError
import headphones
from headphones import logger
import os.path
@@ -60,7 +60,7 @@ class MetadataDict(dict):
self._lower = {}
if seq is not None:
try:
self.add_items(seq.iteritems())
self.add_items(iter(seq.items()))
except KeyError:
self.add_items(seq)
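iteritems() is gone in Python 3; items() returns a lazy view object rather than a list, so the iter() wrapper mostly preserves the old calling convention. A small illustration of the view semantics:

d = {"$Artist": "Low", "$Album": "Things We Lost in the Fire"}

view = d.items()                  # Python 3: a live view, not a list
d["$Year"] = "2001"
assert ("$Year", "2001") in view  # the view tracks later insertions
assert list(iter(view)) == list(view)  # iter() changes nothing semantically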

@@ -103,11 +103,11 @@ def _verify_var_type(val):
"""
Check if type of value is allowed as a variable in pathname substitution.
"""
return isinstance(val, (basestring, int, float, datetime.date))
return isinstance(val, (str, int, float, datetime.date))

def _as_str(val):
if isinstance(val, basestring):
if isinstance(val, str):
return val
else:
return str(val)
@@ -134,7 +134,7 @@ def _row_to_dict(row, d):
"""
Populate dict with database row fields.
"""
for fld in row.keys():
for fld in list(row.keys()):
val = row[fld]
if val is None:
val = ''
@@ -184,9 +184,7 @@ def file_metadata(path, release):
try:
f = MediaFile(path)
except UnreadableFileError as ex:
logger.info("MediaFile couldn't parse: %s (%s)",
path.decode(headphones.SYS_ENCODING, 'replace'),
str(ex))
logger.info(f"MediaFile couldn't parse {path}: {e}")
|
||||
return None, None

res = MetadataDict()
@@ -207,8 +205,7 @@ def file_metadata(path, release):
track_number = '%02d' % f.track

if not f.title:
basename = os.path.basename(
path.decode(headphones.SYS_ENCODING, 'replace'))
basename = os.path.basename(path)
title = os.path.splitext(basename)[0]
from_metadata = False
else:
@@ -242,7 +239,7 @@ def file_metadata(path, release):
Vars.SORT_ARTIST_LOWER: _lower(sort_name),
Vars.ALBUM_LOWER: _lower(album_title),
}
res.add_items(override_values.iteritems())
res.add_items(iter(override_values.items()))
return res, from_metadata

@@ -252,7 +249,7 @@ def _intersect(d1, d2):
"""
Create intersection (common part) of two dictionaries.
"""
res = {}
for key, val in d1.iteritems():
for key, val in d1.items():
if key in d2 and d2[key] == val:
res[key] = val
return res
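On Python 3 the same intersection can be written with set operations, since item views behave like sets when the values are hashable. This is a hedged alternative to the loop rather than a drop-in replacement (it raises TypeError for unhashable values such as lists):

def _intersect_setops(d1, d2):
    # Keeps (key, value) pairs present in both dicts, like the loop above.
    return dict(d1.items() & d2.items())

a = {"artist": "Low", "year": 2001, "disc": 1}
b = {"artist": "Low", "year": 1999, "disc": 1}
assert _intersect_setops(a, b) == {"artist": "Low", "disc": 1}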
@@ -284,21 +281,19 @@ def album_metadata(path, release, common_tags):
sort_name = artist

if not sort_name or sort_name[0].isdigit():
first_char = u'0-9'
first_char = '0-9'
else:
first_char = sort_name[0]

orig_folder = u''
orig_folder = ''

# Get from temp path
if "_@hp@_" in path:
orig_folder = path.rsplit("headphones_", 1)[1].split("_@hp@_")[0]
orig_folder = orig_folder.decode(headphones.SYS_ENCODING, 'replace')
else:
for r, d, f in os.walk(path):
try:
orig_folder = os.path.basename(
os.path.normpath(r).decode(headphones.SYS_ENCODING, 'replace'))
orig_folder = os.path.basename(os.path.normpath(r))
break
except:
pass
@@ -320,7 +315,7 @@ def album_metadata(path, release, common_tags):
Vars.ORIGINAL_FOLDER_LOWER: _lower(orig_folder)
}
res = MetadataDict(common_tags)
res.add_items(override_values.iteritems())
res.add_items(iter(override_values.items()))
return res

@@ -345,7 +340,7 @@ def albumart_metadata(release, common_tags):
Vars.ALBUM_LOWER: _lower(album)
}
res = MetadataDict(common_tags)
res.add_items(override_values.iteritems())
res.add_items(iter(override_values.items()))
return res

@@ -22,7 +22,7 @@ import headphones.helpers as _hp
from headphones.metadata import MetadataDict
import datetime

from unittestcompat import TestCase
from .unittestcompat import TestCase

__author__ = "Andrzej Ciarkowski <andrzej.ciarkowski@gmail.com>"
@@ -50,7 +50,7 @@ class _MockDatabaseRow(object):
self._dict = dict(d)

def keys(self):
return self._dict.iterkeys()
return iter(self._dict.keys())

def __getitem__(self, item):
return self._dict[item]
@@ -63,9 +63,9 @@ class MetadataTest(TestCase):

def test_metadata_dict_ci(self):
"""MetadataDict: case-insensitive lookup"""
expected = u'naïve'
expected = 'naïve'
key_var = '$TitlE'
m = MetadataDict({key_var.lower(): u'naïve'})
m = MetadataDict({key_var.lower(): 'naïve'})
self.assertFalse('$track' in m)
self.assertTrue('$tITLe' in m, "cross-case lookup with 'in'")
self.assertEqual(m[key_var], expected, "cross-case lookup success")
@@ -74,7 +74,7 @@ class MetadataTest(TestCase):

def test_metadata_dict_cs(self):
"""MetadataDice: case-preserving lookup"""
expected_var = u'NaïVe'
expected_var = 'NaïVe'
key_var = '$TitlE'
m = MetadataDict({
key_var.lower(): expected_var.lower(),
@@ -171,5 +171,5 @@ class MetadataTest(TestCase):
res = _hp.pattern_substitute(
"/music/$First/$Artist/$Artist - $Album{ [$Year]}", md, True)

self.assertEqual(res, u"/music/A/artist/artist - Album",
self.assertEqual(res, "/music/A/artist/artist - Album",
"check correct rendering of None via pattern_substitute()")

@@ -14,6 +14,7 @@
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.

import time
import datetime
import shutil
import subprocess
import multiprocessing
@@ -21,14 +22,15 @@ import multiprocessing
import os
import headphones
from headphones import logger
from beets.mediafile import MediaFile
from mediafile import MediaFile

# xld
import getXldProfile
from . import getXldProfile

def encode(albumPath):
print(albumPath)
use_xld = headphones.CONFIG.ENCODER == 'xld'

# Return if xld details not found
@@ -63,8 +65,7 @@ def encode(albumPath):
for music in f:
if any(music.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
if not use_xld:
encoderFormat = headphones.CONFIG.ENCODEROUTPUTFORMAT.encode(
headphones.SYS_ENCODING)
encoderFormat = headphones.CONFIG.ENCODEROUTPUTFORMAT
else:
xldMusicFile = os.path.join(r, music)
xldInfoMusic = MediaFile(xldMusicFile)
@@ -86,7 +87,7 @@ def encode(albumPath):
musicTempFiles.append(os.path.join(tempDirEncode, musicTemp))

if headphones.CONFIG.ENCODER_PATH:
encoder = headphones.CONFIG.ENCODER_PATH.encode(headphones.SYS_ENCODING)
encoder = headphones.CONFIG.ENCODER_PATH
else:
if use_xld:
encoder = os.path.join('/Applications', 'xld')
@@ -117,18 +118,17 @@ def encode(albumPath):

if use_xld:
if xldBitrate and (infoMusic.bitrate / 1000 <= xldBitrate):
logger.info('%s has bitrate <= %skb, will not be re-encoded',
music.decode(headphones.SYS_ENCODING, 'replace'), xldBitrate)
logger.info(f"{music} has bitrate <= {xldBitrate}kb, will not be re-encoded")
else:
encode = True
elif headphones.CONFIG.ENCODER == 'lame':
if not any(
music.decode(headphones.SYS_ENCODING, 'replace').lower().endswith('.' + x) for x
music.lower().endswith('.' + x) for x
in ["mp3", "wav"]):
logger.warn('Lame cannot encode %s format for %s, use ffmpeg',
os.path.splitext(music)[1], music)
else:
if music.decode(headphones.SYS_ENCODING, 'replace').lower().endswith('.mp3') and (
if music.lower().endswith('.mp3') and (
int(infoMusic.bitrate / 1000) <= headphones.CONFIG.BITRATE):
logger.info('%s has bitrate <= %skb, will not be re-encoded', music,
headphones.CONFIG.BITRATE)
@@ -136,13 +136,12 @@ def encode(albumPath):
encode = True
else:
if headphones.CONFIG.ENCODEROUTPUTFORMAT == 'ogg':
if music.decode(headphones.SYS_ENCODING, 'replace').lower().endswith('.ogg'):
logger.warn('Cannot re-encode .ogg %s',
music.decode(headphones.SYS_ENCODING, 'replace'))
if music.lower().endswith('.ogg'):
logger.warn(f"Cannot re-encode .ogg {music}")
else:
encode = True
else:
if music.decode(headphones.SYS_ENCODING, 'replace').lower().endswith('.' + headphones.CONFIG.ENCODEROUTPUTFORMAT) and (int(infoMusic.bitrate / 1000) <= headphones.CONFIG.BITRATE):
if music.lower().endswith('.' + headphones.CONFIG.ENCODEROUTPUTFORMAT) and (int(infoMusic.bitrate / 1000) <= headphones.CONFIG.BITRATE):
logger.info('%s has bitrate <= %skb, will not be re-encoded', music, headphones.CONFIG.BITRATE)
else:
encode = True
@@ -185,13 +184,13 @@ def encode(albumPath):
# Retrieve the results
results = results.get()
else:
results = map(command_map, jobs)
results = list(map(command_map, jobs))

# The results are either True or False, so determine if one is False
encoder_failed = not all(results)

musicFiles = filter(None, musicFiles)
musicTempFiles = filter(None, musicTempFiles)
musicFiles = [_f for _f in musicFiles if _f]
musicTempFiles = [_f for _f in musicTempFiles if _f]
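In Python 3 filter() returns a lazy iterator that can be consumed only once and has no len(), which would break later bookkeeping on these lists; the comprehensions restore eager list semantics:

musicFiles = ["a.flac", None, "b.flac", ""]

lazy = filter(None, musicFiles)        # Python 3: an iterator
eager = [f for f in musicFiles if f]   # the replacement: a real list

assert list(lazy) == ["a.flac", "b.flac"]
assert list(lazy) == []                # the iterator is already exhausted
assert len(eager) == 2                 # lists support len() and re-iteration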

# check all files to be encoded now exist in temp directory
if not encoder_failed and musicTempFiles:
@@ -352,36 +351,31 @@ def command(encoder, musicSource, musicDest, albumPath, xldProfile):
startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW

# Encode
logger.info('Encoding %s...' % (musicSource.decode(headphones.SYS_ENCODING, 'replace')))
logger.info(f"Encoding {musicSource}")
logger.debug(subprocess.list2cmdline(cmd))

process = subprocess.Popen(cmd, startupinfo=startupinfo,
stdin=open(os.devnull, 'rb'), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr=subprocess.PIPE, text=True)
stdout, stderr = process.communicate(headphones.CONFIG.ENCODER)

# Error if return code not zero
if process.returncode:
logger.error(
'Encoding failed for %s' % (musicSource.decode(headphones.SYS_ENCODING, 'replace')))
out = stdout if stdout else stderr
out = out.decode(headphones.SYS_ENCODING, 'replace')
logger.error(f"Encoding failed for {musicSource}")
out = stdout or stderr
outlast2lines = '\n'.join(out.splitlines()[-2:])
logger.error('%s error details: %s' % (headphones.CONFIG.ENCODER, outlast2lines))
logger.error(f"{headphones.CONFIG.ENCODER} error details: {outlast2lines}")
out = out.rstrip("\n")
logger.debug(out)
encoded = False
else:
logger.info('%s encoded in %s', musicSource, getTimeEncode(startMusicTime))
logger.info(f"{musicSource} encoded in {getTimeEncode(startMusicTime)}")
encoded = True

return encoded
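With text=True the pipes carry str instead of bytes, so communicate() needs no manual encode or decode (it uses the locale's preferred encoding unless one is given). A runnable sketch of the difference:

import subprocess
import sys

proc = subprocess.Popen(
    [sys.executable, "-c", "print('encoded ok')"],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    text=True,  # pipes speak str; no .decode(...) afterwards
)
stdout, stderr = proc.communicate()
assert isinstance(stdout, str) and stdout.strip() == "encoded ok"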

def getTimeEncode(start):
seconds = int(time.time() - start)
hours = seconds / 3600
seconds -= 3600 * hours
minutes = seconds / 60
seconds -= 60 * minutes
return "%02d:%02d:%02d" % (hours, minutes, seconds)
finish = time.time()
seconds = int(finish - start)
return datetime.timedelta(seconds=seconds)
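The old formatter relied on Python 2's integer division; in Python 3 the same / produces floats. Returning a timedelta sidesteps that, because str() of a timedelta already renders hours, minutes and seconds:

import datetime

assert str(datetime.timedelta(seconds=3725)) == "1:02:05"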

@@ -1,28 +1,13 @@
# This file is part of Headphones.
#
# Headphones is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.

from urllib import urlencode, quote_plus
import urllib
from urllib.parse import urlencode, quote_plus
import urllib.request, urllib.parse, urllib.error
import subprocess
import json
from email.mime.text import MIMEText
import smtplib
import email.utils
from httplib import HTTPSConnection
from urlparse import parse_qsl
import urllib2
from http.client import HTTPSConnection
from urllib.parse import parse_qsl
import urllib.request, urllib.error, urllib.parse
import requests as requests

import os.path
@@ -31,8 +16,8 @@ from pynma import pynma
import cherrypy
import headphones
import gntp.notifier
import oauth2 as oauth
import pythontwitter as twitter
#import oauth2 as oauth
import twitter

class GROWL(object):
@@ -81,10 +66,10 @@ class GROWL(object):
try:
growl.register()
except gntp.notifier.errors.NetworkError:
logger.warning(u'Growl notification failed: network error')
logger.warning('Growl notification failed: network error')
return
except gntp.notifier.errors.AuthError:
logger.warning(u'Growl notification failed: authentication error')
logger.warning('Growl notification failed: authentication error')
return

# Fix message
@@ -105,10 +90,10 @@ class GROWL(object):
icon=image
)
except gntp.notifier.errors.NetworkError:
logger.warning(u'Growl notification failed: network error')
logger.warning('Growl notification failed: network error')
return

logger.info(u"Growl notifications sent.")
logger.info("Growl notifications sent.")

def updateLibrary(self):
# For uniformity reasons not removed
@@ -157,13 +142,13 @@ class PROWL(object):
request_status = response.status

if request_status == 200:
logger.info(u"Prowl notifications sent.")
logger.info("Prowl notifications sent.")
return True
elif request_status == 401:
logger.info(u"Prowl auth failed: %s" % response.reason)
logger.info("Prowl auth failed: %s" % response.reason)
return False
else:
logger.info(u"Prowl notification failed.")
logger.info("Prowl notification failed.")
return False

def updateLibrary(self):
@@ -202,7 +187,7 @@ class XBMC(object):
self.password = headphones.CONFIG.XBMC_PASSWORD

def _sendhttp(self, host, command):
url_command = urllib.urlencode(command)
url_command = urllib.parse.urlencode(command)
url = host + '/xbmcCmds/xbmcHttp/?' + url_command

if self.password:
@@ -295,10 +280,10 @@ class LMS(object):

content = {'Content-Type': 'application/json'}

req = urllib2.Request(host + '/jsonrpc.js', data, content)
req = urllib.request.Request(host + '/jsonrpc.js', data, content)

try:
handle = urllib2.urlopen(req)
handle = urllib.request.urlopen(req)
except Exception as e:
logger.warn('Error opening LMS url: %s' % e)
return
@@ -424,7 +409,7 @@ class Plex(object):
sections = r.getElementsByTagName('Directory')

if not sections:
logger.info(u"Plex Media Server not running on: " + host)
logger.info("Plex Media Server not running on: " + host)
return False

for s in sections:
@@ -483,9 +468,9 @@ class NMA(object):
api = headphones.CONFIG.NMA_APIKEY
nma_priority = headphones.CONFIG.NMA_PRIORITY

logger.debug(u"NMA title: " + title)
logger.debug(u"NMA API: " + api)
logger.debug(u"NMA Priority: " + str(nma_priority))
logger.debug("NMA title: " + title)
logger.debug("NMA API: " + api)
logger.debug("NMA Priority: " + str(nma_priority))

if snatched:
event = snatched + " snatched!"
@@ -495,8 +480,8 @@ class NMA(object):
message = "Headphones has downloaded and postprocessed: " + \
artist + ' [' + album + ']'

logger.debug(u"NMA event: " + event)
logger.debug(u"NMA message: " + message)
logger.debug("NMA event: " + event)
logger.debug("NMA message: " + message)

batch = False

@@ -510,8 +495,8 @@ class NMA(object):
response = p.push(title, event, message, priority=nma_priority,
batch_mode=batch)

if not response[api][u'code'] == u'200':
logger.error(u'Could not send notification to NotifyMyAndroid')
if not response[api]['code'] == '200':
logger.error('Could not send notification to NotifyMyAndroid')
return False
else:
return True
@@ -543,10 +528,10 @@ class PUSHBULLET(object):
data=json.dumps(data))

if response:
logger.info(u"PushBullet notifications sent.")
logger.info("PushBullet notifications sent.")
return True
else:
logger.info(u"PushBullet notification failed.")
logger.info("PushBullet notification failed.")
return False

@@ -557,9 +542,9 @@ class PUSHALOT(object):

pushalot_authorizationtoken = headphones.CONFIG.PUSHALOT_APIKEY

logger.debug(u"Pushalot event: " + event)
logger.debug(u"Pushalot message: " + message)
logger.debug(u"Pushalot api: " + pushalot_authorizationtoken)
logger.debug("Pushalot event: " + event)
logger.debug("Pushalot message: " + message)
logger.debug("Pushalot api: " + pushalot_authorizationtoken)

http_handler = HTTPSConnection("pushalot.com")

@@ -576,18 +561,18 @@ class PUSHALOT(object):
response = http_handler.getresponse()
request_status = response.status

logger.debug(u"Pushalot response status: %r" % request_status)
logger.debug(u"Pushalot response headers: %r" % response.getheaders())
logger.debug(u"Pushalot response body: %r" % response.read())
logger.debug("Pushalot response status: %r" % request_status)
logger.debug("Pushalot response headers: %r" % response.getheaders())
logger.debug("Pushalot response body: %r" % response.read())

if request_status == 200:
logger.info(u"Pushalot notifications sent.")
logger.info("Pushalot notifications sent.")
return True
elif request_status == 410:
logger.info(u"Pushalot auth failed: %s" % response.reason)
logger.info("Pushalot auth failed: %s" % response.reason)
return False
else:
logger.info(u"Pushalot notification failed.")
logger.info("Pushalot notification failed.")
return False

@@ -618,7 +603,7 @@ class JOIN(object):
else:
self.url += '&deviceId={deviceid}'

response = urllib2.urlopen(self.url.format(apikey=self.apikey,
response = urllib.request.urlopen(self.url.format(apikey=self.apikey,
title=quote_plus(event),
text=quote_plus(
message.encode(
@@ -627,10 +612,10 @@ class JOIN(object):
deviceid=self.deviceid))

if response:
logger.info(u"Join notifications sent.")
logger.info("Join notifications sent.")
return True
else:
logger.error(u"Join notification failed.")
logger.error("Join notification failed.")
return False

@@ -669,7 +654,7 @@ class Synoindex(object):
out, error = p.communicate()
# synoindex never returns any codes other than '0',
# highly irritating
except OSError, e:
except OSError as e:
logger.warn("Error sending notification: %s" % str(e))

def notify_multiple(self, path_list):
@@ -710,10 +695,10 @@ class PUSHOVER(object):
headers=headers, data=data)

if response:
logger.info(u"Pushover notifications sent.")
logger.info("Pushover notifications sent.")
return True
else:
logger.error(u"Pushover notification failed.")
logger.error("Pushover notification failed.")
return False

def updateLibrary(self):
@@ -832,7 +817,7 @@ class TwitterNotifier(object):
access_token_key = headphones.CONFIG.TWITTER_USERNAME
access_token_secret = headphones.CONFIG.TWITTER_PASSWORD

logger.info(u"Sending tweet: " + message)
logger.info("Sending tweet: " + message)

api = twitter.Api(username, password, access_token_key,
access_token_secret)
@@ -840,7 +825,7 @@ class TwitterNotifier(object):
try:
api.PostUpdate(message)
except Exception as e:
logger.info(u"Error Sending Tweet: %s" % e)
logger.info("Error Sending Tweet: %s" % e)
return False

return True
@@ -938,7 +923,7 @@ class BOXCAR(object):
message += '<br></br><a href="http://musicbrainz.org/' \
'release-group/%s">MusicBrainz</a>' % rgid

data = urllib.urlencode({
data = urllib.parse.urlencode({
'user_credentials': headphones.CONFIG.BOXCAR_TOKEN,
'notification[title]': title.encode('utf-8'),
'notification[long_message]': message.encode('utf-8'),
@@ -947,12 +932,12 @@ class BOXCAR(object):
"/headphoneslogo.png"
})

req = urllib2.Request(self.url)
handle = urllib2.urlopen(req, data)
req = urllib.request.Request(self.url)
handle = urllib.request.urlopen(req, data)
handle.close()
return True

except urllib2.URLError as e:
except urllib.error.URLError as e:
logger.warn('Error sending Boxcar2 Notification: %s' % e)
return False

@@ -1011,7 +996,7 @@ class Email(object):
mailserver.quit()
return True

except Exception, e:
except Exception as e:
logger.warn('Error sending Email: %s' % e)
return False

@@ -1044,15 +1029,15 @@ class TELEGRAM(object):
payload = {'chat_id': userid, 'parse_mode': "HTML", 'caption': status + message}
try:
response = requests.post(TELEGRAM_API % (token, "sendPhoto"), data=payload, files=image_file)
except Exception, e:
logger.info(u'Telegram notify failed: ' + str(e))
except Exception as e:
logger.info('Telegram notify failed: ' + str(e))
# Sent text
else:
payload = {'chat_id': userid, 'parse_mode': "HTML", 'text': status + message}
try:
response = requests.post(TELEGRAM_API % (token, "sendMessage"), data=payload)
except Exception, e:
logger.info(u'Telegram notify failed: ' + str(e))
except Exception as e:
logger.info('Telegram notify failed: ' + str(e))

# Error logging
sent_successfuly = True
@@ -1060,7 +1045,7 @@ class TELEGRAM(object):
logger.info("Could not send notification to TelegramBot (token=%s). Response: [%s]", token, response.text)
sent_successfuly = False

logger.info(u"Telegram notifications sent.")
logger.info("Telegram notifications sent.")
return sent_successfuly

@@ -1080,15 +1065,15 @@ class SLACK(object):

try:
response = requests.post(SLACK_URL, json=payload)
except Exception, e:
logger.info(u'Slack notify failed: ' + str(e))
except Exception as e:
logger.info('Slack notify failed: ' + str(e))

sent_successfuly = True
if not response.status_code == 200:
logger.info(
u'Could not send notification to Slack. Response: [%s]',
'Could not send notification to Slack. Response: [%s]',
(response.text))
sent_successfuly = False

logger.info(u"Slack notifications sent.")
logger.info("Slack notifications sent.")
return sent_successfuly

@@ -20,8 +20,8 @@

from base64 import standard_b64encode
import httplib
import xmlrpclib
import http.client
import xmlrpc.client

import headphones
from headphones import logger
@@ -32,7 +32,7 @@ def sendNZB(nzb):
nzbgetXMLrpc = "%(protocol)s://%(username)s:%(password)s@%(host)s/xmlrpc"

if not headphones.CONFIG.NZBGET_HOST:
logger.error(u"No NZBget host found in configuration. Please configure it.")
logger.error("No NZBget host found in configuration. Please configure it.")
return False

if headphones.CONFIG.NZBGET_HOST.startswith('https://'):
@@ -46,25 +46,25 @@ def sendNZB(nzb):
"username": headphones.CONFIG.NZBGET_USERNAME,
"password": headphones.CONFIG.NZBGET_PASSWORD}

nzbGetRPC = xmlrpclib.ServerProxy(url)
nzbGetRPC = xmlrpc.client.ServerProxy(url)
try:
if nzbGetRPC.writelog("INFO", "headphones connected to drop of %s any moment now." % (
nzb.name + ".nzb")):
logger.debug(u"Successfully connected to NZBget")
logger.debug("Successfully connected to NZBget")
else:
logger.info(u"Successfully connected to NZBget, but unable to send a message" % (
logger.info("Successfully connected to NZBget, but unable to send a message" % (
|
||||
nzb.name + ".nzb"))
|
||||
|
||||
except httplib.socket.error:
|
||||
except http.client.socket.error:
|
||||
logger.error(
|
||||
u"Please check your NZBget host and port (if it is running). NZBget is not responding to this combination")
|
||||
"Please check your NZBget host and port (if it is running). NZBget is not responding to this combination")
|
||||
return False
|
||||
|
||||
except xmlrpclib.ProtocolError, e:
|
||||
except xmlrpc.client.ProtocolError as e:
|
||||
if e.errmsg == "Unauthorized":
|
||||
logger.error(u"NZBget password is incorrect.")
|
||||
logger.error("NZBget password is incorrect.")
|
||||
else:
|
||||
logger.error(u"Protocol Error: " + e.errmsg)
|
||||
logger.error("Protocol Error: " + e.errmsg)
|
||||
return False
|
||||
|
||||
nzbcontent64 = None
|
||||
@@ -72,8 +72,8 @@ def sendNZB(nzb):
|
||||
data = nzb.extraInfo[0]
|
||||
nzbcontent64 = standard_b64encode(data)
|
||||
|
||||
logger.info(u"Sending NZB to NZBget")
|
||||
logger.debug(u"URL: " + url)
|
||||
logger.info("Sending NZB to NZBget")
|
||||
logger.debug("URL: " + url)
|
||||
|
||||
dupekey = ""
|
||||
dupescore = 0
|
||||
@@ -131,12 +131,12 @@ def sendNZB(nzb):
|
||||
nzb.url)
|
||||
|
||||
if nzbget_result:
|
||||
logger.debug(u"NZB sent to NZBget successfully")
|
||||
logger.debug("NZB sent to NZBget successfully")
|
||||
return True
|
||||
else:
|
||||
logger.error(u"NZBget could not add %s to the queue" % (nzb.name + ".nzb"))
|
||||
logger.error("NZBget could not add %s to the queue" % (nzb.name + ".nzb"))
|
||||
return False
|
||||
except:
|
||||
logger.error(
|
||||
u"Connect Error to NZBget: could not add %s to the queue" % (nzb.name + ".nzb"))
|
||||
"Connect Error to NZBget: could not add %s to the queue" % (nzb.name + ".nzb"))
|
||||
return False
|
||||
|
||||
@@ -30,7 +30,7 @@ syntax elements are supported:
nonempty value only if any variable or optional inside returned
nonempty value, ignoring literals (like {'{'$That'}'}).
"""
from __future__ import print_function

from enum import Enum

__author__ = "Andrzej Ciarkowski <andrzej.ciarkowski@gmail.com>"
@@ -111,9 +111,9 @@ class _OptionalBlock(_Generator):
# type: (Mapping[str,str]) -> str
res = [(isinstance(x, _Generator), x.render(replacement)) for x in self._scope]
if any((t[0] and t[1] is not None and len(t[1]) != 0) for t in res):
return u"".join(t[1] for t in res)
return "".join(t[1] for t in res)
else:
return u""
return ""

def __eq__(self, other):
"""
@@ -122,15 +122,15 @@ class _OptionalBlock(_Generator):
return isinstance(other, _OptionalBlock) and self._scope == other._scope


_OPTIONAL_START = u'{'
_OPTIONAL_END = u'}'
_ESCAPE_CHAR = u'\''
_REPLACEMENT_START = u'$'
_OPTIONAL_START = '{'
_OPTIONAL_END = '}'
_ESCAPE_CHAR = '\''
_REPLACEMENT_START = '$'


def _is_replacement_valid(c):
# type: (str) -> bool
return c.isalnum() or c == u'_'
return c.isalnum() or c == '_'


class _State(Enum):
@@ -243,7 +243,7 @@ class Pattern(object):
def __call__(self, replacement):
# type: (Mapping[str,str]) -> str
'''Execute path rendering/substitution based on replacement dictionary.'''
return u"".join(p.render(replacement) for p in self._pattern)
return "".join(p.render(replacement) for p in self._pattern)

def _get_warnings(self):
# type: () -> str
@@ -262,6 +262,6 @@ def render(pattern, replacement):

if __name__ == "__main__":
# primitive test ;)
p = Pattern(u"{$Disc.}$Track - $Artist - $Title{ [$Year]}")
d = {'$Disc': '', '$Track': '05', '$Artist': u'Grzegżółka', '$Title': u'Błona kapłona', '$Year': '2019'}
assert p(d) == u"05 - Grzegżółka - Błona kapłona [2019]"
p = Pattern("{$Disc.}$Track - $Artist - $Title{ [$Year]}")
d = {'$Disc': '', '$Track': '05', '$Artist': 'Grzegżółka', '$Title': 'Błona kapłona', '$Year': '2019'}
assert p(d) == "05 - Grzegżółka - Błona kapłona [2019]"

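A note on the u-prefix removals in this file: in Python 3 every str literal is Unicode, so u'...' and '...' denote the same type and the prefix is purely cosmetic (it was re-allowed in 3.3 only to ease porting). A one-line check:

    assert u"Grzegżółka" == "Grzegżółka" and type(u"x") is str  # Python 3: the u prefix is a no-op
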
@@ -19,7 +19,7 @@ Test module for pathrender.
import headphones.pathrender as _pr
from headphones.pathrender import Pattern, Warnings

from unittestcompat import TestCase
from .unittestcompat import TestCase


__author__ = "Andrzej Ciarkowski <andrzej.ciarkowski@gmail.com>"
@@ -32,21 +32,21 @@ class PathRenderTest(TestCase):

def test_parsing(self):
"""pathrender: pattern parsing"""
pattern = Pattern(u"{$Disc.}$Track - $Artist - $Title{ [$Year]}")
pattern = Pattern("{$Disc.}$Track - $Artist - $Title{ [$Year]}")
expected = [
_pr._OptionalBlock([
_pr._Replacement(u"$Disc"),
_pr._LiteralText(u".")
_pr._Replacement("$Disc"),
_pr._LiteralText(".")
]),
_pr._Replacement(u"$Track"),
_pr._LiteralText(u" - "),
_pr._Replacement(u"$Artist"),
_pr._LiteralText(u" - "),
_pr._Replacement(u"$Title"),
_pr._Replacement("$Track"),
_pr._LiteralText(" - "),
_pr._Replacement("$Artist"),
_pr._LiteralText(" - "),
_pr._Replacement("$Title"),
_pr._OptionalBlock([
_pr._LiteralText(u" ["),
_pr._Replacement(u"$Year"),
_pr._LiteralText(u"]")
_pr._LiteralText(" ["),
_pr._Replacement("$Year"),
_pr._LiteralText("]")
])
]
self.assertEqual(expected, pattern._pattern)
@@ -54,27 +54,27 @@ class PathRenderTest(TestCase):

def test_parsing_warnings(self):
"""pathrender: pattern parsing with warnings"""
pattern = Pattern(u"{$Disc.}$Track - $Artist - $Title{ [$Year]")
pattern = Pattern("{$Disc.}$Track - $Artist - $Title{ [$Year]")
self.assertEqual(set([Warnings.UNCLOSED_OPTIONAL]), pattern.warnings)
pattern = Pattern(u"{$Disc.}$Track - $Artist - $Title{ [$Year]'}")
pattern = Pattern("{$Disc.}$Track - $Artist - $Title{ [$Year]'}")
self.assertEqual(set([Warnings.UNCLOSED_ESCAPE, Warnings.UNCLOSED_OPTIONAL]), pattern.warnings)

def test_replacement(self):
"""pathrender: _Replacement variable substitution"""
r = _pr._Replacement(u"$Title")
r = _pr._Replacement("$Title")
subst = {'$Title': 'foo', '$Track': 'bar'}
res = r.render(subst)
self.assertEqual(res, u'foo', 'check valid replacement')
self.assertEqual(res, 'foo', 'check valid replacement')
subst = {}
res = r.render(subst)
self.assertEqual(res, u'$Title', 'check missing replacement')
self.assertEqual(res, '$Title', 'check missing replacement')
subst = {'$Title': None}
res = r.render(subst)
self.assertEqual(res, '', 'check render() works with None')

def test_literal(self):
"""pathrender: _Literal text rendering"""
l = _pr._LiteralText(u"foo")
l = _pr._LiteralText("foo")
subst = {'$foo': 'bar'}
res = l.render(subst)
self.assertEqual(res, 'foo')
@@ -82,12 +82,12 @@ class PathRenderTest(TestCase):
def test_optional(self):
"""pathrender: _OptionalBlock element processing"""
o = _pr._OptionalBlock([
_pr._Replacement(u"$Title"),
_pr._LiteralText(u".foobar")
_pr._Replacement("$Title"),
_pr._LiteralText(".foobar")
])
subst = {'$Title': 'foo', '$Track': 'bar'}
res = o.render(subst)
self.assertEqual(res, u'foo.foobar', 'check non-empty replacement')
self.assertEqual(res, 'foo.foobar', 'check non-empty replacement')
subst = {'$Title': ''}
res = o.render(subst)
self.assertEqual(res, '', 'check empty replacement')

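The `from unittestcompat import TestCase` → `from .unittestcompat import TestCase` change above is forced by PEP 328: Python 3 dropped implicit relative imports, so a bare module name resolves only via sys.path, never the importing package. A sketch with a hypothetical package layout:

    # mypkg/tests.py -- hypothetical layout for illustration
    from .helper import util_func   # explicit relative import; works on Python 3
    # from helper import util_func  # implicit relative import; ImportError on Python 3
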
@@ -25,7 +25,7 @@ import headphones
from beets import autotag
from beets import config as beetsconfig
from beets import logging as beetslogging
from beets.mediafile import MediaFile, FileTypeError, UnreadableFileError
from mediafile import MediaFile, FileTypeError, UnreadableFileError
from beetsplug import lyrics as beetslyrics
from headphones import notifiers, utorrent, transmission, deluge, qbittorrent
from headphones import db, albumart, librarysync
@@ -65,8 +65,8 @@ def checkFolder():
folder_name = torrent_folder_name

if folder_name:
album_path = os.path.join(download_dir, folder_name).encode(
headphones.SYS_ENCODING, 'replace')
print(folder_name)
album_path = os.path.join(download_dir, folder_name)
logger.debug("Checking if %s exists" % album_path)

if os.path.exists(album_path):
@@ -80,6 +80,7 @@ def checkFolder():


def verify(albumid, albumpath, Kind=None, forced=False, keep_original_folder=False, single=False):
print(albumpath)
myDB = db.DBConnection()
release = myDB.action('SELECT * from albums WHERE AlbumID=?', [albumid]).fetchone()
tracks = myDB.select('SELECT * from tracks WHERE AlbumID=?', [albumid])
@@ -135,11 +136,11 @@ def verify(albumid, albumpath, Kind=None, forced=False, keep_original_folder=Fal
if headphones.CONFIG.RENAME_FROZEN:
renameUnprocessedFolder(albumpath, tag="Frozen")
else:
logger.warn(u"Won't rename %s to mark as 'Frozen', because it is disabled.",
albumpath.decode(headphones.SYS_ENCODING, 'replace'))
logger.warn("Won't rename %s to mark as 'Frozen', because it is disabled.",
albumpath)
return

logger.info(u"Now adding/updating artist: " + release_dict['artist_name'])
logger.info("Now adding/updating artist: " + release_dict['artist_name'])

if release_dict['artist_name'].startswith('The '):
sortname = release_dict['artist_name'][4:]
@@ -161,7 +162,7 @@ def verify(albumid, albumpath, Kind=None, forced=False, keep_original_folder=Fal

myDB.upsert("artists", newValueDict, controlValueDict)

logger.info(u"Now adding album: " + release_dict['title'])
logger.info("Now adding album: " + release_dict['title'])
controlValueDict = {"AlbumID": albumid}

newValueDict = {"ArtistID": release_dict['artist_id'],
@@ -202,7 +203,7 @@ def verify(albumid, albumpath, Kind=None, forced=False, keep_original_folder=Fal
newValueDict = {"Status": "Paused"}

myDB.upsert("artists", newValueDict, controlValueDict)
logger.info(u"Addition complete for: " + release_dict['title'] + " - " + release_dict[
logger.info("Addition complete for: " + release_dict['title'] + " - " + release_dict[
'artist_name'])

release = myDB.action('SELECT * from albums WHERE AlbumID=?', [albumid]).fetchone()
@@ -211,17 +212,18 @@ def verify(albumid, albumpath, Kind=None, forced=False, keep_original_folder=Fal
downloaded_track_list = []
downloaded_cuecount = 0

for r, d, f in os.walk(albumpath):
for files in f:
if any(files.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
downloaded_track_list.append(os.path.join(r, files))
elif files.lower().endswith('.cue'):
media_extensions = tuple(map(lambda x: '.' + x, headphones.MEDIA_FORMATS))

for root, dirs, files in os.walk(albumpath):
for file in files:
if file.endswith(media_extensions):
downloaded_track_list.append(os.path.join(root, file))
elif file.endswith('.cue'):
downloaded_cuecount += 1
# if any of the files end in *.part, we know the torrent isn't done yet. Process if forced, though
elif files.lower().endswith(('.part', '.utpart')) and not forced:
elif file.endswith(('.part', '.utpart')) and not forced:
logger.info(
"Looks like " + os.path.basename(albumpath).decode(headphones.SYS_ENCODING,
'replace') + " isn't complete yet. Will try again on the next run")
"Looks like " + os.path.basename(albumpath) + " isn't complete yet. Will try again on the next run")
return

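The rewritten walk above relies on str.endswith accepting a tuple of suffixes, which replaces the any(...) generator with a single call. One caveat: endswith is case-sensitive, and the new code drops the .lower() the old version applied. A standalone sketch (the MEDIA_FORMATS values are illustrative, not from this commit) that keeps the old case-insensitive behaviour:

    MEDIA_FORMATS = ('mp3', 'flac', 'ogg')  # illustrative values only
    media_extensions = tuple('.' + x.lower() for x in MEDIA_FORMATS)

    def is_media(filename):
        # lowercasing the name preserves the old case-insensitive matching
        return filename.lower().endswith(media_extensions)

    assert is_media('01 - Track.FLAC')
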
# Force single file through
@@ -264,10 +266,7 @@ def verify(albumid, albumpath, Kind=None, forced=False, keep_original_folder=Fal
try:
f = MediaFile(downloaded_track)
except Exception as e:
logger.info(
u"Exception from MediaFile for: " + downloaded_track.decode(headphones.SYS_ENCODING,
'replace') + u" : " + unicode(
e))
logger.info(f"Exception from MediaFile for {downloaded_track}: {e}")
continue

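This is the logging pattern repeated through the rest of the commit: string concatenation plus a manual .decode(SYS_ENCODING, 'replace') and unicode(e) collapses into one f-string, because Python 3 paths and exceptions already render as str. A before/after sketch with placeholder values:

    downloaded_track = "05 - Song.mp3"  # placeholder
    e = ValueError("bad header")        # placeholder
    # old: "... for: " + downloaded_track.decode(SYS_ENCODING, 'replace') + u" : " + unicode(e)
    msg = f"Exception from MediaFile for {downloaded_track}: {e}"
    assert msg == "Exception from MediaFile for 05 - Song.mp3: bad header"
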
if not f.artist:
@@ -275,10 +274,10 @@ def verify(albumid, albumpath, Kind=None, forced=False, keep_original_folder=Fal
if not f.album:
continue

metaartist = helpers.latinToAscii(f.artist.lower()).encode('UTF-8')
dbartist = helpers.latinToAscii(release['ArtistName'].lower()).encode('UTF-8')
metaalbum = helpers.latinToAscii(f.album.lower()).encode('UTF-8')
dbalbum = helpers.latinToAscii(release['AlbumTitle'].lower()).encode('UTF-8')
metaartist = helpers.latinToAscii(f.artist.lower())
dbartist = helpers.latinToAscii(release['ArtistName'].lower())
metaalbum = helpers.latinToAscii(f.album.lower())
dbalbum = helpers.latinToAscii(release['AlbumTitle'].lower())

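Dropping the .encode('UTF-8') calls above is safe because the later substring checks compare str to str; under Python 3 the old code would instead mix bytes and str and fail. A two-line illustration:

    assert "beyoncé" in "beyoncé - halo"             # str in str: fine
    # "beyoncé".encode('UTF-8') in "beyoncé - halo"  # bytes in str: TypeError on Python 3
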
logger.debug('Matching metadata artist: %s with artist name: %s' % (metaartist, dbartist))
logger.debug('Matching metadata album: %s with album name: %s' % (metaalbum, dbalbum))
@@ -298,8 +297,8 @@ def verify(albumid, albumpath, Kind=None, forced=False, keep_original_folder=Fal
if not track['TrackTitle']:
continue

dbtrack = helpers.latinToAscii(track['TrackTitle'].lower()).encode('UTF-8')
filetrack = helpers.latinToAscii(split_track_name).encode('UTF-8')
dbtrack = helpers.latinToAscii(track['TrackTitle'].lower())
filetrack = helpers.latinToAscii(split_track_name)
logger.debug('Checking if track title: %s is in file name: %s' % (dbtrack, filetrack))

if dbtrack in filetrack:
@@ -340,11 +339,9 @@ def verify(albumid, albumpath, Kind=None, forced=False, keep_original_folder=Fal
keep_original_folder, forced, single)
return

logger.warn(u'Could not identify album: %s. It may not be the intended album.',
albumpath.decode(headphones.SYS_ENCODING, 'replace'))
logger.warn(f"Could not identify {albumpath}. It may not be the intended album")
markAsUnprocessed(albumid, albumpath, keep_original_folder)


def markAsUnprocessed(albumid, albumpath, keep_original_folder=False):
myDB = db.DBConnection()
myDB.action(
@@ -354,13 +351,19 @@ def markAsUnprocessed(albumid, albumpath, keep_original_folder=False):
if headphones.CONFIG.RENAME_UNPROCESSED and not keep_original_folder:
renameUnprocessedFolder(albumpath, tag="Unprocessed")
else:
logger.warn(u"Won't rename %s to mark as 'Unprocessed', because it is disabled or folder is being kept.",
albumpath.decode(headphones.SYS_ENCODING, 'replace'))
logger.warn(
f"Won't rename {albumpath} to mark as 'Unprocessed', "
f"because it is disabled or folder is being kept."
)
return


def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list, Kind=None,
keep_original_folder=False, forced=False, single=False):
logger.info('Starting post-processing for: %s - %s' % (release['ArtistName'], release['AlbumTitle']))
logger.info(
f"Starting post-processing for: {release['ArtistName']} - "
f"{release['AlbumTitle']}"
)
new_folder = None

# Preserve the torrent dir
@@ -393,12 +396,10 @@ def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list,
f = MediaFile(downloaded_track)
builder.add_media_file(f)
except (FileTypeError, UnreadableFileError):
logger.error("Track file is not a valid media file: %s. Not continuing.",
downloaded_track.decode(headphones.SYS_ENCODING, "replace"))
logger.error(f"`{downloaded_track}` is not a valid media file. Not continuing.")
return
except IOError:
logger.error("Unable to find media file: %s. Not continuing.", downloaded_track.decode(
headphones.SYS_ENCODING, "replace"))
logger.error(f"Unable to find `{downloaded_track}`. Not continuing.")
if new_folder:
shutil.rmtree(new_folder)
return
@@ -416,9 +417,10 @@ def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list,
fp.seek(0)
except IOError as e:
logger.debug("Write check exact error: %s", e)
logger.error("Track file is not writable. This is required "
"for some post processing steps: %s. Not continuing.",
downloaded_track.decode(headphones.SYS_ENCODING, "replace"))
logger.error(
f"`{downloaded_track}` is not writable. This is required "
"for some post processing steps. Not continuing."
)
if new_folder:
shutil.rmtree(new_folder)
return
@@ -475,7 +477,8 @@ def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list,
else:
albumpaths = [albumpath]

updateFilePermissions(albumpaths)
if headphones.CONFIG.FILE_PERMISSIONS_ENABLED:
updateFilePermissions(albumpaths)

myDB = db.DBConnection()
myDB.action('UPDATE albums SET status = "Downloaded" WHERE AlbumID=?', [albumid])
@@ -491,7 +494,7 @@ def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list,
if seed_snatched:
hash = seed_snatched['TorrentHash']
torrent_removed = False
logger.info(u'%s - %s. Checking if torrent has finished seeding and can be removed' % (
logger.info('%s - %s. Checking if torrent has finished seeding and can be removed' % (
release['ArtistName'], release['AlbumTitle']))
if headphones.CONFIG.TORRENT_DOWNLOADER == 1:
torrent_removed = transmission.removeTorrent(hash, True)
@@ -517,18 +520,18 @@ def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list,
ArtistName=release['ArtistName'])

logger.info(
u'Post-processing for %s - %s complete' % (release['ArtistName'], release['AlbumTitle']))
'Post-processing for %s - %s complete' % (release['ArtistName'], release['AlbumTitle']))

pushmessage = release['ArtistName'] + ' - ' + release['AlbumTitle']
statusmessage = "Download and Postprocessing completed"

if headphones.CONFIG.GROWL_ENABLED:
logger.info(u"Growl request")
logger.info("Growl request")
growl = notifiers.GROWL()
growl.notify(pushmessage, statusmessage)

if headphones.CONFIG.PROWL_ENABLED:
logger.info(u"Prowl request")
logger.info("Prowl request")
prowl = notifiers.PROWL()
prowl.notify(pushmessage, statusmessage)

@@ -559,7 +562,7 @@ def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list,
nma.notify(release['ArtistName'], release['AlbumTitle'])

if headphones.CONFIG.PUSHALOT_ENABLED:
logger.info(u"Pushalot request")
logger.info("Pushalot request")
pushalot = notifiers.PUSHALOT()
pushalot.notify(pushmessage, statusmessage)

@@ -569,35 +572,36 @@ def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list,
syno.notify(albumpath)

if headphones.CONFIG.PUSHOVER_ENABLED:
logger.info(u"Pushover request")
logger.info("Pushover request")
pushover = notifiers.PUSHOVER()
pushover.notify(pushmessage, "Headphones")

if headphones.CONFIG.PUSHBULLET_ENABLED:
logger.info(u"PushBullet request")
logger.info("PushBullet request")
pushbullet = notifiers.PUSHBULLET()
pushbullet.notify(pushmessage, statusmessage)

if headphones.CONFIG.JOIN_ENABLED:
logger.info(u"Join request")
logger.info("Join request")
join = notifiers.JOIN()
join.notify(pushmessage, statusmessage)

if headphones.CONFIG.TELEGRAM_ENABLED:
logger.info(u"Telegram request")
logger.info("Telegram request")
telegram = notifiers.TELEGRAM()
telegram.notify(statusmessage, pushmessage)

if headphones.CONFIG.TWITTER_ENABLED:
logger.info(u"Sending Twitter notification")
twitter = notifiers.TwitterNotifier()
twitter.notify_download(pushmessage)
logger.info("Twitter notifications temporarily disabled")
#logger.info("Sending Twitter notification")
#twitter = notifiers.TwitterNotifier()
#twitter.notify_download(pushmessage)

if headphones.CONFIG.OSX_NOTIFY_ENABLED:
from headphones import cache
c = cache.Cache()
album_art = c.get_artwork_from_cache(None, release['AlbumID'])
logger.info(u"Sending OS X notification")
logger.info("Sending OS X notification")
osx_notify = notifiers.OSX_NOTIFY()
osx_notify.notify(release['ArtistName'],
release['AlbumTitle'],
@@ -605,13 +609,13 @@ def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list,
image=album_art)

if headphones.CONFIG.BOXCAR_ENABLED:
logger.info(u"Sending Boxcar2 notification")
logger.info("Sending Boxcar2 notification")
boxcar = notifiers.BOXCAR()
boxcar.notify('Headphones processed: ' + pushmessage,
statusmessage, release['AlbumID'])

if headphones.CONFIG.SUBSONIC_ENABLED:
logger.info(u"Sending Subsonic update")
logger.info("Sending Subsonic update")
subsonic = notifiers.SubSonicNotifier()
subsonic.notify(albumpaths)

@@ -620,7 +624,7 @@ def doPostProcessing(albumid, albumpath, release, tracks, downloaded_track_list,
mpc.notify()

if headphones.CONFIG.EMAIL_ENABLED:
logger.info(u"Sending Email notification")
logger.info("Sending Email notification")
email = notifiers.Email()
subject = release['ArtistName'] + ' - ' + release['AlbumTitle']
email.notify(subject, "Download and Postprocessing completed")
@@ -636,23 +640,21 @@ def embedAlbumArt(artwork, downloaded_track_list):
try:
f = MediaFile(downloaded_track)
except:
logger.error(u'Could not read %s. Not adding album art' % downloaded_track.decode(
headphones.SYS_ENCODING, 'replace'))
logger.error(f"Could not read {downloaded_track}. Not adding album art")
continue

logger.debug('Adding album art to: %s' % downloaded_track)
logger.debug(f"Adding album art to `{downloaded_track}`")

try:
f.art = artwork
f.save()
except Exception as e:
logger.error(u'Error embedding album art to: %s. Error: %s' % (
downloaded_track.decode(headphones.SYS_ENCODING, 'replace'), str(e)))
logger.error(f"Error embedding album art to `{downloaded_track}`: {e}")
continue


def addAlbumArt(artwork, albumpath, release, metadata_dict):
logger.info('Adding album art to folder')
logger.info(f"Adding album art to `{albumpath}`")
md = metadata.album_metadata(albumpath, release, metadata_dict)

ext = ".jpg"
@@ -663,8 +665,7 @@ def addAlbumArt(artwork, albumpath, release, metadata_dict):
album_art_name = helpers.pattern_substitute(
headphones.CONFIG.ALBUM_ART_FORMAT.strip(), md) + ext

album_art_name = helpers.replace_illegal_chars(album_art_name).encode(
headphones.SYS_ENCODING, 'replace')
album_art_name = helpers.replace_illegal_chars(album_art_name)

if headphones.CONFIG.FILE_UNDERSCORES:
album_art_name = album_art_name.replace(' ', '_')
@@ -684,14 +685,13 @@ def cleanupFiles(albumpath):
logger.info('Cleaning up files')

for r, d, f in os.walk(albumpath):
for files in f:
if not any(files.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
logger.debug('Removing: %s' % files)
for file in f:
if not any(file.lower().endswith('.' + x.lower()) for x in headphones.MEDIA_FORMATS):
logger.debug('Removing: %s' % file)
try:
os.remove(os.path.join(r, files))
os.remove(os.path.join(r, file))
except Exception as e:
logger.error(u'Could not remove file: %s. Error: %s' % (
files.decode(headphones.SYS_ENCODING, 'replace'), e))
logger.error('Could not remove file: %s. Error: %s' % (file, e))


def renameNFO(albumpath):
@@ -701,19 +701,16 @@ def renameNFO(albumpath):
for file in f:
if file.lower().endswith('.nfo'):
if not file.lower().endswith('.orig.nfo'):
logger.debug('Renaming: "%s" to "%s"' % (
file.decode(headphones.SYS_ENCODING, 'replace'),
file.decode(headphones.SYS_ENCODING, 'replace') + '-orig'))
try:
new_file_name = os.path.join(r, file)[:-3] + 'orig.nfo'
logger.debug(f"Renaming `{file}` to `{new_file_name}`")
os.rename(os.path.join(r, file), new_file_name)
except Exception as e:
logger.error(u'Could not rename file: %s. Error: %s' % (
os.path.join(r, file).decode(headphones.SYS_ENCODING, 'replace'), e))
logger.error(f"Could not rename {file}: {e}")


def moveFiles(albumpath, release, metadata_dict):
logger.info("Moving files: %s" % albumpath)
logger.info(f"Moving files: `{albumpath}`")

md = metadata.album_metadata(albumpath, release, metadata_dict)
folder = helpers.pattern_substitute(
@@ -750,12 +747,8 @@ def moveFiles(albumpath, release, metadata_dict):
make_lossy_folder = False
make_lossless_folder = False

lossy_destination_path = os.path.normpath(
os.path.join(headphones.CONFIG.DESTINATION_DIR, folder)).encode(headphones.SYS_ENCODING,
'replace')
lossless_destination_path = os.path.normpath(
os.path.join(headphones.CONFIG.LOSSLESS_DESTINATION_DIR, folder)).encode(
headphones.SYS_ENCODING, 'replace')
lossy_destination_path = os.path.join(headphones.CONFIG.DESTINATION_DIR, folder)
lossless_destination_path = os.path.join(headphones.CONFIG.LOSSLESS_DESTINATION_DIR, folder)

# If they set a destination dir for lossless media, only create the lossy folder if there is lossy media
if headphones.CONFIG.LOSSLESS_DESTINATION_DIR:
@@ -780,8 +773,9 @@ def moveFiles(albumpath, release, metadata_dict):
shutil.rmtree(lossless_destination_path)
except Exception as e:
logger.error(
"Error deleting existing folder: %s. Creating duplicate folder. Error: %s" % (
lossless_destination_path.decode(headphones.SYS_ENCODING, 'replace'), e))
f"Error deleting `{lossless_destination_path}`. "
f"Creating duplicate folder. Error: {e}"
)
create_duplicate_folder = True

if not headphones.CONFIG.REPLACE_EXISTING_FOLDERS or create_duplicate_folder:
@@ -791,8 +785,11 @@ def moveFiles(albumpath, release, metadata_dict):
while True:
newfolder = temp_folder + '[%i]' % i
lossless_destination_path = os.path.normpath(
os.path.join(headphones.CONFIG.LOSSLESS_DESTINATION_DIR, newfolder)).encode(
headphones.SYS_ENCODING, 'replace')
os.path.join(
headphones.CONFIG.LOSSLESS_DESTINATION_DIR,
newfolder
)
)
if os.path.exists(lossless_destination_path):
i += 1
else:
@@ -818,8 +815,9 @@ def moveFiles(albumpath, release, metadata_dict):
shutil.rmtree(lossy_destination_path)
except Exception as e:
logger.error(
"Error deleting existing folder: %s. Creating duplicate folder. Error: %s" % (
lossy_destination_path.decode(headphones.SYS_ENCODING, 'replace'), e))
f"Error deleting `{lossy_destination_path}`. "
f"Creating duplicate folder. Error: {e}"
)
create_duplicate_folder = True

if not headphones.CONFIG.REPLACE_EXISTING_FOLDERS or create_duplicate_folder:
@@ -829,8 +827,11 @@ def moveFiles(albumpath, release, metadata_dict):
while True:
newfolder = temp_folder + '[%i]' % i
lossy_destination_path = os.path.normpath(
os.path.join(headphones.CONFIG.DESTINATION_DIR, newfolder)).encode(
headphones.SYS_ENCODING, 'replace')
os.path.join(
headphones.CONFIG.DESTINATION_DIR,
newfolder
)
)
if os.path.exists(lossy_destination_path):
i += 1
else:
@@ -876,12 +877,11 @@ def moveFiles(albumpath, release, metadata_dict):
os.remove(file_to_move)
except Exception as e:
logger.error(
"Error deleting file '" + file_to_move.decode(headphones.SYS_ENCODING,
'replace') + "' from source directory")
f"Error deleting `{file_to_move}` from source directory")
else:
logger.error("Error copying '" + file_to_move.decode(headphones.SYS_ENCODING,
'replace') + "'. Not deleting from download directory")

logger.error(
f"Error copying `{file_to_move}`. "
f"Not deleting from download directory")
elif make_lossless_folder and not make_lossy_folder:

for file_to_move in files_to_move:
@@ -910,20 +910,20 @@ def moveFiles(albumpath, release, metadata_dict):

if headphones.CONFIG.FOLDER_PERMISSIONS_ENABLED:
try:
os.chmod(os.path.normpath(temp_f).encode(headphones.SYS_ENCODING, 'replace'),
os.chmod(os.path.normpath(temp_f),
int(headphones.CONFIG.FOLDER_PERMISSIONS, 8))
except Exception as e:
logger.error("Error trying to change permissions on folder: %s. %s",
temp_f.decode(headphones.SYS_ENCODING, 'replace'), e)
logger.error(f"Error trying to change permissions on `{temp_f}`: {e}")
else:
logger.debug("Not changing folder permissions, since it is disabled: %s",
temp_f.decode(headphones.SYS_ENCODING, 'replace'))
logger.debug(
f"Not changing permissions on `{temp_f}`, "
"since it is disabled")

# If we failed to move all the files out of the directory, this will fail too
try:
shutil.rmtree(albumpath)
except Exception as e:
logger.error('Could not remove directory: %s. %s', albumpath, e)
logger.error(f"Could not remove `{albumpath}`: {e}")

destination_paths = []

@@ -952,11 +952,15 @@ def correctMetadata(albumid, release, downloaded_track_list):
headphones.LOSSY_MEDIA_FORMATS):
lossy_items.append(beets.library.Item.from_path(downloaded_track))
else:
logger.warn("Skipping: %s because it is not a mutagen friendly file format",
downloaded_track.decode(headphones.SYS_ENCODING, 'replace'))
logger.warn(
f"Skipping `{downloaded_track}` because it is "
f"not a mutagen friendly file format"
)
continue
except Exception as e:
logger.error("Beets couldn't create an Item from: %s - not a media file? %s",
downloaded_track.decode(headphones.SYS_ENCODING, 'replace'), str(e))
logger.error(
f"Beets couldn't create an Item from `{downloaded_track}`: {e}")
continue

for items in [lossy_items, lossless_items]:

@@ -1018,11 +1022,9 @@ def correctMetadata(albumid, release, downloaded_track_list):
for item in items:
try:
item.write()
logger.info("Successfully applied metadata to: %s",
item.path.decode(headphones.SYS_ENCODING, 'replace'))
logger.info(f"Successfully applied metadata to `{item.path}`")
except Exception as e:
logger.warn("Error writing metadata to '%s': %s",
item.path.decode(headphones.SYS_ENCODING, 'replace'), str(e))
logger.warn(f"Error writing metadata to `{item.path}`: {e}")
return False

return True
@@ -1048,11 +1050,11 @@ def embedLyrics(downloaded_track_list):
headphones.LOSSY_MEDIA_FORMATS):
lossy_items.append(beets.library.Item.from_path(downloaded_track))
else:
logger.warn("Skipping: %s because it is not a mutagen friendly file format",
downloaded_track.decode(headphones.SYS_ENCODING, 'replace'))
logger.warn(
f"Skipping `{downloaded_track}` because it is "
f"not a mutagen friendly file format")
except Exception as e:
logger.error("Beets couldn't create an Item from: %s - not a media file? %s",
downloaded_track.decode(headphones.SYS_ENCODING, 'replace'), str(e))
logger.error(f"Beets couldn't create an Item from `{downloaded_track}`: {e}")

for items in [lossy_items, lossless_items]:

@@ -1067,7 +1069,7 @@ def embedLyrics(downloaded_track_list):
if any(lyrics):
break

lyrics = u"\n\n---\n\n".join([l for l in lyrics if l])
lyrics = "\n\n---\n\n".join([l for l in lyrics if l])

if lyrics:
logger.debug('Adding lyrics to: %s', item.title)
@@ -1099,8 +1101,7 @@ def renameFiles(albumpath, downloaded_track_list, release):
headphones.CONFIG.FILE_FORMAT.strip(), md
).replace('/', '_') + ext

new_file_name = helpers.replace_illegal_chars(new_file_name).encode(
headphones.SYS_ENCODING, 'replace')
new_file_name = helpers.replace_illegal_chars(new_file_name)

if headphones.CONFIG.FILE_UNDERSCORES:
new_file_name = new_file_name.replace(' ', '_')
@@ -1111,37 +1112,28 @@ def renameFiles(albumpath, downloaded_track_list, release):
new_file = os.path.join(albumpath, new_file_name)

if downloaded_track == new_file_name:
logger.debug("Renaming for: " + downloaded_track.decode(
headphones.SYS_ENCODING, 'replace') + " is not necessary")
logger.debug(f"Renaming for {downloaded_track} is not necessary")
continue

logger.debug('Renaming %s ---> %s',
downloaded_track.decode(headphones.SYS_ENCODING, 'replace'),
new_file_name.decode(headphones.SYS_ENCODING, 'replace'))
logger.debug(f"Renaming {downloaded_track} ---> {new_file_name}")
try:
os.rename(downloaded_track, new_file)
except Exception as e:
logger.error('Error renaming file: %s. Error: %s',
downloaded_track.decode(headphones.SYS_ENCODING, 'replace'), e)
logger.error(f"Error renaming {downloaded_track}: {e}")
continue


def updateFilePermissions(albumpaths):
for folder in albumpaths:
logger.info("Updating file permissions in %s", folder)
logger.info(f"Updating file permissions in `{folder}`")
for r, d, f in os.walk(folder):
for files in f:
full_path = os.path.join(r, files)
if headphones.CONFIG.FILE_PERMISSIONS_ENABLED:
try:
os.chmod(full_path, int(headphones.CONFIG.FILE_PERMISSIONS, 8))
except:
logger.error("Could not change permissions for file: %s", full_path)
continue
else:
logger.debug("Not changing file permissions, since it is disabled: %s",
full_path.decode(headphones.SYS_ENCODING, 'replace'))

try:
os.chmod(full_path, int(headphones.CONFIG.FILE_PERMISSIONS, 8))
except:
logger.error(f"Could not change permissions for `{full_path}`")
continue

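The permissions code above keeps the int(value, 8) idiom because the mode is stored as text in the config; base-8 parsing turns it into the numeric mode os.chmod expects. A short sketch (FILE_PERMISSIONS stands in for the config value):

    import os, tempfile

    FILE_PERMISSIONS = "0644"  # the config stores the mode as a string
    assert int(FILE_PERMISSIONS, 8) == 0o644 == 420

    with tempfile.NamedTemporaryFile() as tmp:
        os.chmod(tmp.name, int(FILE_PERMISSIONS, 8))  # same call as the code above
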
def renameUnprocessedFolder(path, tag):
"""
@@ -1168,18 +1160,16 @@ def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None, keep_orig
ignored = 0

if album_dir:
folders = [album_dir.encode(headphones.SYS_ENCODING, 'replace')]
folders = [album_dir]
else:
download_dirs = []

if dir:
download_dirs.append(dir.encode(headphones.SYS_ENCODING, 'replace'))
download_dirs.append(dir)
if headphones.CONFIG.DOWNLOAD_DIR and not dir:
download_dirs.append(
headphones.CONFIG.DOWNLOAD_DIR.encode(headphones.SYS_ENCODING, 'replace'))
download_dirs.append(headphones.CONFIG.DOWNLOAD_DIR)
if headphones.CONFIG.DOWNLOAD_TORRENT_DIR and not dir:
download_dirs.append(
headphones.CONFIG.DOWNLOAD_TORRENT_DIR.encode(headphones.SYS_ENCODING, 'replace'))
download_dirs.append(headphones.CONFIG.DOWNLOAD_TORRENT_DIR)

# If DOWNLOAD_DIR and DOWNLOAD_TORRENT_DIR are the same, remove the duplicate to prevent us from trying to process the same folder twice.
download_dirs = list(set(download_dirs))
@@ -1223,7 +1213,7 @@ def forcePostProcess(dir=None, expand_subfolders=True, album_dir=None, keep_orig
myDB = db.DBConnection()

for folder in folders:
folder_basename = os.path.basename(folder).decode(headphones.SYS_ENCODING, 'replace')
folder_basename = os.path.basename(folder)
logger.info('Processing: %s', folder_basename)

# Attempt 1: First try to see if there's a match in the snatched table,

@@ -13,9 +13,9 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.

import urllib
import urllib2
import cookielib
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import http.cookiejar
import json
import time
import mimetypes
@@ -61,23 +61,23 @@ class qbittorrentclient(object):
self.version = 2
except Exception as e:
logger.warning("Error with qBittorrent v2 api, check settings or update, will try v1: %s" % e)
self.cookiejar = cookielib.CookieJar()
self.cookiejar = http.cookiejar.CookieJar()
self.opener = self._make_opener()
self._get_sid(self.base_url, self.username, self.password)
self.version = 1

def _make_opener(self):
# create opener with cookie handler to carry QBitTorrent SID cookie
cookie_handler = urllib2.HTTPCookieProcessor(self.cookiejar)
cookie_handler = urllib.request.HTTPCookieProcessor(self.cookiejar)
handlers = [cookie_handler]
return urllib2.build_opener(*handlers)
return urllib.request.build_opener(*handlers)

def _get_sid(self, base_url, username, password):
# login so we can capture SID cookie
login_data = urllib.urlencode({'username': username, 'password': password})
login_data = urllib.parse.urlencode({'username': username, 'password': password})
try:
self.opener.open(base_url + '/login', login_data)
except urllib2.URLError as err:
except urllib.error.URLError as err:
logger.debug('Error getting SID. qBittorrent responded with error: ' + str(err.reason))
return
for cookie in self.cookiejar:
@@ -95,14 +95,14 @@ class qbittorrentclient(object):
data, headers = encode_multipart(args, files)
else:
if args:
data = urllib.urlencode(args)
data = urllib.parse.urlencode(args)
if content_type:
headers['Content-Type'] = content_type

logger.debug('%s' % json.dumps(headers, indent=4))
logger.debug('%s' % data)

request = urllib2.Request(url, data, headers)
request = urllib.request.Request(url, data, headers)
try:
response = self.opener.open(request)
info = response.info()
@@ -117,7 +117,7 @@ class qbittorrentclient(object):
return response.code, json.loads(resp)
logger.debug('response code: %s' % str(response.code))
return response.code, None
except urllib2.URLError as err:
except urllib.error.URLError as err:
logger.debug('Failed URL: %s' % url)
logger.debug('QBitTorrent webUI raised the following error: %s' % str(err))
return None, None
@@ -319,7 +319,7 @@ def encode_multipart(args, files, boundary=None):
lines = []

if args:
for name, value in args.items():
for name, value in list(args.items()):
lines.extend((
'--{0}'.format(boundary),
'Content-Disposition: form-data; name="{0}"'.format(escape_quote(name)),
@@ -329,7 +329,7 @@ def encode_multipart(args, files, boundary=None):
logger.debug(''.join(lines))

if files:
for name, value in files.items():
for name, value in list(files.items()):
filename = value['filename']
if 'mimetype' in value:
mimetype = value['mimetype']

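For the urllib2/cookielib hunks above, the names map one-to-one onto urllib.request and http.cookiejar. One behavioural difference worth noting: in Python 3 a POST body passed to an opener must be bytes, so urlencode output needs an explicit .encode(). A self-contained sketch (the URL and credentials are placeholders):

    import urllib.request, urllib.parse, http.cookiejar

    jar = http.cookiejar.CookieJar()
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
    login_data = urllib.parse.urlencode({'username': 'admin', 'password': 'secret'})
    # Python 3 requires a bytes body, e.g.:
    # opener.open('http://localhost:8080/login', login_data.encode('ascii'))
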
@@ -138,7 +138,7 @@ def request_soup(url, **kwargs):
no exceptions are raised.
"""

parser = kwargs.pop("parser", "html5lib")
parser = kwargs.pop("parser", "html.parser")
response = request_response(url, **kwargs)

if response is not None:
@@ -222,7 +222,7 @@ def server_message(response):
if response.headers.get("content-type") and \
"text/html" in response.headers.get("content-type"):
try:
soup = BeautifulSoup(response.content, "html5lib")
soup = BeautifulSoup(response.content, "html.parser")
except Exception:
pass


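Swapping "html5lib" for "html.parser" keeps the BeautifulSoup call shape but moves parsing onto the interpreter's built-in parser, dropping the external html5lib dependency. A quick sketch (assumes beautifulsoup4, which this code already uses):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<p>hello <b>world</b></p>", "html.parser")  # stdlib parser
    assert soup.b.text == "world"
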
@@ -1,8 +1,8 @@
#!/usr/bin/env python

import urllib
import urllib.request, urllib.parse, urllib.error
import time
from urlparse import urlparse
from urllib.parse import urlparse
import re

import requests as requests
@@ -68,10 +68,10 @@ class Rutracker(object):
return self.loggedin

def has_bb_session_cookie(self, response):
if 'bb_session' in response.cookies.keys():
if 'bb_session' in list(response.cookies.keys()):
return True
# Rutracker randomly send a 302 redirect code, cookie may be present in response history
return next(('bb_session' in r.cookies.keys() for r in response.history), False)
return next(('bb_session' in list(r.cookies.keys()) for r in response.history), False)

def searchurl(self, artist, album, year, format):
"""
@@ -99,10 +99,10 @@ class Rutracker(object):
# sort by size, descending.
sort = '&o=7&s=2'
try:
searchurl = "%s?nm=%s%s%s" % (self.search_referer, urllib.quote(searchterm), format, sort)
searchurl = "%s?nm=%s%s%s" % (self.search_referer, urllib.parse.quote(searchterm), format, sort)
except:
searchterm = searchterm.encode('utf-8')
searchurl = "%s?nm=%s%s%s" % (self.search_referer, urllib.quote(searchterm), format, sort)
searchurl = "%s?nm=%s%s%s" % (self.search_referer, urllib.parse.quote(searchterm), format, sort)
logger.info("Searching rutracker using term: %s", searchterm)

return searchurl
@@ -114,7 +114,7 @@ class Rutracker(object):
try:
headers = {'Referer': self.search_referer}
r = self.session.get(url=searchurl, headers=headers, timeout=self.timeout)
soup = BeautifulSoup(r.content, 'html5lib')
soup = BeautifulSoup(r.content, 'html.parser')

# Debug
# logger.debug (soup.prettify())
@@ -123,7 +123,7 @@ class Rutracker(object):
if not self.still_logged_in(soup):
self.login()
r = self.session.get(url=searchurl, timeout=self.timeout)
soup = BeautifulSoup(r.content, 'html5lib')
soup = BeautifulSoup(r.content, 'html.parser')
if not self.still_logged_in(soup):
logger.error("Error getting rutracker data")
return None

@@ -17,7 +17,7 @@
# Stolen from Sick-Beard's sab.py #
###################################

import cookielib
import http.cookiejar

import headphones
from headphones.common import USER_AGENT
@@ -74,29 +74,28 @@ def sendNZB(nzb):

# if we get a raw data result we want to upload it to SAB
elif nzb.resultType == "nzbdata":
# Sanitize the file a bit, since we can only use ascii chars with MultiPartPostHandler
nzbdata = helpers.latinToAscii(nzb.extraInfo[0])
nzbdata = nzb.extraInfo[0]
params['mode'] = 'addfile'
files = {"nzbfile": (helpers.latinToAscii(nzb.name) + ".nzb", nzbdata)}
files = {"nzbfile": (nzb.name + ".nzb", nzbdata)}
headers = {'User-Agent': USER_AGENT}

logger.info("Attempting to connect to SABnzbd on url: %s" % headphones.CONFIG.SAB_HOST)
if nzb.resultType == "nzb":
response = sab_api_call('send_nzb', params=params)
elif nzb.resultType == "nzbdata":
cookies = cookielib.CookieJar()
cookies = http.cookiejar.CookieJar()
response = sab_api_call('send_nzb', params=params, method="post", files=files,
cookies=cookies, headers=headers)

if not response:
logger.info(u"No data returned from SABnzbd, NZB not sent")
logger.info("No data returned from SABnzbd, NZB not sent")
return False

if response['status']:
logger.info(u"NZB sent to SABnzbd successfully")
logger.info("NZB sent to SABnzbd successfully")
return True
else:
logger.error(u"Error sending NZB to SABnzbd: %s" % response['error'])
logger.error("Error sending NZB to SABnzbd: %s" % response['error'])
return False


@@ -19,11 +19,11 @@ from base64 import b16encode, b32decode
from hashlib import sha1
import string
import random
import urllib
import urllib.request, urllib.parse, urllib.error
import datetime
import subprocess
import unicodedata
import urlparse
import urllib.parse

import os
import re
@@ -35,7 +35,8 @@ import headphones
from headphones.common import USER_AGENT
from headphones import logger, db, helpers, classes, sab, nzbget, request
from headphones import utorrent, transmission, notifiers, rutracker, deluge, qbittorrent
from bencode import bencode, bdecode
from bencode import encode as bencode
from bencode import decode as bdecode

# Magnet to torrent services, for Black hole. Stolen from CouchPotato.
TORRENT_TO_MAGNET_SERVICES = [
@@ -56,14 +57,11 @@ def fix_url(s, charset="utf-8"):
Fix the URL so it is proper formatted and encoded.
"""

if isinstance(s, unicode):
s = s.encode(charset, 'ignore')
scheme, netloc, path, qs, anchor = urllib.parse.urlsplit(s)
path = urllib.parse.quote(path, '/%')
qs = urllib.parse.quote_plus(qs, ':&=')

scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
path = urllib.quote(path, '/%')
qs = urllib.quote_plus(qs, ':&=')

return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
return urllib.parse.urlunsplit((scheme, netloc, path, qs, anchor))


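The reworked fix_url drops the unicode-to-bytes preamble entirely: urllib.parse works on str, and urlsplit/quote/urlunsplit round-trip cleanly. The same pipeline as a standalone sketch (the URL is a placeholder):

    import urllib.parse

    s = "https://example.com/a path/?q=a b"
    scheme, netloc, path, qs, anchor = urllib.parse.urlsplit(s)
    path = urllib.parse.quote(path, '/%')
    qs = urllib.parse.quote_plus(qs, ':&=')
    assert urllib.parse.urlunsplit((scheme, netloc, path, qs, anchor)) == \
        "https://example.com/a%20path/?q=a+b"
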
def torrent_to_file(target_file, data):
@@ -88,14 +86,10 @@ def torrent_to_file(target_file, data):
try:
os.chmod(target_file, int(headphones.CONFIG.FILE_PERMISSIONS, 8))
except OSError as e:
logger.warn(
"Could not change permissions for file '%s': %s. Continuing.",
target_file.decode(headphones.SYS_ENCODING, "replace"),
e.message)
logger.warn(f"Could not change permissions for `{target_file}`: {e}")
else:
logger.debug(
"Not changing file permissions, since it is disabled: %s",
target_file.decode(headphones.SYS_ENCODING, "replace"))
f"Not changing file permissions for `{target_file}`, since it is disabled")

# Done
return True
@@ -332,19 +326,13 @@ def do_sorted_search(album, new, losslessOnly, choose_specific_download=False):
if not sorted_search_results:
return

logger.info(u"Making sure we can download the best result")
logger.info("Making sure we can download the best result")
(data, bestqual) = preprocess(sorted_search_results)

if data and bestqual:
send_to_downloader(data, bestqual, album)


def removeDisallowedFilenameChars(filename):
validFilenameChars = "-_.() %s%s" % (string.ascii_letters, string.digits)
cleanedFilename = unicodedata.normalize('NFKD', filename).encode('ASCII', 'ignore').lower()
return ''.join(c for c in cleanedFilename if c in validFilenameChars)


def more_filtering(results, album, albumlength, new):
low_size_limit = None
high_size_limit = None
@@ -533,9 +521,9 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
else:
term = cleanartist + ' ' + cleanalbum

# Replace bad characters in the term and unicode it
term = re.sub('[\.\-\/]', ' ', term).encode('utf-8')
artistterm = re.sub('[\.\-\/]', ' ', cleanartist).encode('utf-8')
# Replace bad characters in the term
term = re.sub('[\.\-\/]', ' ', term)
artistterm = re.sub('[\.\-\/]', ' ', cleanartist)

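One inherited detail in the term cleanup above: patterns such as '[\.\-\/]' depend on unrecognized escapes passing through, which Python 3.6+ reports as a DeprecationWarning; a raw string expresses the same character class without the warning. Sketch:

    import re

    term = "some.artist-name/album"  # placeholder
    # r'[.\-/]' matches the same characters as '[\.\-\/]' without invalid-escape warnings
    assert re.sub(r'[.\-/]', ' ', term) == "some artist name album"
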
# If Preferred Bitrate and High Limit and Allow Lossless then get both lossy and lossless
|
||||
if headphones.CONFIG.PREFERRED_QUALITY == 2 and headphones.CONFIG.PREFERRED_BITRATE and headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER and headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS:
|
||||
@@ -582,7 +570,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
|
||||
# Process feed
|
||||
if data:
|
||||
if not len(data.entries):
|
||||
logger.info(u"No results found from %s for %s" % ('Headphones Index', term))
|
||||
logger.info("No results found from %s for %s" % ('Headphones Index', term))
|
||||
else:
|
||||
for item in data.entries:
|
||||
try:
|
||||
@@ -593,7 +581,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
|
||||
resultlist.append((title, size, url, provider, 'nzb', True))
|
||||
logger.info('Found %s. Size: %s' % (title, helpers.bytes_to_mb(size)))
|
||||
except Exception as e:
|
||||
logger.error(u"An unknown error occurred trying to parse the feed: %s" % e)
|
||||
logger.error("An unknown error occurred trying to parse the feed: %s" % e)
|
||||
|
||||
if headphones.CONFIG.NEWZNAB:
|
||||
provider = "newznab"
|
||||
@@ -653,7 +641,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
|
||||
# Process feed
|
||||
if data:
|
||||
if not len(data.entries):
|
||||
logger.info(u"No results found from %s for %s", newznab_host[0], term)
|
||||
logger.info("No results found from %s for %s", newznab_host[0], term)
|
||||
else:
|
||||
for item in data.entries:
|
||||
try:
|
||||
@@ -703,7 +691,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
|
||||
# Process feed
|
||||
if data:
|
||||
if not len(data.entries):
|
||||
logger.info(u"No results found from nzbs.org for %s" % term)
|
||||
logger.info("No results found from nzbs.org for %s" % term)
|
||||
else:
|
||||
for item in data.entries:
|
||||
try:
|
||||
@@ -750,7 +738,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
|
||||
# Parse response
|
||||
if data:
|
||||
if 'notice' in data:
|
||||
logger.info(u"No results returned from omgwtfnzbs: %s" % data['notice'])
|
||||
logger.info("No results returned from omgwtfnzbs: %s" % data['notice'])
|
||||
else:
|
||||
for item in data:
|
||||
try:
|
||||
@@ -780,7 +768,7 @@ def searchNZB(album, new=False, losslessOnly=False, albumlength=None,
|
||||
|
||||
|
||||
def send_to_downloader(data, bestqual, album):
|
||||
logger.info(u'Found best result from %s: <a href="%s">%s</a> - %s', bestqual[3], bestqual[2],
|
||||
logger.info('Found best result from %s: <a href="%s">%s</a> - %s', bestqual[3], bestqual[2],
|
||||
bestqual[0], helpers.bytes_to_mb(bestqual[1]))
|
||||
# Get rid of any dodgy chars here so we can prevent sab from renaming our downloads
|
||||
kind = bestqual[4]
|
||||
@@ -831,8 +819,8 @@ def send_to_downloader(data, bestqual, album):
|
||||
return
|
||||
else:
|
||||
folder_name = '%s - %s [%s]' % (
|
||||
helpers.latinToAscii(album['ArtistName']).encode('UTF-8').replace('/', '_'),
|
||||
helpers.latinToAscii(album['AlbumTitle']).encode('UTF-8').replace('/', '_'),
|
||||
helpers.latinToAscii(album['ArtistName']).replace('/', '_'),
|
||||
helpers.latinToAscii(album['AlbumTitle']).replace('/', '_'),
|
||||
get_year_from_release_date(album['ReleaseDate']))
|
||||
|
||||
# Blackhole
|
||||
@@ -1058,31 +1046,31 @@ def send_to_downloader(data, bestqual, album):
|
||||
name = folder_name if folder_name else None
|
||||
|
||||
if headphones.CONFIG.GROWL_ENABLED and headphones.CONFIG.GROWL_ONSNATCH:
|
||||
logger.info(u"Sending Growl notification")
|
||||
logger.info("Sending Growl notification")
|
||||
growl = notifiers.GROWL()
|
||||
growl.notify(name, "Download started")
|
||||
if headphones.CONFIG.PROWL_ENABLED and headphones.CONFIG.PROWL_ONSNATCH:
|
||||
logger.info(u"Sending Prowl notification")
|
||||
logger.info("Sending Prowl notification")
|
||||
prowl = notifiers.PROWL()
|
||||
prowl.notify(name, "Download started")
|
||||
if headphones.CONFIG.PUSHOVER_ENABLED and headphones.CONFIG.PUSHOVER_ONSNATCH:
|
||||
logger.info(u"Sending Pushover notification")
|
||||
logger.info("Sending Pushover notification")
|
||||
prowl = notifiers.PUSHOVER()
|
||||
prowl.notify(name, "Download started")
|
||||
if headphones.CONFIG.PUSHBULLET_ENABLED and headphones.CONFIG.PUSHBULLET_ONSNATCH:
|
||||
logger.info(u"Sending PushBullet notification")
|
||||
logger.info("Sending PushBullet notification")
|
||||
pushbullet = notifiers.PUSHBULLET()
|
||||
pushbullet.notify(name, "Download started")
|
||||
if headphones.CONFIG.JOIN_ENABLED and headphones.CONFIG.JOIN_ONSNATCH:
|
||||
logger.info(u"Sending Join notification")
|
||||
logger.info("Sending Join notification")
|
||||
join = notifiers.JOIN()
|
||||
join.notify(name, "Download started")
|
||||
if headphones.CONFIG.SLACK_ENABLED and headphones.CONFIG.SLACK_ONSNATCH:
|
||||
logger.info(u"Sending Slack notification")
|
||||
logger.info("Sending Slack notification")
|
||||
slack = notifiers.SLACK()
|
||||
slack.notify(name, "Download started")
|
||||
if headphones.CONFIG.TELEGRAM_ENABLED and headphones.CONFIG.TELEGRAM_ONSNATCH:
|
||||
logger.info(u"Sending Telegram notification")
|
||||
logger.info("Sending Telegram notification")
|
||||
from headphones import cache
|
||||
c = cache.Cache()
|
||||
album_art = c.get_artwork_from_cache(None, rgid)
|
||||
@@ -1090,34 +1078,35 @@ def send_to_downloader(data, bestqual, album):
|
||||
message = 'Snatched from ' + provider + '. ' + name
|
||||
telegram.notify(message, "Snatched: " + title, rgid, image=album_art)
|
||||
if headphones.CONFIG.TWITTER_ENABLED and headphones.CONFIG.TWITTER_ONSNATCH:
|
||||
logger.info(u"Sending Twitter notification")
|
||||
twitter = notifiers.TwitterNotifier()
|
||||
twitter.notify_snatch(name)
|
||||
logger.info("Twitter notifications temporarily disabled")
|
||||
#logger.info("Sending Twitter notification")
|
||||
#twitter = notifiers.TwitterNotifier()
|
||||
#twitter.notify_snatch(name)
|
||||
if headphones.CONFIG.NMA_ENABLED and headphones.CONFIG.NMA_ONSNATCH:
|
||||
logger.info(u"Sending NMA notification")
|
||||
logger.info("Sending NMA notification")
|
||||
nma = notifiers.NMA()
|
||||
nma.notify(snatched=name)
|
||||
if headphones.CONFIG.PUSHALOT_ENABLED and headphones.CONFIG.PUSHALOT_ONSNATCH:
|
||||
logger.info(u"Sending Pushalot notification")
|
||||
logger.info("Sending Pushalot notification")
|
||||
pushalot = notifiers.PUSHALOT()
|
||||
pushalot.notify(name, "Download started")
|
||||
if headphones.CONFIG.OSX_NOTIFY_ENABLED and headphones.CONFIG.OSX_NOTIFY_ONSNATCH:
|
||||
from headphones import cache
|
||||
c = cache.Cache()
|
||||
album_art = c.get_artwork_from_cache(None, rgid)
|
||||
logger.info(u"Sending OS X notification")
|
||||
logger.info("Sending OS X notification")
|
||||
osx_notify = notifiers.OSX_NOTIFY()
|
||||
osx_notify.notify(artist,
|
||||
albumname,
|
||||
'Snatched: ' + provider + '. ' + name,
|
||||
image=album_art)
|
||||
if headphones.CONFIG.BOXCAR_ENABLED and headphones.CONFIG.BOXCAR_ONSNATCH:
|
||||
logger.info(u"Sending Boxcar2 notification")
|
||||
logger.info("Sending Boxcar2 notification")
|
||||
b2msg = 'From ' + provider + '<br></br>' + name
|
||||
boxcar = notifiers.BOXCAR()
|
||||
boxcar.notify('Headphones snatched: ' + title, b2msg, rgid)
|
||||
if headphones.CONFIG.EMAIL_ENABLED and headphones.CONFIG.EMAIL_ONSNATCH:
|
||||
logger.info(u"Sending Email notification")
|
||||
logger.info("Sending Email notification")
|
||||
email = notifiers.Email()
|
||||
message = 'Snatched from ' + provider + '. ' + name
|
||||
email.notify("Snatched: " + title, message)
|
||||
@@ -1252,12 +1241,12 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
|
||||
else:
|
||||
usersearchterm = ''
|
||||
|
||||
semi_clean_artist_term = re.sub('[\.\-\/]', ' ', semi_cleanartist).encode('utf-8', 'replace')
|
||||
semi_clean_album_term = re.sub('[\.\-\/]', ' ', semi_cleanalbum).encode('utf-8', 'replace')
|
||||
# Replace bad characters in the term and unicode it
|
||||
term = re.sub('[\.\-\/]', ' ', term).encode('utf-8')
|
||||
artistterm = re.sub('[\.\-\/]', ' ', cleanartist).encode('utf-8', 'replace')
|
||||
albumterm = re.sub('[\.\-\/]', ' ', cleanalbum).encode('utf-8', 'replace')
|
||||
semi_clean_artist_term = re.sub('[\.\-\/]', ' ', semi_cleanartist)
|
||||
semi_clean_album_term = re.sub('[\.\-\/]', ' ', semi_cleanalbum)
|
||||
# Replace bad characters in the term
|
||||
term = re.sub('[\.\-\/]', ' ', term)
|
||||
artistterm = re.sub('[\.\-\/]', ' ', cleanartist)
|
||||
albumterm = re.sub('[\.\-\/]', ' ', cleanalbum)
|
||||
|
||||
# If Preferred Bitrate and High Limit and Allow Lossless then get both lossy and lossless
|
||||
if headphones.CONFIG.PREFERRED_QUALITY == 2 and headphones.CONFIG.PREFERRED_BITRATE and headphones.CONFIG.PREFERRED_BITRATE_HIGH_BUFFER and headphones.CONFIG.PREFERRED_BITRATE_ALLOW_LOSSLESS:
|
||||
@@ -1333,7 +1322,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
|
||||
if data:
|
||||
items = data.find_all('item')
|
||||
if not items:
|
||||
logger.info(u"No results found from %s for %s", provider, term)
|
||||
logger.info("No results found from %s for %s", provider, term)
|
||||
else:
|
||||
for item in items:
|
||||
try:
|
||||
@@ -1427,7 +1416,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
|
||||
# Process feed
|
||||
if data:
|
||||
if not len(data.entries):
|
||||
logger.info(u"No results found from %s for %s", provider, term)
|
||||
logger.info("No results found from %s for %s", provider, term)
|
||||
else:
|
||||
for item in data.entries:
|
||||
try:
|
||||
@@ -1439,7 +1428,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
|
||||
logger.info('Found %s. Size: %s', title, helpers.bytes_to_mb(size))
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
u"An error occurred while trying to parse the response from Waffles.ch: %s",
|
||||
"An error occurred while trying to parse the response from Waffles.ch: %s",
|
||||
e)
|
||||
|
||||
# rutracker.org
|
||||
@@ -1448,7 +1437,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,

# Ignore if release date not specified, results too unpredictable
if not year and not usersearchterm:
logger.info(u"Release date not specified, ignoring for rutracker.org")
logger.info("Release date not specified, ignoring for rutracker.org")
else:
if headphones.CONFIG.PREFERRED_QUALITY == 3 or losslessOnly:
format = 'lossless'
@@ -1501,7 +1490,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
bitrate_string = encoding_string
if bitrate_string not in gazelleencoding.ALL_ENCODINGS:
logger.info(
u"Your preferred bitrate is not one of the available Orpheus.network filters, so not using it as a search parameter.")
"Your preferred bitrate is not one of the available Orpheus.network filters, so not using it as a search parameter.")
maxsize = 10000000000
elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:  # Highest quality including lossless
search_formats = [gazelleformat.FLAC, gazelleformat.MP3]
@@ -1512,18 +1501,18 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,

if not orpheusobj or not orpheusobj.logged_in():
try:
logger.info(u"Attempting to log in to Orpheus.network...")
logger.info("Attempting to log in to Orpheus.network...")
orpheusobj = gazelleapi.GazelleAPI(headphones.CONFIG.ORPHEUS_USERNAME,
headphones.CONFIG.ORPHEUS_PASSWORD,
headphones.CONFIG.ORPHEUS_URL)
orpheusobj._login()
except Exception as e:
orpheusobj = None
logger.error(u"Orpheus.network credentials incorrect or site is down. Error: %s %s" % (
logger.error("Orpheus.network credentials incorrect or site is down. Error: %s %s" % (
e.__class__.__name__, str(e)))

if orpheusobj and orpheusobj.logged_in():
logger.info(u"Searching %s..." % provider)
logger.info("Searching %s..." % provider)
all_torrents = []

album_type = ""
@@ -1570,18 +1559,18 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
releasetype=album_type)['results'])

# filter on format, size, and num seeders
logger.info(u"Filtering torrents by format, maximum size, and minimum seeders...")
logger.info("Filtering torrents by format, maximum size, and minimum seeders...")
match_torrents = [t for t in all_torrents if
t.size <= maxsize and t.seeders >= minimumseeders]

logger.info(
u"Remaining torrents: %s" % ", ".join(repr(torrent) for torrent in match_torrents))
"Remaining torrents: %s" % ", ".join(repr(torrent) for torrent in match_torrents))

# sort by times d/l'd
if not len(match_torrents):
logger.info(u"No results found from %s for %s after filtering" % (provider, term))
logger.info("No results found from %s for %s after filtering" % (provider, term))
elif len(match_torrents) > 1:
logger.info(u"Found %d matching releases from %s for %s - %s after filtering" %
logger.info("Found %d matching releases from %s for %s - %s after filtering" %
(len(match_torrents), provider, artistterm, albumterm))
logger.info('Sorting torrents by number of seeders...')
match_torrents.sort(key=lambda x: int(x.seeders), reverse=True)
@@ -1595,7 +1584,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
# match_torrents.sort(key=lambda x: re.match("mp3", x.getTorrentDetails(), flags=re.I), reverse=True)
# match_torrents.sort(key=lambda x: str(bitrate) in x.getTorrentFolderName(), reverse=True)
logger.info(
u"New order: %s" % ", ".join(repr(torrent) for torrent in match_torrents))
"New order: %s" % ", ".join(repr(torrent) for torrent in match_torrents))

for torrent in match_torrents:
if not torrent.file_path:
@@ -1632,7 +1621,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
bitrate_string = encoding_string
if bitrate_string not in gazelleencoding.ALL_ENCODINGS:
logger.info(
u"Your preferred bitrate is not one of the available RED filters, so not using it as a search parameter.")
"Your preferred bitrate is not one of the available RED filters, so not using it as a search parameter.")
maxsize = 10000000000
elif headphones.CONFIG.PREFERRED_QUALITY == 1 or allow_lossless:  # Highest quality including lossless
search_formats = [gazelleformat.FLAC, gazelleformat.MP3]
@@ -1643,18 +1632,18 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,

if not redobj or not redobj.logged_in():
try:
logger.info(u"Attempting to log in to Redacted...")
logger.info("Attempting to log in to Redacted...")
redobj = gazelleapi.GazelleAPI(headphones.CONFIG.REDACTED_USERNAME,
headphones.CONFIG.REDACTED_PASSWORD,
providerurl)
redobj._login()
except Exception as e:
redobj = None
logger.error(u"Redacted credentials incorrect or site is down. Error: %s %s" % (
logger.error("Redacted credentials incorrect or site is down. Error: %s %s" % (
e.__class__.__name__, str(e)))

if redobj and redobj.logged_in():
logger.info(u"Searching %s..." % provider)
logger.info("Searching %s..." % provider)
all_torrents = []
for search_format in search_formats:
if usersearchterm:
@@ -1668,18 +1657,18 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
encoding=bitrate_string)['results'])

# filter on format, size, and num seeders
logger.info(u"Filtering torrents by format, maximum size, and minimum seeders...")
logger.info("Filtering torrents by format, maximum size, and minimum seeders...")
match_torrents = [t for t in all_torrents if
t.size <= maxsize and t.seeders >= minimumseeders]

logger.info(
u"Remaining torrents: %s" % ", ".join(repr(torrent) for torrent in match_torrents))
"Remaining torrents: %s" % ", ".join(repr(torrent) for torrent in match_torrents))

# sort by times d/l'd
if not len(match_torrents):
logger.info(u"No results found from %s for %s after filtering" % (provider, term))
logger.info("No results found from %s for %s after filtering" % (provider, term))
elif len(match_torrents) > 1:
logger.info(u"Found %d matching releases from %s for %s - %s after filtering" %
logger.info("Found %d matching releases from %s for %s - %s after filtering" %
(len(match_torrents), provider, artistterm, albumterm))
logger.info(
"Sorting torrents by times snatched and preferred bitrate %s..." % bitrate_string)
@@ -1695,7 +1684,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
# match_torrents.sort(key=lambda x: re.match("mp3", x.getTorrentDetails(), flags=re.I), reverse=True)
# match_torrents.sort(key=lambda x: str(bitrate) in x.getTorrentFolderName(), reverse=True)
logger.info(
u"New order: %s" % ", ".join(repr(torrent) for torrent in match_torrents))
"New order: %s" % ", ".join(repr(torrent) for torrent in match_torrents))

for torrent in match_torrents:
if not torrent.file_path:
@@ -1767,8 +1756,8 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
if url.lower().startswith("//"):
url = "http:" + url

formatted_size = re.search('Size (.*),', unicode(item)).group(1).replace(
u'\xa0', ' ')
formatted_size = re.search('Size (.*),', str(item)).group(1).replace(
'\xa0', ' ')
size = helpers.piratesize(formatted_size)

if size < maxsize and minimumseeders < seeds and url is not None:
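Note on the hunk above: Python 3 has no unicode() builtin and no u-prefix requirement, since every str is already Unicode; str(item) and a bare '\xa0' literal are the direct equivalents. A minimal sketch of the same parse (the sample string is invented for illustration):

import re

# In Python 3 a non-breaking space can be written directly as '\xa0'.
formatted = "Size 1.2\xa0GiB, ULed by someone"
size = re.search('Size (.*),', formatted).group(1).replace('\xa0', ' ')
assert size == "1.2 GiB"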
@@ -1781,7 +1770,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,

resultlist.append((title, size, url, provider, "torrent", match))
except Exception as e:
logger.error(u"An unknown error occurred in the Pirate Bay parser: %s" % e)
logger.error("An unknown error occurred in the Pirate Bay parser: %s" % e)

# Old Pirate Bay Compatible
if headphones.CONFIG.OLDPIRATEBAY:
@@ -1802,7 +1791,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2243.2 Safari/537.36'}
provider_url = fix_url(headphones.CONFIG.OLDPIRATEBAY_URL) + \
"/search.php?" + urllib.urlencode({"q": tpb_term, "iht": 6})
"/search.php?" + urllib.parse.urlencode({"q": tpb_term, "iht": 6})

data = request.request_soup(url=provider_url, headers=headers)
@@ -1836,7 +1825,7 @@ def searchTorrent(album, new=False, losslessOnly=False, albumlength=None,
resultlist.append((title, size, url, provider, "torrent", match))
except Exception as e:
logger.error(
u"An unknown error occurred in the Old Pirate Bay parser: %s" % e)
"An unknown error occurred in the Old Pirate Bay parser: %s" % e)

# attempt to verify that this isn't a substring result
# when looking for "Foo - Foo" we don't want "Foobar"
@@ -38,8 +38,8 @@ class SoftChrootTest(TestCase):
cf = SoftChroot(path)
self.assertIsNone(cf)

self.assertRegexpMatches(str(exc.exception), r'No such directory')
self.assertRegexpMatches(str(exc.exception), path)
self.assertRegex(str(exc.exception), r'No such directory')
self.assertRegex(str(exc.exception), path)

@mock.patch('headphones.softchroot.os', wrap=os, name='OsMock')
def test_create_on_file(self, os_mock):
@@ -57,8 +57,8 @@ class SoftChrootTest(TestCase):

self.assertTrue(os_mock.path.isdir.called)

self.assertRegexpMatches(str(exc.exception), r'No such directory')
self.assertRegexpMatches(str(exc.exception), path)
self.assertRegex(str(exc.exception), r'No such directory')
self.assertRegex(str(exc.exception), path)

@TestArgs(
(None, None),
@@ -16,7 +16,7 @@
import time
import json
import base64
import urlparse
import urllib.parse
import os

from headphones import logger, request
@@ -57,7 +57,7 @@ def addTorrent(link, data=None):
else:
retid = False

logger.info(u"Torrent sent to Transmission successfully")
logger.info("Torrent sent to Transmission successfully")
return retid

else:
@@ -167,7 +167,7 @@ def torrentAction(method, arguments):

# Fix the URL. We assume that the user does not point to the RPC endpoint,
# so add it if it is missing.
parts = list(urlparse.urlparse(host))
parts = list(urllib.parse.urlparse(host))

if not parts[0] in ("http", "https"):
parts[0] = "http"
@@ -175,7 +175,7 @@ def torrentAction(method, arguments):
if not parts[2].endswith("/rpc"):
parts[2] += "/transmission/rpc"

host = urlparse.urlunparse(parts)
host = urllib.parse.urlunparse(parts)
data = {'method': method, 'arguments': arguments}
data_json = json.dumps(data)
auth = (username, password) if username and password else None
@@ -205,5 +205,5 @@ def torrentAction(method, arguments):
continue

resp_json = response.json()
print resp_json
print(resp_json)
return resp_json
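Note on the hunks above: Python 2's urlparse module became urllib.parse in Python 3; the function names (urlparse, urlunparse, urlencode) are unchanged. A minimal sketch of the same parse-patch-reassemble pattern, under the assumption the host carries a scheme (function name hypothetical):

import urllib.parse  # Python 3 home of the old urlparse module

def normalize_rpc_url(host):
    # Parse, patch the pieces, re-assemble -- same flow as the diff above.
    parts = list(urllib.parse.urlparse(host))
    if parts[0] not in ("http", "https"):
        parts[0] = "http"
    if not parts[2].endswith("/rpc"):
        parts[2] += "/transmission/rpc"
    return urllib.parse.urlunparse(parts)

assert normalize_rpc_url("http://localhost:9091") == \
    "http://localhost:9091/transmission/rpc"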
@@ -44,7 +44,7 @@ class TestCase(TC):

@_d
def assertRegexpMatches(self, *args, **kw):
return super(TestCase, self).assertRegexpMatches(*args, **kw)
return super(TestCase, self).assertRegex(*args, **kw)

# -----------------------------------------------------------
# NOT DUMMY ASSERTIONS
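Note on the shim above: unittest renamed assertRegexpMatches to assertRegex in Python 3 (the old name survived only as a deprecated alias), so the wrapper keeps the old call sites working while delegating to the new API. A minimal sketch of the idea (class names illustrative):

import unittest

class CompatTestCase(unittest.TestCase):
    # Back-compat shim: old tests keep calling assertRegexpMatches,
    # which Python 3 renamed to assertRegex.
    def assertRegexpMatches(self, *args, **kw):
        return super().assertRegex(*args, **kw)

class Demo(CompatTestCase):
    def test_shim(self):
        self.assertRegexpMatches("No such directory: /tmp/x", r"No such directory")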
@@ -13,13 +13,13 @@
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.

import urllib
import urllib.request, urllib.parse, urllib.error
import json
import time
from collections import namedtuple
import urllib2
import urlparse
import cookielib
import urllib.request, urllib.error, urllib.parse
import urllib.parse
import http.cookiejar

import re
import os
@@ -52,23 +52,23 @@ class utorrentclient(object):

def _make_opener(self, realm, base_url, username, password):
"""uTorrent API need HTTP Basic Auth and cookie support for token verify."""
auth = urllib2.HTTPBasicAuthHandler()
auth = urllib.request.HTTPBasicAuthHandler()
auth.add_password(realm=realm, uri=base_url, user=username, passwd=password)
opener = urllib2.build_opener(auth)
urllib2.install_opener(opener)
opener = urllib.request.build_opener(auth)
urllib.request.install_opener(opener)

cookie_jar = cookielib.CookieJar()
cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar)
cookie_jar = http.cookiejar.CookieJar()
cookie_handler = urllib.request.HTTPCookieProcessor(cookie_jar)

handlers = [auth, cookie_handler]
opener = urllib2.build_opener(*handlers)
opener = urllib.request.build_opener(*handlers)
return opener

def _get_token(self):
url = urlparse.urljoin(self.base_url, 'gui/token.html')
url = urllib.parse.urljoin(self.base_url, 'gui/token.html')
try:
response = self.opener.open(url)
except urllib2.HTTPError as err:
except urllib.error.HTTPError as err:
logger.debug('URL: ' + str(url))
logger.debug('Error getting Token. uTorrent responded with error: ' + str(err))
return
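Note on the hunk above: the Python 2 urllib2 and cookielib modules map onto urllib.request, urllib.error, and http.cookiejar in Python 3; the handler classes keep their names. A minimal sketch of the same opener in Python 3 form (argument names mirror the diff):

import http.cookiejar
import urllib.request

def make_opener(realm, base_url, username, password):
    # urllib2.HTTPBasicAuthHandler -> urllib.request.HTTPBasicAuthHandler,
    # cookielib.CookieJar          -> http.cookiejar.CookieJar
    auth = urllib.request.HTTPBasicAuthHandler()
    auth.add_password(realm=realm, uri=base_url, user=username, passwd=password)
    cookies = urllib.request.HTTPCookieProcessor(http.cookiejar.CookieJar())
    return urllib.request.build_opener(auth, cookies)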
@@ -77,7 +77,7 @@ class utorrentclient(object):

def list(self, **kwargs):
params = [('list', '1')]
params += kwargs.items()
params += list(kwargs.items())
return self._action(params)

def add_url(self, url):
@@ -150,8 +150,8 @@ class utorrentclient(object):
if not self.token:
return

url = self.base_url + '/gui/' + '?token=' + self.token + '&' + urllib.urlencode(params)
request = urllib2.Request(url)
url = self.base_url + '/gui/' + '?token=' + self.token + '&' + urllib.parse.urlencode(params)
request = urllib.request.Request(url)

if body:
request.add_data(body)
@@ -162,7 +162,7 @@ class utorrentclient(object):
try:
response = self.opener.open(request)
return response.code, json.loads(response.read())
except urllib2.HTTPError as err:
except urllib.error.HTTPError as err:
logger.debug('URL: ' + str(url))
logger.debug('uTorrent webUI raised the following error: ' + str(err))
@@ -43,7 +43,7 @@ def runGit(args):
shell=True,
cwd=headphones.PROG_DIR)
output, err = p.communicate()
output = output.strip()
output = output.decode('utf-8').strip()

logger.debug('Git output: ' + output)
except OSError as e:
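Note on the hunk above: Popen.communicate() returns bytes on Python 3 unless the process is opened in text mode, so the output must be decoded before it is concatenated with str for logging. A minimal sketch:

import subprocess

p = subprocess.Popen(['git', '--version'],
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate()
# communicate() yields bytes on Python 3; decode before string operations.
print('Git output: ' + output.decode('utf-8').strip())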
@@ -17,14 +17,14 @@

from operator import itemgetter
import threading
import hashlib
import secrets
import random
import urllib
import urllib.request, urllib.parse, urllib.error
import json
import time
import cgi
import sys
import urllib2
from html import escape as html_escape
import urllib.request, urllib.error, urllib.parse

import os
import re
@@ -97,7 +97,7 @@ class WebInterface(object):
# Serve the extras up as a dict to make things easier for new templates (append new extras to the end)
extras_list = headphones.POSSIBLE_EXTRAS
if artist['Extras']:
artist_extras = map(int, artist['Extras'].split(','))
artist_extras = list(map(int, artist['Extras'].split(',')))
else:
artist_extras = []

@@ -158,8 +158,8 @@ class WebInterface(object):
else:
searchresults = mb.findSeries(name, limit=100)
return serve_template(templatename="searchresults.html",
title='Search Results for: "' + cgi.escape(name) + '"',
searchresults=searchresults, name=cgi.escape(name), type=type)
title='Search Results for: "' + html_escape(name) + '"',
searchresults=searchresults, name=html_escape(name), type=type)

@cherrypy.expose
def addArtist(self, artistid):
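Note on the hunk above: cgi.escape was deprecated and later removed from Python 3; html.escape is the replacement. One behavioural difference worth knowing: html.escape quotes '"' and "'" by default, whereas cgi.escape needed quote=True for that. A minimal sketch:

from html import escape as html_escape

name = "Guns N' Roses <& friends>"
# html.escape quotes by default, unlike the old cgi.escape.
assert html_escape(name) == "Guns N&#x27; Roses &lt;&amp; friends&gt;"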
@@ -230,7 +230,7 @@ class WebInterface(object):

@cherrypy.expose
def pauseArtist(self, ArtistID):
logger.info(u"Pausing artist: " + ArtistID)
logger.info("Pausing artist: " + ArtistID)
myDB = db.DBConnection()
controlValueDict = {'ArtistID': ArtistID}
newValueDict = {'Status': 'Paused'}
@@ -239,7 +239,7 @@ class WebInterface(object):

@cherrypy.expose
def resumeArtist(self, ArtistID):
logger.info(u"Resuming artist: " + ArtistID)
logger.info("Resuming artist: " + ArtistID)
myDB = db.DBConnection()
controlValueDict = {'ArtistID': ArtistID}
newValueDict = {'Status': 'Active'}
@@ -252,9 +252,9 @@ class WebInterface(object):
for name in namecheck:
artistname = name['ArtistName']
try:
logger.info(u"Deleting all traces of artist: " + artistname)
logger.info("Deleting all traces of artist: " + artistname)
except TypeError:
logger.info(u"Deleting all traces of artist: null")
logger.info("Deleting all traces of artist: null")
myDB.action('DELETE from artists WHERE ArtistID=?', [ArtistID])

from headphones import cache
@@ -291,7 +291,7 @@ class WebInterface(object):
myDB = db.DBConnection()
artist_name = myDB.select('SELECT DISTINCT ArtistName FROM artists WHERE ArtistID=?', [ArtistID])[0][0]

logger.info(u"Scanning artist: %s", artist_name)
logger.info("Scanning artist: %s", artist_name)

full_folder_format = headphones.CONFIG.FOLDER_FORMAT
folder_format = re.findall(r'(.*?[Aa]rtist?)\.*', full_folder_format)[0]
@@ -314,7 +314,7 @@ class WebInterface(object):
sortname = artist

if sortname[0].isdigit():
firstchar = u'0-9'
firstchar = '0-9'
else:
firstchar = sortname[0]
@@ -363,7 +363,7 @@ class WebInterface(object):

@cherrypy.expose
def deleteEmptyArtists(self):
logger.info(u"Deleting all empty artists")
logger.info("Deleting all empty artists")
myDB = db.DBConnection()
emptyArtistIDs = [row['ArtistID'] for row in
myDB.select("SELECT ArtistID FROM artists WHERE LatestAlbum IS NULL")]
@@ -423,7 +423,7 @@ class WebInterface(object):

@cherrypy.expose
def queueAlbum(self, AlbumID, ArtistID=None, new=False, redirect=None, lossless=False):
logger.info(u"Marking album: " + AlbumID + " as wanted...")
logger.info("Marking album: " + AlbumID + " as wanted...")
myDB = db.DBConnection()
controlValueDict = {'AlbumID': AlbumID}
if lossless:
@@ -438,10 +438,11 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect(redirect)

@cherrypy.expose
@cherrypy.tools.json_out()
def choose_specific_download(self, AlbumID):
results = searcher.searchforalbum(AlbumID, choose_specific_download=True)

results_as_dicts = []
data = []

for result in results:
result_dict = {
@@ -452,35 +453,34 @@ class WebInterface(object):
'kind': result[4],
'matches': result[5]
}
results_as_dicts.append(result_dict)
s = json.dumps(results_as_dicts)
cherrypy.response.headers['Content-type'] = 'application/json'
return s
data.append(result_dict)
return data

@cherrypy.expose
@cherrypy.tools.json_out()
def download_specific_release(self, AlbumID, title, size, url, provider, kind, **kwargs):
# Handle situations where the torrent url contains arguments that are parsed
if kwargs:
url = urllib2.quote(url, safe=":?/=&") + '&' + urllib.urlencode(kwargs)
url = urllib.parse.quote(url, safe=":?/=&") + '&' + urllib.parse.urlencode(kwargs)
try:
result = [(title, int(size), url, provider, kind)]
except ValueError:
result = [(title, float(size), url, provider, kind)]

logger.info(u"Making sure we can download the chosen result")
logger.info("Making sure we can download the chosen result")
(data, bestqual) = searcher.preprocess(result)

if data and bestqual:
myDB = db.DBConnection()
album = myDB.action('SELECT * from albums WHERE AlbumID=?', [AlbumID]).fetchone()
searcher.send_to_downloader(data, bestqual, album)
return json.dumps({'result': 'success'})
return {'result': 'success'}
else:
return json.dumps({'result': 'failure'})
return {'result': 'failure'}

@cherrypy.expose
def unqueueAlbum(self, AlbumID, ArtistID):
logger.info(u"Marking album: " + AlbumID + "as skipped...")
logger.info("Marking album: " + AlbumID + "as skipped...")
myDB = db.DBConnection()
controlValueDict = {'AlbumID': AlbumID}
newValueDict = {'Status': 'Skipped'}
@@ -489,7 +489,7 @@ class WebInterface(object):

@cherrypy.expose
def deleteAlbum(self, AlbumID, ArtistID=None):
logger.info(u"Deleting all traces of album: " + AlbumID)
logger.info("Deleting all traces of album: " + AlbumID)
myDB = db.DBConnection()

myDB.action('DELETE from have WHERE Matched=?', [AlbumID])
@@ -528,7 +528,7 @@ class WebInterface(object):

@cherrypy.expose
def editSearchTerm(self, AlbumID, SearchTerm):
logger.info(u"Updating search term for albumid: " + AlbumID)
logger.info("Updating search term for albumid: " + AlbumID)
myDB = db.DBConnection()
controlValueDict = {'AlbumID': AlbumID}
newValueDict = {'SearchTerm': SearchTerm}
@@ -959,6 +959,7 @@ class WebInterface(object):
raise cherrypy.HTTPRedirect("logs")

@cherrypy.expose
@cherrypy.tools.json_out()
def getLog(self, iDisplayStart=0, iDisplayLength=100, iSortCol_0=0, sSortDir_0="desc",
sSearch="", **kwargs):
iDisplayStart = int(iDisplayStart)
@@ -981,13 +982,14 @@ class WebInterface(object):
rows = filtered[iDisplayStart:(iDisplayStart + iDisplayLength)]
rows = [[row[0], row[2], row[1]] for row in rows]

return json.dumps({
return {
'iTotalDisplayRecords': len(filtered),
'iTotalRecords': len(headphones.LOG_LIST),
'aaData': rows,
})
}
@cherrypy.expose
@cherrypy.tools.json_out()
def getArtists_json(self, iDisplayStart=0, iDisplayLength=100, sSearch="", iSortCol_0='0',
sSortDir_0='asc', **kwargs):
iDisplayStart = int(iDisplayStart)
@@ -1055,61 +1057,58 @@ class WebInterface(object):

rows.append(row)

dict = {'iTotalDisplayRecords': len(filtered),
data = {'iTotalDisplayRecords': len(filtered),
'iTotalRecords': totalcount,
'aaData': rows,
}
s = json.dumps(dict)
cherrypy.response.headers['Content-type'] = 'application/json'
return s
return data
@cherrypy.expose
@cherrypy.tools.json_out()
def getAlbumsByArtist_json(self, artist=None):
myDB = db.DBConnection()
album_json = {}
data = {}
counter = 0
album_list = myDB.select("SELECT AlbumTitle from albums WHERE ArtistName=?", [artist])
for album in album_list:
album_json[counter] = album['AlbumTitle']
data[counter] = album['AlbumTitle']
counter += 1
json_albums = json.dumps(album_json)

cherrypy.response.headers['Content-type'] = 'application/json'
return json_albums
return data

@cherrypy.expose
@cherrypy.tools.json_out()
def getArtistjson(self, ArtistID, **kwargs):
myDB = db.DBConnection()
artist = myDB.action('SELECT * FROM artists WHERE ArtistID=?', [ArtistID]).fetchone()
artist_json = json.dumps({
return {
'ArtistName': artist['ArtistName'],
'Status': artist['Status']
})
return artist_json
}

@cherrypy.expose
@cherrypy.tools.json_out()
def getAlbumjson(self, AlbumID, **kwargs):
myDB = db.DBConnection()
album = myDB.action('SELECT * from albums WHERE AlbumID=?', [AlbumID]).fetchone()
album_json = json.dumps({
return {
'AlbumTitle': album['AlbumTitle'],
'ArtistName': album['ArtistName'],
'Status': album['Status']
})
return album_json
}
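Note on the handlers above: the recurring change replaces the manual json.dumps plus Content-type header pattern with CherryPy's built-in json_out tool, which serializes whatever the handler returns and sets the header itself. A minimal sketch of the pattern (class and data are illustrative, not from the commit):

import cherrypy

class Api:
    @cherrypy.expose
    @cherrypy.tools.json_out()
    def artist(self):
        # The tool serializes this dict and sets
        # Content-Type: application/json automatically.
        return {'ArtistName': 'Nick Drake', 'Status': 'Active'}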
@cherrypy.expose
def clearhistory(self, type=None, date_added=None, title=None):
myDB = db.DBConnection()
if type:
if type == 'all':
logger.info(u"Clearing all history")
logger.info("Clearing all history")
myDB.action('DELETE from snatched WHERE Status NOT LIKE "Seed%"')
else:
logger.info(u"Clearing history where status is %s" % type)
logger.info("Clearing history where status is %s" % type)
myDB.action('DELETE from snatched WHERE Status=?', [type])
else:
logger.info(u"Deleting '%s' from history" % title)
logger.info("Deleting '%s' from history" % title)
myDB.action(
'DELETE from snatched WHERE Status NOT LIKE "Seed%" AND Title=? AND DateAdded=?',
[title, date_added])
@@ -1117,7 +1116,7 @@ class WebInterface(object):

@cherrypy.expose
def generateAPI(self):
apikey = hashlib.sha224(str(random.getrandbits(256))).hexdigest()[0:32]
apikey = secrets.token_hex(nbytes=16)
logger.info("New API generated")
return apikey
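Note on generateAPI above: this is more than a mechanical port. random.getrandbits is not a cryptographically secure source, so hashing it never made the old key safe; secrets.token_hex draws from the OS CSPRNG, and 16 bytes yield the same 32-character hex key length. A minimal sketch:

import secrets

# 16 random bytes -> 32 hex characters, from a CSPRNG.
apikey = secrets.token_hex(nbytes=16)
assert len(apikey) == 32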
@@ -1418,7 +1417,7 @@ class WebInterface(object):
"join_deviceid": headphones.CONFIG.JOIN_DEVICEID
}

for k, v in config.iteritems():
for k, v in config.items():
if isinstance(v, headphones.config.path):
# need to apply SoftChroot to paths:
nv = headphones.SOFT_CHROOT.apply(v)
@@ -1435,7 +1434,7 @@ class WebInterface(object):

extras_list = [extra_munges.get(x, x) for x in headphones.POSSIBLE_EXTRAS]
if headphones.CONFIG.EXTRAS:
extras = map(int, headphones.CONFIG.EXTRAS.split(','))
extras = list(map(int, headphones.CONFIG.EXTRAS.split(',')))
else:
extras = []

@@ -1496,7 +1495,7 @@ class WebInterface(object):
kwargs[plain_config] = kwargs[use_config]
del kwargs[use_config]

for k, v in kwargs.iteritems():
for k, v in kwargs.items():
# TODO : HUGE crutch. It is all because there is no way to deal with options...
try:
_conf = headphones.CONFIG._define(k)
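Note on the hunks above: two distinct Python 3 dict changes are in play. dict.iteritems() is gone and items() returns a view, which is fine for plain iteration; map() became lazy, so its result is wrapped in list() wherever the code indexes or reuses it. A minimal sketch:

config = {'http_port': 8181, 'http_host': '0.0.0.0'}

# Python 3: .items() returns a view; .iteritems() no longer exists.
for k, v in config.items():
    print(k, v)

# map() is lazy on Python 3; materialize when the result is reused.
extras = list(map(int, '1,2,3'.split(',')))
assert extras == [1, 2, 3]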
@@ -1648,12 +1647,13 @@ class WebInterface(object):
return a.fetchData()

@cherrypy.expose
@cherrypy.tools.json_out()
def getInfo(self, ArtistID=None, AlbumID=None):

from headphones import cache
info_dict = cache.getInfo(ArtistID, AlbumID)

return json.dumps(info_dict)
return info_dict

@cherrypy.expose
def getArtwork(self, ArtistID=None, AlbumID=None):
@@ -1670,6 +1670,7 @@ class WebInterface(object):
# If you just want to get the last.fm image links for an album, make sure
# to pass a releaseid and not a releasegroupid
@cherrypy.expose
@cherrypy.tools.json_out()
def getImageLinks(self, ArtistID=None, AlbumID=None):
from headphones import cache
image_dict = cache.getImageLinks(ArtistID, AlbumID)
@@ -1687,7 +1688,7 @@ class WebInterface(object):
image_dict[
'thumbnail'] = "http://coverartarchive.org/release/%s/front-250.jpg" % AlbumID

return json.dumps(image_dict)
return image_dict

@cherrypy.expose
def twitterStep1(self):
@@ -1700,7 +1701,7 @@ class WebInterface(object):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
tweet = notifiers.TwitterNotifier()
result = tweet._get_credentials(key)
logger.info(u"result: " + str(result))
logger.info("result: " + str(result))
if result:
return "Key verification successful"
else:
@@ -1732,14 +1733,14 @@ class WebInterface(object):

@cherrypy.expose
def testPushover(self):
logger.info(u"Sending Pushover notification")
logger.info("Sending Pushover notification")
pushover = notifiers.PUSHOVER()
result = pushover.notify("hooray!", "This is a test")
return str(result)

@cherrypy.expose
def testPlex(self):
logger.info(u"Testing plex update")
logger.info("Testing plex update")
plex = notifiers.Plex()
plex.update()
@@ -111,12 +111,9 @@ def initialize(options):
})
conf['/api'] = {'tools.auth_basic.on': False}

# Prevent time-outs
cherrypy.engine.timeout_monitor.unsubscribe()
cherrypy.tree.mount(WebInterface(), str(options['http_root']), config=conf)

try:
cherrypy.process.servers.check_port(str(options['http_host']), options['http_port'])
cherrypy.server.start()
except IOError:
sys.stderr.write(
@@ -15,8 +15,8 @@
# Lesser General Public License for more details.
#

import urllib
import urllib2
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import mimetools, mimetypes
import os, sys

@@ -24,8 +24,8 @@ import os, sys
# assigning a sequence.
doseq = 1

class MultipartPostHandler(urllib2.BaseHandler):
handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first
class MultipartPostHandler(urllib.request.BaseHandler):
handler_order = urllib.request.HTTPHandler.handler_order - 10 # needs to run first

def http_request(self, request):
data = request.get_data()
@@ -33,23 +33,23 @@ class MultipartPostHandler(urllib2.BaseHandler):
v_files = []
v_vars = []
try:
for(key, value) in data.items():
for(key, value) in list(data.items()):
if type(value) in (file, list, tuple):
v_files.append((key, value))
else:
v_vars.append((key, value))
except TypeError:
systype, value, traceback = sys.exc_info()
raise TypeError, "not a valid non-string sequence or mapping object", traceback
raise TypeError("not a valid non-string sequence or mapping object").with_traceback(traceback)

if len(v_files) == 0:
data = urllib.urlencode(v_vars, doseq)
data = urllib.parse.urlencode(v_vars, doseq)
else:
boundary, data = MultipartPostHandler.multipart_encode(v_vars, v_files)
contenttype = 'multipart/form-data; boundary=%s' % boundary
if(request.has_header('Content-Type')
and request.get_header('Content-Type').find('multipart/form-data') != 0):
print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
print("Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data'))
request.add_unredirected_header('Content-Type', contenttype)

request.add_data(data)
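Note on the raise statement above: Python 2's three-argument form, raise Type, value, traceback, is a syntax error on Python 3; the equivalent attaches the traceback explicitly via with_traceback. A minimal sketch of the idiom (function name hypothetical):

import sys

def as_mapping(data):
    try:
        return dict(data.items())
    except AttributeError:
        _, _, tb = sys.exc_info()
        # Python 3 spelling of py2's `raise TypeError, msg, tb`.
        raise TypeError("not a valid non-string sequence "
                        "or mapping object").with_traceback(tb)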
@@ -1,4 +1,4 @@
from __future__ import absolute_import

import sys

from apscheduler.executors.base import BaseExecutor, run_job

@@ -1,4 +1,4 @@
from __future__ import absolute_import

import sys

from apscheduler.executors.base import BaseExecutor, run_job

@@ -1,4 +1,4 @@
from __future__ import absolute_import

from apscheduler.executors.base import BaseExecutor, run_job

@@ -1,4 +1,4 @@
from __future__ import absolute_import

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import datetime_to_utc_timestamp

@@ -1,11 +1,11 @@
from __future__ import absolute_import

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
import cPickle as pickle
import pickle as pickle
except ImportError: # pragma: nocover
import pickle

@@ -1,4 +1,4 @@
from __future__ import absolute_import

import six

@@ -7,7 +7,7 @@ from apscheduler.util import datetime_to_utc_timestamp, utc_timestamp_to_datetim
from apscheduler.job import Job

try:
import cPickle as pickle
import pickle as pickle
except ImportError: # pragma: nocover
import pickle

@@ -1,11 +1,11 @@
from __future__ import absolute_import

from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
from apscheduler.job import Job

try:
import cPickle as pickle
import pickle as pickle
except ImportError: # pragma: nocover
import pickle
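Note on the cPickle hunks above: Python 3 has no cPickle module; the stdlib pickle already uses its C accelerator internally when available, so the `import pickle as pickle` left behind by the mechanical conversion is redundant and a plain `import pickle` would suffice. A minimal sketch (data invented for illustration):

import pickle  # Python 3 picks the C implementation automatically

blob = pickle.dumps({'job_id': 42})
assert pickle.loads(blob) == {'job_id': 42}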
@@ -1,4 +1,4 @@
from __future__ import absolute_import

from functools import wraps

from apscheduler.schedulers.base import BaseScheduler

@@ -1,4 +1,4 @@
from __future__ import absolute_import

from threading import Thread, Event

from apscheduler.schedulers.base import BaseScheduler

@@ -1,4 +1,4 @@
from __future__ import print_function

from abc import ABCMeta, abstractmethod
from collections import MutableMapping
from threading import RLock

@@ -1,4 +1,4 @@
from __future__ import absolute_import

from threading import Event

from apscheduler.schedulers.base import BaseScheduler

@@ -1,4 +1,4 @@
from __future__ import absolute_import

from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.schedulers.base import BaseScheduler

@@ -1,4 +1,4 @@
from __future__ import absolute_import

from apscheduler.schedulers.base import BaseScheduler

@@ -1,4 +1,4 @@
from __future__ import absolute_import

from datetime import timedelta
from functools import wraps

@@ -1,4 +1,4 @@
from __future__ import absolute_import

from functools import wraps

from apscheduler.schedulers.base import BaseScheduler

@@ -1,6 +1,6 @@
"""This module contains several handy functions primarily meant for internal use."""

from __future__ import division

from datetime import date, datetime, time, timedelta, tzinfo
from inspect import isfunction, ismethod, getargspec
from calendar import timegm
@@ -23,7 +23,7 @@ __all__ = ('asint', 'asbool', 'astimezone', 'convert_to_datetime', 'datetime_to_

class _Undefined(object):
def __nonzero__(self):
def __bool__(self):
return False

def __bool__(self):
@@ -116,7 +116,7 @@ def convert_to_datetime(input, tz, arg_name):
m = _DATE_REGEX.match(input)
if not m:
raise ValueError('Invalid date string')
values = [(k, int(v or 0)) for k, v in m.groupdict().items()]
values = [(k, int(v or 0)) for k, v in list(m.groupdict().items())]
values = dict(values)
datetime_ = datetime(**values)
else:
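Note on _Undefined above: Python 3 consults __bool__ for truth testing where Python 2 used __nonzero__, hence the rename; the hunk context suggests the class then carries two __bool__ definitions, where one would suffice. A minimal sketch of the Python 3 protocol:

class _Undefined:
    def __bool__(self):  # Python 2 spelled this __nonzero__
        return False

assert not _Undefined()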
33 lib/beets/__init__.py (Executable file → Normal file)
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -13,33 +12,29 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

from __future__ import division, absolute_import, print_function

import os
import confuse
from sys import stderr

from beets.util import confit

# This particular version has been slightly modified to work with Headphones
# https://github.com/rembo10/headphones
__version__ = u'1.4.4-headphones'
__author__ = u'Adrian Sampson <adrian@radbox.org>'
__version__ = '1.6.0'
__author__ = 'Adrian Sampson <adrian@radbox.org>'


class IncludeLazyConfig(confit.LazyConfig):
"""A version of Confit's LazyConfig that also merges in data from
class IncludeLazyConfig(confuse.LazyConfig):
"""A version of Confuse's LazyConfig that also merges in data from
YAML files specified in an `include` setting.
"""
def read(self, user=True, defaults=True):
super(IncludeLazyConfig, self).read(user, defaults)
super().read(user, defaults)

try:
for view in self['include']:
filename = view.as_filename()
if os.path.isfile(filename):
self.set_file(filename)
except confit.NotFoundError:
self.set_file(view.as_filename())
except confuse.NotFoundError:
pass
except confuse.ConfigReadError as err:
stderr.write("configuration `import` failed: {}"
.format(err.reason))

# headphones
#config = IncludeLazyConfig('beets', __name__)
config = IncludeLazyConfig(os.path.dirname(__file__), __name__)

config = IncludeLazyConfig('beets', __name__)
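Note on IncludeLazyConfig.read above: Python 3 permits the zero-argument form of super() inside a method body, which is equivalent to the explicit super(IncludeLazyConfig, self) spelling. A minimal sketch (class names illustrative):

class Base:
    def read(self):
        return 'base'

class Child(Base):
    def read(self):
        # Same as super(Child, self).read() on Python 3.
        return super().read() + '+child'

assert Child().read() == 'base+child'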
2 lib/beets/__main__.py (Executable file → Normal file)
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2017, Adrian Sampson.
#
@@ -17,7 +16,6 @@
`python -m beets`.
"""

from __future__ import division, absolute_import, print_function

import sys
from .ui import main
72 lib/beets/art.py (Executable file → Normal file)
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -17,7 +16,6 @@
music and items' embedded album art.
"""

from __future__ import division, absolute_import, print_function

import subprocess
import platform
@@ -26,7 +24,7 @@ import os

from beets.util import displayable_path, syspath, bytestring_path
from beets.util.artresizer import ArtResizer
from beets import mediafile
import mediafile


def mediafile_image(image_path, maxwidth=None):
@@ -43,7 +41,7 @@ def get_art(log, item):
try:
mf = mediafile.MediaFile(syspath(item.path))
except mediafile.UnreadableFileError as exc:
log.warning(u'Could not extract art from {0}: {1}',
log.warning('Could not extract art from {0}: {1}',
displayable_path(item.path), exc)
return
@@ -51,26 +49,27 @@ def get_art(log, item):


def embed_item(log, item, imagepath, maxwidth=None, itempath=None,
compare_threshold=0, ifempty=False, as_album=False):
compare_threshold=0, ifempty=False, as_album=False, id3v23=None,
quality=0):
"""Embed an image into the item's media file.
"""
# Conditions and filters.
if compare_threshold:
if not check_art_similarity(log, item, imagepath, compare_threshold):
log.info(u'Image not similar; skipping.')
log.info('Image not similar; skipping.')
return
if ifempty and get_art(log, item):
log.info(u'media file already contained art')
return
log.info('media file already contained art')
return
if maxwidth and not as_album:
imagepath = resize_image(log, imagepath, maxwidth)
imagepath = resize_image(log, imagepath, maxwidth, quality)

# Get the `Image` object from the file.
try:
log.debug(u'embedding {0}', displayable_path(imagepath))
log.debug('embedding {0}', displayable_path(imagepath))
image = mediafile_image(imagepath, maxwidth)
except IOError as exc:
log.warning(u'could not read image file: {0}', exc)
except OSError as exc:
log.warning('could not read image file: {0}', exc)
return

# Make sure the image kind is safe (some formats only support PNG
@@ -80,36 +79,39 @@ def embed_item(log, item, imagepath, maxwidth=None, itempath=None,
image.mime_type)
return

item.try_write(path=itempath, tags={'images': [image]})
item.try_write(path=itempath, tags={'images': [image]}, id3v23=id3v23)


def embed_album(log, album, maxwidth=None, quiet=False,
compare_threshold=0, ifempty=False):
def embed_album(log, album, maxwidth=None, quiet=False, compare_threshold=0,
ifempty=False, quality=0):
"""Embed album art into all of the album's items.
"""
imagepath = album.artpath
if not imagepath:
log.info(u'No album art present for {0}', album)
log.info('No album art present for {0}', album)
return
if not os.path.isfile(syspath(imagepath)):
log.info(u'Album art not found at {0} for {1}',
log.info('Album art not found at {0} for {1}',
displayable_path(imagepath), album)
return
if maxwidth:
imagepath = resize_image(log, imagepath, maxwidth)
imagepath = resize_image(log, imagepath, maxwidth, quality)

log.info(u'Embedding album art into {0}', album)
log.info('Embedding album art into {0}', album)

for item in album.items():
embed_item(log, item, imagepath, maxwidth, None,
compare_threshold, ifempty, as_album=True)
embed_item(log, item, imagepath, maxwidth, None, compare_threshold,
ifempty, as_album=True, quality=quality)


def resize_image(log, imagepath, maxwidth):
"""Returns path to an image resized to maxwidth.
def resize_image(log, imagepath, maxwidth, quality):
"""Returns path to an image resized to maxwidth and encoded with the
specified quality level.
"""
log.debug(u'Resizing album art to {0} pixels wide', maxwidth)
imagepath = ArtResizer.shared.resize(maxwidth, syspath(imagepath))
log.debug('Resizing album art to {0} pixels wide and encoding at quality \
level {1}', maxwidth, quality)
imagepath = ArtResizer.shared.resize(maxwidth, syspath(imagepath),
quality=quality)
return imagepath
@@ -131,7 +133,7 @@ def check_art_similarity(log, item, imagepath, compare_threshold):
syspath(art, prefix=False),
'-colorspace', 'gray', 'MIFF:-']
compare_cmd = ['compare', '-metric', 'PHASH', '-', 'null:']
log.debug(u'comparing images with pipeline {} | {}',
log.debug('comparing images with pipeline {} | {}',
convert_cmd, compare_cmd)
convert_proc = subprocess.Popen(
convert_cmd,
@@ -155,7 +157,7 @@ def check_art_similarity(log, item, imagepath, compare_threshold):
convert_proc.wait()
if convert_proc.returncode:
log.debug(
u'ImageMagick convert failed with status {}: {!r}',
'ImageMagick convert failed with status {}: {!r}',
convert_proc.returncode,
convert_stderr,
)
@@ -165,7 +167,7 @@ def check_art_similarity(log, item, imagepath, compare_threshold):
stdout, stderr = compare_proc.communicate()
if compare_proc.returncode:
if compare_proc.returncode != 1:
log.debug(u'ImageMagick compare failed: {0}, {1}',
log.debug('ImageMagick compare failed: {0}, {1}',
displayable_path(imagepath),
displayable_path(art))
return
@@ -176,10 +178,10 @@ def check_art_similarity(log, item, imagepath, compare_threshold):
try:
phash_diff = float(out_str)
except ValueError:
log.debug(u'IM output is not a number: {0!r}', out_str)
log.debug('IM output is not a number: {0!r}', out_str)
return

log.debug(u'ImageMagick compare score: {0}', phash_diff)
log.debug('ImageMagick compare score: {0}', phash_diff)
return phash_diff <= compare_threshold

return True
@@ -189,18 +191,18 @@ def extract(log, outpath, item):
art = get_art(log, item)
outpath = bytestring_path(outpath)
if not art:
log.info(u'No album art present in {0}, skipping.', item)
log.info('No album art present in {0}, skipping.', item)
return

# Add an extension to the filename.
ext = mediafile.image_extension(art)
if not ext:
log.warning(u'Unknown image type in {0}.',
log.warning('Unknown image type in {0}.',
displayable_path(item.path))
return
outpath += bytestring_path('.' + ext)

log.info(u'Extracting album art from: {0} to: {1}',
log.info('Extracting album art from: {0} to: {1}',
item, displayable_path(outpath))
with open(syspath(outpath), 'wb') as f:
f.write(art)
@@ -216,7 +218,7 @@ def extract_first(log, outpath, items):

def clear(log, lib, query):
items = lib.items(query)
log.info(u'Clearing album art from {0} items', len(items))
log.info('Clearing album art from {0} items', len(items))
for item in items:
log.debug(u'Clearing art for {0}', item)
log.debug('Clearing art for {0}', item)
item.try_write(tags={'images': None})
127 lib/beets/autotag/__init__.py (Executable file → Normal file)
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -16,19 +15,59 @@
"""Facilities for automatically determining files' correct metadata.
"""

from __future__ import division, absolute_import, print_function

from beets import logging
from beets import config

# Parts of external interface.
from .hooks import AlbumInfo, TrackInfo, AlbumMatch, TrackMatch # noqa
from .hooks import ( # noqa
AlbumInfo,
TrackInfo,
AlbumMatch,
TrackMatch,
Distance,
)
from .match import tag_item, tag_album, Proposal # noqa
from .match import Recommendation # noqa

# Global logger.
log = logging.getLogger('beets')

# Metadata fields that are already hardcoded, or where the tag name changes.
SPECIAL_FIELDS = {
'album': (
'va',
'releasegroup_id',
'artist_id',
'album_id',
'mediums',
'tracks',
'year',
'month',
'day',
'artist',
'artist_credit',
'artist_sort',
'data_url'
),
'track': (
'track_alt',
'artist_id',
'release_track_id',
'medium',
'index',
'medium_index',
'title',
'artist_credit',
'artist_sort',
'artist',
'track_id',
'medium_total',
'data_url',
'length'
)
}


# Additional utilities for the main interface.
@@ -40,17 +79,17 @@ def apply_item_metadata(item, track_info):
item.artist_credit = track_info.artist_credit
item.title = track_info.title
item.mb_trackid = track_info.track_id
item.mb_releasetrackid = track_info.release_track_id
if track_info.artist_id:
item.mb_artistid = track_info.artist_id
if track_info.data_source:
item.data_source = track_info.data_source

if track_info.lyricist is not None:
item.lyricist = track_info.lyricist
if track_info.composer is not None:
item.composer = track_info.composer
if track_info.arranger is not None:
item.arranger = track_info.arranger
for field, value in track_info.items():
# We only overwrite fields that are not already hardcoded.
if field in SPECIAL_FIELDS['track']:
continue
if value is None:
continue
item[field] = value

# At the moment, the other metadata is left intact (including album
# and track number). Perhaps these should be emptied?
@@ -61,12 +100,19 @@ def apply_metadata(album_info, mapping):
mapping from Items to TrackInfo objects.
"""
for item, track_info in mapping.items():
# Album, artist, track count.
if track_info.artist:
item.artist = track_info.artist
# Artist or artist credit.
if config['artist_credit']:
item.artist = (track_info.artist_credit or
track_info.artist or
album_info.artist_credit or
album_info.artist)
item.albumartist = (album_info.artist_credit or
album_info.artist)
else:
item.artist = album_info.artist
item.albumartist = album_info.artist
item.artist = (track_info.artist or album_info.artist)
item.albumartist = album_info.artist

# Album.
item.album = album_info.album

# Artist sort and credit names.
@@ -120,6 +166,7 @@ def apply_metadata(album_info, mapping):

# MusicBrainz IDs.
item.mb_trackid = track_info.track_id
item.mb_releasetrackid = track_info.release_track_id
item.mb_albumid = album_info.album_id
if track_info.artist_id:
item.mb_artistid = track_info.artist_id
@@ -131,34 +178,24 @@ def apply_metadata(album_info, mapping):
# Compilation flag.
item.comp = album_info.va

# Miscellaneous metadata.
for field in ('albumtype',
'label',
'asin',
'catalognum',
'script',
'language',
'country',
'albumstatus',
'albumdisambig',
'data_source',):
value = getattr(album_info, field)
if value is not None:
item[field] = value
if track_info.disctitle is not None:
item.disctitle = track_info.disctitle

if track_info.media is not None:
item.media = track_info.media

if track_info.lyricist is not None:
item.lyricist = track_info.lyricist
if track_info.composer is not None:
item.composer = track_info.composer
if track_info.arranger is not None:
item.arranger = track_info.arranger

# Track alt.
item.track_alt = track_info.track_alt

# Headphones seal of approval
item.comments = 'tagged by headphones/beets'
# Don't overwrite fields with empty values unless the
# field is explicitly allowed to be overwritten
for field, value in album_info.items():
if field in SPECIAL_FIELDS['album']:
continue
clobber = field in config['overwrite_null']['album'].as_str_seq()
if value is None and not clobber:
continue
item[field] = value

for field, value in track_info.items():
if field in SPECIAL_FIELDS['track']:
continue
clobber = field in config['overwrite_null']['track'].as_str_seq()
value = getattr(track_info, field)
if value is None and not clobber:
continue
item[field] = value
214 lib/beets/autotag/hooks.py (Executable file → Normal file)
@@ -1,4 +1,3 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This file is part of beets.
|
||||
# Copyright 2016, Adrian Sampson.
|
||||
#
|
||||
@@ -14,7 +13,6 @@
|
||||
# included in all copies or substantial portions of the Software.
|
||||
|
||||
"""Glue between metadata sources and the matching logic."""
|
||||
from __future__ import division, absolute_import, print_function
|
||||
|
||||
from collections import namedtuple
|
||||
from functools import total_ordering
|
||||
@@ -27,14 +25,36 @@ from beets.util import as_string
|
||||
from beets.autotag import mb
|
||||
from jellyfish import levenshtein_distance
|
||||
from unidecode import unidecode
|
||||
import six
|
||||
|
||||
log = logging.getLogger('beets')
|
||||
|
||||
# The name of the type for patterns in re changed in Python 3.7.
|
||||
try:
|
||||
Pattern = re._pattern_type
|
||||
except AttributeError:
|
||||
Pattern = re.Pattern
|
||||
|
||||
|
||||
# Classes used to represent candidate options.
|
||||
class AttrDict(dict):
|
||||
"""A dictionary that supports attribute ("dot") access, so `d.field`
|
||||
is equivalent to `d['field']`.
|
||||
"""
|
||||
|
||||
class AlbumInfo(object):
|
||||
def __getattr__(self, attr):
|
||||
if attr in self:
|
||||
return self.get(attr)
|
||||
else:
|
||||
raise AttributeError
|
||||
|
||||
def __setattr__(self, key, value):
|
||||
self.__setitem__(key, value)
|
||||
|
||||
def __hash__(self):
|
||||
return id(self)
|
||||
|
||||
|
||||
class AlbumInfo(AttrDict):
    """Describes a canonical release that may be used to match a release
    in the library. Consists of these data members:

@@ -43,38 +63,22 @@ class AlbumInfo(object):
    - ``artist``: name of the release's primary artist
    - ``artist_id``
    - ``tracks``: list of TrackInfo objects making up the release
    - ``asin``: Amazon ASIN
    - ``albumtype``: string describing the kind of release
    - ``va``: boolean: whether the release has "various artists"
    - ``year``: release year
    - ``month``: release month
    - ``day``: release day
    - ``label``: music label responsible for the release
    - ``mediums``: the number of discs in this release
    - ``artist_sort``: name of the release's artist for sorting
    - ``releasegroup_id``: MBID for the album's release group
    - ``catalognum``: the label's catalog number for the release
    - ``script``: character set used for metadata
    - ``language``: human language of the metadata
    - ``country``: the release country
    - ``albumstatus``: MusicBrainz release status (Official, etc.)
    - ``media``: delivery mechanism (Vinyl, etc.)
    - ``albumdisambig``: MusicBrainz release disambiguation comment
    - ``artist_credit``: Release-specific artist name
    - ``data_source``: The original data source (MusicBrainz, Discogs, etc.)
    - ``data_url``: The data source release URL.

    The fields up through ``tracks`` are required. The others are
    optional and may be None.
    ``mediums`` along with the fields up through ``tracks`` are required.
    The others are optional and may be None.
    """
    def __init__(self, album, album_id, artist, artist_id, tracks, asin=None,
                 albumtype=None, va=False, year=None, month=None, day=None,
                 label=None, mediums=None, artist_sort=None,
                 releasegroup_id=None, catalognum=None, script=None,
                 language=None, country=None, albumstatus=None, media=None,
                 albumdisambig=None, artist_credit=None, original_year=None,
                 original_month=None, original_day=None, data_source=None,
                 data_url=None):

    def __init__(self, tracks, album=None, album_id=None, artist=None,
                 artist_id=None, asin=None, albumtype=None, va=False,
                 year=None, month=None, day=None, label=None, mediums=None,
                 artist_sort=None, releasegroup_id=None, catalognum=None,
                 script=None, language=None, country=None, style=None,
                 genre=None, albumstatus=None, media=None, albumdisambig=None,
                 releasegroupdisambig=None, artist_credit=None,
                 original_year=None, original_month=None,
                 original_day=None, data_source=None, data_url=None,
                 discogs_albumid=None, discogs_labelid=None,
                 discogs_artistid=None, **kwargs):
        self.album = album
        self.album_id = album_id
        self.artist = artist
@@ -94,15 +98,22 @@ class AlbumInfo(object):
        self.script = script
        self.language = language
        self.country = country
        self.style = style
        self.genre = genre
        self.albumstatus = albumstatus
        self.media = media
        self.albumdisambig = albumdisambig
        self.releasegroupdisambig = releasegroupdisambig
        self.artist_credit = artist_credit
        self.original_year = original_year
        self.original_month = original_month
        self.original_day = original_day
        self.data_source = data_source
        self.data_url = data_url
        self.discogs_albumid = discogs_albumid
        self.discogs_labelid = discogs_labelid
        self.discogs_artistid = discogs_artistid
        self.update(kwargs)

    # Work around a bug in python-musicbrainz-ngs that causes some
    # strings to be bytes rather than Unicode.
@@ -112,53 +123,49 @@ class AlbumInfo(object):
        constituent `TrackInfo` objects, are decoded to Unicode.
        """
        for fld in ['album', 'artist', 'albumtype', 'label', 'artist_sort',
                    'catalognum', 'script', 'language', 'country',
                    'albumstatus', 'albumdisambig', 'artist_credit', 'media']:
                    'catalognum', 'script', 'language', 'country', 'style',
                    'genre', 'albumstatus', 'albumdisambig',
                    'releasegroupdisambig', 'artist_credit',
                    'media', 'discogs_albumid', 'discogs_labelid',
                    'discogs_artistid']:
            value = getattr(self, fld)
            if isinstance(value, bytes):
                setattr(self, fld, value.decode(codec, 'ignore'))

        if self.tracks:
            for track in self.tracks:
                track.decode(codec)
        for track in self.tracks:
            track.decode(codec)

    def copy(self):
        dupe = AlbumInfo([])
        dupe.update(self)
        dupe.tracks = [track.copy() for track in self.tracks]
        return dupe

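Because the new __init__ takes only ``tracks`` positionally and forwards unknown keyword arguments through ``**kwargs`` to the underlying dict, construction sites move to keyword style. A hedged sketch of the calling convention (all field values below are invented):

from beets.autotag.hooks import AlbumInfo

info = AlbumInfo(
    tracks=[],
    album='Example Album',
    album_id='00000000-0000-0000-0000-000000000000',
    artist='Example Artist',
    catalognum='CAT-001',        # optional fields can simply be omitted
    some_plugin_field='extra',   # unknown kwargs are kept via self.update(kwargs)
)
assert info.catalognum == info['catalognum']
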
class TrackInfo(object):
class TrackInfo(AttrDict):
    """Describes a canonical track present on a release. Appears as part
    of an AlbumInfo's ``tracks`` list. Consists of these data members:

    - ``title``: name of the track
    - ``track_id``: MusicBrainz ID; UUID fragment only
    - ``artist``: individual track artist name
    - ``artist_id``
    - ``length``: float: duration of the track in seconds
    - ``index``: position on the entire release
    - ``media``: delivery mechanism (Vinyl, etc.)
    - ``medium``: the disc number this track appears on in the album
    - ``medium_index``: the track's position on the disc
    - ``medium_total``: the number of tracks on the item's disc
    - ``artist_sort``: name of the track artist for sorting
    - ``disctitle``: name of the individual medium (subtitle)
    - ``artist_credit``: Recording-specific artist name
    - ``data_source``: The original data source (MusicBrainz, Discogs, etc.)
    - ``data_url``: The data source release URL.
    - ``lyricist``: individual track lyricist name
    - ``composer``: individual track composer name
    - ``arranger``: individual track arranger name
    - ``track_alt``: alternative track number (tape, vinyl, etc.)

    Only ``title`` and ``track_id`` are required. The rest of the fields
    may be None. The indices ``index``, ``medium``, and ``medium_index``
    are all 1-based.
    """
    def __init__(self, title, track_id, artist=None, artist_id=None,
                 length=None, index=None, medium=None, medium_index=None,
                 medium_total=None, artist_sort=None, disctitle=None,
                 artist_credit=None, data_source=None, data_url=None,
                 media=None, lyricist=None, composer=None, arranger=None,
                 track_alt=None):

    def __init__(self, title=None, track_id=None, release_track_id=None,
                 artist=None, artist_id=None, length=None, index=None,
                 medium=None, medium_index=None, medium_total=None,
                 artist_sort=None, disctitle=None, artist_credit=None,
                 data_source=None, data_url=None, media=None, lyricist=None,
                 composer=None, composer_sort=None, arranger=None,
                 track_alt=None, work=None, mb_workid=None,
                 work_disambig=None, bpm=None, initial_key=None, genre=None,
                 **kwargs):
        self.title = title
        self.track_id = track_id
        self.release_track_id = release_track_id
        self.artist = artist
        self.artist_id = artist_id
        self.length = length
@@ -174,8 +181,16 @@ class TrackInfo(object):
        self.data_url = data_url
        self.lyricist = lyricist
        self.composer = composer
        self.composer_sort = composer_sort
        self.arranger = arranger
        self.track_alt = track_alt
        self.work = work
        self.mb_workid = mb_workid
        self.work_disambig = work_disambig
        self.bpm = bpm
        self.initial_key = initial_key
        self.genre = genre
        self.update(kwargs)

    # As above, work around a bug in python-musicbrainz-ngs.
    def decode(self, codec='utf-8'):
@@ -188,6 +203,11 @@ class TrackInfo(object):
            if isinstance(value, bytes):
                setattr(self, fld, value.decode(codec, 'ignore'))

    def copy(self):
        dupe = TrackInfo()
        dupe.update(self)
        return dupe

# Candidate distance scoring.

@@ -215,8 +235,8 @@ def _string_dist_basic(str1, str2):
    transliteration/lowering to ASCII characters. Normalized by string
    length.
    """
    assert isinstance(str1, six.text_type)
    assert isinstance(str2, six.text_type)
    assert isinstance(str1, str)
    assert isinstance(str2, str)
    str1 = as_string(unidecode(str1))
    str2 = as_string(unidecode(str2))
    str1 = re.sub(r'[^a-z0-9]', '', str1.lower())
@@ -244,9 +264,9 @@ def string_dist(str1, str2):
    # "something, the".
    for word in SD_END_WORDS:
        if str1.endswith(', %s' % word):
            str1 = '%s %s' % (word, str1[:-len(word) - 2])
            str1 = '{} {}'.format(word, str1[:-len(word) - 2])
        if str2.endswith(', %s' % word):
            str2 = '%s %s' % (word, str2[:-len(word) - 2])
            str2 = '{} {}'.format(word, str2[:-len(word) - 2])

    # Perform a couple of basic normalizing substitutions.
    for pat, repl in SD_REPLACE:
@@ -284,11 +304,12 @@ def string_dist(str1, str2):
    return base_dist + penalty

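The normalization that this hunk ports to str.format is easy to check in isolation: a trailing ", the" is moved back to the front before the edit distance is computed. A self-contained sketch:

SD_END_WORDS = ['the', 'a', 'an']

def normalize_end_word(s):
    # Mirror of the loop above: "something, the" -> "the something".
    for word in SD_END_WORDS:
        if s.endswith(', %s' % word):
            s = '{} {}'.format(word, s[:-len(word) - 2])
    return s

assert normalize_end_word('beatles, the') == 'the beatles'
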
class LazyClassProperty(object):
class LazyClassProperty:
    """A decorator implementing a read-only property that is *lazy* in
    the sense that the getter is only invoked once. Subsequent accesses
    through *any* instance use the cached result.
    """

    def __init__(self, getter):
        self.getter = getter
        self.computed = False
@@ -301,17 +322,17 @@ class LazyClassProperty(object):


@total_ordering
@six.python_2_unicode_compatible
class Distance(object):
class Distance:
    """Keeps track of multiple distance penalties. Provides a single
    weighted distance for all penalties as well as a weighted distance
    for each individual penalty.
    """

    def __init__(self):
        self._penalties = {}

    @LazyClassProperty
    def _weights(cls):  # noqa
    def _weights(cls):  # noqa: N805
        """A dictionary from keys to floating-point weights.
        """
        weights_view = config['match']['distance_weights']
@@ -389,7 +410,7 @@ class Distance(object):
        return other - self.distance

    def __str__(self):
        return "{0:.2f}".format(self.distance)
        return f"{self.distance:.2f}"

    # Behave like a dict.

@@ -416,7 +437,7 @@ class Distance(object):
        """
        if not isinstance(dist, Distance):
            raise ValueError(
                u'`dist` must be a Distance object, not {0}'.format(type(dist))
                '`dist` must be a Distance object, not {}'.format(type(dist))
            )
        for key, penalties in dist._penalties.items():
            self._penalties.setdefault(key, []).extend(penalties)
@@ -428,7 +449,7 @@ class Distance(object):
        be a compiled regular expression, in which case it will be
        matched against `value2`.
        """
        if isinstance(value1, re._pattern_type):
        if isinstance(value1, Pattern):
            return bool(value1.match(value2))
        return value1 == value2

@@ -440,7 +461,7 @@ class Distance(object):
        """
        if not 0.0 <= dist <= 1.0:
            raise ValueError(
                u'`dist` must be between 0.0 and 1.0, not {0}'.format(dist)
                f'`dist` must be between 0.0 and 1.0, not {dist}'
            )
        self._penalties.setdefault(key, []).append(dist)

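The contract of the add() method shown above is small enough to model directly: penalties are range-checked and accumulated per key, and the weighted total is derived from them later. A toy stand-in:

penalties = {}

def add(key, dist):
    if not 0.0 <= dist <= 1.0:
        raise ValueError(f'`dist` must be between 0.0 and 1.0, not {dist}')
    penalties.setdefault(key, []).append(dist)

add('album', 0.5)
add('media', 1.0)
assert penalties == {'album': [0.5], 'media': [1.0]}
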
@@ -534,7 +555,10 @@ def album_for_mbid(release_id):
    if the ID is not found.
    """
    try:
        return mb.album_for_id(release_id)
        album = mb.album_for_id(release_id)
        if album:
            plugins.send('albuminfo_received', info=album)
        return album
    except mb.MusicBrainzAPIError as exc:
        exc.log(log)

@@ -544,12 +568,14 @@ def track_for_mbid(recording_id):
    if the ID is not found.
    """
    try:
        return mb.track_for_id(recording_id)
        track = mb.track_for_id(recording_id)
        if track:
            plugins.send('trackinfo_received', info=track)
        return track
    except mb.MusicBrainzAPIError as exc:
        exc.log(log)


@plugins.notify_info_yielded(u'albuminfo_received')
def albums_for_id(album_id):
    """Get a list of albums for an ID."""
    a = album_for_mbid(album_id)
@@ -557,10 +583,10 @@ def albums_for_id(album_id):
        yield a
    for a in plugins.album_for_id(album_id):
        if a:
            plugins.send('albuminfo_received', info=a)
            yield a


@plugins.notify_info_yielded(u'trackinfo_received')
def tracks_for_id(track_id):
    """Get a list of tracks for an ID."""
    t = track_for_mbid(track_id)
@@ -568,39 +594,43 @@ def tracks_for_id(track_id):
        yield t
    for t in plugins.track_for_id(track_id):
        if t:
            plugins.send('trackinfo_received', info=t)
            yield t


@plugins.notify_info_yielded(u'albuminfo_received')
def album_candidates(items, artist, album, va_likely):
@plugins.notify_info_yielded('albuminfo_received')
def album_candidates(items, artist, album, va_likely, extra_tags):
    """Search for album matches. ``items`` is a list of Item objects
    that make up the album. ``artist`` and ``album`` are the respective
    names (strings), which may be derived from the item list or may be
    entered by the user. ``va_likely`` is a boolean indicating whether
    the album is likely to be a "various artists" release.
    the album is likely to be a "various artists" release. ``extra_tags``
    is an optional dictionary of additional tags used to further
    constrain the search.
    """

    # Base candidates if we have album and artist to match.
    if artist and album:
        try:
            for candidate in mb.match_album(artist, album, len(items)):
                yield candidate
            yield from mb.match_album(artist, album, len(items),
                                      extra_tags)
        except mb.MusicBrainzAPIError as exc:
            exc.log(log)

    # Also add VA matches from MusicBrainz where appropriate.
    if va_likely and album:
        try:
            for candidate in mb.match_album(None, album, len(items)):
                yield candidate
            yield from mb.match_album(None, album, len(items),
                                      extra_tags)
        except mb.MusicBrainzAPIError as exc:
            exc.log(log)

    # Candidates from plugins.
    for candidate in plugins.candidates(items, artist, album, va_likely):
        yield candidate
    yield from plugins.candidates(items, artist, album, va_likely,
                                  extra_tags)


@plugins.notify_info_yielded(u'trackinfo_received')
@plugins.notify_info_yielded('trackinfo_received')
def item_candidates(item, artist, title):
    """Search for item matches. ``item`` is the Item to be matched.
    ``artist`` and ``title`` are strings and either reflect the item or
@@ -610,11 +640,9 @@ def item_candidates(item, artist, title):
    # MusicBrainz candidates.
    if artist and title:
        try:
            for candidate in mb.match_track(artist, title):
                yield candidate
            yield from mb.match_track(artist, title)
        except mb.MusicBrainzAPIError as exc:
            exc.log(log)

    # Plugin candidates.
    for candidate in plugins.item_candidates(item, artist, title):
        yield candidate
    yield from plugins.item_candidates(item, artist, title)

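The repeated "for candidate in ...: yield candidate" loops in these hunks collapse to Python 3's yield from, which delegates to a sub-generator with identical results:

def source():
    yield 1
    yield 2

def old_style():
    for x in source():
        yield x

def new_style():
    yield from source()

assert list(old_style()) == list(new_style()) == [1, 2]
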
55
lib/beets/autotag/match.py
Executable file → Normal file
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -17,7 +16,6 @@
releases and tracks.
"""

from __future__ import division, absolute_import, print_function

import datetime
import re
@@ -35,7 +33,7 @@ from beets.util.enumeration import OrderedEnum
# album level to determine whether a given release is likely a VA
# release and also on the track level to remove the penalty for
# differing artists.
VA_ARTISTS = (u'', u'various artists', u'various', u'va', u'unknown')
VA_ARTISTS = ('', 'various artists', 'various', 'va', 'unknown')

# Global logger.
log = logging.getLogger('beets')
@@ -108,7 +106,7 @@ def assign_items(items, tracks):
    log.debug('...done.')

    # Produce the output matching.
    mapping = dict((items[i], tracks[j]) for (i, j) in matching)
    mapping = {items[i]: tracks[j] for (i, j) in matching}
    extra_items = list(set(items) - set(mapping.keys()))
    extra_items.sort(key=lambda i: (i.disc, i.track, i.title))
    extra_tracks = list(set(tracks) - set(mapping.values()))
@@ -276,16 +274,16 @@ def match_by_id(items):
    try:
        first = next(albumids)
    except StopIteration:
        log.debug(u'No album ID found.')
        log.debug('No album ID found.')
        return None

    # Is there a consensus on the MB album ID?
    for other in albumids:
        if other != first:
            log.debug(u'No album ID consensus.')
            log.debug('No album ID consensus.')
            return None
    # If all album IDs are equal, look up the album.
    log.debug(u'Searching for discovered album ID: {0}', first)
    log.debug('Searching for discovered album ID: {0}', first)
    return hooks.album_for_mbid(first)


@@ -351,23 +349,23 @@ def _add_candidate(items, results, info):
    checking the track count, ordering the items, checking for
    duplicates, and calculating the distance.
    """
    log.debug(u'Candidate: {0} - {1} ({2})',
    log.debug('Candidate: {0} - {1} ({2})',
              info.artist, info.album, info.album_id)

    # Discard albums with zero tracks.
    if not info.tracks:
        log.debug(u'No tracks.')
        log.debug('No tracks.')
        return

    # Don't duplicate.
    if info.album_id in results:
        log.debug(u'Duplicate.')
        log.debug('Duplicate.')
        return

    # Discard matches without required tags.
    for req_tag in config['match']['required'].as_str_seq():
        if getattr(info, req_tag) is None:
            log.debug(u'Ignored. Missing required tag: {0}', req_tag)
            log.debug('Ignored. Missing required tag: {0}', req_tag)
            return

    # Find mapping between the items and the track info.
@@ -380,10 +378,10 @@ def _add_candidate(items, results, info):
    penalties = [key for key, _ in dist]
    for penalty in config['match']['ignored'].as_str_seq():
        if penalty in penalties:
            log.debug(u'Ignored. Penalty: {0}', penalty)
            log.debug('Ignored. Penalty: {0}', penalty)
            return

    log.debug(u'Success. Distance: {0}', dist)
    log.debug('Success. Distance: {0}', dist)
    results[info.album_id] = hooks.AlbumMatch(dist, info, mapping,
                                              extra_items, extra_tracks)

@@ -411,7 +409,7 @@ def tag_album(items, search_artist=None, search_album=None,
    likelies, consensus = current_metadata(items)
    cur_artist = likelies['artist']
    cur_album = likelies['album']
    log.debug(u'Tagging {0} - {1}', cur_artist, cur_album)
    log.debug('Tagging {0} - {1}', cur_artist, cur_album)

    # The output result (distance, AlbumInfo) tuples (keyed by MB album
    # ID).
@@ -420,7 +418,7 @@ def tag_album(items, search_artist=None, search_album=None,
    # Search by explicit ID.
    if search_ids:
        for search_id in search_ids:
            log.debug(u'Searching for album ID: {0}', search_id)
            log.debug('Searching for album ID: {0}', search_id)
            for id_candidate in hooks.albums_for_id(search_id):
                _add_candidate(items, candidates, id_candidate)

@@ -431,13 +429,13 @@ def tag_album(items, search_artist=None, search_album=None,
    if id_info:
        _add_candidate(items, candidates, id_info)
        rec = _recommendation(list(candidates.values()))
        log.debug(u'Album ID match recommendation is {0}', rec)
        log.debug('Album ID match recommendation is {0}', rec)
        if candidates and not config['import']['timid']:
            # If we have a very good MBID match, return immediately.
            # Otherwise, this match will compete against metadata-based
            # matches.
            if rec == Recommendation.strong:
                log.debug(u'ID match.')
                log.debug('ID match.')
                return cur_artist, cur_album, \
                    Proposal(list(candidates.values()), rec)

@@ -445,22 +443,29 @@ def tag_album(items, search_artist=None, search_album=None,
    if not (search_artist and search_album):
        # No explicit search terms -- use current metadata.
        search_artist, search_album = cur_artist, cur_album
    log.debug(u'Search terms: {0} - {1}', search_artist, search_album)
    log.debug('Search terms: {0} - {1}', search_artist, search_album)

    extra_tags = None
    if config['musicbrainz']['extra_tags']:
        tag_list = config['musicbrainz']['extra_tags'].get()
        extra_tags = {k: v for (k, v) in likelies.items() if k in tag_list}
        log.debug('Additional search terms: {0}', extra_tags)

    # Is this album likely to be a "various artist" release?
    va_likely = ((not consensus['artist']) or
                 (search_artist.lower() in VA_ARTISTS) or
                 any(item.comp for item in items))
    log.debug(u'Album might be VA: {0}', va_likely)
    log.debug('Album might be VA: {0}', va_likely)

    # Get the results from the data sources.
    for matched_candidate in hooks.album_candidates(items,
                                                    search_artist,
                                                    search_album,
                                                    va_likely):
                                                    va_likely,
                                                    extra_tags):
        _add_candidate(items, candidates, matched_candidate)

    log.debug(u'Evaluating {0} candidates.', len(candidates))
    log.debug('Evaluating {0} candidates.', len(candidates))
    # Sort and get the recommendation.
    candidates = _sort_candidates(candidates.values())
    rec = _recommendation(candidates)
@@ -485,7 +490,7 @@ def tag_item(item, search_artist=None, search_title=None,
    trackids = search_ids or [t for t in [item.mb_trackid] if t]
    if trackids:
        for trackid in trackids:
            log.debug(u'Searching for track ID: {0}', trackid)
            log.debug('Searching for track ID: {0}', trackid)
            for track_info in hooks.tracks_for_id(trackid):
                dist = track_distance(item, track_info, incl_artist=True)
                candidates[track_info.track_id] = \
@@ -494,7 +499,7 @@ def tag_item(item, search_artist=None, search_title=None,
        rec = _recommendation(_sort_candidates(candidates.values()))
        if rec == Recommendation.strong and \
                not config['import']['timid']:
            log.debug(u'Track ID match.')
            log.debug('Track ID match.')
            return Proposal(_sort_candidates(candidates.values()), rec)

    # If we're searching by ID, don't proceed.
@@ -507,7 +512,7 @@ def tag_item(item, search_artist=None, search_title=None,
    # Search terms.
    if not (search_artist and search_title):
        search_artist, search_title = item.artist, item.title
    log.debug(u'Item search terms: {0} - {1}', search_artist, search_title)
    log.debug('Item search terms: {0} - {1}', search_artist, search_title)

    # Get and evaluate candidate metadata.
    for track_info in hooks.item_candidates(item, search_artist, search_title):
@@ -515,7 +520,7 @@ def tag_item(item, search_artist=None, search_title=None,
        candidates[track_info.track_id] = hooks.TrackMatch(dist, track_info)

    # Sort by distance and return with recommendation.
    log.debug(u'Found {0} candidates.', len(candidates))
    log.debug('Found {0} candidates.', len(candidates))
    candidates = _sort_candidates(candidates.values())
    rec = _recommendation(candidates)
    return Proposal(candidates, rec)

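The new extra_tags block in tag_album is a plain dict comprehension over the likelies mapping; only fields the user listed in config['musicbrainz']['extra_tags'] survive. A sketch with invented values:

likelies = {'artist': 'Example', 'year': 1999, 'catalognum': 'CAT-001'}
tag_list = ['year', 'catalognum']   # assumed user configuration

extra_tags = {k: v for (k, v) in likelies.items() if k in tag_list}
assert extra_tags == {'year': 1999, 'catalognum': 'CAT-001'}
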
233
lib/beets/autotag/mb.py
Executable file → Normal file
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -15,55 +14,72 @@

"""Searches for albums in the MusicBrainz database.
"""
from __future__ import division, absolute_import, print_function

import musicbrainzngs
import re
import traceback
from six.moves.urllib.parse import urljoin

from beets import logging
from beets import plugins
import beets.autotag.hooks
import beets
from beets import util
from beets import config
import six
from collections import Counter
from urllib.parse import urljoin

VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377'

if util.SNI_SUPPORTED:
    BASE_URL = 'https://musicbrainz.org/'
else:
    BASE_URL = 'http://musicbrainz.org/'
BASE_URL = 'https://musicbrainz.org/'

SKIPPED_TRACKS = ['[data track]']

FIELDS_TO_MB_KEYS = {
    'catalognum': 'catno',
    'country': 'country',
    'label': 'label',
    'media': 'format',
    'year': 'date',
}

musicbrainzngs.set_useragent('beets', beets.__version__,
                             'http://beets.io/')
                             'https://beets.io/')


class MusicBrainzAPIError(util.HumanReadableException):
    """An error while talking to MusicBrainz. The `query` field is the
    parameter to the action and may have any type.
    """

    def __init__(self, reason, verb, query, tb=None):
        self.query = query
        if isinstance(reason, musicbrainzngs.WebServiceError):
            reason = u'MusicBrainz not reachable'
        super(MusicBrainzAPIError, self).__init__(reason, verb, tb)
            reason = 'MusicBrainz not reachable'
        super().__init__(reason, verb, tb)

    def get_message(self):
        return u'{0} in {1} with query {2}'.format(
        return '{} in {} with query {}'.format(
            self._reasonstr(), self.verb, repr(self.query)
        )


log = logging.getLogger('beets')

RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups',
                    'labels', 'artist-credits', 'aliases',
                    'recording-level-rels', 'work-rels',
                    'work-level-rels', 'artist-rels']
TRACK_INCLUDES = ['artists', 'aliases']
                    'work-level-rels', 'artist-rels', 'isrcs']
BROWSE_INCLUDES = ['artist-credits', 'work-rels',
                   'artist-rels', 'recording-rels', 'release-rels']
if "work-level-rels" in musicbrainzngs.VALID_BROWSE_INCLUDES['recording']:
    BROWSE_INCLUDES.append("work-level-rels")
BROWSE_CHUNKSIZE = 100
BROWSE_MAXTRACKS = 500
TRACK_INCLUDES = ['artists', 'aliases', 'isrcs']
if 'work-level-rels' in musicbrainzngs.VALID_INCLUDES['recording']:
    TRACK_INCLUDES += ['work-level-rels', 'artist-rels']
if 'genres' in musicbrainzngs.VALID_INCLUDES['recording']:
    RELEASE_INCLUDES += ['genres']


def track_url(trackid):
@@ -79,7 +95,11 @@ def configure():
    from the beets configuration. This should be called at startup.
    """
    hostname = config['musicbrainz']['host'].as_str()
    musicbrainzngs.set_hostname(hostname)
    https = config['musicbrainz']['https'].get(bool)
    # Only call set_hostname when a custom server is configured, since
    # musicbrainz-ngs connects to musicbrainz.org with HTTPS by default.
    if hostname != "musicbrainz.org":
        musicbrainzngs.set_hostname(hostname, https)
    musicbrainzngs.set_rate_limit(
        config['musicbrainz']['ratelimit_interval'].as_number(),
        config['musicbrainz']['ratelimit'].get(int),
@@ -109,6 +129,24 @@ def _preferred_alias(aliases):
    return matches[0]


def _preferred_release_event(release):
    """Given a release, select and return the user's preferred release
    event as a tuple of (country, release_date). Fall back to the
    default release event if a preferred event is not found.
    """
    countries = config['match']['preferred']['countries'].as_str_seq()

    for country in countries:
        for event in release.get('release-event-list', {}):
            try:
                if country in event['area']['iso-3166-1-code-list']:
                    return country, event['date']
            except KeyError:
                pass

    return release.get('country'), release.get('date')

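A self-contained sketch of the country-preference walk in _preferred_release_event (the data shapes imitate musicbrainzngs output; the values are invented):

release = {
    'country': 'US', 'date': '2001-01-01',
    'release-event-list': [
        {'date': '2001-02-02',
         'area': {'iso-3166-1-code-list': ['GB']}},
    ],
}
countries = ['GB', 'DE']   # assumed config['match']['preferred']['countries']

def preferred_event(release, countries):
    for country in countries:
        for event in release.get('release-event-list', ()):
            try:
                if country in event['area']['iso-3166-1-code-list']:
                    return country, event['date']
            except KeyError:
                pass
    return release.get('country'), release.get('date')

assert preferred_event(release, countries) == ('GB', '2001-02-02')
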
def _flatten_artist_credit(credit):
    """Given a list representing an ``artist-credit`` block, flatten the
    data into a triple of joined artist name strings: canonical, sort, and
@@ -118,7 +156,7 @@ def _flatten_artist_credit(credit):
    artist_sort_parts = []
    artist_credit_parts = []
    for el in credit:
        if isinstance(el, six.string_types):
        if isinstance(el, str):
            # Join phrase.
            artist_parts.append(el)
            artist_credit_parts.append(el)
@@ -165,13 +203,13 @@ def track_info(recording, index=None, medium=None, medium_index=None,
    the number of tracks on the medium. Each number is a 1-based index.
    """
    info = beets.autotag.hooks.TrackInfo(
        recording['title'],
        recording['id'],
        title=recording['title'],
        track_id=recording['id'],
        index=index,
        medium=medium,
        medium_index=medium_index,
        medium_total=medium_total,
        data_source=u'MusicBrainz',
        data_source='MusicBrainz',
        data_url=track_url(recording['id']),
    )

@@ -187,11 +225,22 @@ def track_info(recording, index=None, medium=None, medium_index=None,
    if recording.get('length'):
        info.length = int(recording['length']) / (1000.0)

    info.trackdisambig = recording.get('disambiguation')

    if recording.get('isrc-list'):
        info.isrc = ';'.join(recording['isrc-list'])

    lyricist = []
    composer = []
    composer_sort = []
    for work_relation in recording.get('work-relation-list', ()):
        if work_relation['type'] != 'performance':
            continue
        info.work = work_relation['work']['title']
        info.mb_workid = work_relation['work']['id']
        if 'disambiguation' in work_relation['work']:
            info.work_disambig = work_relation['work']['disambiguation']

        for artist_relation in work_relation['work'].get(
                'artist-relation-list', ()):
            if 'type' in artist_relation:
@@ -200,10 +249,13 @@ def track_info(recording, index=None, medium=None, medium_index=None,
                    lyricist.append(artist_relation['artist']['name'])
                elif type == 'composer':
                    composer.append(artist_relation['artist']['name'])
                    composer_sort.append(
                        artist_relation['artist']['sort-name'])
    if lyricist:
        info.lyricist = u', '.join(lyricist)
        info.lyricist = ', '.join(lyricist)
    if composer:
        info.composer = u', '.join(composer)
        info.composer = ', '.join(composer)
        info.composer_sort = ', '.join(composer_sort)

    arranger = []
    for artist_relation in recording.get('artist-relation-list', ()):
@@ -212,7 +264,12 @@ def track_info(recording, index=None, medium=None, medium_index=None,
        if type == 'arranger':
            arranger.append(artist_relation['artist']['name'])
    if arranger:
        info.arranger = u', '.join(arranger)
        info.arranger = ', '.join(arranger)

    # Supplementary fields provided by plugins
    extra_trackdatas = plugins.send('mb_track_extract', data=recording)
    for extra_trackdata in extra_trackdatas:
        info.update(extra_trackdata)

    info.decode()
    return info
@@ -246,6 +303,26 @@ def album_info(release):
    artist_name, artist_sort_name, artist_credit_name = \
        _flatten_artist_credit(release['artist-credit'])

    ntracks = sum(len(m['track-list']) for m in release['medium-list'])

    # The MusicBrainz API omits 'artist-relation-list' and 'work-relation-list'
    # when the release has more than 500 tracks. So we use browse_recordings
    # on chunks of tracks to recover the same information in this case.
    if ntracks > BROWSE_MAXTRACKS:
        log.debug('Album {} has too many tracks', release['id'])
        recording_list = []
        for i in range(0, ntracks, BROWSE_CHUNKSIZE):
            log.debug('Retrieving tracks starting at {}', i)
            recording_list.extend(musicbrainzngs.browse_recordings(
                release=release['id'], limit=BROWSE_CHUNKSIZE,
                includes=BROWSE_INCLUDES,
                offset=i)['recording-list'])
        track_map = {r['id']: r for r in recording_list}
        for medium in release['medium-list']:
            for recording in medium['track-list']:
                recording_info = track_map[recording['recording']['id']]
                recording['recording'] = recording_info

    # Basic info.
    track_infos = []
    index = 0
@@ -253,11 +330,29 @@ def album_info(release):
        disctitle = medium.get('title')
        format = medium.get('format')

        if format in config['match']['ignored_media'].as_str_seq():
            continue

        all_tracks = medium['track-list']
        if ('data-track-list' in medium
                and not config['match']['ignore_data_tracks']):
            all_tracks += medium['data-track-list']
        track_count = len(all_tracks)

        if 'pregap' in medium:
            all_tracks.insert(0, medium['pregap'])

        for track in all_tracks:

            if ('title' in track['recording'] and
                    track['recording']['title'] in SKIPPED_TRACKS):
                continue

            if ('video' in track['recording'] and
                    track['recording']['video'] == 'true' and
                    config['match']['ignore_video_tracks']):
                continue

            # Basic information from the recording.
            index += 1
            ti = track_info(
@@ -265,8 +360,9 @@ def album_info(release):
                index,
                int(medium['position']),
                int(track['position']),
                len(medium['track-list']),
                track_count,
            )
            ti.release_track_id = track['id']
            ti.disctitle = disctitle
            ti.media = format
            ti.track_alt = track['number']
@@ -285,15 +381,15 @@ def album_info(release):
            track_infos.append(ti)

    info = beets.autotag.hooks.AlbumInfo(
        release['title'],
        release['id'],
        artist_name,
        release['artist-credit'][0]['artist']['id'],
        track_infos,
        album=release['title'],
        album_id=release['id'],
        artist=artist_name,
        artist_id=release['artist-credit'][0]['artist']['id'],
        tracks=track_infos,
        mediums=len(release['medium-list']),
        artist_sort=artist_sort_name,
        artist_credit=artist_credit_name,
        data_source=u'MusicBrainz',
        data_source='MusicBrainz',
        data_url=album_url(release['id']),
    )
    info.va = info.artist_id == VARIOUS_ARTISTS_ID
@@ -301,25 +397,36 @@ def album_info(release):
    info.artist = config['va_name'].as_str()
    info.asin = release.get('asin')
    info.releasegroup_id = release['release-group']['id']
    info.country = release.get('country')
    info.albumstatus = release.get('status')

    # Build up the disambiguation string from the release group and release.
    disambig = []
    # Get the disambiguation strings at the release and release group level.
    if release['release-group'].get('disambiguation'):
        disambig.append(release['release-group'].get('disambiguation'))
        info.releasegroupdisambig = \
            release['release-group'].get('disambiguation')
    if release.get('disambiguation'):
        disambig.append(release.get('disambiguation'))
    info.albumdisambig = u', '.join(disambig)
    info.albumdisambig = release.get('disambiguation')

    # Release type not always populated.
    # Get the "classic" Release type. This data comes from a legacy API
    # feature before MusicBrainz supported multiple release types.
    if 'type' in release['release-group']:
        reltype = release['release-group']['type']
        if reltype:
            info.albumtype = reltype.lower()

    # Release dates.
    release_date = release.get('date')
    # Set the new-style "primary" and "secondary" release types.
    albumtypes = []
    if 'primary-type' in release['release-group']:
        rel_primarytype = release['release-group']['primary-type']
        if rel_primarytype:
            albumtypes.append(rel_primarytype.lower())
    if 'secondary-type-list' in release['release-group']:
        if release['release-group']['secondary-type-list']:
            for sec_type in release['release-group']['secondary-type-list']:
                albumtypes.append(sec_type.lower())
    info.albumtypes = '; '.join(albumtypes)

    # Release events.
    info.country, release_date = _preferred_release_event(release)
    release_group_date = release['release-group'].get('first-release-date')
    if not release_date:
        # Fall back if release-specific date is not available.
@@ -347,17 +454,33 @@ def album_info(release):
    first_medium = release['medium-list'][0]
    info.media = first_medium.get('format')

    if config['musicbrainz']['genres']:
        sources = [
            release['release-group'].get('genre-list', []),
            release.get('genre-list', []),
        ]
        genres = Counter()
        for source in sources:
            for genreitem in source:
                genres[genreitem['name']] += int(genreitem['count'])
        info.genre = '; '.join(g[0] for g in sorted(genres.items(),
                                                    key=lambda g: -g[1]))

    extra_albumdatas = plugins.send('mb_album_extract', data=release)
    for extra_albumdata in extra_albumdatas:
        info.update(extra_albumdata)

    info.decode()
    return info

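The >500-track workaround added to album_info above is ordinary offset pagination. A standalone sketch of the chunking arithmetic, with a fake fetcher standing in for musicbrainzngs.browse_recordings (no network access):

BROWSE_CHUNKSIZE = 100
NTRACKS = 523   # invented release size, over the 500-track limit

def fetch(offset, limit):
    # Stand-in for musicbrainzngs.browse_recordings(...)['recording-list'].
    return [{'id': i} for i in range(offset, min(offset + limit, NTRACKS))]

recording_list = []
for i in range(0, NTRACKS, BROWSE_CHUNKSIZE):
    recording_list.extend(fetch(i, BROWSE_CHUNKSIZE))

assert len(recording_list) == NTRACKS
track_map = {r['id']: r for r in recording_list}   # keyed for the rewrite pass
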
def match_album(artist, album, tracks=None):
def match_album(artist, album, tracks=None, extra_tags=None):
    """Searches for a single album ("release" in MusicBrainz parlance)
    and returns an iterator over AlbumInfo objects. May raise a
    MusicBrainzAPIError.

    The query consists of an artist name, an album name, and,
    optionally, a number of tracks on the album.
    optionally, a number of tracks on the album and any other extra tags.
    """
    # Build search criteria.
    criteria = {'release': album.lower().strip()}
@@ -367,14 +490,24 @@ def match_album(artist, album, tracks=None):
        # Various Artists search.
        criteria['arid'] = VARIOUS_ARTISTS_ID
    if tracks is not None:
        criteria['tracks'] = six.text_type(tracks)
        criteria['tracks'] = str(tracks)

    # Additional search cues from existing metadata.
    if extra_tags:
        for tag in extra_tags:
            key = FIELDS_TO_MB_KEYS[tag]
            value = str(extra_tags.get(tag, '')).lower().strip()
            if key == 'catno':
                value = value.replace(' ', '')
            if value:
                criteria[key] = value

    # Abort if we have no search terms.
    if not any(criteria.values()):
        return

    try:
        log.debug(u'Searching for MusicBrainz releases with: {!r}', criteria)
        log.debug('Searching for MusicBrainz releases with: {!r}', criteria)
        res = musicbrainzngs.search_releases(
            limit=config['musicbrainz']['searchlimit'].get(int), **criteria)
    except musicbrainzngs.MusicBrainzError as exc:
@@ -415,7 +548,7 @@ def _parse_id(s):
    no ID can be found, return None.
    """
    # Find the first thing that looks like a UUID/MBID.
    match = re.search(u'[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s)
    match = re.search('[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s)
    if match:
        return match.group()

@@ -425,19 +558,19 @@ def album_for_id(releaseid):
    object or None if the album is not found. May raise a
    MusicBrainzAPIError.
    """
    log.debug(u'Requesting MusicBrainz release {}', releaseid)
    log.debug('Requesting MusicBrainz release {}', releaseid)
    albumid = _parse_id(releaseid)
    if not albumid:
        log.debug(u'Invalid MBID ({0}).', releaseid)
        log.debug('Invalid MBID ({0}).', releaseid)
        return
    try:
        res = musicbrainzngs.get_release_by_id(albumid,
                                               RELEASE_INCLUDES)
    except musicbrainzngs.ResponseError:
        log.debug(u'Album ID match failed.')
        log.debug('Album ID match failed.')
        return None
    except musicbrainzngs.MusicBrainzError as exc:
        raise MusicBrainzAPIError(exc, u'get release by ID', albumid,
        raise MusicBrainzAPIError(exc, 'get release by ID', albumid,
                                  traceback.format_exc())
    return album_info(res['release'])

@@ -448,14 +581,14 @@ def track_for_id(releaseid):
    """
    trackid = _parse_id(releaseid)
    if not trackid:
        log.debug(u'Invalid MBID ({0}).', releaseid)
        log.debug('Invalid MBID ({0}).', releaseid)
        return
    try:
        res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES)
    except musicbrainzngs.ResponseError:
        log.debug(u'Track ID match failed.')
        log.debug('Track ID match failed.')
        return None
    except musicbrainzngs.MusicBrainzError as exc:
        raise MusicBrainzAPIError(exc, u'get recording by ID', trackid,
        raise MusicBrainzAPIError(exc, 'get recording by ID', trackid,
                                  traceback.format_exc())
    return track_info(res['recording'])

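The extra_tags branch in match_album translates beets field names into MusicBrainz search keys through FIELDS_TO_MB_KEYS; catalog numbers additionally lose their spaces. A worked example with invented inputs:

FIELDS_TO_MB_KEYS = {'catalognum': 'catno', 'year': 'date'}
extra_tags = {'catalognum': 'CAT 001', 'year': 1999}

criteria = {}
for tag in extra_tags:
    key = FIELDS_TO_MB_KEYS[tag]
    value = str(extra_tags.get(tag, '')).lower().strip()
    if key == 'catno':
        value = value.replace(' ', '')   # catalog numbers match without spaces
    if value:
        criteria[key] = value

assert criteria == {'catno': 'cat001', 'date': '1999'}
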
23
lib/beets/config_default.yaml
Executable file → Normal file
@@ -7,9 +7,12 @@ import:
    move: no
    link: no
    hardlink: no
    reflink: no
    delete: no
    resume: ask
    incremental: no
    incremental_skip_later: no
    from_scratch: no
    quiet_fallback: skip
    none_rec_action: ask
    timid: no
@@ -25,6 +28,8 @@ import:
    pretend: false
    search_ids: []
    duplicate_action: ask
    bell: no
    set_fields: {}

clutter: ["Thumbs.DB", ".DS_Store"]
ignore: [".*", "*~", "System Volume Information", "lost+found"]
@@ -38,11 +43,22 @@ replace:
    '\.$': _
    '\s+$': ''
    '^\s+': ''
    '^-': _
path_sep_replace: _
drive_sep_replace: _
asciify_paths: false
art_filename: cover
max_filename_length: 0

aunique:
    keys: albumartist album
    disambiguators: albumtype year label catalognum albumdisambig releasegroupdisambig
    bracket: '[]'

overwrite_null:
    album: []
    track: []

plugins: []
pluginpath: []
threaded: yes
@@ -51,6 +67,7 @@ per_disc_numbering: no
verbose: 0
terminal_encoding:
original_date: no
artist_credit: no
id3v23: no
va_name: "Various Artists"

@@ -85,9 +102,12 @@ statefile: state.pickle

musicbrainz:
    host: musicbrainz.org
    https: no
    ratelimit: 1
    ratelimit_interval: 1.0
    searchlimit: 5
    extra_tags: []
    genres: no

match:
    strong_rec_thresh: 0.04
@@ -122,5 +142,8 @@ match:
    original_year: no
    ignored: []
    required: []
    ignored_media: []
    ignore_data_tracks: yes
    ignore_video_tracks: yes
    track_length_grace: 10
    track_length_max: 30

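Elsewhere in this commit, the new keys are read through beets' confuse-backed config accessors. A hedged sketch of the access patterns (requires beets importable; the values depend on the user's configuration):

from beets import config

tag_list = config['musicbrainz']['extra_tags'].get()     # list of field names
use_https = config['musicbrainz']['https'].get(bool)     # plain bool
limit = config['musicbrainz']['searchlimit'].get(int)    # plain int
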
2
lib/beets/dbcore/__init__.py
Executable file → Normal file
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -16,7 +15,6 @@
"""DBCore is an abstract database package that forms the basis for beets'
Library.
"""
from __future__ import division, absolute_import, print_function

from .db import Model, Database
from .query import Query, FieldQuery, MatchQuery, AndQuery, OrQuery

lib/beets/dbcore/db.py

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -15,38 +14,56 @@

"""The central Model and Database constructs for DBCore.
"""
from __future__ import division, absolute_import, print_function

import time
import os
import re
from collections import defaultdict
import threading
import sqlite3
import contextlib
import collections

import beets
from beets.util.functemplate import Template
from beets.util import functemplate
from beets.util import py3_path
from beets.dbcore import types
from .query import MatchQuery, NullSort, TrueQuery
import six
from collections.abc import Mapping


class FormattedMapping(collections.Mapping):
class DBAccessError(Exception):
    """The SQLite database became inaccessible.

    This can happen when trying to read or write the database when, for
    example, the database file is deleted or otherwise disappears. There
    is probably no way to recover from this error.
    """


class FormattedMapping(Mapping):
    """A `dict`-like formatted view of a model.

    The accessor `mapping[key]` returns the formatted version of
    `model[key]` as a unicode string.

    The `included_keys` parameter allows filtering the fields that are
    returned. By default all fields are returned. Limiting to specific keys can
    avoid expensive per-item database queries.

    If `for_path` is true, all path separators in the formatted values
    are replaced.
    """

    def __init__(self, model, for_path=False):
    ALL_KEYS = '*'

    def __init__(self, model, included_keys=ALL_KEYS, for_path=False):
        self.for_path = for_path
        self.model = model
        self.model_keys = model.keys(True)
        if included_keys == self.ALL_KEYS:
            # Performance note: this triggers a database query.
            self.model_keys = self.model.keys(True)
        else:
            self.model_keys = included_keys

    def __getitem__(self, key):
        if key in self.model_keys:
@@ -63,7 +80,7 @@ class FormattedMapping(collections.Mapping):
    def get(self, key, default=None):
        if default is None:
            default = self.model._type(key).format(None)
        return super(FormattedMapping, self).get(key, default)
        return super().get(key, default)

    def _get_formatted(self, model, key):
        value = model._type(key).format(model.get(key))
@@ -72,6 +89,11 @@ class FormattedMapping(collections.Mapping):

        if self.for_path:
            sep_repl = beets.config['path_sep_replace'].as_str()
            sep_drive = beets.config['drive_sep_replace'].as_str()

            if re.match(r'^\w:', value):
                value = re.sub(r'(?<=^\w):', sep_drive, value)

            for sep in (os.path.sep, os.path.altsep):
                if sep:
                    value = value.replace(sep, sep_repl)
@@ -79,11 +101,105 @@ class FormattedMapping(collections.Mapping):
        return value

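The point of included_keys is to skip the expensive model.keys(True) call when the caller already knows which fields it will format. The branch is trivial to model standalone:

ALL_KEYS = '*'

def resolve_keys(included_keys, all_model_keys):
    # Mirrors FormattedMapping.__init__: fall back to the full
    # (database-backed) key listing only when everything was requested.
    if included_keys == ALL_KEYS:
        return all_model_keys()
    return included_keys

assert resolve_keys(['artist'], lambda: ['artist', 'title']) == ['artist']
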
class LazyConvertDict:
    """Lazily convert types for attributes fetched from the database
    """

    def __init__(self, model_cls):
        """Initialize the object empty
        """
        self.data = {}
        self.model_cls = model_cls
        self._converted = {}

    def init(self, data):
        """Set the base data that should be lazily converted
        """
        self.data = data

    def _convert(self, key, value):
        """Convert the attribute type according to the SQL type
        """
        return self.model_cls._type(key).from_sql(value)

    def __setitem__(self, key, value):
        """Set an attribute value, assume it's already converted
        """
        self._converted[key] = value

    def __getitem__(self, key):
        """Get an attribute value, converting the type on demand
        if needed
        """
        if key in self._converted:
            return self._converted[key]
        elif key in self.data:
            value = self._convert(key, self.data[key])
            self._converted[key] = value
            return value

    def __delitem__(self, key):
        """Delete both converted and base data
        """
        if key in self._converted:
            del self._converted[key]
        if key in self.data:
            del self.data[key]

    def keys(self):
        """Get a list of available field names for this object.
        """
        return list(self._converted.keys()) + list(self.data.keys())

    def copy(self):
        """Create a copy of the object.
        """
        new = self.__class__(self.model_cls)
        new.data = self.data.copy()
        new._converted = self._converted.copy()
        return new

    # Act like a dictionary.

    def update(self, values):
        """Assign all values in the given dict.
        """
        for key, value in values.items():
            self[key] = value

    def items(self):
        """Iterate over (key, value) pairs that this object contains.
        Computed fields are not included.
        """
        for key in self:
            yield key, self[key]

    def get(self, key, default=None):
        """Get the value for a given key or `default` if it does not
        exist.
        """
        if key in self:
            return self[key]
        else:
            return default

    def __contains__(self, key):
        """Determine whether `key` is an attribute on this object.
        """
        return key in self.keys()

    def __iter__(self):
        """Iterate over the available field names (excluding computed
        fields).
        """
        return iter(self.keys())

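LazyConvertDict defers the per-field from_sql conversion until a field is first read, then caches the result. A standalone check against the class defined above, using an invented stand-in for the model's type system:

class FakeModelCls:
    @staticmethod
    def _type(key):
        class IntType:
            @staticmethod
            def from_sql(value):
                return int(value)   # pretend every column stores integers
        return IntType

d = LazyConvertDict(FakeModelCls)
d.init({'track': '7'})                # raw SQL value, not converted yet
assert d['track'] == 7                # converted lazily on first access
assert d._converted == {'track': 7}   # and cached afterwards
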
# Abstract base for model classes.

class Model(object):
class Model:
    """An abstract object representing an object in the database. Model
    objects act like dictionaries (i.e., the allow subscript access like
    objects act like dictionaries (i.e., they allow subscript access like
    ``obj['field']``). The same field set is available via attribute
    access as a shortcut (i.e., ``obj.field``). Three kinds of attributes are
    available:
@@ -134,12 +250,22 @@ class Model(object):
    are subclasses of `Sort`.
    """

    _queries = {}
    """Named queries that use a field-like `name:value` syntax but which
    do not relate to any specific field.
    """

    _always_dirty = False
    """By default, fields only become "dirty" when their value actually
    changes. Enabling this flag marks fields as dirty even when the new
    value is the same as the old value (e.g., `o.f = o.f`).
    """

    _revision = -1
    """A revision number from when the model was loaded from or written
    to the database.
    """

    @classmethod
    def _getters(cls):
        """Return a mapping from field names to getter functions.
@@ -163,8 +289,8 @@ class Model(object):
        """
        self._db = db
        self._dirty = set()
        self._values_fixed = {}
        self._values_flex = {}
        self._values_fixed = LazyConvertDict(self)
        self._values_flex = LazyConvertDict(self)

        # Initial contents.
        self.update(values)
@@ -178,23 +304,25 @@ class Model(object):
        ordinary construction are bypassed.
        """
        obj = cls(db)
        for key, value in fixed_values.items():
            obj._values_fixed[key] = cls._type(key).from_sql(value)
        for key, value in flex_values.items():
            obj._values_flex[key] = cls._type(key).from_sql(value)

        obj._values_fixed.init(fixed_values)
        obj._values_flex.init(flex_values)

        return obj

    def __repr__(self):
        return '{0}({1})'.format(
        return '{}({})'.format(
            type(self).__name__,
            ', '.join('{0}={1!r}'.format(k, v) for k, v in dict(self).items()),
            ', '.join(f'{k}={v!r}' for k, v in dict(self).items()),
        )

    def clear_dirty(self):
        """Mark all fields as *clean* (i.e., not needing to be stored to
        the database).
        the database). Also update the revision.
        """
        self._dirty = set()
        if self._db:
            self._revision = self._db.revision

    def _check_db(self, need_id=True):
        """Ensure that this object is associated with a database row: it
@@ -203,10 +331,25 @@ class Model(object):
        """
        if not self._db:
            raise ValueError(
                u'{0} has no database'.format(type(self).__name__)
                '{} has no database'.format(type(self).__name__)
            )
        if need_id and not self.id:
            raise ValueError(u'{0} has no id'.format(type(self).__name__))
            raise ValueError('{} has no id'.format(type(self).__name__))

    def copy(self):
        """Create a copy of the model object.

        The field values and other state is duplicated, but the new copy
        remains associated with the same database as the old object.
        (A simple `copy.deepcopy` will not work because it would try to
        duplicate the SQLite connection.)
        """
        new = self.__class__()
        new._db = self._db
        new._values_fixed = self._values_fixed.copy()
        new._values_flex = self._values_flex.copy()
        new._dirty = self._dirty.copy()
        return new

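The docstring's warning about copy.deepcopy is easy to reproduce: an SQLite connection cannot be deep-copied, so a naive deep copy of a Model holding one would fail. A quick demonstration:

import copy
import sqlite3

conn = sqlite3.connect(':memory:')
try:
    copy.deepcopy(conn)   # what a naive Model deep copy would attempt
except TypeError as exc:
    print('deepcopy fails:', exc)
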
    # Essential field accessors.

@@ -219,22 +362,36 @@ class Model(object):
        """
        return cls._fields.get(key) or cls._types.get(key) or types.DEFAULT

    def __getitem__(self, key):
        """Get the value for a field. Raise a KeyError if the field is
        not available.
    def _get(self, key, default=None, raise_=False):
        """Get the value for a field, or `default`. Alternatively,
        raise a KeyError if the field is not available.
        """
        getters = self._getters()
        if key in getters:  # Computed.
            return getters[key](self)
        elif key in self._fields:  # Fixed.
            return self._values_fixed.get(key)
            if key in self._values_fixed:
                return self._values_fixed[key]
            else:
                return self._type(key).null
        elif key in self._values_flex:  # Flexible.
            return self._values_flex[key]
        else:
        elif raise_:
            raise KeyError(key)
        else:
            return default

    def __setitem__(self, key, value):
        """Assign the value for a field.
    get = _get

    def __getitem__(self, key):
        """Get the value for a field. Raise a KeyError if the field is
        not available.
        """
        return self._get(key, raise_=True)

    def _setitem(self, key, value):
        """Assign the value for a field, return whether new and old value
        differ.
        """
        # Choose where to place the value.
        if key in self._fields:
@@ -248,21 +405,29 @@ class Model(object):
        # Assign value and possibly mark as dirty.
        old_value = source.get(key)
        source[key] = value
        if self._always_dirty or old_value != value:
        changed = old_value != value
        if self._always_dirty or changed:
            self._dirty.add(key)

        return changed

    def __setitem__(self, key, value):
        """Assign the value for a field.
        """
        self._setitem(key, value)

    def __delitem__(self, key):
        """Remove a flexible attribute from the model.
        """
        if key in self._values_flex:  # Flexible.
            del self._values_flex[key]
            self._dirty.add(key)  # Mark for dropping on store.
        elif key in self._fields:  # Fixed
            setattr(self, key, self._type(key).null)
        elif key in self._getters():  # Computed.
            raise KeyError(u'computed field {0} cannot be deleted'.format(key))
        elif key in self._fields:  # Fixed.
            raise KeyError(u'fixed field {0} cannot be deleted'.format(key))
            raise KeyError(f'computed field {key} cannot be deleted')
        else:
            raise KeyError(u'no such field {0}'.format(key))
            raise KeyError(f'no such field {key}')

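The refactored _setitem now reports whether the assignment actually changed anything, which is what lets dirty marking stay accurate. A toy mirror of the logic:

source, dirty = {'title': 'Old'}, set()

def setitem(key, value, always_dirty=False):
    old_value = source.get(key)
    source[key] = value
    changed = old_value != value
    if always_dirty or changed:
        dirty.add(key)
    return changed

assert setitem('title', 'New') is True and 'title' in dirty
assert setitem('title', 'New') is False   # re-assigning the same value reports no change
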
def keys(self, computed=False):
|
||||
"""Get a list of available field names for this object. The
|
||||
@@ -297,19 +462,10 @@ class Model(object):
        for key in self:
            yield key, self[key]

    def get(self, key, default=None):
        """Get the value for a given key or `default` if it does not
        exist.
        """
        if key in self:
            return self[key]
        else:
            return default

    def __contains__(self, key):
        """Determine whether `key` is an attribute on this object.
        """
        return key in self.keys(True)
        return key in self.keys(computed=True)

    def __iter__(self):
        """Iterate over the available field names (excluding computed
@@ -321,22 +477,22 @@ class Model(object):

    def __getattr__(self, key):
        if key.startswith('_'):
            raise AttributeError(u'model has no attribute {0!r}'.format(key))
            raise AttributeError(f'model has no attribute {key!r}')
        else:
            try:
                return self[key]
            except KeyError:
                raise AttributeError(u'no such field {0!r}'.format(key))
                raise AttributeError(f'no such field {key!r}')

    def __setattr__(self, key, value):
        if key.startswith('_'):
            super(Model, self).__setattr__(key, value)
            super().__setattr__(key, value)
        else:
            self[key] = value

    def __delattr__(self, key):
        if key.startswith('_'):
            super(Model, self).__delattr__(key)
            super().__delattr__(key)
        else:
            del self[key]

@@ -365,7 +521,7 @@ class Model(object):
        with self._db.transaction() as tx:
            # Main table update.
            if assignments:
                query = 'UPDATE {0} SET {1} WHERE id=?'.format(
                query = 'UPDATE {} SET {} WHERE id=?'.format(
                    self._table, assignments
                )
                subvars.append(self.id)
@@ -376,7 +532,7 @@ class Model(object):
                if key in self._dirty:
                    self._dirty.remove(key)
                    tx.mutate(
                        'INSERT INTO {0} '
                        'INSERT INTO {} '
                        '(entity_id, key, value) '
                        'VALUES (?, ?, ?);'.format(self._flex_table),
                        (self.id, key, value),
@@ -385,7 +541,7 @@ class Model(object):
            # Deleted flexible attributes.
            for key in self._dirty:
                tx.mutate(
                    'DELETE FROM {0} '
                    'DELETE FROM {} '
                    'WHERE entity_id=? AND key=?'.format(self._flex_table),
                    (self.id, key)
                )
@@ -394,12 +550,18 @@ class Model(object):

    def load(self):
        """Refresh the object's metadata from the library database.

        If check_revision is true, the database is only queried when a
        transaction has been committed since the item was last loaded.
        """
        self._check_db()
        if not self._dirty and self._db.revision == self._revision:
            # Exit early
            return
        stored_obj = self._db._get(type(self), self.id)
        assert stored_obj is not None, u"object {0} not in DB".format(self.id)
        self._values_fixed = {}
        self._values_flex = {}
        assert stored_obj is not None, f"object {self.id} not in DB"
        self._values_fixed = LazyConvertDict(self)
        self._values_flex = LazyConvertDict(self)
        self.update(dict(stored_obj))
        self.clear_dirty()
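The new `revision` counter lets `load()` skip a database round-trip when no transaction has committed since the object was last loaded. Reduced to a standalone sketch (the class names here are hypothetical):

# Sketch: skip reloading when nothing has committed since the last
# load. `revision` is bumped on every mutating transaction.
class FakeDB:
    revision = 0

class CachedObject:
    def __init__(self, db):
        self._db = db
        self._revision = db.revision
        self.loads = 0

    def load(self):
        if self._db.revision == self._revision:
            return  # nothing committed since last load; exit early
        self.loads += 1
        self._revision = self._db.revision

db = FakeDB()
obj = CachedObject(db)
obj.load()            # no-op: revisions match
db.revision += 1      # a transaction committed somewhere
obj.load()            # now actually refreshes
assert obj.loads == 1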
@@ -409,11 +571,11 @@ class Model(object):
        self._check_db()
        with self._db.transaction() as tx:
            tx.mutate(
                'DELETE FROM {0} WHERE id=?'.format(self._table),
                f'DELETE FROM {self._table} WHERE id=?',
                (self.id,)
            )
            tx.mutate(
                'DELETE FROM {0} WHERE entity_id=?'.format(self._flex_table),
                f'DELETE FROM {self._flex_table} WHERE entity_id=?',
                (self.id,)
            )

@@ -431,7 +593,7 @@ class Model(object):

        with self._db.transaction() as tx:
            new_id = tx.mutate(
                'INSERT INTO {0} DEFAULT VALUES'.format(self._table)
                f'INSERT INTO {self._table} DEFAULT VALUES'
            )
            self.id = new_id
            self.added = time.time()
@@ -446,11 +608,11 @@ class Model(object):

    _formatter = FormattedMapping

    def formatted(self, for_path=False):
    def formatted(self, included_keys=_formatter.ALL_KEYS, for_path=False):
        """Get a mapping containing all values on this object formatted
        as human-readable unicode strings.
        """
        return self._formatter(self, for_path)
        return self._formatter(self, included_keys, for_path)

    def evaluate_template(self, template, for_path=False):
        """Evaluate a template (a string or a `Template` object) using
@@ -458,9 +620,9 @@ class Model(object):
        separators will be added to the template.
        """
        # Perform substitution.
        if isinstance(template, six.string_types):
            template = Template(template)
        return template.substitute(self.formatted(for_path),
        if isinstance(template, str):
            template = functemplate.template(template)
        return template.substitute(self.formatted(for_path=for_path),
                                   self._template_funcs())

    # Parsing.
@@ -469,8 +631,8 @@ class Model(object):
    def _parse(cls, key, string):
        """Parse a string as a value for the given key.
        """
        if not isinstance(string, six.string_types):
            raise TypeError(u"_parse() argument must be a string")
        if not isinstance(string, str):
            raise TypeError("_parse() argument must be a string")

        return cls._type(key).parse(string)
@@ -482,11 +644,13 @@ class Model(object):

# Database controller and supporting interfaces.

class Results(object):
class Results:
    """An item query result set. Iterating over the collection lazily
    constructs LibModel objects that reflect database rows.
    """
    def __init__(self, model_class, rows, db, query=None, sort=None):

    def __init__(self, model_class, rows, db, flex_rows,
                 query=None, sort=None):
        """Create a result set that will construct objects of type
        `model_class`.

@@ -506,6 +670,7 @@ class Results(object):
        self.db = db
        self.query = query
        self.sort = sort
        self.flex_rows = flex_rows

        # We keep a queue of rows we haven't yet consumed for
        # materialization. We preserve the original total number of
@@ -527,6 +692,10 @@ class Results(object):
        a `Results` object a second time should be much faster than the
        first.
        """

        # Index flexible attributes by the item ID, so we have easier access
        flex_attrs = self._get_indexed_flex_attrs()

        index = 0  # Position in the materialized objects.
        while index < len(self._objects) or self._rows:
            # Are there previously-materialized objects to produce?
@@ -539,7 +708,7 @@ class Results(object):
            else:
                while self._rows:
                    row = self._rows.pop(0)
                    obj = self._make_model(row)
                    obj = self._make_model(row, flex_attrs.get(row['id'], {}))
                    # If there is a slow-query predicate, ensure that the
                    # object passes it.
                    if not self.query or self.query.match(obj):
@@ -561,20 +730,24 @@ class Results(object):
            # Objects are pre-sorted (i.e., by the database).
            return self._get_objects()

    def _make_model(self, row):
        # Get the flexible attributes for the object.
        with self.db.transaction() as tx:
            flex_rows = tx.query(
                'SELECT * FROM {0} WHERE entity_id=?'.format(
                    self.model_class._flex_table
                ),
                (row['id'],)
            )
    def _get_indexed_flex_attrs(self):
        """ Index flexible attributes by the entity id they belong to
        """
        flex_values = {}
        for row in self.flex_rows:
            if row['entity_id'] not in flex_values:
                flex_values[row['entity_id']] = {}

            flex_values[row['entity_id']][row['key']] = row['value']

        return flex_values

    def _make_model(self, row, flex_values={}):
        """ Create a Model object for the given row
        """
        cols = dict(row)
        values = dict((k, v) for (k, v) in cols.items()
                      if not k[:4] == 'flex')
        flex_values = dict((row['key'], row['value']) for row in flex_rows)
        values = {k: v for (k, v) in cols.items()
                  if not k[:4] == 'flex'}

        # Construct the Python object
        obj = self.model_class._awaken(self.db, values, flex_values)
@@ -623,7 +796,7 @@ class Results(object):
            next(it)
            return next(it)
        except StopIteration:
            raise IndexError(u'result index {0} out of range'.format(n))
            raise IndexError(f'result index {n} out of range')

    def get(self):
        """Return the first matching object, or None if no objects
@@ -636,10 +809,16 @@ class Results(object):
            return None

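`_get_indexed_flex_attrs` turns the per-row attribute query into a single pass over pre-fetched rows grouped by `entity_id`. The same grouping fits in a few lines with `defaultdict`; the row data below is made up for illustration:

from collections import defaultdict

# Sketch: group pre-fetched flexible-attribute rows by entity id, so
# each model row can be awakened with a plain dict lookup.
flex_rows = [
    {'entity_id': 1, 'key': 'mood', 'value': 'calm'},
    {'entity_id': 1, 'key': 'rating', 'value': '4'},
    {'entity_id': 2, 'key': 'mood', 'value': 'loud'},
]

flex_values = defaultdict(dict)
for row in flex_rows:
    flex_values[row['entity_id']][row['key']] = row['value']

assert flex_values[1] == {'mood': 'calm', 'rating': '4'}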
class Transaction(object):
class Transaction:
    """A context manager for safe, concurrent access to the database.
    All SQL commands should be executed through a transaction.
    """

    _mutated = False
    """A flag storing whether a mutation has been executed in the
    current transaction.
    """

    def __init__(self, db):
        self.db = db

@@ -661,12 +840,15 @@ class Transaction(object):
        entered but not yet exited transaction. If it is the last active
        transaction, the database updates are committed.
        """
        # Beware of races; currently secured by db._db_lock
        self.db.revision += self._mutated
        with self.db._tx_stack() as stack:
            assert stack.pop() is self
            empty = not stack
        if empty:
            # Ending a "root" transaction. End the SQLite transaction.
            self.db._connection().commit()
            self._mutated = False
            self.db._db_lock.release()

    def query(self, statement, subvals=()):
@@ -680,28 +862,52 @@ class Transaction(object):
        """Execute an SQL statement with substitution values and return
        the row ID of the last affected row.
        """
        cursor = self.db._connection().execute(statement, subvals)
        return cursor.lastrowid
        try:
            cursor = self.db._connection().execute(statement, subvals)
        except sqlite3.OperationalError as e:
            # In two specific cases, SQLite reports an error while accessing
            # the underlying database file. We surface these exceptions as
            # DBAccessError so the application can abort.
            if e.args[0] in ("attempt to write a readonly database",
                             "unable to open database file"):
                raise DBAccessError(e.args[0])
            else:
                raise
        else:
            self._mutated = True
            return cursor.lastrowid

    def script(self, statements):
        """Execute a string containing multiple SQL statements."""
        # We don't know whether this mutates, but quite likely it does.
        self._mutated = True
        self.db._connection().executescript(statements)

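Wrapping `sqlite3.OperationalError` in a domain error spares callers from string-matching SQLite messages themselves. A self-contained sketch of the same pattern, with `DBAccessError` defined locally as a stand-in:

import sqlite3

class DBAccessError(Exception):
    """Raised when the underlying database file cannot be accessed."""

def mutate(conn, statement, subvals=()):
    # Translate the two file-access failures into DBAccessError;
    # re-raise anything else unchanged.
    try:
        cursor = conn.execute(statement, subvals)
    except sqlite3.OperationalError as e:
        if e.args[0] in ("attempt to write a readonly database",
                         "unable to open database file"):
            raise DBAccessError(e.args[0])
        raise
    return cursor.lastrowid

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE t (x)')
print(mutate(conn, 'INSERT INTO t VALUES (?)', (1,)))  # row id 1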
class Database(object):
class Database:
    """A container for Model objects that wraps an SQLite database as
    the backend.
    """

    _models = ()
    """The Model subclasses representing tables in this database.
    """

    supports_extensions = hasattr(sqlite3.Connection, 'enable_load_extension')
    """Whether or not the current version of SQLite supports extensions"""

    revision = 0
    """The current revision of the database. To be increased whenever
    data is written in a transaction.
    """

    def __init__(self, path, timeout=5.0):
        self.path = path
        self.timeout = timeout

        self._connections = {}
        self._tx_stacks = defaultdict(list)
        self._extensions = []

        # A lock to protect the _connections and _tx_stacks maps, which
        # both map thread IDs to private resources.
@@ -751,6 +957,13 @@ class Database(object):
            py3_path(self.path), timeout=self.timeout
        )

        if self.supports_extensions:
            conn.enable_load_extension(True)

            # Load any extensions that are already loaded for other connections.
            for path in self._extensions:
                conn.load_extension(path)

        # Access SELECT results like dictionaries.
        conn.row_factory = sqlite3.Row
        return conn
@@ -779,6 +992,18 @@ class Database(object):
        """
        return Transaction(self)

    def load_extension(self, path):
        """Load an SQLite extension into all open connections."""
        if not self.supports_extensions:
            raise ValueError(
                'this sqlite3 installation does not support extensions')

        self._extensions.append(path)

        # Load the extension into every open connection.
        for conn in self._connections.values():
            conn.load_extension(path)

    # Schema setup and migration.

    def _make_table(self, table, fields):
@@ -788,7 +1013,7 @@ class Database(object):
        # Get current schema.
        with self.transaction() as tx:
            rows = tx.query('PRAGMA table_info(%s)' % table)
        current_fields = set([row[1] for row in rows])
        current_fields = {row[1] for row in rows}

        field_names = set(fields.keys())
        if current_fields.issuperset(field_names):
@@ -799,9 +1024,9 @@ class Database(object):
            # No table exists.
            columns = []
            for name, typ in fields.items():
                columns.append('{0} {1}'.format(name, typ.sql))
            setup_sql = 'CREATE TABLE {0} ({1});\n'.format(table,
                                                           ', '.join(columns))
                columns.append(f'{name} {typ.sql}')
            setup_sql = 'CREATE TABLE {} ({});\n'.format(table,
                                                         ', '.join(columns))

        else:
            # Table exists but does not match the field set.
@@ -809,7 +1034,7 @@ class Database(object):
            for name, typ in fields.items():
                if name in current_fields:
                    continue
                setup_sql += 'ALTER TABLE {0} ADD COLUMN {1} {2};\n'.format(
                setup_sql += 'ALTER TABLE {} ADD COLUMN {} {};\n'.format(
                    table, name, typ.sql
                )

@@ -845,17 +1070,31 @@ class Database(object):
        where, subvals = query.clause()
        order_by = sort.order_clause()

        sql = ("SELECT * FROM {0} WHERE {1} {2}").format(
        sql = ("SELECT * FROM {} WHERE {} {}").format(
            model_cls._table,
            where or '1',
            "ORDER BY {0}".format(order_by) if order_by else '',
            f"ORDER BY {order_by}" if order_by else '',
        )

        # Fetch flexible attributes for items matching the main query.
        # Doing the per-item filtering in python is faster than issuing
        # one query per item to sqlite.
        flex_sql = ("""
            SELECT * FROM {} WHERE entity_id IN
                (SELECT id FROM {} WHERE {});
            """.format(
            model_cls._flex_table,
            model_cls._table,
            where or '1',
        )
        )

        with self.transaction() as tx:
            rows = tx.query(sql, subvals)
            flex_rows = tx.query(flex_sql, subvals)

        return Results(
            model_cls, rows, self,
            model_cls, rows, self, flex_rows,
            None if where else query,  # Slow query component.
            sort if sort.is_slow() else None,  # Slow sort component.
        )
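Fetching flexible attributes for all matched items through one `IN (SELECT ...)` subquery replaces an N+1 query pattern. Sketched against a throwaway schema (table and column names below are invented for the example):

import sqlite3

# Sketch: one subselect fetches flexible attributes for every matched
# item, instead of one query per item (the N+1 pattern this replaces).
conn = sqlite3.connect(':memory:')
conn.executescript("""
    CREATE TABLE items (id INTEGER PRIMARY KEY, artist TEXT);
    CREATE TABLE item_attributes (entity_id INTEGER, key TEXT, value TEXT);
    INSERT INTO items VALUES (1, 'a'), (2, 'b');
    INSERT INTO item_attributes VALUES (1, 'mood', 'calm'), (2, 'mood', 'loud');
""")
where = "artist = ?"
flex_sql = (
    "SELECT * FROM item_attributes WHERE entity_id IN "
    "(SELECT id FROM items WHERE {})".format(where)
)
print(conn.execute(flex_sql, ('a',)).fetchall())  # [(1, 'mood', 'calm')]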
230
lib/beets/dbcore/query.py
Executable file → Normal file
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -15,7 +14,6 @@

"""The Query type hierarchy for DBCore.
"""
from __future__ import division, absolute_import, print_function

import re
from operator import mul
@@ -23,10 +21,6 @@ from beets import util
from datetime import datetime, timedelta
import unicodedata
from functools import reduce
import six

if not six.PY2:
    buffer = memoryview  # sqlite won't accept memoryview in python 2


class ParsingError(ValueError):
@@ -40,29 +34,32 @@ class InvalidQueryError(ParsingError):

    The query should be a unicode string or a list, which will be space-joined.
    """

    def __init__(self, query, explanation):
        if isinstance(query, list):
            query = " ".join(query)
        message = u"'{0}': {1}".format(query, explanation)
        super(InvalidQueryError, self).__init__(message)
        message = f"'{query}': {explanation}"
        super().__init__(message)


class InvalidQueryArgumentTypeError(ParsingError):
class InvalidQueryArgumentValueError(ParsingError):
    """Represent a query argument that could not be converted as expected.

    It exists to be caught in upper stack levels so a meaningful (i.e. with the
    query) InvalidQueryError can be raised.
    """

    def __init__(self, what, expected, detail=None):
        message = u"'{0}' is not {1}".format(what, expected)
        message = f"'{what}' is not {expected}"
        if detail:
            message = u"{0}: {1}".format(message, detail)
        super(InvalidQueryArgumentTypeError, self).__init__(message)
            message = f"{message}: {detail}"
        super().__init__(message)


class Query(object):
class Query:
    """An abstract class representing a query into the item database.
    """

    def clause(self):
        """Generate an SQLite expression implementing the query.

@@ -79,7 +76,7 @@ class Query(object):
        raise NotImplementedError

    def __repr__(self):
        return "{0.__class__.__name__}()".format(self)
        return f"{self.__class__.__name__}()"

    def __eq__(self, other):
        return type(self) == type(other)
@@ -95,6 +92,7 @@ class FieldQuery(Query):
    string. Subclasses may also provide `col_clause` to implement the
    same matching functionality in SQLite.
    """

    def __init__(self, field, pattern, fast=True):
        self.field = field
        self.pattern = pattern
@@ -125,7 +123,7 @@ class FieldQuery(Query):
                "{0.fast})".format(self))

    def __eq__(self, other):
        return super(FieldQuery, self).__eq__(other) and \
        return super().__eq__(other) and \
            self.field == other.field and self.pattern == other.pattern

    def __hash__(self):
@@ -134,6 +132,7 @@ class FieldQuery(Query):

class MatchQuery(FieldQuery):
    """A query that looks for exact matches in an item field."""

    def col_clause(self):
        return self.field + " = ?", [self.pattern]

@@ -143,19 +142,16 @@ class MatchQuery(FieldQuery):


class NoneQuery(FieldQuery):
    """A query that checks whether a field is null."""

    def __init__(self, field, fast=True):
        super(NoneQuery, self).__init__(field, None, fast)
        super().__init__(field, None, fast)

    def col_clause(self):
        return self.field + " IS NULL", ()

    @classmethod
    def match(cls, item):
        try:
            return item[cls.field] is None
        except KeyError:
            return True
    def match(self, item):
        return item.get(self.field) is None

    def __repr__(self):
        return "{0.__class__.__name__}({0.field!r}, {0.fast})".format(self)
@@ -165,6 +161,7 @@ class StringFieldQuery(FieldQuery):
    """A FieldQuery that converts values to strings before matching
    them.
    """

    @classmethod
    def value_match(cls, pattern, value):
        """Determine whether the value matches the pattern. The value
@@ -182,11 +179,12 @@ class StringFieldQuery(FieldQuery):

class SubstringQuery(StringFieldQuery):
    """A query that matches a substring in a specific item field."""

    def col_clause(self):
        pattern = (self.pattern
                   .replace('\\', '\\\\')
                   .replace('%', '\\%')
                   .replace('_', '\\_'))
                   .replace('\\', '\\\\')
                   .replace('%', '\\%')
                   .replace('_', '\\_'))
        search = '%' + pattern + '%'
        clause = self.field + " like ? escape '\\'"
        subvals = [search]
@@ -204,16 +202,17 @@ class RegexpQuery(StringFieldQuery):
    Raises InvalidQueryError when the pattern is not a valid regular
    expression.
    """

    def __init__(self, field, pattern, fast=True):
        super(RegexpQuery, self).__init__(field, pattern, fast)
        super().__init__(field, pattern, fast)
        pattern = self._normalize(pattern)
        try:
            self.pattern = re.compile(self.pattern)
        except re.error as exc:
            # Invalid regular expression.
            raise InvalidQueryArgumentTypeError(pattern,
                                                u"a regular expression",
                                                format(exc))
            raise InvalidQueryArgumentValueError(pattern,
                                                 "a regular expression",
                                                 format(exc))

    @staticmethod
    def _normalize(s):
@@ -231,9 +230,10 @@ class BooleanQuery(MatchQuery):
    """Matches a boolean field. Pattern should either be a boolean or a
    string reflecting a boolean.
    """

    def __init__(self, field, pattern, fast=True):
        super(BooleanQuery, self).__init__(field, pattern, fast)
        if isinstance(pattern, six.string_types):
        super().__init__(field, pattern, fast)
        if isinstance(pattern, str):
            self.pattern = util.str2bool(pattern)
        self.pattern = int(self.pattern)

@@ -244,17 +244,18 @@ class BytesQuery(MatchQuery):
    `unicode` equivalently in Python 2. Always use this query instead of
    `MatchQuery` when matching on BLOB values.
    """

    def __init__(self, field, pattern):
        super(BytesQuery, self).__init__(field, pattern)
        super().__init__(field, pattern)

        # Use a buffer/memoryview representation of the pattern for SQLite
        # matching. This instructs SQLite to treat the blob as binary
        # rather than encoded Unicode.
        if isinstance(self.pattern, (six.text_type, bytes)):
            if isinstance(self.pattern, six.text_type):
        if isinstance(self.pattern, (str, bytes)):
            if isinstance(self.pattern, str):
                self.pattern = self.pattern.encode('utf-8')
            self.buf_pattern = buffer(self.pattern)
        elif isinstance(self.pattern, buffer):
            self.buf_pattern = memoryview(self.pattern)
        elif isinstance(self.pattern, memoryview):
            self.buf_pattern = self.pattern
            self.pattern = bytes(self.pattern)
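With the Python 2 `buffer` shim gone, `memoryview` (or plain `bytes`) is what `sqlite3` binds as a BLOB, which makes SQLite compare the column as binary rather than encoded text. A small sketch:

import sqlite3

# Sketch: binding bytes/memoryview makes SQLite compare the column as
# a binary blob rather than as encoded text.
conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE t (path BLOB)')
conn.execute('INSERT INTO t VALUES (?)', (b'/music/a.mp3',))

pattern = memoryview(b'/music/a.mp3')
row = conn.execute('SELECT path FROM t WHERE path = ?', (pattern,)).fetchone()
assert bytes(row[0]) == b'/music/a.mp3'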
@@ -270,6 +271,7 @@ class NumericQuery(FieldQuery):
    Raises InvalidQueryError when the pattern does not represent an int or
    a float.
    """

    def _convert(self, s):
        """Convert a string to a numeric type (float or int).

@@ -285,10 +287,10 @@ class NumericQuery(FieldQuery):
        try:
            return float(s)
        except ValueError:
            raise InvalidQueryArgumentTypeError(s, u"an int or a float")
            raise InvalidQueryArgumentValueError(s, "an int or a float")

    def __init__(self, field, pattern, fast=True):
        super(NumericQuery, self).__init__(field, pattern, fast)
        super().__init__(field, pattern, fast)

        parts = pattern.split('..', 1)
        if len(parts) == 1:
@@ -306,7 +308,7 @@ class NumericQuery(FieldQuery):
        if self.field not in item:
            return False
        value = item[self.field]
        if isinstance(value, six.string_types):
        if isinstance(value, str):
            value = self._convert(value)

        if self.point is not None:
@@ -323,20 +325,21 @@ class NumericQuery(FieldQuery):
            return self.field + '=?', (self.point,)
        else:
            if self.rangemin is not None and self.rangemax is not None:
                return (u'{0} >= ? AND {0} <= ?'.format(self.field),
                return ('{0} >= ? AND {0} <= ?'.format(self.field),
                        (self.rangemin, self.rangemax))
            elif self.rangemin is not None:
                return u'{0} >= ?'.format(self.field), (self.rangemin,)
                return f'{self.field} >= ?', (self.rangemin,)
            elif self.rangemax is not None:
                return u'{0} <= ?'.format(self.field), (self.rangemax,)
                return f'{self.field} <= ?', (self.rangemax,)
            else:
                return u'1', ()
                return '1', ()


class CollectionQuery(Query):
    """An abstract query class that aggregates other queries. Can be
    indexed like a list to access the sub-queries.
    """

    def __init__(self, subqueries=()):
        self.subqueries = subqueries

@@ -374,7 +377,7 @@ class CollectionQuery(Query):
        return "{0.__class__.__name__}({0.subqueries!r})".format(self)

    def __eq__(self, other):
        return super(CollectionQuery, self).__eq__(other) and \
        return super().__eq__(other) and \
            self.subqueries == other.subqueries

    def __hash__(self):
@@ -389,6 +392,7 @@ class AnyFieldQuery(CollectionQuery):
    any field. The individual field query class is provided to the
    constructor.
    """

    def __init__(self, pattern, fields, cls):
        self.pattern = pattern
        self.fields = fields
@@ -397,7 +401,7 @@ class AnyFieldQuery(CollectionQuery):
        subqueries = []
        for field in self.fields:
            subqueries.append(cls(field, pattern, True))
        super(AnyFieldQuery, self).__init__(subqueries)
        super().__init__(subqueries)

    def clause(self):
        return self.clause_with_joiner('or')
@@ -413,7 +417,7 @@ class AnyFieldQuery(CollectionQuery):
                "{0.query_class.__name__})".format(self))

    def __eq__(self, other):
        return super(AnyFieldQuery, self).__eq__(other) and \
        return super().__eq__(other) and \
            self.query_class == other.query_class

    def __hash__(self):
@@ -424,6 +428,7 @@ class MutableCollectionQuery(CollectionQuery):
    """A collection query whose subqueries may be modified after the
    query is initialized.
    """

    def __setitem__(self, key, value):
        self.subqueries[key] = value

@@ -433,33 +438,36 @@ class MutableCollectionQuery(CollectionQuery):

class AndQuery(MutableCollectionQuery):
    """A conjunction of a list of other queries."""

    def clause(self):
        return self.clause_with_joiner('and')

    def match(self, item):
        return all([q.match(item) for q in self.subqueries])
        return all(q.match(item) for q in self.subqueries)


class OrQuery(MutableCollectionQuery):
    """A disjunction of a list of other queries."""

    def clause(self):
        return self.clause_with_joiner('or')

    def match(self, item):
        return any([q.match(item) for q in self.subqueries])
        return any(q.match(item) for q in self.subqueries)


class NotQuery(Query):
    """A query that matches the negation of its `subquery`, as a shortcut for
    performing `not(subquery)` without using regular expressions.
    """

    def __init__(self, subquery):
        self.subquery = subquery

    def clause(self):
        clause, subvals = self.subquery.clause()
        if clause:
            return 'not ({0})'.format(clause), subvals
            return f'not ({clause})', subvals
        else:
            # If there is no clause, there is nothing to negate. All the logic
            # is handled by match() for slow queries.
@@ -472,7 +480,7 @@ class NotQuery(Query):
        return "{0.__class__.__name__}({0.subquery!r})".format(self)

    def __eq__(self, other):
        return super(NotQuery, self).__eq__(other) and \
        return super().__eq__(other) and \
            self.subquery == other.subquery

    def __hash__(self):
@@ -481,6 +489,7 @@ class NotQuery(Query):

class TrueQuery(Query):
    """A query that always matches."""

    def clause(self):
        return '1', ()

@@ -490,6 +499,7 @@ class TrueQuery(Query):

class FalseQuery(Query):
    """A query that never matches."""

    def clause(self):
        return '0', ()

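Dropping the inner list in `all(...)`/`any(...)` lets matching short-circuit: evaluation stops at the first decisive subquery instead of building a full list first. For example:

# Sketch: generator arguments to all()/any() short-circuit, so later
# (possibly expensive) predicates never run once the result is known.
calls = []

def expensive(n):
    calls.append(n)
    return n < 2

assert not all(expensive(n) for n in range(10))
assert calls == [0, 1, 2]  # stopped at the first False, not all ten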
@@ -526,42 +536,88 @@ def _parse_periods(pattern):
    return (start, end)


class Period(object):
class Period:
    """A period of time given by a date, time and precision.

    Example: 2014-01-01 10:50:30 with precision 'month' represents all
    instants of time during January 2014.
    """

    precisions = ('year', 'month', 'day')
    date_formats = ('%Y', '%Y-%m', '%Y-%m-%d')
    precisions = ('year', 'month', 'day', 'hour', 'minute', 'second')
    date_formats = (
        ('%Y',),  # year
        ('%Y-%m',),  # month
        ('%Y-%m-%d',),  # day
        ('%Y-%m-%dT%H', '%Y-%m-%d %H'),  # hour
        ('%Y-%m-%dT%H:%M', '%Y-%m-%d %H:%M'),  # minute
        ('%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S')  # second
    )
    relative_units = {'y': 365, 'm': 30, 'w': 7, 'd': 1}
    relative_re = '(?P<sign>[+|-]?)(?P<quantity>[0-9]+)' + \
        '(?P<timespan>[y|m|w|d])'

    def __init__(self, date, precision):
        """Create a period with the given date (a `datetime` object) and
        precision (a string, one of "year", "month", or "day").
        precision (a string, one of "year", "month", "day", "hour", "minute",
        or "second").
        """
        if precision not in Period.precisions:
            raise ValueError(u'Invalid precision {0}'.format(precision))
            raise ValueError(f'Invalid precision {precision}')
        self.date = date
        self.precision = precision

    @classmethod
    def parse(cls, string):
        """Parse a date and return a `Period` object or `None` if the
        string is empty.
        string is empty, or raise an InvalidQueryArgumentValueError if
        the string cannot be parsed to a date.

        The date may be absolute or relative. Absolute dates look like
        `YYYY`, or `YYYY-MM-DD`, or `YYYY-MM-DD HH:MM:SS`, etc. Relative
        dates have three parts:

        - Optionally, a ``+`` or ``-`` sign indicating the future or the
          past. The default is the future.
        - A number: how much to add or subtract.
        - A letter indicating the unit: days, weeks, months or years
          (``d``, ``w``, ``m`` or ``y``). A "month" is exactly 30 days
          and a "year" is exactly 365 days.
        """

        def find_date_and_format(string):
            for ord, format in enumerate(cls.date_formats):
                for format_option in format:
                    try:
                        date = datetime.strptime(string, format_option)
                        return date, ord
                    except ValueError:
                        # Parsing failed.
                        pass
            return (None, None)

        if not string:
            return None
        ordinal = string.count('-')
        if ordinal >= len(cls.date_formats):
            # Too many components.
            return None
        date_format = cls.date_formats[ordinal]
        try:
            date = datetime.strptime(string, date_format)
        except ValueError:
            # Parsing failed.
            return None

        # Check for a relative date.
        match_dq = re.match(cls.relative_re, string)
        if match_dq:
            sign = match_dq.group('sign')
            quantity = match_dq.group('quantity')
            timespan = match_dq.group('timespan')

            # Add or subtract the given amount of time from the current
            # date.
            multiplier = -1 if sign == '-' else 1
            days = cls.relative_units[timespan]
            date = datetime.now() + \
                timedelta(days=int(quantity) * days) * multiplier
            return cls(date, cls.precisions[5])

        # Check for an absolute date.
        date, ordinal = find_date_and_format(string)
        if date is None:
            raise InvalidQueryArgumentValueError(string,
                                                 'a valid date/time string')
        precision = cls.precisions[ordinal]
        return cls(date, precision)
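Each precision now carries a tuple of acceptable `strptime` formats, so `2014-02-01T10:32` and `2014-02-01 10:32` both parse at minute precision. The lookup reduces to a sketch like this:

from datetime import datetime

# Sketch: try each format option per precision until one parses; the
# index of the matching group is the precision ordinal.
date_formats = (
    ('%Y',), ('%Y-%m',), ('%Y-%m-%d',),
    ('%Y-%m-%dT%H', '%Y-%m-%d %H'),
    ('%Y-%m-%dT%H:%M', '%Y-%m-%d %H:%M'),
    ('%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S'),
)

def find_date_and_format(string):
    for ordinal, options in enumerate(date_formats):
        for fmt in options:
            try:
                return datetime.strptime(string, fmt), ordinal
            except ValueError:
                pass
    return None, None

print(find_date_and_format('2014-02-01T10:32'))  # minute precision (ordinal 4)
print(find_date_and_format('2014-02-01 10:32'))  # same, space-separated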
@@ -580,11 +636,17 @@ class Period(object):
            return date.replace(year=date.year + 1, month=1)
        elif 'day' == precision:
            return date + timedelta(days=1)
        elif 'hour' == precision:
            return date + timedelta(hours=1)
        elif 'minute' == precision:
            return date + timedelta(minutes=1)
        elif 'second' == precision:
            return date + timedelta(seconds=1)
        else:
            raise ValueError(u'unhandled precision {0}'.format(precision))
            raise ValueError(f'unhandled precision {precision}')


class DateInterval(object):
class DateInterval:
    """A closed-open interval of dates.

    A left endpoint of None means since the beginning of time.
@@ -593,7 +655,7 @@ class DateInterval(object):

    def __init__(self, start, end):
        if start is not None and end is not None and not start < end:
            raise ValueError(u"start date {0} is not before end date {1}"
            raise ValueError("start date {} is not before end date {}"
                             .format(start, end))
        self.start = start
        self.end = end
@@ -614,7 +676,7 @@ class DateInterval(object):
        return True

    def __str__(self):
        return '[{0}, {1})'.format(self.start, self.end)
        return f'[{self.start}, {self.end})'


class DateQuery(FieldQuery):
@@ -626,8 +688,9 @@ class DateQuery(FieldQuery):
    The value of a date field can be matched against a date interval by
    using an ellipsis interval syntax similar to that of NumericQuery.
    """

    def __init__(self, field, pattern, fast=True):
        super(DateQuery, self).__init__(field, pattern, fast)
        super().__init__(field, pattern, fast)
        start, end = _parse_periods(pattern)
        self.interval = DateInterval.from_periods(start, end)

@@ -635,7 +698,7 @@ class DateQuery(FieldQuery):
        if self.field not in item:
            return False
        timestamp = float(item[self.field])
        date = datetime.utcfromtimestamp(timestamp)
        date = datetime.fromtimestamp(timestamp)
        return self.interval.contains(date)

    _clause_tmpl = "{0} {1} ?"
@@ -669,6 +732,7 @@ class DurationQuery(NumericQuery):
    Raises InvalidQueryError when the pattern does not represent an int, float
    or M:SS time interval.
    """

    def _convert(self, s):
        """Convert a M:SS or numeric string to a float.

@@ -683,14 +747,14 @@ class DurationQuery(NumericQuery):
        try:
            return float(s)
        except ValueError:
            raise InvalidQueryArgumentTypeError(
            raise InvalidQueryArgumentValueError(
                s,
                u"a M:SS string or a float")
                "a M:SS string or a float")


# Sorting.

class Sort(object):
class Sort:
    """An abstract class representing a sort operation for a query into
    the item database.
    """
@@ -777,13 +841,13 @@ class MultipleSort(Sort):
        return items

    def __repr__(self):
        return 'MultipleSort({!r})'.format(self.sorts)
        return f'MultipleSort({self.sorts!r})'

    def __hash__(self):
        return hash(tuple(self.sorts))

    def __eq__(self, other):
        return super(MultipleSort, self).__eq__(other) and \
        return super().__eq__(other) and \
            self.sorts == other.sorts


@@ -791,6 +855,7 @@ class FieldSort(Sort):
    """An abstract sort criterion that orders by a specific field (of
    any kind).
    """

    def __init__(self, field, ascending=True, case_insensitive=True):
        self.field = field
        self.ascending = ascending
@@ -803,14 +868,14 @@ class FieldSort(Sort):

        def key(item):
            field_val = item.get(self.field, '')
            if self.case_insensitive and isinstance(field_val, six.text_type):
            if self.case_insensitive and isinstance(field_val, str):
                field_val = field_val.lower()
            return field_val

        return sorted(objs, key=key, reverse=not self.ascending)

    def __repr__(self):
        return '<{0}: {1}{2}>'.format(
        return '<{}: {}{}>'.format(
            type(self).__name__,
            self.field,
            '+' if self.ascending else '-',
@@ -820,7 +885,7 @@ class FieldSort(Sort):
        return hash((self.field, self.ascending))

    def __eq__(self, other):
        return super(FieldSort, self).__eq__(other) and \
        return super().__eq__(other) and \
            self.field == other.field and \
            self.ascending == other.ascending

@@ -828,6 +893,7 @@ class FieldSort(Sort):
class FixedFieldSort(FieldSort):
    """Sort object to sort on a fixed field.
    """

    def order_clause(self):
        order = "ASC" if self.ascending else "DESC"
        if self.case_insensitive:
@@ -837,19 +903,21 @@ class FixedFieldSort(FieldSort):
                    'ELSE {0} END)'.format(self.field)
        else:
            field = self.field
        return "{0} {1}".format(field, order)
        return f"{field} {order}"


class SlowFieldSort(FieldSort):
    """A sort criterion by some model field other than a fixed field:
    i.e., a computed or flexible field.
    """

    def is_slow(self):
        return True


class NullSort(Sort):
    """No sorting. Leave results unsorted."""

    def sort(self, items):
        return items

70
lib/beets/dbcore/queryparse.py
Executable file → Normal file
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -15,12 +14,10 @@

"""Parsing of strings into DBCore queries.
"""
from __future__ import division, absolute_import, print_function

import re
import itertools
from . import query
import beets

PARSE_QUERY_PART_REGEX = re.compile(
    # Non-capturing optional segment for the keyword.
@@ -89,7 +86,7 @@ def parse_query_part(part, query_classes={}, prefixes={},
    assert match  # Regex should always match
    negate = bool(match.group(1))
    key = match.group(2)
    term = match.group(3).replace('\:', ':')
    term = match.group(3).replace('\\:', ':')

    # Check whether there's a prefix in the query and use the
    # corresponding query type.
@@ -119,12 +116,13 @@ def construct_query_part(model_cls, prefixes, query_part):
    if not query_part:
        return query.TrueQuery()

    # Use `model_cls` to build up a map from field names to `Query`
    # classes.
    # Use `model_cls` to build up a map from field (or query) names to
    # `Query` classes.
    query_classes = {}
    for k, t in itertools.chain(model_cls._fields.items(),
                                model_cls._types.items()):
        query_classes[k] = t.query
    query_classes.update(model_cls._queries)  # Non-field queries.

    # Parse the string.
    key, pattern, query_class, negate = \
@@ -137,26 +135,27 @@ def construct_query_part(model_cls, prefixes, query_part):
        # The query type matches a specific field, but none was
        # specified. So we use a version of the query that matches
        # any field.
        q = query.AnyFieldQuery(pattern, model_cls._search_fields,
                                query_class)
        if negate:
            return query.NotQuery(q)
        else:
            return q
        out_query = query.AnyFieldQuery(pattern, model_cls._search_fields,
                                        query_class)
    else:
        # Non-field query type.
        if negate:
            return query.NotQuery(query_class(pattern))
        else:
            return query_class(pattern)
        out_query = query_class(pattern)

    # Otherwise, this must be a `FieldQuery`. Use the field name to
    # construct the query object.
    key = key.lower()
    q = query_class(key.lower(), pattern, key in model_cls._fields)
    # Field queries get constructed according to the name of the field
    # they are querying.
    elif issubclass(query_class, query.FieldQuery):
        key = key.lower()
        out_query = query_class(key.lower(), pattern, key in model_cls._fields)

    # Non-field (named) query.
    else:
        out_query = query_class(pattern)

    # Apply negation.
    if negate:
        return query.NotQuery(q)
    return q
        return query.NotQuery(out_query)
    else:
        return out_query
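Collecting the constructed query into `out_query` and negating once at the end removes three duplicated `if negate:` branches. The shape of the refactor in isolation, with placeholder query values:

# Sketch: build the query first, then apply negation exactly once,
# instead of repeating the negate branch in every constructor path.
def build(kind, pattern, negate):
    if kind == 'any':
        out_query = ('any', pattern)
    elif kind == 'field':
        out_query = ('field', pattern)
    else:
        out_query = ('named', pattern)

    if negate:
        return ('not', out_query)
    return out_query

assert build('field', 'beatles', True) == ('not', ('field', 'beatles'))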
def query_from_strings(query_cls, model_cls, prefixes, query_parts):
@@ -172,11 +171,13 @@ def query_from_strings(query_cls, model_cls, prefixes, query_parts):
    return query_cls(subqueries)


def construct_sort_part(model_cls, part):
def construct_sort_part(model_cls, part, case_insensitive=True):
    """Create a `Sort` from a single string criterion.

    `model_cls` is the `Model` being queried. `part` is a single string
    ending in ``+`` or ``-`` indicating the sort.
    ending in ``+`` or ``-`` indicating the sort. `case_insensitive`
    indicates whether or not the sort should be performed in a case
    sensitive manner.
    """
    assert part, "part must be a field name and + or -"
    field = part[:-1]
@@ -185,7 +186,6 @@ def construct_sort_part(model_cls, part):
    assert direction in ('+', '-'), "part must end with + or -"
    is_ascending = direction == '+'

    case_insensitive = beets.config['sort_case_insensitive'].get(bool)
    if field in model_cls._sorts:
        sort = model_cls._sorts[field](model_cls, is_ascending,
                                       case_insensitive)
@@ -197,21 +197,23 @@ def construct_sort_part(model_cls, part):
    return sort


def sort_from_strings(model_cls, sort_parts):
def sort_from_strings(model_cls, sort_parts, case_insensitive=True):
    """Create a `Sort` from a list of sort criteria (strings).
    """
    if not sort_parts:
        sort = query.NullSort()
    elif len(sort_parts) == 1:
        sort = construct_sort_part(model_cls, sort_parts[0])
        sort = construct_sort_part(model_cls, sort_parts[0], case_insensitive)
    else:
        sort = query.MultipleSort()
        for part in sort_parts:
            sort.add_sort(construct_sort_part(model_cls, part))
            sort.add_sort(construct_sort_part(model_cls, part,
                                              case_insensitive))
    return sort


def parse_sorted_query(model_cls, parts, prefixes={}):
def parse_sorted_query(model_cls, parts, prefixes={},
                       case_insensitive=True):
    """Given a list of strings, create the `Query` and `Sort` that they
    represent.
    """
@@ -222,8 +224,8 @@ def parse_sorted_query(model_cls, parts, prefixes={}):
    # Split up query into comma-separated subqueries, each representing
    # an AndQuery, which need to be joined together in one OrQuery
    subquery_parts = []
    for part in parts + [u',']:
        if part.endswith(u','):
    for part in parts + [',']:
        if part.endswith(','):
            # Ensure we can catch "foo, bar" as well as "foo , bar"
            last_subquery_part = part[:-1]
            if last_subquery_part:
@@ -237,8 +239,8 @@ def parse_sorted_query(model_cls, parts, prefixes={}):
        else:
            # Sort parts (1) end in + or -, (2) don't have a field, and
            # (3) consist of more than just the + or -.
            if part.endswith((u'+', u'-')) \
                    and u':' not in part \
            if part.endswith(('+', '-')) \
                    and ':' not in part \
                    and len(part) > 1:
                sort_parts.append(part)
            else:
@@ -246,5 +248,5 @@ def parse_sorted_query(model_cls, parts, prefixes={}):

    # Avoid needlessly wrapping single statements in an OR
    q = query.OrQuery(query_parts) if len(query_parts) > 1 else query_parts[0]
    s = sort_from_strings(model_cls, sort_parts)
    s = sort_from_strings(model_cls, sort_parts, case_insensitive)
    return q, s
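Threading `case_insensitive` through as a parameter, instead of reading `beets.config` inside `construct_sort_part`, means the parsers no longer depend on a loaded global configuration. The same injection pattern in miniature:

# Sketch of the dependency-injection change: the flag is resolved once
# by the caller and passed down, instead of read from global config at
# the bottom of the call chain.
def sort_key(value, case_insensitive=True):
    return value.lower() if case_insensitive and isinstance(value, str) else value

def sorted_names(names, case_insensitive=True):
    return sorted(names, key=lambda v: sort_key(v, case_insensitive))

print(sorted_names(['a', 'B']))                          # ['a', 'B']
print(sorted_names(['a', 'B'], case_insensitive=False))  # ['B', 'a']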
70
lib/beets/dbcore/types.py
Executable file → Normal file
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -15,25 +14,20 @@

"""Representation of type information for DBCore model fields.
"""
from __future__ import division, absolute_import, print_function

from . import query
from beets.util import str2bool
import six

if not six.PY2:
    buffer = memoryview  # sqlite won't accept memoryview in python 2


# Abstract base.

class Type(object):
class Type:
    """An object encapsulating the type of a model field. Includes
    information about how to store, query, format, and parse a given
    field.
    """

    sql = u'TEXT'
    sql = 'TEXT'
    """The SQLite column type for the value.
    """

@@ -41,7 +35,7 @@ class Type(object):
    """The `Query` subclass to be used when querying the field.
    """

    model_type = six.text_type
    model_type = str
    """The Python type that is used to represent the value in the model.

    The model is guaranteed to return a value of this type if the field
@@ -63,11 +57,11 @@ class Type(object):
            value = self.null
        # `self.null` might be `None`
        if value is None:
            value = u''
            value = ''
        if isinstance(value, bytes):
            value = value.decode('utf-8', 'ignore')

        return six.text_type(value)
        return str(value)

    def parse(self, string):
        """Parse a (possibly human-written) string and return the
@@ -97,16 +91,16 @@ class Type(object):
        For fixed fields the type of `value` is determined by the column
        type affinity given in the `sql` property and the SQL to Python
        mapping of the database adapter. For more information see:
        http://www.sqlite.org/datatype3.html
        https://www.sqlite.org/datatype3.html
        https://docs.python.org/2/library/sqlite3.html#sqlite-and-python-types

        Flexible fields have the type affinity `TEXT`. This means the
        `sql_value` is either a `buffer`/`memoryview` or a `unicode` object
        `sql_value` is either a `memoryview` or a `unicode` object
        and the method must handle these in addition.
        """
        if isinstance(sql_value, buffer):
        if isinstance(sql_value, memoryview):
            sql_value = bytes(sql_value).decode('utf-8', 'ignore')
        if isinstance(sql_value, six.text_type):
        if isinstance(sql_value, str):
            return self.parse(sql_value)
        else:
            return self.normalize(sql_value)
@@ -127,10 +121,18 @@ class Default(Type):
class Integer(Type):
    """A basic integer type.
    """
    sql = u'INTEGER'
    sql = 'INTEGER'
    query = query.NumericQuery
    model_type = int

    def normalize(self, value):
        try:
            return self.model_type(round(float(value)))
        except ValueError:
            return self.null
        except TypeError:
            return self.null


class PaddedInt(Integer):
    """An integer field that is formatted with a given number of digits,
@@ -140,19 +142,25 @@ class PaddedInt(Integer):
        self.digits = digits

    def format(self, value):
        return u'{0:0{1}d}'.format(value or 0, self.digits)
        return '{0:0{1}d}'.format(value or 0, self.digits)


class NullPaddedInt(PaddedInt):
    """Same as `PaddedInt`, but does not normalize `None` to `0.0`.
    """
    null = None


class ScaledInt(Integer):
    """An integer whose formatting operation scales the number by a
    constant and adds a suffix. Good for units with large magnitudes.
    """
    def __init__(self, unit, suffix=u''):
    def __init__(self, unit, suffix=''):
        self.unit = unit
        self.suffix = suffix

    def format(self, value):
        return u'{0}{1}'.format((value or 0) // self.unit, self.suffix)
        return '{}{}'.format((value or 0) // self.unit, self.suffix)


class Id(Integer):
@@ -163,18 +171,22 @@ class Id(Integer):

    def __init__(self, primary=True):
        if primary:
            self.sql = u'INTEGER PRIMARY KEY'
            self.sql = 'INTEGER PRIMARY KEY'


class Float(Type):
    """A basic floating-point type.
    """A basic floating-point type. The `digits` parameter specifies how
    many decimal places to use in the human-readable representation.
    """
    sql = u'REAL'
    sql = 'REAL'
    query = query.NumericQuery
    model_type = float

    def __init__(self, digits=1):
        self.digits = digits

    def format(self, value):
        return u'{0:.1f}'.format(value or 0.0)
        return '{0:.{1}f}'.format(value or 0, self.digits)


class NullFloat(Float):
@@ -186,19 +198,25 @@ class NullFloat(Float):
class String(Type):
    """A Unicode string type.
    """
    sql = u'TEXT'
    sql = 'TEXT'
    query = query.SubstringQuery

    def normalize(self, value):
        if value is None:
            return self.null
        else:
            return self.model_type(value)


class Boolean(Type):
    """A boolean type.
    """
    sql = u'INTEGER'
    sql = 'INTEGER'
    query = query.BooleanQuery
    model_type = bool

    def format(self, value):
        return six.text_type(bool(value))
        return str(bool(value))

    def parse(self, string):
        return str2bool(string)

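`Float.format` now honors a configurable digit count via a nested format spec: in `'{0:.{1}f}'`, the inner `{1}` supplies the precision for the outer float conversion. For instance:

# Sketch: the inner {1} supplies the precision for the outer float
# format, so one Type instance can carry its display digits.
def format_float(value, digits=1):
    return '{0:.{1}f}'.format(value or 0, digits)

assert format_float(3.14159) == '3.1'
assert format_float(3.14159, digits=3) == '3.142'
assert format_float(None) == '0.0'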
379
lib/beets/importer.py
Executable file → Normal file
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -13,7 +12,6 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

from __future__ import division, absolute_import, print_function

"""Provides the basic, interface-agnostic workflow for importing and
autotagging music files.
@@ -37,10 +35,10 @@ from beets import dbcore
from beets import plugins
from beets import util
from beets import config
from beets.util import pipeline, sorted_walk, ancestry
from beets.util import pipeline, sorted_walk, ancestry, MoveOperation
from beets.util import syspath, normpath, displayable_path
from enum import Enum
from beets import mediafile
import mediafile

action = Enum('action',
              ['SKIP', 'ASIS', 'TRACKS', 'APPLY', 'ALBUMS', 'RETAG'])
@@ -75,7 +73,7 @@ def _open_state():
        # unpickling, including ImportError. We use a catch-all
        # exception to avoid enumerating them all (the docs don't even have a
        # full list!).
        log.debug(u'state file could not be read: {0}', exc)
        log.debug('state file could not be read: {0}', exc)
        return {}


@@ -84,8 +82,8 @@ def _save_state(state):
    try:
        with open(config['statefile'].as_filename(), 'wb') as f:
            pickle.dump(state, f)
    except IOError as exc:
        log.error(u'state file could not be written: {0}', exc)
    except OSError as exc:
        log.error('state file could not be written: {0}', exc)


# Utilities for reading and writing the beets progress file, which
@@ -174,10 +172,11 @@ def history_get():

# Abstract session class.

class ImportSession(object):
class ImportSession:
    """Controls an import action. Subclasses should implement methods to
    communicate with the user or otherwise make decisions.
    """

    def __init__(self, lib, loghandler, paths, query):
        """Create a session. `lib` is a Library object. `loghandler` is a
        logging.Handler. Either `paths` or `query` is non-null and indicates
@@ -187,7 +186,9 @@ class ImportSession(object):
        self.logger = self._setup_logging(loghandler)
        self.paths = paths
        self.query = query
        self._is_resuming = dict()
        self._is_resuming = {}
        self._merged_items = set()
        self._merged_dirs = set()

        # Normalize the paths.
        if self.paths:
@@ -220,19 +221,31 @@ class ImportSession(object):
            iconfig['resume'] = False
            iconfig['incremental'] = False

        # Copy, move, link, and hardlink are mutually exclusive.
        if iconfig['reflink']:
            iconfig['reflink'] = iconfig['reflink'] \
                .as_choice(['auto', True, False])

        # Copy, move, reflink, link, and hardlink are mutually exclusive.
        if iconfig['move']:
            iconfig['copy'] = False
            iconfig['link'] = False
            iconfig['hardlink'] = False
            iconfig['reflink'] = False
        elif iconfig['link']:
            iconfig['copy'] = False
            iconfig['move'] = False
            iconfig['hardlink'] = False
            iconfig['reflink'] = False
        elif iconfig['hardlink']:
            iconfig['copy'] = False
            iconfig['move'] = False
            iconfig['link'] = False
            iconfig['reflink'] = False
        elif iconfig['reflink']:
            iconfig['copy'] = False
            iconfig['move'] = False
            iconfig['link'] = False
            iconfig['hardlink'] = False

        # Only delete when copying.
        if not iconfig['copy']:
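The five transfer modes are normalized so that at most one survives; whichever flag wins clears the other four. A table-driven sketch of an equivalent normalization (it simplifies by skipping the `reflink` `as_choice` resolution shown above):

# Sketch: table-driven version of the mutual-exclusion chain. The
# first truthy mode in priority order wins and all others are cleared.
MODES = ('move', 'link', 'hardlink', 'reflink', 'copy')

def normalize(config):
    winner = next((m for m in MODES if config.get(m)), None)
    for mode in MODES:
        config[mode] = (mode == winner) and config.get(mode)
    return config

cfg = normalize({'copy': True, 'move': True, 'reflink': 'auto'})
assert cfg == {'move': True, 'link': False, 'hardlink': False,
               'reflink': False, 'copy': False}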
@@ -244,7 +257,7 @@ class ImportSession(object):
        """Log a message about a given album to the importer log. The status
        should reflect the reason the album couldn't be tagged.
        """
        self.logger.info(u'{0} {1}', status, displayable_path(paths))
        self.logger.info('{0} {1}', status, displayable_path(paths))

    def log_choice(self, task, duplicate=False):
        """Logs the task's current choice if it should be logged. If
@@ -255,17 +268,17 @@ class ImportSession(object):
        if duplicate:
            # Duplicate: log all three choices (skip, keep both, and trump).
            if task.should_remove_duplicates:
                self.tag_log(u'duplicate-replace', paths)
                self.tag_log('duplicate-replace', paths)
            elif task.choice_flag in (action.ASIS, action.APPLY):
                self.tag_log(u'duplicate-keep', paths)
                self.tag_log('duplicate-keep', paths)
            elif task.choice_flag is (action.SKIP):
                self.tag_log(u'duplicate-skip', paths)
                self.tag_log('duplicate-skip', paths)
        else:
            # Non-duplicate: log "skip" and "asis" choices.
            if task.choice_flag is action.ASIS:
                self.tag_log(u'asis', paths)
                self.tag_log('asis', paths)
            elif task.choice_flag is action.SKIP:
                self.tag_log(u'skip', paths)
                self.tag_log('skip', paths)

    def should_resume(self, path):
        raise NotImplementedError
@@ -282,7 +295,7 @@ class ImportSession(object):
    def run(self):
        """Run the import task.
        """
        self.logger.info(u'import started {0}', time.asctime())
        self.logger.info('import started {0}', time.asctime())
        self.set_config(config['import'])

        # Set up the pipeline.
@@ -311,6 +324,8 @@ class ImportSession(object):
            stages += [import_asis(self)]

        # Plugin stages.
        for stage_func in plugins.early_import_stages():
            stages.append(plugin_stage(self, stage_func))
        for stage_func in plugins.import_stages():
            stages.append(plugin_stage(self, stage_func))

@@ -350,6 +365,24 @@ class ImportSession(object):
            self._history_dirs = history_get()
        return self._history_dirs

    def already_merged(self, paths):
        """Returns true if all the paths being imported were part of a merge
        during previous tasks.
        """
        for path in paths:
            if path not in self._merged_items \
                    and path not in self._merged_dirs:
                return False
        return True

    def mark_merged(self, paths):
        """Mark paths and directories as merged for future reimport tasks.
        """
        self._merged_items.update(paths)
        dirs = {os.path.dirname(path) if os.path.isfile(path) else path
                for path in paths}
        self._merged_dirs.update(dirs)

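`mark_merged` records both files and their parent directories, so a later task can recognize an already-merged import at either granularity. In miniature (this sketch assumes the inputs are file paths and skips the `os.path.isfile` check):

import os

# Sketch: remember merged files and their directories so reimports of
# either a file or its folder can be skipped.
merged_items, merged_dirs = set(), set()

def mark_merged(paths):
    merged_items.update(paths)
    merged_dirs.update(os.path.dirname(p) for p in paths)

def already_merged(paths):
    return all(p in merged_items or p in merged_dirs for p in paths)

mark_merged(['/music/album/1.mp3', '/music/album/2.mp3'])
assert already_merged(['/music/album/1.mp3'])
assert already_merged(['/music/album'])
assert not already_merged(['/music/other/3.mp3'])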
    def is_resuming(self, toppath):
        """Return `True` if user wants to resume import of this path.

@@ -367,7 +400,7 @@ class ImportSession(object):
        # Either accept immediately or prompt for input to decide.
        if self.want_resume is True or \
                self.should_resume(toppath):
            log.warning(u'Resuming interrupted import of {0}',
            log.warning('Resuming interrupted import of {0}',
                        util.displayable_path(toppath))
            self._is_resuming[toppath] = True
        else:
@@ -377,11 +410,12 @@ class ImportSession(object):

# The importer task class.

class BaseImportTask(object):
class BaseImportTask:
    """An abstract base class for importer tasks.

    Tasks flow through the importer pipeline. Each stage can update
    them. """

    def __init__(self, toppath, paths, items):
        """Create a task. The primary fields that define a task are:

@@ -419,7 +453,7 @@ class ImportTask(BaseImportTask):
    from the `candidates` list.

    * `find_duplicates()` Returns a list of albums from `lib` with the
    same artist and album name as the task.
      same artist and album name as the task.

    * `apply_metadata()` Sets the attributes of the items from the
      task's `match` attribute.
@@ -429,17 +463,22 @@ class ImportTask(BaseImportTask):
    * `manipulate_files()` Copy, move, and write files depending on the
      session configuration.

    * `set_fields()` Sets the fields given at CLI or configuration to
      the specified values.

    * `finalize()` Update the import progress and cleanup the file
      system.
    """

    def __init__(self, toppath, paths, items):
        super(ImportTask, self).__init__(toppath, paths, items)
        super().__init__(toppath, paths, items)
        self.choice_flag = None
        self.cur_album = None
        self.cur_artist = None
        self.candidates = []
        self.rec = None
        self.should_remove_duplicates = False
        self.should_merge_duplicates = False
        self.is_album = True
        self.search_ids = []  # user-supplied candidate IDs.

@@ -510,6 +549,10 @@ class ImportTask(BaseImportTask):
    def apply_metadata(self):
        """Copy metadata from match info to the items.
        """
        if config['import']['from_scratch']:
            for item in self.match.mapping:
                item.clear()

        autotag.apply_metadata(self.match.info, self.match.mapping)

    def duplicate_items(self, lib):
@@ -520,23 +563,45 @@ class ImportTask(BaseImportTask):

    def remove_duplicates(self, lib):
        duplicate_items = self.duplicate_items(lib)
        log.debug(u'removing {0} old duplicated items', len(duplicate_items))
        log.debug('removing {0} old duplicated items', len(duplicate_items))
        for item in duplicate_items:
            item.remove()
            if lib.directory in util.ancestry(item.path):
                log.debug(u'deleting duplicate {0}',
                log.debug('deleting duplicate {0}',
                          util.displayable_path(item.path))
                util.remove(item.path)
                util.prune_dirs(os.path.dirname(item.path),
                                lib.directory)

    def set_fields(self, lib):
        """Sets the fields given at CLI or configuration to the specified
        values, for both the album and all its items.
        """
        items = self.imported_items()
        for field, view in config['import']['set_fields'].items():
            value = view.get()
            log.debug('Set field {1}={2} for {0}',
                      displayable_path(self.paths),
                      field,
                      value)
            self.album[field] = value
            for item in items:
                item[field] = value
        with lib.transaction():
            for item in items:
                item.store()
            self.album.store()

    def finalize(self, session):
        """Save progress, clean up files, and emit plugin event.
        """
        # Update progress.
        if session.want_resume:
            self.save_progress()
        if session.config['incremental']:
        if session.config['incremental'] and not (
            # Should we skip recording to incremental list?
            self.skip and session.config['incremental_skip_later']
        ):
            self.save_history()

        self.cleanup(copy=session.config['copy'],
@@ -609,17 +674,18 @@ class ImportTask(BaseImportTask):
            return []

        duplicates = []
        task_paths = set(i.path for i in self.items if i)
        task_paths = {i.path for i in self.items if i}
        duplicate_query = dbcore.AndQuery((
            dbcore.MatchQuery('albumartist', artist),
            dbcore.MatchQuery('album', album),
        ))

        for album in lib.albums(duplicate_query):
            # Check whether the album is identical in contents, in which
            # case it is not a duplicate (will be replaced).
|
||||
album_paths = set(i.path for i in album.items())
|
||||
if album_paths != task_paths:
|
||||
# Check whether the album paths are all present in the task
|
||||
# i.e. album is being completely re-imported by the task,
|
||||
# in which case it is not a duplicate (will be replaced).
|
||||
album_paths = {i.path for i in album.items()}
|
||||
if not (album_paths <= task_paths):
|
||||
duplicates.append(album)
|
||||
return duplicates
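The switch from `!=` to a subset test (`<=`) changes the semantics: an existing album whose files are all contained in the task is a re-import rather than a duplicate. A quick standalone illustration with made-up byte paths (beets stores paths as bytes):

task_paths = {b'/m/x/1.mp3', b'/m/x/2.mp3'}

# Album fully covered by the task: a re-import, so not flagged.
print({b'/m/x/1.mp3'} <= task_paths)                 # True -> not a duplicate
# Album with a file outside the task: a genuine duplicate.
print({b'/m/x/1.mp3', b'/m/y/3.mp3'} <= task_paths)  # False -> flagged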

@@ -659,20 +725,28 @@ class ImportTask(BaseImportTask):
        for item in self.items:
            item.update(changes)

    def manipulate_files(self, move=False, copy=False, write=False,
                         link=False, hardlink=False, session=None):
    def manipulate_files(self, operation=None, write=False, session=None):
        """Copy, move, link, hardlink or reflink (depending on `operation`)
        the files as well as write metadata.

        `operation` should be an instance of `util.MoveOperation`.

        If `write` is `True` metadata is written to the files.
        """

        items = self.imported_items()
        # Save the original paths of all items for deletion and pruning
        # in the next step (finalization).
        self.old_paths = [item.path for item in items]
        for item in items:
            if move or copy or link or hardlink:
            if operation is not None:
                # In copy and link modes, treat re-imports specially:
                # move in-library files. (Out-of-library files are
                # copied/moved as usual).
                old_path = item.path
                if (copy or link or hardlink) and self.replaced_items[item] \
                        and session.lib.directory in util.ancestry(old_path):
                if (operation != MoveOperation.MOVE
                        and self.replaced_items[item]
                        and session.lib.directory in util.ancestry(old_path)):
                    item.move()
                    # We moved the item, so remove the
                    # now-nonexistent file from old_paths.
@@ -680,7 +754,7 @@ class ImportTask(BaseImportTask):
                else:
                    # A normal import. Just copy files and keep track of
                    # old paths.
                    item.move(copy, link, hardlink)
                    item.move(operation)

            if write and (self.apply or self.choice_flag == action.RETAG):
                item.try_write()
@@ -699,6 +773,8 @@ class ImportTask(BaseImportTask):
        self.record_replaced(lib)
        self.remove_replaced(lib)
        self.album = lib.add_album(self.imported_items())
        if 'data_source' in self.imported_items()[0]:
            self.album.data_source = self.imported_items()[0].data_source
        self.reimport_metadata(lib)

    def record_replaced(self, lib):
@@ -717,7 +793,7 @@ class ImportTask(BaseImportTask):
            if (not dup_item.album_id or
                    dup_item.album_id in replaced_album_ids):
                continue
            replaced_album = dup_item.get_album()
            replaced_album = dup_item._cached_album
            if replaced_album:
                replaced_album_ids.add(dup_item.album_id)
                self.replaced_albums[replaced_album.path] = replaced_album
@@ -734,8 +810,8 @@ class ImportTask(BaseImportTask):
                self.album.artpath = replaced_album.artpath
                self.album.store()
                log.debug(
                    u'Reimported album: added {0}, flexible '
                    u'attributes {1} from album {2} for {3}',
                    'Reimported album: added {0}, flexible '
                    'attributes {1} from album {2} for {3}',
                    self.album.added,
                    replaced_album._values_flex.keys(),
                    replaced_album.id,
@@ -748,16 +824,16 @@ class ImportTask(BaseImportTask):
                if dup_item.added and dup_item.added != item.added:
                    item.added = dup_item.added
                    log.debug(
                        u'Reimported item added {0} '
                        u'from item {1} for {2}',
                        'Reimported item added {0} '
                        'from item {1} for {2}',
                        item.added,
                        dup_item.id,
                        displayable_path(item.path)
                    )
                item.update(dup_item._values_flex)
                log.debug(
                    u'Reimported item flexible attributes {0} '
                    u'from item {1} for {2}',
                    'Reimported item flexible attributes {0} '
                    'from item {1} for {2}',
                    dup_item._values_flex.keys(),
                    dup_item.id,
                    displayable_path(item.path)
@@ -770,10 +846,10 @@ class ImportTask(BaseImportTask):
        """
        for item in self.imported_items():
            for dup_item in self.replaced_items[item]:
                log.debug(u'Replacing item {0}: {1}',
                log.debug('Replacing item {0}: {1}',
                          dup_item.id, displayable_path(item.path))
                dup_item.remove()
        log.debug(u'{0} of {1} items replaced',
        log.debug('{0} of {1} items replaced',
                  sum(bool(l) for l in self.replaced_items.values()),
                  len(self.imported_items()))

@@ -811,7 +887,7 @@ class SingletonImportTask(ImportTask):
    """

    def __init__(self, toppath, item):
        super(SingletonImportTask, self).__init__(toppath, [item.path], [item])
        super().__init__(toppath, [item.path], [item])
        self.item = item
        self.is_album = False
        self.paths = [item.path]
@@ -877,6 +953,19 @@ class SingletonImportTask(ImportTask):
    def reload(self):
        self.item.load()

    def set_fields(self, lib):
        """Sets the fields given at CLI or configuration to the specified
        values, for the singleton item.
        """
        for field, view in config['import']['set_fields'].items():
            value = view.get()
            log.debug('Set field {1}={2} for {0}',
                      displayable_path(self.paths),
                      field,
                      value)
            self.item[field] = value
        self.item.store()


# FIXME The inheritance relationships are inverted. This is why there
# are so many methods which pass. More responsibility should be delegated to
@@ -891,7 +980,7 @@ class SentinelImportTask(ImportTask):
    """

    def __init__(self, toppath, paths):
        super(SentinelImportTask, self).__init__(toppath, paths, ())
        super().__init__(toppath, paths, ())
        # TODO Remove the remaining attributes eventually
        self.should_remove_duplicates = False
        self.is_album = True
@@ -935,7 +1024,7 @@ class ArchiveImportTask(SentinelImportTask):
    """

    def __init__(self, toppath):
        super(ArchiveImportTask, self).__init__(toppath, ())
        super().__init__(toppath, ())
        self.extracted = False

    @classmethod
@@ -964,14 +1053,20 @@ class ArchiveImportTask(SentinelImportTask):
            cls._handlers = []
            from zipfile import is_zipfile, ZipFile
            cls._handlers.append((is_zipfile, ZipFile))
            from tarfile import is_tarfile, TarFile
            cls._handlers.append((is_tarfile, TarFile))
            import tarfile
            cls._handlers.append((tarfile.is_tarfile, tarfile.open))
            try:
                from rarfile import is_rarfile, RarFile
            except ImportError:
                pass
            else:
                cls._handlers.append((is_rarfile, RarFile))
            try:
                from py7zr import is_7zfile, SevenZipFile
            except ImportError:
                pass
            else:
                cls._handlers.append((is_7zfile, SevenZipFile))

        return cls._handlers
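How `extract()` consumes this handler table, sketched with only the two formats that need no third-party modules (zip and tar); `rarfile` and `py7zr` join the list only when their imports succeed:

import tarfile
from zipfile import is_zipfile, ZipFile

handlers = [(is_zipfile, ZipFile), (tarfile.is_tarfile, tarfile.open)]

def find_handler(path):
    # Probe each (test, opener) pair in order, as extract() does.
    for path_test, handler_class in handlers:
        if path_test(path):
            return handler_class
    return None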

@@ -979,7 +1074,7 @@ class ArchiveImportTask(SentinelImportTask):
        """Removes the temporary directory the archive was extracted to.
        """
        if self.extracted:
            log.debug(u'Removing extracted directory: {0}',
            log.debug('Removing extracted directory: {0}',
                      displayable_path(self.toppath))
            shutil.rmtree(self.toppath)

@@ -991,9 +1086,9 @@ class ArchiveImportTask(SentinelImportTask):
            if path_test(util.py3_path(self.toppath)):
                break

        extract_to = mkdtemp()
        archive = handler_class(util.py3_path(self.toppath), mode='r')
        try:
            extract_to = mkdtemp()
            archive = handler_class(util.py3_path(self.toppath), mode='r')
            archive.extractall(extract_to)
        finally:
            archive.close()
@@ -1001,10 +1096,11 @@ class ArchiveImportTask(SentinelImportTask):
        self.toppath = extract_to


class ImportTaskFactory(object):
class ImportTaskFactory:
    """Generate album and singleton import tasks for all media files
    indicated by a path.
    """

    def __init__(self, toppath, session):
        """Create a new task factory.

@@ -1042,14 +1138,12 @@ class ImportTaskFactory(object):
        if self.session.config['singletons']:
            for path in paths:
                tasks = self._create(self.singleton(path))
                for task in tasks:
                    yield task
                yield from tasks
            yield self.sentinel(dirs)

        else:
            tasks = self._create(self.album(paths, dirs))
            for task in tasks:
                yield task
            yield from tasks

        # Produce the final sentinel for this toppath to indicate that
        # it is finished. This is usually just a SentinelImportTask, but
@@ -1097,7 +1191,7 @@ class ImportTaskFactory(object):
        """Return a `SingletonImportTask` for the music file.
        """
        if self.session.already_imported(self.toppath, [path]):
            log.debug(u'Skipping previously-imported path: {0}',
            log.debug('Skipping previously-imported path: {0}',
                      displayable_path(path))
            self.skipped += 1
            return None
@@ -1118,10 +1212,10 @@ class ImportTaskFactory(object):
            return None

        if dirs is None:
            dirs = list(set(os.path.dirname(p) for p in paths))
            dirs = list({os.path.dirname(p) for p in paths})

        if self.session.already_imported(self.toppath, dirs):
            log.debug(u'Skipping previously-imported path: {0}',
            log.debug('Skipping previously-imported path: {0}',
                      displayable_path(dirs))
            self.skipped += 1
            return None
@@ -1151,22 +1245,22 @@ class ImportTaskFactory(object):

        if not (self.session.config['move'] or
                self.session.config['copy']):
            log.warning(u"Archive importing requires either "
                        u"'copy' or 'move' to be enabled.")
            log.warning("Archive importing requires either "
                        "'copy' or 'move' to be enabled.")
            return

        log.debug(u'Extracting archive: {0}',
        log.debug('Extracting archive: {0}',
                  displayable_path(self.toppath))
        archive_task = ArchiveImportTask(self.toppath)
        try:
            archive_task.extract()
        except Exception as exc:
            log.error(u'extraction failed: {0}', exc)
            log.error('extraction failed: {0}', exc)
            return

        # Now read albums from the extracted directory.
        self.toppath = archive_task.toppath
        log.debug(u'Archive extracted to: {0}', self.toppath)
        log.debug('Archive extracted to: {0}', self.toppath)
        return archive_task

    def read_item(self, path):
@@ -1182,12 +1276,33 @@ class ImportTaskFactory(object):
                # Silently ignore non-music files.
                pass
            elif isinstance(exc.reason, mediafile.UnreadableFileError):
                log.warning(u'unreadable file: {0}', displayable_path(path))
                log.warning('unreadable file: {0}', displayable_path(path))
            else:
                log.error(u'error reading {0}: {1}',
                log.error('error reading {0}: {1}',
                          displayable_path(path), exc)


# Pipeline utilities

def _freshen_items(items):
    # Clear IDs from re-tagged items so they appear "fresh" when
    # we add them back to the library.
    for item in items:
        item.id = None
        item.album_id = None


def _extend_pipeline(tasks, *stages):
    # Return pipeline extension for stages with list of tasks
    if type(tasks) == list:
        task_iter = iter(tasks)
    else:
        task_iter = tasks

    ipl = pipeline.Pipeline([task_iter] + list(stages))
    return pipeline.multiple(ipl.pull())
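A small standalone check of the list-or-generator normalization at the top of `_extend_pipeline` (written here with `isinstance` rather than the `type(tasks) == list` comparison used above):

def as_task_iter(tasks):
    # Accept either a concrete list of tasks or an already-lazy iterator.
    return iter(tasks) if isinstance(tasks, list) else tasks

assert list(as_task_iter([1, 2, 3])) == [1, 2, 3]
assert list(as_task_iter(iter([1, 2, 3]))) == [1, 2, 3]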


# Full-album pipeline stages.

def read_tasks(session):
@@ -1202,17 +1317,16 @@ def read_tasks(session):

        # Generate tasks.
        task_factory = ImportTaskFactory(toppath, session)
        for t in task_factory.tasks():
            yield t
        yield from task_factory.tasks()
        skipped += task_factory.skipped

        if not task_factory.imported:
            log.warning(u'No files imported from {0}',
            log.warning('No files imported from {0}',
                        displayable_path(toppath))

    # Show skipped directories (due to incremental/resume).
    if skipped:
        log.info(u'Skipped {0} paths.', skipped)
        log.info('Skipped {0} paths.', skipped)


def query_tasks(session):
@@ -1230,15 +1344,10 @@ def query_tasks(session):
    else:
        # Search for albums.
        for album in session.lib.albums(session.query):
            log.debug(u'yielding album {0}: {1} - {2}',
            log.debug('yielding album {0}: {1} - {2}',
                      album.id, album.albumartist, album.album)
            items = list(album.items())

            # Clear IDs from re-tagged items so they appear "fresh" when
            # we add them back to the library.
            for item in items:
                item.id = None
                item.album_id = None
            _freshen_items(items)

            task = ImportTask(None, [album.item_dir()], items)
            for task in task.handle_created(session):
@@ -1258,7 +1367,7 @@ def lookup_candidates(session, task):
        return

    plugins.send('import_task_start', session=session, task=task)
    log.debug(u'Looking up: {0}', displayable_path(task.paths))
    log.debug('Looking up: {0}', displayable_path(task.paths))

    # Restrict the initial lookup to IDs specified by the user via the -m
    # option. Currently all the IDs are passed onto the tasks directly.
@@ -1284,6 +1393,9 @@ def user_query(session, task):
    if task.skip:
        return task

    if session.already_merged(task.paths):
        return pipeline.BUBBLE

    # Ask the user for a choice.
    task.choose_match(session)
    plugins.send('import_task_choice', session=session, task=task)
@@ -1294,28 +1406,41 @@ def user_query(session, task):
        def emitter(task):
            for item in task.items:
                task = SingletonImportTask(task.toppath, item)
                for new_task in task.handle_created(session):
                    yield new_task
                yield from task.handle_created(session)
            yield SentinelImportTask(task.toppath, task.paths)

        ipl = pipeline.Pipeline([
            emitter(task),
            lookup_candidates(session),
            user_query(session),
        ])
        return pipeline.multiple(ipl.pull())
        return _extend_pipeline(emitter(task),
                                lookup_candidates(session),
                                user_query(session))

    # As albums: group items by albums and create task for each album
    if task.choice_flag is action.ALBUMS:
        ipl = pipeline.Pipeline([
            iter([task]),
            group_albums(session),
            lookup_candidates(session),
            user_query(session)
        ])
        return pipeline.multiple(ipl.pull())
        return _extend_pipeline([task],
                                group_albums(session),
                                lookup_candidates(session),
                                user_query(session))

    resolve_duplicates(session, task)

    if task.should_merge_duplicates:
        # Create a new task for tagging the current items
        # and duplicates together
        duplicate_items = task.duplicate_items(session.lib)

        # Duplicates would be reimported so make them look "fresh"
        _freshen_items(duplicate_items)
        duplicate_paths = [item.path for item in duplicate_items]

        # Record merged paths in the session so they are not reimported
        session.mark_merged(duplicate_paths)

        merged_task = ImportTask(None, task.paths + duplicate_paths,
                                 task.items + duplicate_items)

        return _extend_pipeline([merged_task],
                                lookup_candidates(session),
                                user_query(session))

    apply_choice(session, task)
    return task

@@ -1327,28 +1452,32 @@ def resolve_duplicates(session, task):
    if task.choice_flag in (action.ASIS, action.APPLY, action.RETAG):
        found_duplicates = task.find_duplicates(session.lib)
        if found_duplicates:
            log.debug(u'found duplicates: {}'.format(
            log.debug('found duplicates: {}'.format(
                [o.id for o in found_duplicates]
            ))

            # Get the default action to follow from config.
            duplicate_action = config['import']['duplicate_action'].as_choice({
                u'skip': u's',
                u'keep': u'k',
                u'remove': u'r',
                u'ask': u'a',
                'skip': 's',
                'keep': 'k',
                'remove': 'r',
                'merge': 'm',
                'ask': 'a',
            })
            log.debug(u'default action for duplicates: {0}', duplicate_action)
            log.debug('default action for duplicates: {0}', duplicate_action)

            if duplicate_action == u's':
            if duplicate_action == 's':
                # Skip new.
                task.set_choice(action.SKIP)
            elif duplicate_action == u'k':
            elif duplicate_action == 'k':
                # Keep both. Do nothing; leave the choice intact.
                pass
            elif duplicate_action == u'r':
            elif duplicate_action == 'r':
                # Remove old.
                task.should_remove_duplicates = True
            elif duplicate_action == 'm':
                # Merge duplicates together
                task.should_merge_duplicates = True
            else:
                # No default action set; ask the session.
                session.resolve_duplicate(task, found_duplicates)
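For readers without a beets config at hand, a rough standalone equivalent of the confuse `as_choice` mapping above; the dict literal mirrors the diff, with 'merge'/'m' as the newly added pair:

def as_choice(value, choices):
    # Minimal stand-in for confuse's ConfigView.as_choice().
    if value not in choices:
        raise ValueError(f'{value!r} must be one of {sorted(choices)}')
    return choices[value]

duplicate_action = as_choice('merge', {
    'skip': 's', 'keep': 'k', 'remove': 'r', 'merge': 'm', 'ask': 'a',
})
print(duplicate_action)  # -> 'm', which sets task.should_merge_duplicates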

@@ -1366,7 +1495,7 @@ def import_asis(session, task):
    if task.skip:
        return

    log.info(u'{}', displayable_path(task.paths))
    log.info('{}', displayable_path(task.paths))
    task.set_choice(action.ASIS)
    apply_choice(session, task)

@@ -1385,6 +1514,14 @@ def apply_choice(session, task):

    task.add(session.lib)

    # If ``set_fields`` is set, set those fields to the
    # configured values.
    # NOTE: This cannot be done before the ``task.add()`` call above,
    # because then the ``ImportTask`` won't have an `album` for which
    # it can set the fields.
    if config['import']['set_fields']:
        task.set_fields(session.lib)


@pipeline.mutator_stage
def plugin_stage(session, func, task):
@@ -1413,12 +1550,22 @@ def manipulate_files(session, task):
    if task.should_remove_duplicates:
        task.remove_duplicates(session.lib)

    if session.config['move']:
        operation = MoveOperation.MOVE
    elif session.config['copy']:
        operation = MoveOperation.COPY
    elif session.config['link']:
        operation = MoveOperation.LINK
    elif session.config['hardlink']:
        operation = MoveOperation.HARDLINK
    elif session.config['reflink']:
        operation = MoveOperation.REFLINK
    else:
        operation = None

    task.manipulate_files(
        move=session.config['move'],
        copy=session.config['copy'],
        operation,
        write=session.config['write'],
        link=session.config['link'],
        hardlink=session.config['hardlink'],
        session=session,
    )

@@ -1431,11 +1578,11 @@ def log_files(session, task):
    """A coroutine (pipeline stage) to log each file to be imported.
    """
    if isinstance(task, SingletonImportTask):
        log.info(u'Singleton: {0}', displayable_path(task.item['path']))
        log.info('Singleton: {0}', displayable_path(task.item['path']))
    elif task.items:
        log.info(u'Album: {0}', displayable_path(task.paths[0]))
        log.info('Album: {0}', displayable_path(task.paths[0]))
        for item in task.items:
            log.info(u'  {0}', displayable_path(item['path']))
            log.info('  {0}', displayable_path(item['path']))


def group_albums(session):
@@ -1469,6 +1616,14 @@ MULTIDISC_MARKERS = (br'dis[ck]', br'cd')
MULTIDISC_PAT_FMT = br'^(.*%s[\W_]*)\d'


def is_subdir_of_any_in_list(path, dirs):
    """Returns True if path is a subdirectory of any directory in dirs
    (a list). Otherwise, returns False.
    """
    ancestors = ancestry(path)
    return any(d in ancestors for d in dirs)


def albums_in_dir(path):
    """Recursively searches the given directory and returns an iterable
    of (paths, items) where paths is a list of directories and items is
@@ -1488,7 +1643,7 @@ def albums_in_dir(path):
        # and add the current directory. If so, just add the directory
        # and move on to the next directory. If not, stop collapsing.
        if collapse_paths:
            if (not collapse_pat and collapse_paths[0] in ancestry(root)) or \
            if (is_subdir_of_any_in_list(root, collapse_paths)) or \
                    (collapse_pat and
                     collapse_pat.match(os.path.basename(root))):
                # Still collapsing.
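The `manipulate_files` stage above collapses five boolean config flags into a single enum value. A self-contained sketch of the same dispatch; the enum mirrors `beets.util.MoveOperation` and the config dict is a hypothetical stand-in for the session config:

from enum import Enum

class MoveOperation(Enum):
    MOVE, COPY, LINK, HARDLINK, REFLINK = range(5)

# Hypothetical session config: at most one flag is expected to be set.
config = {'move': False, 'copy': True, 'link': False,
          'hardlink': False, 'reflink': False}

operation = next((op for op in MoveOperation if config[op.name.lower()]),
                 None)
print(operation)  # -> MoveOperation.COPY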
717 lib/beets/library.py (Executable file → Normal file)
File diff suppressed because it is too large
16 lib/beets/logging.py (Executable file → Normal file)
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -21,13 +20,11 @@ that when getLogger(name) instantiates a logger that logger uses
{}-style formatting.
"""

from __future__ import division, absolute_import, print_function

from copy import copy
from logging import *  # noqa
import subprocess
import threading
import six


def logsafe(val):
@@ -43,7 +40,7 @@ def logsafe(val):
    example.
    """
    # Already Unicode.
    if isinstance(val, six.text_type):
    if isinstance(val, str):
        return val

    # Bytestring: needs decoding.
@@ -57,7 +54,7 @@ def logsafe(val):
    # A "problem" object: needs a workaround.
    elif isinstance(val, subprocess.CalledProcessError):
        try:
            return six.text_type(val)
            return str(val)
        except UnicodeDecodeError:
            # An object with a broken __unicode__ formatter. Use __str__
            # instead.
@@ -74,7 +71,7 @@ class StrFormatLogger(Logger):
    instead of %-style formatting.
    """

    class _LogMessage(object):
    class _LogMessage:
        def __init__(self, msg, args, kwargs):
            self.msg = msg
            self.args = args
@@ -82,22 +79,23 @@ class StrFormatLogger(Logger):

        def __str__(self):
            args = [logsafe(a) for a in self.args]
            kwargs = dict((k, logsafe(v)) for (k, v) in self.kwargs.items())
            kwargs = {k: logsafe(v) for (k, v) in self.kwargs.items()}
            return self.msg.format(*args, **kwargs)

    def _log(self, level, msg, args, exc_info=None, extra=None, **kwargs):
        """Log msg.format(*args, **kwargs)"""
        m = self._LogMessage(msg, args, kwargs)
        return super(StrFormatLogger, self)._log(level, m, (), exc_info, extra)
        return super()._log(level, m, (), exc_info, extra)


class ThreadLocalLevelLogger(Logger):
    """A version of `Logger` whose level is thread-local instead of shared.
    """

    def __init__(self, name, level=NOTSET):
        self._thread_level = threading.local()
        self.default_level = NOTSET
        super(ThreadLocalLevelLogger, self).__init__(name, level)
        super().__init__(name, level)

    @property
    def level(self):
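The practical effect of the `_LogMessage` wrapper above is deferred {}-style formatting. A minimal standalone demo of the same trick on a plain `logging` logger, without subclassing `Logger`:

import logging

class BraceMessage:
    # Defers str.format() until the record is rendered, like
    # StrFormatLogger._LogMessage above.
    def __init__(self, msg, *args, **kwargs):
        self.msg, self.args, self.kwargs = msg, args, kwargs

    def __str__(self):
        return self.msg.format(*self.args, **self.kwargs)

logging.basicConfig(level=logging.DEBUG)
logging.getLogger('demo').debug(BraceMessage('imported {0} items', 42))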
2055 lib/beets/mediafile.py (Executable file → Normal file)
File diff suppressed because it is too large
346 lib/beets/plugins.py (Executable file → Normal file)
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -15,19 +14,19 @@

"""Support for beets plugins."""

from __future__ import division, absolute_import, print_function

import inspect
import traceback
import re
import inspect
import abc
from collections import defaultdict
from functools import wraps


import beets
from beets import logging
from beets import mediafile
import six
import mediafile


PLUGIN_NAMESPACE = 'beetsplug'

@@ -50,26 +49,28 @@ class PluginLogFilter(logging.Filter):
    """A logging filter that identifies the plugin that emitted a log
    message.
    """

    def __init__(self, plugin):
        self.prefix = u'{0}: '.format(plugin.name)
        self.prefix = f'{plugin.name}: '

    def filter(self, record):
        if hasattr(record.msg, 'msg') and isinstance(record.msg.msg,
                                                     six.string_types):
                                                     str):
            # A _LogMessage from our hacked-up Logging replacement.
            record.msg.msg = self.prefix + record.msg.msg
        elif isinstance(record.msg, six.string_types):
        elif isinstance(record.msg, str):
            record.msg = self.prefix + record.msg
        return True


# Managing the plugins themselves.

class BeetsPlugin(object):
class BeetsPlugin:
    """The base class for all beets plugins. Plugins provide
    functionality by defining a subclass of BeetsPlugin and overriding
    the abstract methods defined here.
    """

    def __init__(self, name=None):
        """Perform one-time plugin setup.
        """
@@ -81,6 +82,7 @@ class BeetsPlugin(object):
            self.template_fields = {}
        if not self.album_template_fields:
            self.album_template_fields = {}
        self.early_import_stages = []
        self.import_stages = []

        self._log = log.getChild(self.name)
@@ -94,6 +96,22 @@ class BeetsPlugin(object):
        """
        return ()

    def _set_stage_log_level(self, stages):
        """Adjust all the stages in `stages` to WARNING logging level.
        """
        return [self._set_log_level_and_params(logging.WARNING, stage)
                for stage in stages]

    def get_early_import_stages(self):
        """Return a list of functions that should be called as importer
        pipelines stages early in the pipeline.

        The callables are wrapped versions of the functions in
        `self.early_import_stages`. Wrapping provides some bookkeeping for the
        plugin: specifically, the logging level is adjusted to WARNING.
        """
        return self._set_stage_log_level(self.early_import_stages)

    def get_import_stages(self):
        """Return a list of functions that should be called as importer
        pipelines stages.
@@ -102,8 +120,7 @@ class BeetsPlugin(object):
        `self.import_stages`. Wrapping provides some bookkeeping for the
        plugin: specifically, the logging level is adjusted to WARNING.
        """
        return [self._set_log_level_and_params(logging.WARNING, import_stage)
                for import_stage in self.import_stages]
        return self._set_stage_log_level(self.import_stages)

    def _set_log_level_and_params(self, base_log_level, func):
        """Wrap `func` to temporarily set this plugin's logger level to
@@ -111,27 +128,24 @@ class BeetsPlugin(object):
        value after the function returns). Also determines which params may not
        be sent for backwards-compatibility.
        """
        argspec = inspect.getargspec(func)
        argspec = inspect.getfullargspec(func)

        @wraps(func)
        def wrapper(*args, **kwargs):
            assert self._log.level == logging.NOTSET

            verbosity = beets.config['verbose'].get(int)
            log_level = max(logging.DEBUG, base_log_level - 10 * verbosity)
            self._log.setLevel(log_level)
            if argspec.varkw is None:
                kwargs = {k: v for k, v in kwargs.items()
                          if k in argspec.args}

            try:
            try:
                return func(*args, **kwargs)
            except TypeError as exc:
                if exc.args[0].startswith(func.__name__):
                    # caused by 'func' and not stuff internal to 'func'
                    kwargs = dict((arg, val) for arg, val in kwargs.items()
                                  if arg in argspec.args)
                    return func(*args, **kwargs)
                else:
                    raise
                return func(*args, **kwargs)
            finally:
                self._log.setLevel(logging.NOTSET)

        return wrapper
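The rewritten wrapper drops unsupported keyword arguments up front with `inspect.getfullargspec`, instead of retrying after a `TypeError`. A standalone sketch of just that filtering step:

import inspect

def call_with_known_kwargs(func, **kwargs):
    spec = inspect.getfullargspec(func)
    if spec.varkw is None:
        # No **kwargs catch-all: silently drop unknown arguments,
        # as the wrapper above does for backwards-compatibility.
        kwargs = {k: v for k, v in kwargs.items() if k in spec.args}
    return func(**kwargs)

def stage(task):
    return task

print(call_with_known_kwargs(stage, task='t1', session='ignored'))  # -> t1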

    def queries(self):
@@ -151,7 +165,7 @@ class BeetsPlugin(object):
        """
        return beets.autotag.hooks.Distance()

    def candidates(self, items, artist, album, va_likely):
    def candidates(self, items, artist, album, va_likely, extra_tags=None):
        """Should return a sequence of AlbumInfo objects that match the
        album whose items are provided.
        """
@@ -185,7 +199,7 @@ class BeetsPlugin(object):

        ``descriptor`` must be an instance of ``mediafile.MediaField``.
        """
        # Defer impor to prevent circular dependency
        # Defer import to prevent circular dependency
        from beets import library
        mediafile.MediaFile.add_field(name, descriptor)
        library.Item._media_fields.add(name)
@@ -248,14 +262,14 @@ def load_plugins(names=()):
    BeetsPlugin subclasses desired.
    """
    for name in names:
        modname = '{0}.{1}'.format(PLUGIN_NAMESPACE, name)
        modname = f'{PLUGIN_NAMESPACE}.{name}'
        try:
            try:
                namespace = __import__(modname, None, None)
            except ImportError as exc:
                # Again, this is hacky:
                if exc.args[0].endswith(' ' + name):
                    log.warning(u'** plugin {0} not found', name)
                    log.warning('** plugin {0} not found', name)
                else:
                    raise
            else:
@@ -264,9 +278,9 @@ def load_plugins(names=()):
                        and obj != BeetsPlugin and obj not in _classes:
                    _classes.add(obj)

        except:
        except Exception:
            log.warning(
                u'** error loading plugin {}:\n{}',
                '** error loading plugin {}:\n{}',
                name,
                traceback.format_exc(),
            )
@@ -280,6 +294,11 @@ def find_plugins():
    currently loaded beets plugins. Loads the default plugin set
    first.
    """
    if _instances:
        # After the first call, use cached instances for performance reasons.
        # See https://github.com/beetbox/beets/pull/3810
        return list(_instances.values())

    load_plugins()
    plugins = []
    for cls in _classes:
@@ -313,21 +332,31 @@ def queries():

def types(model_cls):
    # Gives us `item_types` and `album_types`
    attr_name = '{0}_types'.format(model_cls.__name__.lower())
    attr_name = f'{model_cls.__name__.lower()}_types'
    types = {}
    for plugin in find_plugins():
        plugin_types = getattr(plugin, attr_name, {})
        for field in plugin_types:
            if field in types and plugin_types[field] != types[field]:
                raise PluginConflictException(
                    u'Plugin {0} defines flexible field {1} '
                    u'which has already been defined with '
                    u'another type.'.format(plugin.name, field)
                    'Plugin {} defines flexible field {} '
                    'which has already been defined with '
                    'another type.'.format(plugin.name, field)
                )
        types.update(plugin_types)
    return types


def named_queries(model_cls):
    # Gather `item_queries` and `album_queries` from the plugins.
    attr_name = f'{model_cls.__name__.lower()}_queries'
    queries = {}
    for plugin in find_plugins():
        plugin_queries = getattr(plugin, attr_name, {})
        queries.update(plugin_queries)
    return queries


def track_distance(item, info):
    """Gets the track distance calculated by all loaded plugins.
    Returns a Distance object.
@@ -348,20 +377,19 @@ def album_distance(items, album_info, mapping):
    return dist


def candidates(items, artist, album, va_likely):
def candidates(items, artist, album, va_likely, extra_tags=None):
    """Gets MusicBrainz candidates for an album from each plugin.
    """
    for plugin in find_plugins():
        for candidate in plugin.candidates(items, artist, album, va_likely):
            yield candidate
        yield from plugin.candidates(items, artist, album, va_likely,
                                     extra_tags)


def item_candidates(item, artist, title):
    """Gets MusicBrainz candidates for an item from the plugins.
    """
    for plugin in find_plugins():
        for item_candidate in plugin.item_candidates(item, artist, title):
            yield item_candidate
        yield from plugin.item_candidates(item, artist, title)


def album_for_id(album_id):
@@ -393,6 +421,14 @@ def template_funcs():
    return funcs


def early_import_stages():
    """Get a list of early import stage functions defined by plugins."""
    stages = []
    for plugin in find_plugins():
        stages += plugin.get_early_import_stages()
    return stages


def import_stages():
    """Get a list of import stage functions defined by plugins."""
    stages = []
@@ -446,7 +482,7 @@ def send(event, **arguments):

    Return a list of non-None values returned from the handlers.
    """
    log.debug(u'Sending event: {0}', event)
    log.debug('Sending event: {0}', event)
    results = []
    for handler in event_handlers()[event]:
        result = handler(**arguments)
@@ -464,7 +500,7 @@ def feat_tokens(for_artist=True):
    feat_words = ['ft', 'featuring', 'feat', 'feat.', 'ft.']
    if for_artist:
        feat_words += ['with', 'vs', 'and', 'con', '&']
    return '(?<=\s)(?:{0})(?=\s)'.format(
    return r'(?<=\s)(?:{})(?=\s)'.format(
        '|'.join(re.escape(x) for x in feat_words)
    )

@@ -478,9 +514,50 @@ def sanitize_choices(choices, choices_all):
    others = [x for x in choices_all if x not in choices]
    res = []
    for s in choices:
        if s in list(choices_all) + ['*']:
            if not (s in seen or seen.add(s)):
                res.extend(list(others) if s == '*' else [s])
        if s not in seen:
            if s in list(choices_all):
                res.append(s)
            elif s == '*':
                res.extend(others)
            seen.add(s)
    return res
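A quick check of the rewritten loop's behaviour, using a local copy of the function with made-up plugin names:

def sanitize_choices(choices, choices_all):
    # Local copy of the rewritten helper above, for demonstration.
    seen = set()
    others = [x for x in choices_all if x not in choices]
    res = []
    for s in choices:
        if s not in seen:
            if s in list(choices_all):
                res.append(s)
            elif s == '*':
                res.extend(others)
            seen.add(s)
    return res

print(sanitize_choices(['a', '*', 'a'], ['a', 'b', 'c']))  # ['a', 'b', 'c']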


def sanitize_pairs(pairs, pairs_all):
    """Clean up a single-element mapping configuration attribute as returned
    by Confuse's `Pairs` template: keep only two-element tuples present in
    pairs_all, remove duplicate elements, expand ('str', '*') and ('*', '*')
    wildcards while keeping the original order. Note that ('*', '*') and
    ('*', 'whatever') have the same effect.

    For example,

    >>> sanitize_pairs(
    ...     [('foo', 'baz bar'), ('key', '*'), ('*', '*')],
    ...     [('foo', 'bar'), ('foo', 'baz'), ('foo', 'foobar'),
    ...      ('key', 'value')]
    ... )
    [('foo', 'baz'), ('foo', 'bar'), ('key', 'value'), ('foo', 'foobar')]
    """
    pairs_all = list(pairs_all)
    seen = set()
    others = [x for x in pairs_all if x not in pairs]
    res = []
    for k, values in pairs:
        for v in values.split():
            x = (k, v)
            if x in pairs_all:
                if x not in seen:
                    seen.add(x)
                    res.append(x)
            elif k == '*':
                new = [o for o in others if o not in seen]
                seen.update(new)
                res.extend(new)
            elif v == '*':
                new = [o for o in others if o not in seen and o[0] == k]
                seen.update(new)
                res.extend(new)
    return res


@@ -498,3 +575,188 @@ def notify_info_yielded(event):
            yield v
        return decorated
    return decorator


def get_distance(config, data_source, info):
    """Returns the ``data_source`` weight and the maximum source weight
    for albums or individual tracks.
    """
    dist = beets.autotag.Distance()
    if info.data_source == data_source:
        dist.add('source', config['source_weight'].as_number())
    return dist


def apply_item_changes(lib, item, move, pretend, write):
    """Store, move, and write the item according to the arguments.

    :param lib: beets library.
    :type lib: beets.library.Library
    :param item: Item whose changes to apply.
    :type item: beets.library.Item
    :param move: Move the item if it's in the library.
    :type move: bool
    :param pretend: Return without moving, writing, or storing the item's
        metadata.
    :type pretend: bool
    :param write: Write the item's metadata to its media file.
    :type write: bool
    """
    if pretend:
        return

    from beets import util

    # Move the item if it's in the library.
    if move and lib.directory in util.ancestry(item.path):
        item.move(with_album=False)

    if write:
        item.try_write()

    item.store()


class MetadataSourcePlugin(metaclass=abc.ABCMeta):
    def __init__(self):
        super().__init__()
        self.config.add({'source_weight': 0.5})

    @abc.abstractproperty
    def id_regex(self):
        raise NotImplementedError

    @abc.abstractproperty
    def data_source(self):
        raise NotImplementedError

    @abc.abstractproperty
    def search_url(self):
        raise NotImplementedError

    @abc.abstractproperty
    def album_url(self):
        raise NotImplementedError

    @abc.abstractproperty
    def track_url(self):
        raise NotImplementedError

    @abc.abstractmethod
    def _search_api(self, query_type, filters, keywords=''):
        raise NotImplementedError

    @abc.abstractmethod
    def album_for_id(self, album_id):
        raise NotImplementedError

    @abc.abstractmethod
    def track_for_id(self, track_id=None, track_data=None):
        raise NotImplementedError

    @staticmethod
    def get_artist(artists, id_key='id', name_key='name'):
        """Returns an artist string (all artists) and an artist_id (the main
        artist) for a list of artist object dicts.

        For each artist, this function moves articles (such as 'a', 'an',
        and 'the') to the front and strips trailing disambiguation numbers. It
        returns a tuple containing the comma-separated string of all
        normalized artists and the ``id`` of the main/first artist.

        :param artists: Iterable of artist dicts or lists returned by API.
        :type artists: list[dict] or list[list]
        :param id_key: Key or index corresponding to the value of ``id`` for
            the main/first artist. Defaults to 'id'.
        :type id_key: str or int
        :param name_key: Key or index corresponding to values of names
            to concatenate for the artist string (containing all artists).
            Defaults to 'name'.
        :type name_key: str or int
        :return: Normalized artist string.
        :rtype: str
        """
        artist_id = None
        artist_names = []
        for artist in artists:
            if not artist_id:
                artist_id = artist[id_key]
            name = artist[name_key]
            # Strip disambiguation number.
            name = re.sub(r' \(\d+\)$', '', name)
            # Move articles to the front.
            name = re.sub(r'^(.*?), (a|an|the)$', r'\2 \1', name, flags=re.I)
            artist_names.append(name)
        artist = ', '.join(artist_names).replace(' ,', ',') or None
        return artist, artist_id
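The two substitutions in `get_artist` in action, inlined on made-up artist data (disambiguation number stripped, leading article restored):

import re

artists = [{'id': 1, 'name': 'Beatles, The'}, {'id': 2, 'name': 'Someone (2)'}]
names = []
for a in artists:
    name = re.sub(r' \(\d+\)$', '', a['name'])   # strip trailing "(2)"
    name = re.sub(r'^(.*?), (a|an|the)$', r'\2 \1', name, flags=re.I)
    names.append(name)
print(', '.join(names), '/ main id:', artists[0]['id'])
# -> The Beatles, Someone / main id: 1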

    def _get_id(self, url_type, id_):
        """Parse an ID from its URL if necessary.

        :param url_type: Type of URL. Either 'album' or 'track'.
        :type url_type: str
        :param id_: Album/track ID or URL.
        :type id_: str
        :return: Album/track ID.
        :rtype: str
        """
        self._log.debug(
            "Searching {} for {} '{}'", self.data_source, url_type, id_
        )
        match = re.search(self.id_regex['pattern'].format(url_type), str(id_))
        if match:
            id_ = match.group(self.id_regex['match_group'])
            if id_:
                return id_
        return None

    def candidates(self, items, artist, album, va_likely, extra_tags=None):
        """Returns a list of AlbumInfo objects for Search API results
        matching an ``album`` and ``artist`` (if not various).

        :param items: List of items comprised by an album to be matched.
        :type items: list[beets.library.Item]
        :param artist: The artist of the album to be matched.
        :type artist: str
        :param album: The name of the album to be matched.
        :type album: str
        :param va_likely: True if the album to be matched likely has
            Various Artists.
        :type va_likely: bool
        :return: Candidate AlbumInfo objects.
        :rtype: list[beets.autotag.hooks.AlbumInfo]
        """
        query_filters = {'album': album}
        if not va_likely:
            query_filters['artist'] = artist
        results = self._search_api(query_type='album', filters=query_filters)
        albums = [self.album_for_id(album_id=r['id']) for r in results]
        return [a for a in albums if a is not None]

    def item_candidates(self, item, artist, title):
        """Returns a list of TrackInfo objects for Search API results
        matching ``title`` and ``artist``.

        :param item: Singleton item to be matched.
        :type item: beets.library.Item
        :param artist: The artist of the track to be matched.
        :type artist: str
        :param title: The title of the track to be matched.
        :type title: str
        :return: Candidate TrackInfo objects.
        :rtype: list[beets.autotag.hooks.TrackInfo]
        """
        tracks = self._search_api(
            query_type='track', keywords=title, filters={'artist': artist}
        )
        return [self.track_for_id(track_data=track) for track in tracks]

    def album_distance(self, items, album_info, mapping):
        return get_distance(
            data_source=self.data_source, info=album_info, config=self.config
        )

    def track_distance(self, item, track_info):
        return get_distance(
            data_source=self.data_source, info=track_info, config=self.config
        )
113 lib/beets/random.py (Normal file)
@@ -0,0 +1,113 @@
# This file is part of beets.
# Copyright 2016, Philippe Mongeau.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Get a random song or album from the library.
"""

import random
from operator import attrgetter
from itertools import groupby


def _length(obj, album):
    """Get the duration of an item or album.
    """
    if album:
        return sum(i.length for i in obj.items())
    else:
        return obj.length


def _equal_chance_permutation(objs, field='albumartist', random_gen=None):
    """Generate (lazily) a permutation of the objects where every group
    with equal values for `field` have an equal chance of appearing in
    any given position.
    """
    rand = random_gen or random

    # Group the objects by artist so we can sample from them.
    key = attrgetter(field)
    objs.sort(key=key)
    objs_by_artists = {}
    for artist, v in groupby(objs, key):
        objs_by_artists[artist] = list(v)

    # While we still have artists with music to choose from, pick one
    # randomly and pick a track from that artist.
    while objs_by_artists:
        # Choose an artist and an object for that artist, removing
        # this choice from the pool.
        artist = rand.choice(list(objs_by_artists.keys()))
        objs_from_artist = objs_by_artists[artist]
        i = rand.randint(0, len(objs_from_artist) - 1)
        yield objs_from_artist.pop(i)

        # Remove the artist if we've used up all of its objects.
        if not objs_from_artist:
            del objs_by_artists[artist]
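A compressed, runnable restatement of the equal-chance idea on plain tuples (seeded for reproducibility; the real generator works on Item/Album objects via `attrgetter`):

import random
from collections import defaultdict

def equal_chance(objs, key, rng):
    # Group by artist, then repeatedly pick a random artist and a
    # random remaining object from that artist, as above.
    groups = defaultdict(list)
    for obj in objs:
        groups[key(obj)].append(obj)
    while groups:
        artist = rng.choice(sorted(groups))
        group = groups[artist]
        yield group.pop(rng.randrange(len(group)))
        if not group:
            del groups[artist]

rng = random.Random(1)
tracks = [('A', 1), ('A', 2), ('A', 3), ('B', 1)]
print(list(equal_chance(tracks, key=lambda t: t[0], rng=rng)))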


def _take(iter, num):
    """Return a list containing the first `num` values in `iter` (or
    fewer, if the iterable ends early).
    """
    out = []
    for val in iter:
        out.append(val)
        num -= 1
        if num <= 0:
            break
    return out


def _take_time(iter, secs, album):
    """Return a list containing the first values in `iter`, which should
    be Item or Album objects, that add up to the given amount of time in
    seconds.
    """
    out = []
    total_time = 0.0
    for obj in iter:
        length = _length(obj, album)
        if total_time + length <= secs:
            out.append(obj)
            total_time += length
    return out


def random_objs(objs, album, number=1, time=None, equal_chance=False,
                random_gen=None):
    """Get a random subset of the provided `objs`.

    If `number` is provided, produce that many matches. Otherwise, if
    `time` is provided, instead select a list whose total time is close
    to that number of minutes. If `equal_chance` is true, give each
    artist an equal chance of being included so that artists with more
    songs are not represented disproportionately.
    """
    rand = random_gen or random

    # Permute the objects either in a straightforward way or an
    # artist-balanced way.
    if equal_chance:
        perm = _equal_chance_permutation(objs)
    else:
        perm = objs
        rand.shuffle(perm)  # N.B. This shuffles the original list.

    # Select objects by time or count.
    if time:
        return _take_time(perm, time * 60, album)
    else:
        return _take(perm, number)
386 lib/beets/ui/__init__.py (Executable file → Normal file)
@@ -1,4 +1,3 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# This file is part of beets.
|
||||
# Copyright 2016, Adrian Sampson.
|
||||
#
|
||||
@@ -18,7 +17,6 @@ interface. To invoke the CLI, just call beets.ui.main(). The actual
|
||||
CLI commands are implemented in the ui.commands module.
|
||||
"""
|
||||
|
||||
from __future__ import division, absolute_import, print_function
|
||||
|
||||
import optparse
|
||||
import textwrap
|
||||
@@ -30,18 +28,18 @@ import re
|
||||
import struct
|
||||
import traceback
|
||||
import os.path
|
||||
from six.moves import input
|
||||
|
||||
from beets import logging
|
||||
from beets import library
|
||||
from beets import plugins
|
||||
from beets import util
|
||||
from beets.util.functemplate import Template
|
||||
from beets.util.functemplate import template
|
||||
from beets import config
|
||||
from beets.util import confit, as_string
|
||||
from beets.util import as_string
|
||||
from beets.autotag import mb
|
||||
from beets.dbcore import query as db_query
|
||||
import six
|
||||
from beets.dbcore import db
|
||||
import confuse
|
||||
|
||||
# On Windows platforms, use colorama to support "ANSI" terminal colors.
|
||||
if sys.platform == 'win32':
|
||||
@@ -60,8 +58,8 @@ log.propagate = False # Don't propagate to root handler.
|
||||
|
||||
|
||||
PF_KEY_QUERIES = {
|
||||
'comp': u'comp:true',
|
||||
'singleton': u'singleton:true',
|
||||
'comp': 'comp:true',
|
||||
'singleton': 'singleton:true',
|
||||
}
|
||||
|
||||
|
||||
@@ -111,10 +109,7 @@ def decargs(arglist):
|
||||
"""Given a list of command-line argument bytestrings, attempts to
|
||||
decode them to Unicode strings when running under Python 2.
|
||||
"""
|
||||
if six.PY2:
|
||||
return [s.decode(util.arg_encoding()) for s in arglist]
|
||||
else:
|
||||
return arglist
|
||||
return arglist
|
||||
|
||||
|
||||
def print_(*strings, **kwargs):
|
||||
@@ -129,29 +124,25 @@ def print_(*strings, **kwargs):
|
||||
(it defaults to a newline).
|
||||
"""
|
||||
if not strings:
|
||||
strings = [u'']
|
||||
assert isinstance(strings[0], six.text_type)
|
||||
strings = ['']
|
||||
assert isinstance(strings[0], str)
|
||||
|
||||
txt = u' '.join(strings)
|
||||
txt += kwargs.get('end', u'\n')
|
||||
txt = ' '.join(strings)
|
||||
txt += kwargs.get('end', '\n')
|
||||
|
||||
# Encode the string and write it to stdout.
|
||||
if six.PY2:
|
||||
# On Python 2, sys.stdout expects bytes.
|
||||
# On Python 3, sys.stdout expects text strings and uses the
|
||||
# exception-throwing encoding error policy. To avoid throwing
|
||||
# errors and use our configurable encoding override, we use the
|
||||
# underlying bytes buffer instead.
|
||||
if hasattr(sys.stdout, 'buffer'):
|
||||
out = txt.encode(_out_encoding(), 'replace')
|
||||
sys.stdout.write(out)
|
||||
sys.stdout.buffer.write(out)
|
||||
sys.stdout.buffer.flush()
|
||||
else:
|
||||
# On Python 3, sys.stdout expects text strings and uses the
|
||||
# exception-throwing encoding error policy. To avoid throwing
|
||||
# errors and use our configurable encoding override, we use the
|
||||
# underlying bytes buffer instead.
|
||||
if hasattr(sys.stdout, 'buffer'):
|
||||
out = txt.encode(_out_encoding(), 'replace')
|
||||
sys.stdout.buffer.write(out)
|
||||
else:
|
||||
# In our test harnesses (e.g., DummyOut), sys.stdout.buffer
|
||||
# does not exist. We instead just record the text string.
|
||||
sys.stdout.write(txt)
|
||||
# In our test harnesses (e.g., DummyOut), sys.stdout.buffer
|
||||
# does not exist. We instead just record the text string.
|
||||
sys.stdout.write(txt)
|
||||
|
||||
|
||||
# Configuration wrappers.
|
||||
@@ -201,19 +192,16 @@ def input_(prompt=None):
|
||||
"""
|
||||
# raw_input incorrectly sends prompts to stderr, not stdout, so we
|
||||
# use print_() explicitly to display prompts.
|
||||
# http://bugs.python.org/issue1927
|
||||
# https://bugs.python.org/issue1927
|
||||
if prompt:
|
||||
print_(prompt, end=u' ')
|
||||
print_(prompt, end=' ')
|
||||
|
||||
try:
|
||||
resp = input()
|
||||
except EOFError:
|
||||
raise UserError(u'stdin stream ended while input required')
|
||||
raise UserError('stdin stream ended while input required')
|
||||
|
||||
if six.PY2:
|
||||
return resp.decode(_in_encoding(), 'ignore')
|
||||
else:
|
||||
return resp
|
||||
return resp
|
||||
|
||||
|
||||
def input_options(options, require=False, prompt=None, fallback_prompt=None,
@@ -257,7 +245,7 @@ def input_options(options, require=False, prompt=None, fallback_prompt=None,
found_letter = letter
break
else:
raise ValueError(u'no unambiguous lettering found')
raise ValueError('no unambiguous lettering found')

letters[found_letter.lower()] = option
index = option.index(found_letter)
@@ -265,7 +253,7 @@ def input_options(options, require=False, prompt=None, fallback_prompt=None,
# Mark the option's shortcut letter for display.
if not require and (
(default is None and not numrange and first) or
(isinstance(default, six.string_types) and
(isinstance(default, str) and
found_letter.lower() == default.lower())):
# The first option is the default; mark it.
show_letter = '[%s]' % found_letter.upper()
@@ -301,11 +289,11 @@ def input_options(options, require=False, prompt=None, fallback_prompt=None,
prompt_part_lengths = []
if numrange:
if isinstance(default, int):
default_name = six.text_type(default)
default_name = str(default)
default_name = colorize('action_default', default_name)
tmpl = '# selection (default %s)'
prompt_parts.append(tmpl % default_name)
prompt_part_lengths.append(len(tmpl % six.text_type(default)))
prompt_part_lengths.append(len(tmpl % str(default)))
else:
prompt_parts.append('# selection')
prompt_part_lengths.append(len(prompt_parts[-1]))
@@ -340,9 +328,9 @@ def input_options(options, require=False, prompt=None, fallback_prompt=None,
# Make a fallback prompt too. This is displayed if the user enters
# something that is not recognized.
if not fallback_prompt:
fallback_prompt = u'Enter one of '
fallback_prompt = 'Enter one of '
if numrange:
fallback_prompt += u'%i-%i, ' % numrange
fallback_prompt += '%i-%i, ' % numrange
fallback_prompt += ', '.join(display_letters) + ':'

resp = input_(prompt)
@@ -381,34 +369,41 @@ def input_yn(prompt, require=False):
"yes" unless `require` is `True`, in which case there is no default.
"""
sel = input_options(
('y', 'n'), require, prompt, u'Enter Y or N:'
('y', 'n'), require, prompt, 'Enter Y or N:'
)
return sel == u'y'
return sel == 'y'


def input_select_objects(prompt, objs, rep):
def input_select_objects(prompt, objs, rep, prompt_all=None):
"""Prompt to user to choose all, none, or some of the given objects.
Return the list of selected objects.

`prompt` is the prompt string to use for each question (it should be
phrased as an imperative verb). `rep` is a function to call on each
object to print it out when confirming objects individually.
phrased as an imperative verb). If `prompt_all` is given, it is used
instead of `prompt` for the first (yes/no/select) question.
`rep` is a function to call on each object to print it out when confirming
objects individually.
"""
choice = input_options(
(u'y', u'n', u's'), False,
u'%s? (Yes/no/select)' % prompt)
('y', 'n', 's'), False,
'%s? (Yes/no/select)' % (prompt_all or prompt))
print()  # Blank line.

if choice == u'y':  # Yes.
if choice == 'y':  # Yes.
return objs

elif choice == u's':  # Select.
elif choice == 's':  # Select.
out = []
for obj in objs:
rep(obj)
if input_yn(u'%s? (yes/no)' % prompt, True):
answer = input_options(
('y', 'n', 'q'), True, '%s? (yes/no/quit)' % prompt,
'Enter Y or N:'
)
if answer == 'y':
out.append(obj)
print()  # go to a new line
elif answer == 'q':
return out
return out

else:  # No.
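As a usage sketch (the objects and the rep callback are illustrative, not from the diff), the new 'q' answer lets a user stop early and keep only what was already confirmed:

def rep(obj):
    print_(str(obj))

# One yes/no/select question up front; with 's', each object gets y/n/q.
chosen = input_select_objects(
    'Modify', ['track 1', 'track 2', 'track 3'], rep,
    prompt_all='Modify all three tracks',
)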
@@ -419,14 +414,14 @@ def input_select_objects(prompt, objs, rep):

def human_bytes(size):
"""Formats size, a number of bytes, in a human-readable way."""
powers = [u'', u'K', u'M', u'G', u'T', u'P', u'E', u'Z', u'Y', u'H']
powers = ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y', 'H']
unit = 'B'
for power in powers:
if size < 1024:
return u"%3.1f %s%s" % (size, power, unit)
return f"{size:3.1f} {power}{unit}"
size /= 1024.0
unit = u'iB'
return u"big"
unit = 'iB'
return "big"

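A few spot checks of the f-string version (values follow directly from repeated division by 1024; they are not from the diff):

human_bytes(0)       # '0.0 B'
human_bytes(2048)    # '2.0 KiB'
human_bytes(3.5e9)   # '3.3 GiB'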
def human_seconds(interval):
@@ -434,13 +429,13 @@ def human_seconds(interval):
interval using English words.
"""
units = [
(1, u'second'),
(60, u'minute'),
(60, u'hour'),
(24, u'day'),
(7, u'week'),
(52, u'year'),
(10, u'decade'),
(1, 'second'),
(60, 'minute'),
(60, 'hour'),
(24, 'day'),
(7, 'week'),
(52, 'year'),
(10, 'decade'),
]
for i in range(len(units) - 1):
increment, suffix = units[i]
@@ -453,7 +448,7 @@ def human_seconds(interval):
increment, suffix = units[-1]
interval /= float(increment)

return u"%3.1f %ss" % (interval, suffix)
return f"{interval:3.1f} {suffix}s"

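Each tuple is a multiplier on the previous unit (60 seconds, then 60 minutes, then 24 hours, and so on), so for example:

human_seconds(30)      # '30.0 seconds'
human_seconds(90)      # '1.5 minutes'
human_seconds(86400)   # '1.0 days'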
def human_seconds_short(interval):
@@ -461,13 +456,13 @@ def human_seconds_short(interval):
string.
"""
interval = int(interval)
return u'%i:%02i' % (interval // 60, interval % 60)
return '%i:%02i' % (interval // 60, interval % 60)


# Colorization.

# ANSI terminal colorization code heavily inspired by pygments:
# http://dev.pocoo.org/hg/pygments-main/file/b2deea5b5030/pygments/console.py
# https://bitbucket.org/birkenfeld/pygments-main/src/default/pygments/console.py
# (pygments is by Tim Hatch, Armin Ronacher, et al.)
COLOR_ESCAPE = "\x1b["
DARK_COLORS = {
@@ -514,7 +509,7 @@ def _colorize(color, text):
elif color in LIGHT_COLORS:
escape = COLOR_ESCAPE + "%i;01m" % (LIGHT_COLORS[color] + 30)
else:
raise ValueError(u'no such color %s', color)
raise ValueError('no such color %s', color)
return escape + text + RESET_COLOR

@@ -522,22 +517,22 @@ def colorize(color_name, text):
"""Colorize text if colored output is enabled. (Like _colorize but
conditional.)
"""
if config['ui']['color']:
global COLORS
if not COLORS:
COLORS = dict((name,
config['ui']['colors'][name].as_str())
for name in COLOR_NAMES)
# In case a 3rd party plugin is still passing the actual color ('red')
# instead of the abstract color name ('text_error')
color = COLORS.get(color_name)
if not color:
log.debug(u'Invalid color_name: {0}', color_name)
color = color_name
return _colorize(color, text)
else:
if not config['ui']['color'] or 'NO_COLOR' in os.environ.keys():
return text

global COLORS
if not COLORS:
COLORS = {name:
config['ui']['colors'][name].as_str()
for name in COLOR_NAMES}
# In case a 3rd party plugin is still passing the actual color ('red')
# instead of the abstract color name ('text_error')
color = COLORS.get(color_name)
if not color:
log.debug('Invalid color_name: {0}', color_name)
color = color_name
return _colorize(color, text)

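The rewritten guard also honors the NO_COLOR convention (https://no-color.org/): the variable's mere presence disables color, regardless of its value. The check in isolation, as a hedged sketch:

import os

def color_enabled(config_flag):
    # NO_COLOR takes precedence over the configuration flag.
    return config_flag and 'NO_COLOR' not in os.environ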
def _colordiff(a, b, highlight='text_highlight',
minor_highlight='text_highlight_minor'):
@@ -546,11 +541,11 @@ def _colordiff(a, b, highlight='text_highlight',
highlighted intelligently to show differences; other values are
stringified and highlighted in their entirety.
"""
if not isinstance(a, six.string_types) \
or not isinstance(b, six.string_types):
if not isinstance(a, str) \
or not isinstance(b, str):
# Non-strings: use ordinary equality.
a = six.text_type(a)
b = six.text_type(b)
a = str(a)
b = str(b)
if a == b:
return a, b
else:
@@ -588,7 +583,7 @@ def _colordiff(a, b, highlight='text_highlight',
else:
assert(False)

return u''.join(a_out), u''.join(b_out)
return ''.join(a_out), ''.join(b_out)


def colordiff(a, b, highlight='text_highlight'):
@@ -598,7 +593,7 @@ def colordiff(a, b, highlight='text_highlight'):
if config['ui']['color']:
return _colordiff(a, b, highlight)
else:
return six.text_type(a), six.text_type(b)
return str(a), str(b)

def get_path_formats(subview=None):
@@ -609,12 +604,12 @@ def get_path_formats(subview=None):
subview = subview or config['paths']
for query, view in subview.items():
query = PF_KEY_QUERIES.get(query, query)  # Expand common queries.
path_formats.append((query, Template(view.as_str())))
path_formats.append((query, template(view.as_str())))
return path_formats


def get_replacements():
"""Confit validation function that reads regex/string pairs.
"""Confuse validation function that reads regex/string pairs.
"""
replacements = []
for pattern, repl in config['replace'].get(dict).items():
@@ -623,7 +618,7 @@ def get_replacements():
replacements.append((re.compile(pattern), repl))
except re.error:
raise UserError(
u'malformed regular expression in replace: {0}'.format(
'malformed regular expression in replace: {}'.format(
pattern
)
)
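To make the error path concrete: the function compiles each key of the `replace` mapping as a regex, so one bad pattern aborts with a UserError naming it (UserError as in the surrounding module). A standalone sketch with an illustrative mapping:

import re

replace_config = {r'[\\/]': '_', r'\s+$': ''}
replacements = []
for pattern, repl in replace_config.items():
    try:
        replacements.append((re.compile(pattern), repl))
    except re.error:
        raise UserError(
            'malformed regular expression in replace: {}'.format(pattern))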
@@ -644,7 +639,7 @@ def term_width():

try:
buf = fcntl.ioctl(0, termios.TIOCGWINSZ, ' ' * 4)
except IOError:
except OSError:
return fallback
try:
height, width = struct.unpack('hh', buf)
@@ -656,10 +651,10 @@ def term_width():
FLOAT_EPSILON = 0.01

def _field_diff(field, old, new):
"""Given two Model objects, format their values for `field` and
highlight changes among them. Return a human-readable string. If the
value has not changed, return None instead.
def _field_diff(field, old, old_fmt, new, new_fmt):
"""Given two Model objects and their formatted views, format their values
for `field` and highlight changes among them. Return a human-readable
string. If the value has not changed, return None instead.
"""
oldval = old.get(field)
newval = new.get(field)
@@ -672,18 +667,18 @@ def _field_diff(field, old, new):
return None

# Get formatted values for output.
oldstr = old.formatted().get(field, u'')
newstr = new.formatted().get(field, u'')
oldstr = old_fmt.get(field, '')
newstr = new_fmt.get(field, '')

# For strings, highlight changes. For others, colorize the whole
# thing.
if isinstance(oldval, six.string_types):
if isinstance(oldval, str):
oldstr, newstr = colordiff(oldval, newstr)
else:
oldstr = colorize('text_error', oldstr)
newstr = colorize('text_error', newstr)

return u'{0} -> {1}'.format(oldstr, newstr)
return f'{oldstr} -> {newstr}'

def show_model_changes(new, old=None, fields=None, always=False):
@@ -698,6 +693,11 @@ def show_model_changes(new, old=None, fields=None, always=False):
"""
old = old or new._db._get(type(new), new.id)

# Keep the formatted views around instead of re-creating them in each
# iteration step
old_fmt = old.formatted()
new_fmt = new.formatted()

# Build up lines showing changed fields.
changes = []
for field in old:
@@ -706,25 +706,25 @@ def show_model_changes(new, old=None, fields=None, always=False):
continue

# Detect and show difference for this field.
line = _field_diff(field, old, new)
line = _field_diff(field, old, old_fmt, new, new_fmt)
if line:
changes.append(u'  {0}: {1}'.format(field, line))
changes.append(f'  {field}: {line}')

# New fields.
for field in set(new) - set(old):
if fields and field not in fields:
continue

changes.append(u'  {0}: {1}'.format(
changes.append('  {}: {}'.format(
field,
colorize('text_highlight', new.formatted()[field])
colorize('text_highlight', new_fmt[field])
))

# Print changes.
if changes or always:
print_(format(old))
if changes:
print_(u'\n'.join(changes))
print_('\n'.join(changes))

return bool(changes)

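The point of threading old_fmt/new_fmt through is that formatted() is now computed once per model rather than once per field; with N fields that is 2 calls instead of roughly 2N. Schematically:

old_fmt = old.formatted()   # computed once...
new_fmt = new.formatted()
for field in old:
    line = _field_diff(field, old, old_fmt, new, new_fmt)  # ...reused here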
@@ -757,18 +757,55 @@ def show_path_changes(path_changes):
if max_width > col_width:
# Print every change over two lines
for source, dest in zip(sources, destinations):
log.info(u'{0} \n  -> {1}', source, dest)
color_source, color_dest = colordiff(source, dest)
print_('{0} \n  -> {1}'.format(color_source, color_dest))
else:
# Print every change on a single line, and add a header
title_pad = max_width - len('Source ') + len(' -> ')

log.info(u'Source {0} Destination', ' ' * title_pad)
print_('Source {0} Destination'.format(' ' * title_pad))
for source, dest in zip(sources, destinations):
pad = max_width - len(source)
log.info(u'{0} {1} -> {2}', source, ' ' * pad, dest)
color_source, color_dest = colordiff(source, dest)
print_('{0} {1} -> {2}'.format(
color_source,
' ' * pad,
color_dest,
))

class CommonOptionsParser(optparse.OptionParser, object):
# Helper functions for option parsing.

def _store_dict(option, opt_str, value, parser):
"""Custom action callback to parse options which have ``key=value``
pairs as values. All such pairs passed for this option are
aggregated into a dictionary.
"""
dest = option.dest
option_values = getattr(parser.values, dest, None)

if option_values is None:
# This is the first supplied ``key=value`` pair of option.
# Initialize empty dictionary and get a reference to it.
setattr(parser.values, dest, {})
option_values = getattr(parser.values, dest)

# Decode the argument using the platform's argument encoding.
value = util.text_string(value, util.arg_encoding())

try:
key, value = value.split('=', 1)
if not (key and value):
raise ValueError
except ValueError:
raise UserError(
"supplied argument `{}' is not of the form `key=value'"
.format(value))

option_values[key] = value

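A hedged usage sketch for the callback (the --set option name and the sample values are illustrative, not part of the diff):

import optparse

parser = optparse.OptionParser()
parser.add_option('--set', dest='values', type='string',
                  action='callback', callback=_store_dict)
opts, _ = parser.parse_args(['--set', 'genre=jazz', '--set', 'year=2020'])
assert opts.values == {'genre': 'jazz', 'year': '2020'}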
class CommonOptionsParser(optparse.OptionParser):
"""Offers a simple way to add common formatting options.

Options available include:
@@ -783,8 +820,9 @@ class CommonOptionsParser(optparse.OptionParser, object):

Each method is fully documented in the related method.
"""

def __init__(self, *args, **kwargs):
super(CommonOptionsParser, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self._album_flags = False
# this serves both as an indicator that we offer the feature AND allows
# us to check whether it has been specified on the CLI - bypassing the
@@ -798,7 +836,7 @@ class CommonOptionsParser(optparse.OptionParser, object):
Sets the album property on the options extracted from the CLI.
"""
album = optparse.Option(*flags, action='store_true',
help=u'match albums instead of tracks')
help='match albums instead of tracks')
self.add_option(album)
self._album_flags = set(flags)

@@ -816,7 +854,7 @@ class CommonOptionsParser(optparse.OptionParser, object):
elif value:
value, = decargs([value])
else:
value = u''
value = ''

parser.values.format = value
if target:
@@ -843,14 +881,14 @@ class CommonOptionsParser(optparse.OptionParser, object):
By default this affects both items and albums. If add_album_option()
is used then the target will be autodetected.

Sets the format property to u'$path' on the options extracted from the
Sets the format property to '$path' on the options extracted from the
CLI.
"""
path = optparse.Option(*flags, nargs=0, action='callback',
callback=self._set_format,
callback_kwargs={'fmt': u'$path',
callback_kwargs={'fmt': '$path',
'store_true': True},
help=u'print paths for matched items or albums')
help='print paths for matched items or albums')
self.add_option(path)

def add_format_option(self, flags=('-f', '--format'), target=None):
@@ -870,7 +908,7 @@ class CommonOptionsParser(optparse.OptionParser, object):
"""
kwargs = {}
if target:
if isinstance(target, six.string_types):
if isinstance(target, str):
target = {'item': library.Item,
'album': library.Album}[target]
kwargs['target'] = target
@@ -878,7 +916,7 @@ class CommonOptionsParser(optparse.OptionParser, object):
opt = optparse.Option(*flags, action='callback',
callback=self._set_format,
callback_kwargs=kwargs,
help=u'print with custom format')
help='print with custom format')
self.add_option(opt)

def add_all_common_options(self):
@@ -893,14 +931,15 @@ class CommonOptionsParser(optparse.OptionParser, object):
#
# This is a fairly generic subcommand parser for optparse. It is
# maintained externally here:
# http://gist.github.com/462717
# https://gist.github.com/462717
# There you will also find a better description of the code and a more
# succinct example program.

class Subcommand(object):
class Subcommand:
"""A subcommand of a root command-line application that may be
invoked by a SubcommandOptionParser.
"""

def __init__(self, name, parser=None, help='', aliases=(), hide=False):
"""Creates a new subcommand. name is the primary way to invoke
the subcommand; aliases are alternate names. parser is an
@@ -928,7 +967,7 @@ class Subcommand(object):
@root_parser.setter
def root_parser(self, root_parser):
self._root_parser = root_parser
self.parser.prog = '{0} {1}'.format(
self.parser.prog = '{} {}'.format(
as_string(root_parser.get_prog_name()), self.name)


@@ -944,13 +983,13 @@ class SubcommandsOptionParser(CommonOptionsParser):
"""
# A more helpful default usage.
if 'usage' not in kwargs:
kwargs['usage'] = u"""
kwargs['usage'] = """
%prog COMMAND [ARGS...]
%prog help COMMAND"""
kwargs['add_help_option'] = False

# Super constructor.
super(SubcommandsOptionParser, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)

# Our root parser needs to stop on the first unrecognized argument.
self.disable_interspersed_args()
@@ -967,7 +1006,7 @@ class SubcommandsOptionParser(CommonOptionsParser):
# Add the list of subcommands to the help message.
def format_help(self, formatter=None):
# Get the original help message, to which we will append.
out = super(SubcommandsOptionParser, self).format_help(formatter)
out = super().format_help(formatter)
if formatter is None:
formatter = self.formatter

@@ -1053,7 +1092,7 @@ class SubcommandsOptionParser(CommonOptionsParser):
cmdname = args.pop(0)
subcommand = self._subcommand_for_name(cmdname)
if not subcommand:
raise UserError(u"unknown command '{0}'".format(cmdname))
raise UserError(f"unknown command '{cmdname}'")

suboptions, subargs = subcommand.parse_args(args)
return subcommand, suboptions, subargs
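Typical construction, in the style of beets plugins (the command body is a placeholder, not from this diff):

stats = Subcommand('stats', help='show statistics', aliases=('st',))

def stats_func(lib, opts, args):
    print_('...')  # placeholder action

stats.func = stats_func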
@@ -1064,26 +1103,32 @@ optparse.Option.ALWAYS_TYPED_ACTIONS += ('callback',)

# The main entry point and bootstrapping.

def _load_plugins(config):
"""Load the plugins specified in the configuration.
def _load_plugins(options, config):
"""Load the plugins specified on the command line or in the configuration.
"""
paths = config['pluginpath'].as_str_seq(split=False)
paths = [util.normpath(p) for p in paths]
log.debug(u'plugin paths: {0}', util.displayable_path(paths))
log.debug('plugin paths: {0}', util.displayable_path(paths))

# On Python 3, the search paths need to be unicode.
paths = [util.py3_path(p) for p in paths]

# Extend the `beetsplug` package to include the plugin paths.
import beetsplug
beetsplug.__path__ = paths + beetsplug.__path__
beetsplug.__path__ = paths + list(beetsplug.__path__)

# For backwards compatibility, also support plugin paths that
# *contain* a `beetsplug` package.
sys.path += paths

plugins.load_plugins(config['plugins'].as_str_seq())
plugins.send("pluginload")
# If we were given any plugins on the command line, use those.
if options.plugins is not None:
plugin_list = (options.plugins.split(',')
if len(options.plugins) > 0 else [])
else:
plugin_list = config['plugins'].as_str_seq()

plugins.load_plugins(plugin_list)
return plugins


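Note the three-way behavior of -p/--plugins: absent means fall back to the config, an empty string means load nothing, and otherwise the value is split on commas. Reduced to a helper (a sketch of the same decision, not the shipped code):

def plugin_list_from_cli(arg):
    if arg is None:
        return None                       # caller falls back to config['plugins']
    return arg.split(',') if arg else []  # "-p ''" disables all plugins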
@@ -1097,7 +1142,20 @@ def _setup(options, lib=None):

config = _configure(options)

plugins = _load_plugins(config)
plugins = _load_plugins(options, config)

# Add types and queries defined by plugins.
plugin_types_album = plugins.types(library.Album)
library.Album._types.update(plugin_types_album)
item_types = plugin_types_album.copy()
item_types.update(library.Item._types)
item_types.update(plugins.types(library.Item))
library.Item._types = item_types

library.Item._queries.update(plugins.named_queries(library.Item))
library.Album._queries.update(plugins.named_queries(library.Album))

plugins.send("pluginload")

# Get the default subcommands.
from beets.ui.commands import default_commands
@@ -1108,8 +1166,6 @@ def _setup(options, lib=None):
if lib is None:
lib = _open_library(config)
plugins.send("library_opened", lib=lib)
library.Item._types.update(plugins.types(library.Item))
library.Album._types.update(plugins.types(library.Album))

return subcommands, plugins, lib

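The merge order encodes a precedence: plugin album types form the base, the built-in Item._types override them, and plugin item types win overall. With dict.update(), later updates always win, e.g.:

base = {'a': 1, 'b': 1}
merged = base.copy()
merged.update({'b': 2})            # built-ins override the base
merged.update({'b': 3, 'c': 3})    # the last update wins
# merged == {'a': 1, 'b': 3, 'c': 3}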
@@ -1121,9 +1177,11 @@ def _configure(options):
# special handling lets specified plugins get loaded before we
# finish parsing the command line.
if getattr(options, 'config', None) is not None:
config_path = options.config
overlay_path = options.config
del options.config
config.set_file(config_path)
config.set_file(overlay_path)
else:
overlay_path = None
config.set_args(options)

# Configure the logger.
@@ -1132,15 +1190,19 @@ def _configure(options):
else:
log.set_global_level(logging.INFO)

if overlay_path:
log.debug('overlaying configuration: {0}',
util.displayable_path(overlay_path))

config_path = config.user_config_path()
if os.path.isfile(config_path):
log.debug(u'user configuration: {0}',
log.debug('user configuration: {0}',
util.displayable_path(config_path))
else:
log.debug(u'no user configuration found at {0}',
log.debug('no user configuration found at {0}',
util.displayable_path(config_path))

log.debug(u'data directory: {0}',
log.debug('data directory: {0}',
util.displayable_path(config.config_dir()))
return config

@@ -1157,13 +1219,14 @@ def _open_library(config):
get_replacements(),
)
lib.get_item(0)  # Test database connection.
except (sqlite3.OperationalError, sqlite3.DatabaseError):
log.debug(u'{}', traceback.format_exc())
raise UserError(u"database file {0} could not be opened".format(
util.displayable_path(dbpath)
except (sqlite3.OperationalError, sqlite3.DatabaseError) as db_error:
log.debug('{}', traceback.format_exc())
raise UserError("database file {} could not be opened: {}".format(
util.displayable_path(dbpath),
db_error
))
log.debug(u'library database: {0}\n'
u'library directory: {1}',
log.debug('library database: {0}\n'
'library directory: {1}',
util.displayable_path(lib.path),
util.displayable_path(lib.directory))
return lib
@@ -1177,15 +1240,17 @@ def _raw_main(args, lib=None):
parser.add_format_option(flags=('--format-item',), target=library.Item)
parser.add_format_option(flags=('--format-album',), target=library.Album)
parser.add_option('-l', '--library', dest='library',
help=u'library database file to use')
help='library database file to use')
parser.add_option('-d', '--directory', dest='directory',
help=u"destination music directory")
help="destination music directory")
parser.add_option('-v', '--verbose', dest='verbose', action='count',
help=u'log more details (use twice for even more)')
help='log more details (use twice for even more)')
parser.add_option('-c', '--config', dest='config',
help=u'path to configuration file')
help='path to configuration file')
parser.add_option('-p', '--plugins', dest='plugins',
help='a comma-separated list of plugins to load')
parser.add_option('-h', '--help', dest='help', action='store_true',
help=u'show this help message and exit')
help='show this help message and exit')
parser.add_option('--version', dest='version', action='store_true',
help=optparse.SUPPRESS_HELP)

@@ -1220,7 +1285,7 @@ def main(args=None):
_raw_main(args)
except UserError as exc:
message = exc.args[0] if exc.args else None
log.error(u'error: {0}', message)
log.error('error: {0}', message)
sys.exit(1)
except util.HumanReadableException as exc:
exc.log(log)
@@ -1231,18 +1296,25 @@ def main(args=None):
log.debug('{}', traceback.format_exc())
log.error('{}', exc)
sys.exit(1)
except confit.ConfigError as exc:
log.error(u'configuration error: {0}', exc)
except confuse.ConfigError as exc:
log.error('configuration error: {0}', exc)
sys.exit(1)
except db_query.InvalidQueryError as exc:
log.error(u'invalid query: {0}', exc)
log.error('invalid query: {0}', exc)
sys.exit(1)
except IOError as exc:
except OSError as exc:
if exc.errno == errno.EPIPE:
# "Broken pipe". End silently.
pass
sys.stderr.close()
else:
raise
except KeyboardInterrupt:
# Silently ignore ^C except in verbose mode.
log.debug(u'{}', traceback.format_exc())
log.debug('{}', traceback.format_exc())
except db.DBAccessError as exc:
log.error(
'database access error: {0}\n'
'the library file might have a permissions problem',
exc
)
sys.exit(1)
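Replacing `pass` with `sys.stderr.close()` matters for pipelines like `beet ls | head`: once the reader exits, any further write, including the interpreter's flush of stderr at shutdown, would raise a second BrokenPipeError. The pattern in isolation (run() stands in for _raw_main(args)):

import errno, sys

try:
    run()
except OSError as exc:
    if exc.errno == errno.EPIPE:
        sys.stderr.close()  # silence the flush-on-exit noise
    else:
        raise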
(File diff suppressed because it is too large.)

lib/beets/ui/completion_base.sh (Executable file → Normal file)
@@ -70,7 +70,7 @@ _beet_dispatch() {

# Replace command shortcuts
if [[ -n $cmd ]] && _list_include_item "$aliases" "$cmd"; then
eval "cmd=\$alias__$cmd"
eval "cmd=\$alias__${cmd//-/_}"
fi

case $cmd in
@@ -94,8 +94,8 @@ _beet_dispatch() {
_beet_complete() {
if [[ $cur == -* ]]; then
local opts flags completions
eval "opts=\$opts__$cmd"
eval "flags=\$flags__$cmd"
eval "opts=\$opts__${cmd//-/_}"
eval "flags=\$flags__${cmd//-/_}"
completions="${flags___common} ${opts} ${flags}"
COMPREPLY+=( $(compgen -W "$completions" -- $cur) )
else
@@ -129,7 +129,7 @@ _beet_complete_global() {
COMPREPLY+=( $(compgen -W "$completions" -- $cur) )
elif [[ -n $cur ]] && _list_include_item "$aliases" "$cur"; then
local cmd
eval "cmd=\$alias__$cur"
eval "cmd=\$alias__${cur//-/_}"
COMPREPLY+=( "$cmd" )
else
COMPREPLY+=( $(compgen -W "$commands" -- $cur) )
@@ -138,7 +138,7 @@ _beet_complete_global() {

_beet_complete_query() {
local opts
eval "opts=\$opts__$cmd"
eval "opts=\$opts__${cmd//-/_}"

if [[ $cur == -* ]] || _list_include_item "$opts" "$prev"; then
_beet_complete

lib/beets/util/__init__.py (Executable file → Normal file)
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -15,27 +14,28 @@

"""Miscellaneous utility functions."""

from __future__ import division, absolute_import, print_function
import os
import sys
import errno
import locale
import re
import tempfile
import shutil
import fnmatch
from collections import Counter
import functools
from collections import Counter, namedtuple
from multiprocessing.pool import ThreadPool
import traceback
import subprocess
import platform
import shlex
from beets.util import hidden
import six
from unidecode import unidecode
from enum import Enum


MAX_FILENAME_LENGTH = 200
WINDOWS_MAGIC_PREFIX = u'\\\\?\\'
SNI_SUPPORTED = sys.version_info >= (2, 7, 9)
WINDOWS_MAGIC_PREFIX = '\\\\?\\'

class HumanReadableException(Exception):
@@ -57,27 +57,27 @@ class HumanReadableException(Exception):
self.reason = reason
self.verb = verb
self.tb = tb
super(HumanReadableException, self).__init__(self.get_message())
super().__init__(self.get_message())

def _gerund(self):
"""Generate a (likely) gerund form of the English verb.
"""
if u' ' in self.verb:
if ' ' in self.verb:
return self.verb
gerund = self.verb[:-1] if self.verb.endswith(u'e') else self.verb
gerund += u'ing'
gerund = self.verb[:-1] if self.verb.endswith('e') else self.verb
gerund += 'ing'
return gerund

def _reasonstr(self):
"""Get the reason as a string."""
if isinstance(self.reason, six.text_type):
if isinstance(self.reason, str):
return self.reason
elif isinstance(self.reason, bytes):
return self.reason.decode('utf-8', 'ignore')
elif hasattr(self.reason, 'strerror'):  # i.e., EnvironmentError
return self.reason.strerror
else:
return u'"{0}"'.format(six.text_type(self.reason))
return '"{}"'.format(str(self.reason))

def get_message(self):
"""Create the human-readable description of the error, sans
@@ -91,7 +91,7 @@ class HumanReadableException(Exception):
"""
if self.tb:
logger.debug(self.tb)
logger.error(u'{0}: {1}', self.error_kind, self.args[0])
logger.error('{0}: {1}', self.error_kind, self.args[0])

class FilesystemError(HumanReadableException):
@@ -99,29 +99,41 @@ class FilesystemError(HumanReadableException):
via a function in this module. The `paths` field is a sequence of
pathnames involved in the operation.
"""

def __init__(self, reason, verb, paths, tb=None):
self.paths = paths
super(FilesystemError, self).__init__(reason, verb, tb)
super().__init__(reason, verb, tb)

def get_message(self):
# Use a nicer English phrasing for some specific verbs.
if self.verb in ('move', 'copy', 'rename'):
clause = u'while {0} {1} to {2}'.format(
clause = 'while {} {} to {}'.format(
self._gerund(),
displayable_path(self.paths[0]),
displayable_path(self.paths[1])
)
elif self.verb in ('delete', 'write', 'create', 'read'):
clause = u'while {0} {1}'.format(
clause = 'while {} {}'.format(
self._gerund(),
displayable_path(self.paths[0])
)
else:
clause = u'during {0} of paths {1}'.format(
self.verb, u', '.join(displayable_path(p) for p in self.paths)
clause = 'during {} of paths {}'.format(
self.verb, ', '.join(displayable_path(p) for p in self.paths)
)

return u'{0} {1}'.format(self._reasonstr(), clause)
return f'{self._reasonstr()} {clause}'


class MoveOperation(Enum):
"""The file operations that e.g. various move functions can carry out.
"""
MOVE = 0
COPY = 1
LINK = 2
HARDLINK = 3
REFLINK = 4
REFLINK_AUTO = 5

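A dispatch-table sketch showing how a caller might select among the operations (the table itself is an assumption for illustration, not part of this file):

OPERATIONS = {
    MoveOperation.MOVE: move,
    MoveOperation.COPY: copy,
    MoveOperation.LINK: link,
    MoveOperation.HARDLINK: hardlink,
}

def perform(op, path, dest):
    OPERATIONS[op](path, dest)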
def normpath(path):
@@ -172,7 +184,7 @@ def sorted_walk(path, ignore=(), ignore_hidden=False, logger=None):
contents = os.listdir(syspath(path))
except OSError as exc:
if logger:
logger.warning(u'could not list directory {0}: {1}'.format(
logger.warning('could not list directory {}: {}'.format(
displayable_path(path), exc.strerror
))
return
@@ -185,6 +197,10 @@ def sorted_walk(path, ignore=(), ignore_hidden=False, logger=None):
skip = False
for pat in ignore:
if fnmatch.fnmatch(base, pat):
if logger:
logger.debug('ignoring {} due to ignore rule {}'.format(
base, pat
))
skip = True
break
if skip:
@@ -207,8 +223,14 @@ def sorted_walk(path, ignore=(), ignore_hidden=False, logger=None):
for base in dirs:
cur = os.path.join(path, base)
# yield from sorted_walk(...)
for res in sorted_walk(cur, ignore, ignore_hidden, logger):
yield res
yield from sorted_walk(cur, ignore, ignore_hidden, logger)

def path_as_posix(path):
"""Return the string representation of the path with forward (/)
slashes.
"""
return path.replace(b'\\', b'/')


def mkdirall(path):
@@ -219,7 +241,7 @@ def mkdirall(path):
if not os.path.isdir(syspath(ancestor)):
try:
os.mkdir(syspath(ancestor))
except (OSError, IOError) as exc:
except OSError as exc:
raise FilesystemError(exc, 'create', (ancestor,),
traceback.format_exc())

@@ -272,13 +294,13 @@ def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
continue
clutter = [bytestring_path(c) for c in clutter]
match_paths = [bytestring_path(d) for d in os.listdir(directory)]
if fnmatch_all(match_paths, clutter):
# Directory contains only clutter (or nothing).
try:
try:
if fnmatch_all(match_paths, clutter):
# Directory contains only clutter (or nothing).
shutil.rmtree(directory)
except OSError:
else:
break
else:
except OSError:
break

@@ -357,18 +379,18 @@ def bytestring_path(path):
PATH_SEP = bytestring_path(os.sep)


def displayable_path(path, separator=u'; '):
def displayable_path(path, separator='; '):
"""Attempts to decode a bytestring path to a unicode object for the
purpose of displaying it to the user. If the `path` argument is a
list or a tuple, the elements are joined with `separator`.
"""
if isinstance(path, (list, tuple)):
return separator.join(displayable_path(p) for p in path)
elif isinstance(path, six.text_type):
elif isinstance(path, str):
return path
elif not isinstance(path, bytes):
# A non-string object: just get its unicode representation.
return six.text_type(path)
return str(path)

try:
return path.decode(_fsencoding(), 'ignore')
@@ -387,7 +409,7 @@ def syspath(path, prefix=True):
if os.path.__name__ != 'ntpath':
return path

if not isinstance(path, six.text_type):
if not isinstance(path, str):
# Beets currently represents Windows paths internally with UTF-8
# arbitrarily. But earlier versions used MBCS because it is
# reported as the FS encoding by Windows. Try both.
@@ -400,11 +422,11 @@ def syspath(path, prefix=True):
path = path.decode(encoding, 'replace')

# Add the magic prefix if it isn't already there.
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
if prefix and not path.startswith(WINDOWS_MAGIC_PREFIX):
if path.startswith(u'\\\\'):
if path.startswith('\\\\'):
# UNC path. Final path should look like \\?\UNC\...
path = u'UNC' + path[1:]
path = 'UNC' + path[1:]
path = WINDOWS_MAGIC_PREFIX + path

return path
@@ -412,6 +434,8 @@ def syspath(path, prefix=True):

def samefile(p1, p2):
"""Safer equality for paths."""
if p1 == p2:
return True
return shutil._samefile(syspath(p1), syspath(p2))


@@ -424,7 +448,7 @@ def remove(path, soft=True):
return
try:
os.remove(path)
except (OSError, IOError) as exc:
except OSError as exc:
raise FilesystemError(exc, 'delete', (path,), traceback.format_exc())

@@ -439,10 +463,10 @@ def copy(path, dest, replace=False):
path = syspath(path)
dest = syspath(dest)
if not replace and os.path.exists(dest):
raise FilesystemError(u'file exists', 'copy', (path, dest))
raise FilesystemError('file exists', 'copy', (path, dest))
try:
shutil.copyfile(path, dest)
except (OSError, IOError) as exc:
except OSError as exc:
raise FilesystemError(exc, 'copy', (path, dest),
traceback.format_exc())

@@ -455,24 +479,37 @@ def move(path, dest, replace=False):
instead, in which case metadata will *not* be preserved. Paths are
translated to system paths.
"""
if os.path.isdir(path):
raise FilesystemError(u'source is directory', 'move', (path, dest))
if os.path.isdir(dest):
raise FilesystemError(u'destination is directory', 'move',
(path, dest))
if samefile(path, dest):
return
path = syspath(path)
dest = syspath(dest)
if os.path.exists(dest) and not replace:
raise FilesystemError(u'file exists', 'rename', (path, dest))
raise FilesystemError('file exists', 'rename', (path, dest))

# First, try renaming the file.
try:
os.rename(path, dest)
os.replace(path, dest)
except OSError:
# Otherwise, copy and delete the original.
tmp = tempfile.mktemp(suffix='.beets',
prefix=py3_path(b'.' + os.path.basename(dest)),
dir=py3_path(os.path.dirname(dest)))
tmp = syspath(tmp)
try:
shutil.copyfile(path, dest)
shutil.copyfile(path, tmp)
os.replace(tmp, dest)
tmp = None
os.remove(path)
except (OSError, IOError) as exc:
except OSError as exc:
raise FilesystemError(exc, 'move', (path, dest),
traceback.format_exc())
finally:
if tmp is not None:
os.remove(tmp)

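The fallback branch is the classic atomic-replace idiom: copy into a temp file in the destination directory, os.replace() it into place (atomic within one filesystem), and only then delete the source; `tmp = None` marks success so the `finally` clause cleans up failures only. A self-contained variant of the same shape, using the safer mkstemp instead of mktemp:

import os, shutil, tempfile

def atomic_copy_move(src, dst):
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(dst))
    os.close(fd)
    try:
        shutil.copyfile(src, tmp)
        os.replace(tmp, dst)   # readers never observe a partial file
        tmp = None
        os.remove(src)
    finally:
        if tmp is not None:
            os.remove(tmp)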
def link(path, dest, replace=False):
@@ -484,18 +521,18 @@ def link(path, dest, replace=False):
return

if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError(u'file exists', 'rename', (path, dest))
raise FilesystemError('file exists', 'rename', (path, dest))
try:
os.symlink(syspath(path), syspath(dest))
except NotImplementedError:
# raised on python >= 3.2 and Windows versions before Vista
raise FilesystemError(u'OS does not support symbolic links.'
raise FilesystemError('OS does not support symbolic links.',
'link', (path, dest), traceback.format_exc())
except OSError as exc:
# TODO: Windows version checks can be removed for python 3
if hasattr(sys, 'getwindowsversion'):
if sys.getwindowsversion()[0] < 6:  # is before Vista
exc = u'OS does not support symbolic links.'
exc = 'OS does not support symbolic links.'
raise FilesystemError(exc, 'link', (path, dest),
traceback.format_exc())

@@ -509,21 +546,50 @@ def hardlink(path, dest, replace=False):
return

if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError(u'file exists', 'rename', (path, dest))
raise FilesystemError('file exists', 'rename', (path, dest))
try:
os.link(syspath(path), syspath(dest))
except NotImplementedError:
raise FilesystemError(u'OS does not support hard links.'
raise FilesystemError('OS does not support hard links.',
'link', (path, dest), traceback.format_exc())
except OSError as exc:
if exc.errno == errno.EXDEV:
raise FilesystemError(u'Cannot hard link across devices.'
raise FilesystemError('Cannot hard link across devices.',
'link', (path, dest), traceback.format_exc())
else:
raise FilesystemError(exc, 'link', (path, dest),
traceback.format_exc())

def reflink(path, dest, replace=False, fallback=False):
"""Create a reflink from `dest` to `path`.

Raise an `OSError` if `dest` already exists, unless `replace` is
True. If `path` == `dest`, then do nothing.

If reflinking fails and `fallback` is enabled, try copying the file
instead. Otherwise, raise an error without trying a plain copy.

May raise an `ImportError` if the `reflink` module is not available.
"""
import reflink as pyreflink

if samefile(path, dest):
return

if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError('file exists', 'rename', (path, dest))

try:
pyreflink.reflink(path, dest)
except (NotImplementedError, pyreflink.ReflinkImpossibleError):
if fallback:
copy(path, dest, replace)
else:
raise FilesystemError('OS/filesystem does not support reflinks.',
'link', (path, dest), traceback.format_exc())

def unique_path(path):
"""Returns a version of ``path`` that does not exist on the
filesystem. Specifically, if ``path`` itself already exists, then
@@ -541,22 +607,23 @@ def unique_path(path):
num = 0
while True:
num += 1
suffix = u'.{}'.format(num).encode() + ext
suffix = f'.{num}'.encode() + ext
new_path = base + suffix
if not os.path.exists(new_path):
return new_path

# Note: The Windows "reserved characters" are, of course, allowed on
|
||||
# Unix. They are forbidden here because they cause problems on Samba
|
||||
# shares, which are sufficiently common as to cause frequent problems.
|
||||
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
|
||||
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
|
||||
CHAR_REPLACE = [
|
||||
(re.compile(r'[\\/]'), u'_'), # / and \ -- forbidden everywhere.
|
||||
(re.compile(r'^\.'), u'_'), # Leading dot (hidden files on Unix).
|
||||
(re.compile(r'[\x00-\x1f]'), u''), # Control characters.
|
||||
(re.compile(r'[<>:"\?\*\|]'), u'_'), # Windows "reserved characters".
|
||||
(re.compile(r'\.$'), u'_'), # Trailing dots.
|
||||
(re.compile(r'\s+$'), u''), # Trailing whitespace.
|
||||
(re.compile(r'[\\/]'), '_'), # / and \ -- forbidden everywhere.
|
||||
(re.compile(r'^\.'), '_'), # Leading dot (hidden files on Unix).
|
||||
(re.compile(r'[\x00-\x1f]'), ''), # Control characters.
|
||||
(re.compile(r'[<>:"\?\*\|]'), '_'), # Windows "reserved characters".
|
||||
(re.compile(r'\.$'), '_'), # Trailing dots.
|
||||
(re.compile(r'\s+$'), ''), # Trailing whitespace.
|
||||
]
|
||||
|
||||
|
||||
@@ -680,36 +747,29 @@ def py3_path(path):
it is. So this function helps us "smuggle" the true bytes data
through APIs that took Python 3's Unicode mandate too seriously.
"""
if isinstance(path, six.text_type):
if isinstance(path, str):
return path
assert isinstance(path, bytes)
if six.PY2:
return path
return os.fsdecode(path)


def str2bool(value):
"""Returns a boolean reflecting a human-entered string."""
return value.lower() in (u'yes', u'1', u'true', u't', u'y')
return value.lower() in ('yes', '1', 'true', 't', 'y')


def as_string(value):
"""Convert a value to a Unicode object for matching with a query.
None becomes the empty string. Bytestrings are silently decoded.
"""
if six.PY2:
buffer_types = buffer, memoryview  # noqa: F821
else:
buffer_types = memoryview

if value is None:
return u''
elif isinstance(value, buffer_types):
return ''
elif isinstance(value, memoryview):
return bytes(value).decode('utf-8', 'ignore')
elif isinstance(value, bytes):
return value.decode('utf-8', 'ignore')
else:
return six.text_type(value)
return str(value)

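os.fsdecode() decodes with the filesystem encoding and surrogateescape, so arbitrary bytes survive a round trip (POSIX example; the filename is illustrative):

import os

raw = b'caf\xe9.mp3'             # not valid UTF-8
text = os.fsdecode(raw)          # undecodable bytes become surrogates
assert os.fsencode(text) == raw  # round-trips losslessly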
def text_string(value, encoding='utf-8'):
@@ -732,7 +792,7 @@ def plurality(objs):
"""
c = Counter(objs)
if not c:
raise ValueError(u'sequence must be non-empty')
raise ValueError('sequence must be non-empty')
return c.most_common(1)[0]

@@ -749,7 +809,11 @@ def cpu_count():
num = 0
elif sys.platform == 'darwin':
try:
num = int(command_output(['/usr/sbin/sysctl', '-n', 'hw.ncpu']))
num = int(command_output([
'/usr/sbin/sysctl',
'-n',
'hw.ncpu',
]).stdout)
except (ValueError, OSError, subprocess.CalledProcessError):
num = 0
else:
@@ -769,20 +833,23 @@ def convert_command_args(args):
assert isinstance(args, list)

def convert(arg):
if six.PY2:
if isinstance(arg, six.text_type):
arg = arg.encode(arg_encoding())
else:
if isinstance(arg, bytes):
arg = arg.decode(arg_encoding(), 'surrogateescape')
if isinstance(arg, bytes):
arg = arg.decode(arg_encoding(), 'surrogateescape')
return arg

return [convert(a) for a in args]

# stdout and stderr as bytes
CommandOutput = namedtuple("CommandOutput", ("stdout", "stderr"))


def command_output(cmd, shell=False):
"""Runs the command and returns its output after it has exited.

Returns a CommandOutput. The attributes ``stdout`` and ``stderr`` contain
byte strings of the respective output streams.

``cmd`` is a list of arguments starting with the command names. The
arguments are bytes on Unix and strings on Windows.
If ``shell`` is true, ``cmd`` is assumed to be a string and passed to a
@@ -797,10 +864,16 @@ def command_output(cmd, shell=False):
"""
cmd = convert_command_args(cmd)

try:  # python >= 3.3
devnull = subprocess.DEVNULL
except AttributeError:
devnull = open(os.devnull, 'r+b')

proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=devnull,
close_fds=platform.system() != 'Windows',
shell=shell
)
@@ -811,7 +884,7 @@ def command_output(cmd, shell=False):
cmd=' '.join(cmd),
output=stdout + stderr,
)
return stdout
return CommandOutput(stdout, stderr)

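Call sites that previously treated the return value as stdout now choose a stream explicitly, for example (the command is illustrative):

result = command_output(['ffmpeg', '-version'])
first_line = result.stdout.splitlines()[0]   # bytes, not str
errors = result.stderr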
def max_filename_length(path, limit=MAX_FILENAME_LENGTH):
@@ -858,25 +931,6 @@ def editor_command():
return open_anything()


def shlex_split(s):
"""Split a Unicode or bytes string according to shell lexing rules.

Raise `ValueError` if the string is not a well-formed shell string.
This is a workaround for a bug in some versions of Python.
"""
if not six.PY2 or isinstance(s, bytes):  # Shlex works fine.
return shlex.split(s)

elif isinstance(s, six.text_type):
# Work around a Python bug.
# http://bugs.python.org/issue6988
bs = s.encode('utf-8')
return [c.decode('utf-8') for c in shlex.split(bs)]

else:
raise TypeError(u'shlex_split called with non-string')

def interactive_open(targets, command):
"""Open the files in `targets` by `exec`ing a new `command`, given
as a Unicode string. (The new program takes over, and Python
@@ -888,7 +942,7 @@ def interactive_open(targets, command):

# Split the command string into its arguments.
try:
args = shlex_split(command)
args = shlex.split(command)
except ValueError:  # Malformed shell tokens.
args = [command]

@@ -903,7 +957,7 @@ def _windows_long_path_name(short_path):
"""Use Windows' `GetLongPathNameW` via ctypes to get the canonical,
long path given a short filename.
"""
if not isinstance(short_path, six.text_type):
if not isinstance(short_path, str):
short_path = short_path.decode(_fsencoding())
import ctypes
@@ -964,7 +1018,7 @@ def raw_seconds_short(string):
"""
match = re.match(r'^(\d+):([0-5]\d)$', string)
if not match:
raise ValueError(u'String not in M:SS format')
raise ValueError('String not in M:SS format')
minutes, seconds = map(int, match.groups())
return float(minutes * 60 + seconds)

@@ -991,3 +1045,59 @@ def asciify_path(path, sep_replace):
sep_replace
)
return os.sep.join(path_components)

def par_map(transform, items):
"""Apply the function `transform` to all the elements in the
iterable `items`, like `map(transform, items)` but with no return
value. The map *might* happen in parallel: it's parallel on Python 3
and sequential on Python 2.

The parallelism uses threads (not processes), so this is only useful
for IO-bound `transform`s.
"""
pool = ThreadPool()
pool.map(transform, items)
pool.close()
pool.join()

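Because the pool uses threads, this pays off only for I/O-bound transforms, for example (URLs illustrative):

import urllib.request

def fetch(url):
    with urllib.request.urlopen(url) as resp:
        resp.read()   # network-bound, so threads overlap the waits

par_map(fetch, ['https://example.com/a', 'https://example.com/b'])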
def lazy_property(func):
"""A decorator that creates a lazily evaluated property. On first access,
the property is assigned the return value of `func`. This first value is
stored, so that future accesses do not have to evaluate `func` again.

This behaviour is useful when `func` is expensive to evaluate, and it is
not certain that the result will be needed.
"""
field_name = '_' + func.__name__

@property
@functools.wraps(func)
def wrapper(self):
if hasattr(self, field_name):
return getattr(self, field_name)

value = func(self)
setattr(self, field_name, value)
return value

return wrapper

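Usage sketch (the expensive computation is a hypothetical stand-in):

class Library:
    @lazy_property
    def stats(self):
        return compute_stats()  # hypothetical expensive call

lib = Library()
lib.stats  # computes once, cached on lib._stats
lib.stats  # served from the cache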
def decode_commandline_path(path):
"""Prepare a path for substitution into commandline template.

On Python 3, we need to construct the subprocess commands to invoke as a
Unicode string. On Unix, this is a little unfortunate---the OS is
expecting bytes---so we use surrogate escaping and decode with the
argument encoding, which is the same encoding that will then be
*reversed* to recover the same bytes before invoking the OS. On
Windows, we want to preserve the Unicode filename "as is."
"""
# On Python 3, the template is a Unicode string, which only supports
# substitution of Unicode variables.
if platform.system() == 'Windows':
return path.decode(_fsencoding())
else:
return path.decode(arg_encoding(), 'surrogateescape')
lib/beets/util/artresizer.py (Executable file → Normal file)
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Fabrice Laporte
#
@@ -16,38 +15,39 @@
"""Abstraction layer to resize images using PIL, ImageMagick, or a
public resizing proxy if neither is available.
"""
from __future__ import division, absolute_import, print_function

import subprocess
import os
import os.path
import re
from tempfile import NamedTemporaryFile
from six.moves.urllib.parse import urlencode
from urllib.parse import urlencode
from beets import logging
from beets import util
import six

# Resizing methods
PIL = 1
IMAGEMAGICK = 2
WEBPROXY = 3

if util.SNI_SUPPORTED:
PROXY_URL = 'https://images.weserv.nl/'
else:
PROXY_URL = 'http://images.weserv.nl/'
PROXY_URL = 'https://images.weserv.nl/'

log = logging.getLogger('beets')

def resize_url(url, maxwidth):
def resize_url(url, maxwidth, quality=0):
"""Return a proxied image URL that resizes the original image to
maxwidth (preserving aspect ratio).
"""
return '{0}?{1}'.format(PROXY_URL, urlencode({
params = {
'url': url.replace('http://', ''),
'w': maxwidth,
}))
}

if quality > 0:
params['q'] = quality

return '{}?{}'.format(PROXY_URL, urlencode(params))

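With the optional quality in play, the proxy URL only gains a q parameter when one is requested (output shown under the assumption of dict insertion order, i.e. Python 3.7+):

resize_url('http://example.com/cover.jpg', 600)
# 'https://images.weserv.nl/?url=example.com%2Fcover.jpg&w=600'
resize_url('http://example.com/cover.jpg', 600, quality=75)
# 'https://images.weserv.nl/?url=example.com%2Fcover.jpg&w=600&q=75'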
def temp_file_for(path):
@@ -59,49 +59,102 @@ def temp_file_for(path):
return util.bytestring_path(f.name)


def pil_resize(maxwidth, path_in, path_out=None):
def pil_resize(maxwidth, path_in, path_out=None, quality=0, max_filesize=0):
"""Resize using Python Imaging Library (PIL). Return the output path
of resized image.
"""
path_out = path_out or temp_file_for(path_in)
from PIL import Image
log.debug(u'artresizer: PIL resizing {0} to {1}',

log.debug('artresizer: PIL resizing {0} to {1}',
util.displayable_path(path_in), util.displayable_path(path_out))

try:
im = Image.open(util.syspath(path_in))
size = maxwidth, maxwidth
im.thumbnail(size, Image.ANTIALIAS)
im.save(path_out)
return path_out
except IOError:
log.error(u"PIL cannot create thumbnail for '{0}'",

if quality == 0:
# Use PIL's default quality.
quality = -1

# progressive=False only affects JPEGs and is the default,
# but we include it here for explicitness.
im.save(util.py3_path(path_out), quality=quality, progressive=False)

if max_filesize > 0:
# If a maximum filesize is set, we attempt to lower the quality of
# the JPEG conversion by a proportional amount in repeated passes.
# First, set the maximum quality to either provided, or 95.
if quality > 0:
lower_qual = quality
else:
lower_qual = 95
for i in range(5):
# 5 attempts is an arbitrary choice.
filesize = os.stat(util.syspath(path_out)).st_size
log.debug("PIL pass {0}: output size: {1}B", i, filesize)
if filesize <= max_filesize:
return path_out
# The relationship between filesize & quality will be
# image dependent.
lower_qual -= 10
# Restrict quality from dropping below 10.
if lower_qual < 10:
lower_qual = 10
# Use the optimize flag to improve the filesize decrease.
im.save(util.py3_path(path_out), quality=lower_qual,
optimize=True, progressive=False)
log.warning("PIL failed to resize file to below {0}B",
max_filesize)
return path_out

else:
return path_out
except OSError:
log.error("PIL cannot create thumbnail for '{0}'",
util.displayable_path(path_in))
return path_in

def im_resize(maxwidth, path_in, path_out=None):
    """Resize using ImageMagick's ``convert`` tool.
    Return the output path of resized image.
def im_resize(maxwidth, path_in, path_out=None, quality=0, max_filesize=0):
    """Resize using ImageMagick.

    Use the ``magick`` program or ``convert`` on older versions. Return
    the output path of resized image.
    """
    path_out = path_out or temp_file_for(path_in)
    log.debug(u'artresizer: ImageMagick resizing {0} to {1}',
    log.debug('artresizer: ImageMagick resizing {0} to {1}',
              util.displayable_path(path_in), util.displayable_path(path_out))

    # "-resize widthxheight>" shrinks images with dimension(s) larger
    # than the corresponding width and/or height dimension(s). The >
    # "only shrink" flag is prefixed by ^ escape char for Windows
    # compatibility.
    # "-resize WIDTHx>" shrinks images with the width larger
    # than the given width while maintaining the aspect ratio
    # with regards to the height.
    # ImageMagick already seems to default to no interlace, but we include it
    # here for the sake of explicitness.
    cmd = ArtResizer.shared.im_convert_cmd + [
        util.syspath(path_in, prefix=False),
        '-resize', f'{maxwidth}x>',
        '-interlace', 'none',
    ]

    if quality > 0:
        cmd += ['-quality', f'{quality}']

    # "-define jpeg:extent=SIZEb" sets the target filesize for imagemagick to
    # SIZE in bytes.
    if max_filesize > 0:
        cmd += ['-define', f'jpeg:extent={max_filesize}b']

    cmd.append(util.syspath(path_out, prefix=False))

    try:
        util.command_output([
            'convert', util.syspath(path_in, prefix=False),
            '-resize', '{0}x^>'.format(maxwidth),
            util.syspath(path_out, prefix=False),
        ])
        util.command_output(cmd)
    except subprocess.CalledProcessError:
        log.warning(u'artresizer: IM convert failed for {0}',
        log.warning('artresizer: IM convert failed for {0}',
                    util.displayable_path(path_in))
        return path_in

    return path_out

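For concreteness, with maxwidth=500, quality=75 and max_filesize=100000, the new code assembles a command along these lines (paths are illustrative):

    magick /path/in.jpg -resize 500x> -interlace none \
        -quality 75 -define jpeg:extent=100000b /tmp/out.jpg

On legacy installations the leading `magick` is replaced by `convert`.
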
@@ -113,31 +166,33 @@ BACKEND_FUNCS = {

def pil_getsize(path_in):
    from PIL import Image

    try:
        im = Image.open(util.syspath(path_in))
        return im.size
    except IOError as exc:
        log.error(u"PIL could not read file {}: {}",
    except OSError as exc:
        log.error("PIL could not read file {}: {}",
                  util.displayable_path(path_in), exc)


def im_getsize(path_in):
    cmd = ['identify', '-format', '%w %h',
           util.syspath(path_in, prefix=False)]
    cmd = ArtResizer.shared.im_identify_cmd + \
        ['-format', '%w %h', util.syspath(path_in, prefix=False)]

    try:
        out = util.command_output(cmd)
        out = util.command_output(cmd).stdout
    except subprocess.CalledProcessError as exc:
        log.warning(u'ImageMagick size query failed')
        log.warning('ImageMagick size query failed')
        log.debug(
            u'`convert` exited with (status {}) when '
            u'getting size with command {}:\n{}',
            '`convert` exited with (status {}) when '
            'getting size with command {}:\n{}',
            exc.returncode, cmd, exc.output.strip()
        )
        return
    try:
        return tuple(map(int, out.split(b' ')))
    except IndexError:
        log.warning(u'Could not understand IM output: {0!r}', out)
        log.warning('Could not understand IM output: {0!r}', out)


BACKEND_GET_SIZE = {
@@ -146,24 +201,125 @@ BACKEND_GET_SIZE = {
}

def pil_deinterlace(path_in, path_out=None):
    path_out = path_out or temp_file_for(path_in)
    from PIL import Image

    try:
        im = Image.open(util.syspath(path_in))
        im.save(util.py3_path(path_out), progressive=False)
        return path_out
    except IOError:
        return path_in


def im_deinterlace(path_in, path_out=None):
    path_out = path_out or temp_file_for(path_in)

    cmd = ArtResizer.shared.im_convert_cmd + [
        util.syspath(path_in, prefix=False),
        '-interlace', 'none',
        util.syspath(path_out, prefix=False),
    ]

    try:
        util.command_output(cmd)
        return path_out
    except subprocess.CalledProcessError:
        return path_in


DEINTERLACE_FUNCS = {
    PIL: pil_deinterlace,
    IMAGEMAGICK: im_deinterlace,
}


def im_get_format(filepath):
    cmd = ArtResizer.shared.im_identify_cmd + [
        '-format', '%[magick]',
        util.syspath(filepath)
    ]

    try:
        return util.command_output(cmd).stdout
    except subprocess.CalledProcessError:
        return None


def pil_get_format(filepath):
    from PIL import Image, UnidentifiedImageError

    try:
        with Image.open(util.syspath(filepath)) as im:
            return im.format
    except (ValueError, TypeError, UnidentifiedImageError, FileNotFoundError):
        log.exception("failed to detect image format for {}", filepath)
        return None


BACKEND_GET_FORMAT = {
    PIL: pil_get_format,
    IMAGEMAGICK: im_get_format,
}


def im_convert_format(source, target, deinterlaced):
    cmd = ArtResizer.shared.im_convert_cmd + [
        util.syspath(source),
        *(["-interlace", "none"] if deinterlaced else []),
        util.syspath(target),
    ]

    try:
        subprocess.check_call(
            cmd,
            stderr=subprocess.DEVNULL,
            stdout=subprocess.DEVNULL
        )
        return target
    except subprocess.CalledProcessError:
        return source


def pil_convert_format(source, target, deinterlaced):
    from PIL import Image, UnidentifiedImageError

    try:
        with Image.open(util.syspath(source)) as im:
            im.save(util.py3_path(target), progressive=not deinterlaced)
            return target
    except (ValueError, TypeError, UnidentifiedImageError, FileNotFoundError,
            OSError):
        log.exception("failed to convert image {} -> {}", source, target)
        return source


BACKEND_CONVERT_IMAGE_FORMAT = {
    PIL: pil_convert_format,
    IMAGEMAGICK: im_convert_format,
}

class Shareable(type):
    """A pseudo-singleton metaclass that allows both shared and
    non-shared instances. The ``MyClass.shared`` property holds a
    lazily-created shared instance of ``MyClass`` while calling
    ``MyClass()`` to construct a new object works as usual.
    """
    def __init__(self, name, bases, dict):
        super(Shareable, self).__init__(name, bases, dict)
        self._instance = None

    def __init__(cls, name, bases, dict):
        super().__init__(name, bases, dict)
        cls._instance = None

    @property
    def shared(self):
        if self._instance is None:
            self._instance = self()
        return self._instance
    def shared(cls):
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

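The change only renames `self` to `cls` in the metaclass; behavior is unchanged. A quick standalone illustration of the shared/non-shared semantics described in the docstring:

    class Config(metaclass=Shareable):
        pass

    a = Config.shared   # lazily-created shared instance
    b = Config.shared   # same object: a is b
    c = Config()        # ordinary construction: c is not a
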
class ArtResizer(six.with_metaclass(Shareable, object)):
class ArtResizer(metaclass=Shareable):
    """A singleton class that performs image resizes.
    """

@@ -171,21 +327,44 @@ class ArtResizer(six.with_metaclass(Shareable, object)):
        """Create a resizer object with an inferred method.
        """
        self.method = self._check_method()
        log.debug(u"artresizer: method is {0}", self.method)
        log.debug("artresizer: method is {0}", self.method)
        self.can_compare = self._can_compare()

    def resize(self, maxwidth, path_in, path_out=None):
        # Use ImageMagick's magick binary when it's available. If it's
        # not, fall back to the older, separate convert and identify
        # commands.
        if self.method[0] == IMAGEMAGICK:
            self.im_legacy = self.method[2]
            if self.im_legacy:
                self.im_convert_cmd = ['convert']
                self.im_identify_cmd = ['identify']
            else:
                self.im_convert_cmd = ['magick']
                self.im_identify_cmd = ['magick', 'identify']

    def resize(
        self, maxwidth, path_in, path_out=None, quality=0, max_filesize=0
    ):
        """Manipulate an image file according to the method, returning a
        new path. For PIL or IMAGEMAGICK methods, resizes the image to a
        temporary file. For WEBPROXY, returns `path_in` unmodified.
        temporary file and encodes with the specified quality level.
        For WEBPROXY, returns `path_in` unmodified.
        """
        if self.local:
            func = BACKEND_FUNCS[self.method[0]]
            return func(maxwidth, path_in, path_out)
            return func(maxwidth, path_in, path_out,
                        quality=quality, max_filesize=max_filesize)
        else:
            return path_in

    def proxy_url(self, maxwidth, url):
    def deinterlace(self, path_in, path_out=None):
        if self.local:
            func = DEINTERLACE_FUNCS[self.method[0]]
            return func(path_in, path_out)
        else:
            return path_in

    def proxy_url(self, maxwidth, url, quality=0):
        """Modifies an image URL according to the method, returning a new
        URL. For WEBPROXY, a URL on the proxy server is returned.
        Otherwise, the URL is returned unmodified.
@@ -193,7 +372,7 @@ class ArtResizer(six.with_metaclass(Shareable, object)):
        if self.local:
            return url
        else:
            return resize_url(url, maxwidth)
            return resize_url(url, maxwidth, quality)

    @property
    def local(self):
@@ -206,12 +385,50 @@ class ArtResizer(six.with_metaclass(Shareable, object)):
        """Return the size of an image file as an int couple (width, height)
        in pixels.

        Only available locally
        Only available locally.
        """
        if self.local:
            func = BACKEND_GET_SIZE[self.method[0]]
            return func(path_in)

    def get_format(self, path_in):
        """Returns the format of the image as a string.

        Only available locally.
        """
        if self.local:
            func = BACKEND_GET_FORMAT[self.method[0]]
            return func(path_in)

    def reformat(self, path_in, new_format, deinterlaced=True):
        """Converts image to desired format, updating its extension, but
        keeping the same filename.

        Only available locally.
        """
        if not self.local:
            return path_in

        new_format = new_format.lower()
        # A nonexhaustive map of image "types" to extension overrides.
        new_format = {
            'jpeg': 'jpg',
        }.get(new_format, new_format)

        fname, ext = os.path.splitext(path_in)
        path_new = fname + b'.' + new_format.encode('utf8')
        func = BACKEND_CONVERT_IMAGE_FORMAT[self.method[0]]

        # Allow the exception to propagate, while still making sure a
        # changed file path was removed.
        result_path = path_in
        try:
            result_path = func(path_in, path_new, deinterlaced)
        finally:
            if result_path != path_in:
                os.unlink(path_in)
        return result_path

    def _can_compare(self):
        """A boolean indicating whether image comparison is available"""

@@ -219,10 +436,20 @@ class ArtResizer(six.with_metaclass(Shareable, object)):

    @staticmethod
    def _check_method():
        """Return a tuple indicating an available method and its version."""
        """Return a tuple indicating an available method and its version.

        The result has at least two elements:
        - The method, either WEBPROXY, PIL, or IMAGEMAGICK.
        - The version.

        If the method is IMAGEMAGICK, there is also a third element: a
        bool flag indicating whether to use the `magick` binary or
        legacy single-purpose executables (`convert`, `identify`, etc.)
        """
        version = get_im_version()
        if version:
            return IMAGEMAGICK, version
            version, legacy = version
            return IMAGEMAGICK, version, legacy

        version = get_pil_version()
        if version:
@@ -232,31 +459,34 @@ class ArtResizer(six.with_metaclass(Shareable, object)):


def get_im_version():
    """Return Image Magick version or None if it is unavailable
    Try invoking ImageMagick's "convert".
    """Get the ImageMagick version and legacy flag as a pair. Or return
    None if ImageMagick is not available.
    """
    try:
        out = util.command_output(['convert', '--version'])
    for cmd_name, legacy in ((['magick'], False), (['convert'], True)):
        cmd = cmd_name + ['--version']

        if b'imagemagick' in out.lower():
            pattern = br".+ (\d+)\.(\d+)\.(\d+).*"
            match = re.search(pattern, out)
            if match:
                return (int(match.group(1)),
                        int(match.group(2)),
                        int(match.group(3)))
            return (0,)
        try:
            out = util.command_output(cmd).stdout
        except (subprocess.CalledProcessError, OSError) as exc:
            log.debug('ImageMagick version check failed: {}', exc)
        else:
            if b'imagemagick' in out.lower():
                pattern = br".+ (\d+)\.(\d+)\.(\d+).*"
                match = re.search(pattern, out)
                if match:
                    version = (int(match.group(1)),
                               int(match.group(2)),
                               int(match.group(3)))
                    return version, legacy

    except (subprocess.CalledProcessError, OSError) as exc:
        log.debug(u'ImageMagick check `convert --version` failed: {}', exc)
        return None
    return None

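The version regex is easiest to sanity-check against real output. A small standalone check (the sample version string is illustrative):

    import re

    out = b'Version: ImageMagick 7.1.0-19 Q16-HDRI x86_64'
    match = re.search(br".+ (\d+)\.(\d+)\.(\d+).*", out)
    assert match and tuple(map(int, match.groups())) == (7, 1, 0)
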
def get_pil_version():
    """Return Image Magick version or None if it is unavailable
    Try importing PIL."""
    """Get the PIL/Pillow version, or None if it is unavailable.
    """
    try:
        __import__('PIL', fromlist=[str('Image')])
        __import__('PIL', fromlist=['Image'])
        return (0,)
    except ImportError:
        return None

28
lib/beets/util/bluelet.py
Executable file → Normal file
@@ -1,5 +1,3 @@
# -*- coding: utf-8 -*-

"""Extremely simple pure-Python implementation of coroutine-style
asynchronous socket I/O. Inspired by, but inferior to, Eventlet.
Bluelet can also be thought of as a less-terrible replacement for
@@ -7,9 +5,7 @@ asyncore.

Bluelet: easy concurrency without all the messy parallelism.
"""
from __future__ import division, absolute_import, print_function

import six
import socket
import select
import sys
@@ -22,7 +18,7 @@ import collections

# Basic events used for thread scheduling.

class Event(object):
class Event:
    """Just a base class identifying Bluelet events. An event is an
    object yielded from a Bluelet thread coroutine to suspend operation
    and communicate with the scheduler.
@@ -201,7 +197,7 @@ class ThreadException(Exception):
        self.exc_info = exc_info

    def reraise(self):
        six.reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
        raise self.exc_info[1].with_traceback(self.exc_info[2])

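This `six.reraise` replacement is the standard Python 3 idiom for re-raising a stored exception with its original traceback (the same change appears again in pipeline.py below). A minimal sketch:

    import sys

    try:
        1 / 0
    except ZeroDivisionError:
        exc_info = sys.exc_info()

    # Later, re-raise with the original traceback attached:
    # raise exc_info[1].with_traceback(exc_info[2])
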

SUSPENDED = Event()  # Special sentinel placeholder for suspended threads.
@@ -269,7 +265,7 @@ def run(root_coro):
        except StopIteration:
            # Thread is done.
            complete_thread(coro, None)
        except:
        except BaseException:
            # Thread raised some other exception.
            del threads[coro]
            raise ThreadException(coro, sys.exc_info())
@@ -336,16 +332,20 @@ def run(root_coro):
                break

            # Wait and fire.
            event2coro = dict((v, k) for k, v in threads.items())
            event2coro = {v: k for k, v in threads.items()}
            for event in _event_select(threads.values()):
                # Run the IO operation, but catch socket errors.
                try:
                    value = event.fire()
                except socket.error as exc:
                except OSError as exc:
                    if isinstance(exc.args, tuple) and \
                            exc.args[0] == errno.EPIPE:
                        # Broken pipe. Remote host disconnected.
                        pass
                    elif isinstance(exc.args, tuple) and \
                            exc.args[0] == errno.ECONNRESET:
                        # Connection was reset by peer.
                        pass
                    else:
                        traceback.print_exc()
                        # Abort the coroutine.
@@ -366,7 +366,7 @@ def run(root_coro):
                    exit_te = te
                    break

        except:
        except BaseException:
            # For instance, KeyboardInterrupt during select(). Raise
            # into root thread and terminate others.
            threads = {root_coro: ExceptionEvent(sys.exc_info())}
@@ -386,7 +386,7 @@ class SocketClosedError(Exception):
    pass


class Listener(object):
class Listener:
    """A socket wrapper object for listening sockets.
    """
    def __init__(self, host, port):
@@ -416,7 +416,7 @@ class Listener(object):
        self.sock.close()


class Connection(object):
class Connection:
    """A socket wrapper object for connected sockets.
    """
    def __init__(self, sock, addr):
@@ -541,7 +541,7 @@ def spawn(coro):
    and child coroutines run concurrently.
    """
    if not isinstance(coro, types.GeneratorType):
        raise ValueError(u'%s is not a coroutine' % coro)
        raise ValueError('%s is not a coroutine' % coro)
    return SpawnEvent(coro)


@@ -551,7 +551,7 @@ def call(coro):
    returns a value using end(), then this event returns that value.
    """
    if not isinstance(coro, types.GeneratorType):
        raise ValueError(u'%s is not a coroutine' % coro)
        raise ValueError('%s is not a coroutine' % coro)
    return DelegationEvent(coro)

1453
lib/beets/util/confit.py
Executable file → Normal file
File diff suppressed because it is too large
2
lib/beets/util/enumeration.py
Executable file → Normal file
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -13,7 +12,6 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

from __future__ import division, absolute_import, print_function

from enum import Enum

178
lib/beets/util/functemplate.py
Executable file → Normal file
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -27,30 +26,30 @@ This is sort of like a tiny, horrible degeneration of a real templating
engine like Jinja2 or Mustache.
"""

from __future__ import division, absolute_import, print_function

import re
import ast
import dis
import types
import sys
import six
import functools

SYMBOL_DELIM = u'$'
FUNC_DELIM = u'%'
GROUP_OPEN = u'{'
GROUP_CLOSE = u'}'
ARG_SEP = u','
ESCAPE_CHAR = u'$'
SYMBOL_DELIM = '$'
FUNC_DELIM = '%'
GROUP_OPEN = '{'
GROUP_CLOSE = '}'
ARG_SEP = ','
ESCAPE_CHAR = '$'

VARIABLE_PREFIX = '__var_'
FUNCTION_PREFIX = '__func_'


class Environment(object):
class Environment:
    """Contains the values and functions to be substituted into a
    template.
    """

    def __init__(self, values, functions):
        self.values = values
        self.functions = functions
@@ -72,15 +71,7 @@ def ex_literal(val):
    """An int, float, long, bool, string, or None literal with the given
    value.
    """
    if val is None:
        return ast.Name('None', ast.Load())
    elif isinstance(val, six.integer_types):
        return ast.Num(val)
    elif isinstance(val, bool):
        return ast.Name(bytes(val), ast.Load())
    elif isinstance(val, six.string_types):
        return ast.Str(val)
    raise TypeError(u'no literal for {0}'.format(type(val)))
    return ast.Constant(val)

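Since Python 3.8, `ast.Constant` subsumes the old `Num`/`Str`/`NameConstant` nodes, which is why the whole type dispatch collapses into one call. A quick standalone check:

    import ast

    tree = ast.parse('42')
    node = tree.body[0].value
    assert isinstance(node, ast.Constant) and node.value == 42
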

def ex_varassign(name, expr):
@@ -97,7 +88,7 @@ def ex_call(func, args):
    function may be an expression or the name of a function. Each
    argument may be an expression or a value to be used as a literal.
    """
    if isinstance(func, six.string_types):
    if isinstance(func, str):
        func = ex_rvalue(func)

    args = list(args)
@@ -105,10 +96,7 @@ def ex_call(func, args):
        if not isinstance(args[i], ast.expr):
            args[i] = ex_literal(args[i])

    if sys.version_info[:2] < (3, 5):
        return ast.Call(func, args, [], None, None)
    else:
        return ast.Call(func, args, [])
    return ast.Call(func, args, [])


def compile_func(arg_names, statements, name='_the_func', debug=False):
@@ -116,32 +104,30 @@ def compile_func(arg_names, statements, name='_the_func', debug=False):
    the resulting Python function. If `debug`, then print out the
    bytecode of the compiled function.
    """
    if six.PY2:
        func_def = ast.FunctionDef(
            name=name.encode('utf-8'),
            args=ast.arguments(
                args=[ast.Name(n, ast.Param()) for n in arg_names],
                vararg=None,
                kwarg=None,
                defaults=[ex_literal(None) for _ in arg_names],
            ),
            body=statements,
            decorator_list=[],
        )
    else:
        func_def = ast.FunctionDef(
            name=name,
            args=ast.arguments(
                args=[ast.arg(arg=n, annotation=None) for n in arg_names],
                kwonlyargs=[],
                kw_defaults=[],
                defaults=[ex_literal(None) for _ in arg_names],
            ),
            body=statements,
            decorator_list=[],
        )
    args_fields = {
        'args': [ast.arg(arg=n, annotation=None) for n in arg_names],
        'kwonlyargs': [],
        'kw_defaults': [],
        'defaults': [ex_literal(None) for _ in arg_names],
    }
    if 'posonlyargs' in ast.arguments._fields:  # Added in Python 3.8.
        args_fields['posonlyargs'] = []
    args = ast.arguments(**args_fields)

    func_def = ast.FunctionDef(
        name=name,
        args=args,
        body=statements,
        decorator_list=[],
    )

    # The ast.Module signature changed in 3.8 to accept a list of types to
    # ignore.
    if sys.version_info >= (3, 8):
        mod = ast.Module([func_def], [])
    else:
        mod = ast.Module([func_def])

    mod = ast.Module([func_def])
    ast.fix_missing_locations(mod)

    prog = compile(mod, '<generated>', 'exec')
@@ -160,14 +146,15 @@ def compile_func(arg_names, statements, name='_the_func', debug=False):

# AST nodes for the template language.

class Symbol(object):
class Symbol:
    """A variable-substitution symbol in a template."""

    def __init__(self, ident, original):
        self.ident = ident
        self.original = original

    def __repr__(self):
        return u'Symbol(%s)' % repr(self.ident)
        return 'Symbol(%s)' % repr(self.ident)

    def evaluate(self, env):
        """Evaluate the symbol in the environment, returning a Unicode
@@ -182,24 +169,22 @@ class Symbol(object):

    def translate(self):
        """Compile the variable lookup."""
        if six.PY2:
            ident = self.ident.encode('utf-8')
        else:
            ident = self.ident
        ident = self.ident
        expr = ex_rvalue(VARIABLE_PREFIX + ident)
        return [expr], set([ident]), set()
        return [expr], {ident}, set()


class Call(object):
class Call:
    """A function call in a template."""

    def __init__(self, ident, args, original):
        self.ident = ident
        self.args = args
        self.original = original

    def __repr__(self):
        return u'Call(%s, %s, %s)' % (repr(self.ident), repr(self.args),
                                      repr(self.original))
        return 'Call({}, {}, {})'.format(repr(self.ident), repr(self.args),
                                         repr(self.original))

    def evaluate(self, env):
        """Evaluate the function call in the environment, returning a
@@ -212,19 +197,15 @@ class Call(object):
            except Exception as exc:
                # Function raised exception! Maybe inlining the name of
                # the exception will help debug.
                return u'<%s>' % six.text_type(exc)
            return six.text_type(out)
                return '<%s>' % str(exc)
            return str(out)
        else:
            return self.original

    def translate(self):
        """Compile the function call."""
        varnames = set()
        if six.PY2:
            ident = self.ident.encode('utf-8')
        else:
            ident = self.ident
        funcnames = set([ident])
        funcnames = {self.ident}

        arg_exprs = []
        for arg in self.args:
@@ -235,32 +216,33 @@ class Call(object):
            # Create a subexpression that joins the result components of
            # the arguments.
            arg_exprs.append(ex_call(
                ast.Attribute(ex_literal(u''), 'join', ast.Load()),
                ast.Attribute(ex_literal(''), 'join', ast.Load()),
                [ex_call(
                    'map',
                    [
                        ex_rvalue(six.text_type.__name__),
                        ex_rvalue(str.__name__),
                        ast.List(subexprs, ast.Load()),
                    ]
                )],
            ))

        subexpr_call = ex_call(
            FUNCTION_PREFIX + ident,
            FUNCTION_PREFIX + self.ident,
            arg_exprs
        )
        return [subexpr_call], varnames, funcnames


class Expression(object):
class Expression:
    """Top-level template construct: contains a list of text blobs,
    Symbols, and Calls.
    """

    def __init__(self, parts):
        self.parts = parts

    def __repr__(self):
        return u'Expression(%s)' % (repr(self.parts))
        return 'Expression(%s)' % (repr(self.parts))

    def evaluate(self, env):
        """Evaluate the entire expression in the environment, returning
@@ -268,11 +250,11 @@ class Expression(object):
        """
        out = []
        for part in self.parts:
            if isinstance(part, six.string_types):
            if isinstance(part, str):
                out.append(part)
            else:
                out.append(part.evaluate(env))
        return u''.join(map(six.text_type, out))
        return ''.join(map(str, out))

    def translate(self):
        """Compile the expression to a list of Python AST expressions, a
@@ -282,7 +264,7 @@ class Expression(object):
        varnames = set()
        funcnames = set()
        for part in self.parts:
            if isinstance(part, six.string_types):
            if isinstance(part, str):
                expressions.append(ex_literal(part))
            else:
                e, v, f = part.translate()
@@ -298,7 +280,7 @@ class ParseError(Exception):
    pass


class Parser(object):
class Parser:
    """Parses a template expression string. Instantiate the class with
    the template source and call ``parse_expression``. The ``pos`` field
    will indicate the character after the expression finished and
@@ -311,6 +293,7 @@ class Parser(object):
    replaced with a real, accepted parsing technique (PEG, parser
    generator, etc.).
    """

    def __init__(self, string, in_argument=False):
        """ Create a new parser.
        :param in_argument: boolean that indicates the parser is to be
@@ -325,8 +308,8 @@ class Parser(object):
    # Common parsing resources.
    special_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_OPEN, GROUP_CLOSE,
                     ESCAPE_CHAR)
    special_char_re = re.compile(r'[%s]|$' %
                                 u''.join(re.escape(c) for c in special_chars))
    special_char_re = re.compile(r'[%s]|\Z' %
                                 ''.join(re.escape(c) for c in special_chars))
    escapable_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP)
    terminator_chars = (GROUP_CLOSE,)

@@ -343,8 +326,11 @@ class Parser(object):
        if self.in_argument:
            extra_special_chars = (ARG_SEP,)
            special_char_re = re.compile(
                r'[%s]|$' % u''.join(re.escape(c) for c in
                                     self.special_chars + extra_special_chars))
                r'[%s]|\Z' % ''.join(
                    re.escape(c) for c in
                    self.special_chars + extra_special_chars
                )
            )

        text_parts = []

@@ -384,7 +370,7 @@ class Parser(object):

        # Shift all characters collected so far into a single string.
        if text_parts:
            self.parts.append(u''.join(text_parts))
            self.parts.append(''.join(text_parts))
            text_parts = []

        if char == SYMBOL_DELIM:
@@ -406,7 +392,7 @@ class Parser(object):

        # If any parsed characters remain, shift them into a string.
        if text_parts:
            self.parts.append(u''.join(text_parts))
            self.parts.append(''.join(text_parts))

    def parse_symbol(self):
        """Parse a variable reference (like ``$foo`` or ``${foo}``)
@@ -544,11 +530,27 @@ def _parse(template):
    return Expression(parts)


# External interface.
def cached(func):
    """Like the `functools.lru_cache` decorator, but works (as a no-op)
    on Python < 3.2.
    """
    if hasattr(functools, 'lru_cache'):
        return functools.lru_cache(maxsize=128)(func)
    else:
        # Do nothing when lru_cache is not available.
        return func

class Template(object):

@cached
def template(fmt):
    return Template(fmt)

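The memoized `template()` helper means repeated formatting with the same template string reuses a single parsed `Template`. A usage sketch:

    t1 = template('$artist - $title')
    t2 = template('$artist - $title')
    assert t1 is t2  # same cached Template instance
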
# External interface.
class Template:
    """A string template, including text, Symbols, and Calls.
    """

    def __init__(self, template):
        self.expr = _parse(template)
        self.original = template
@@ -570,7 +572,7 @@ class Template(object):
        """
        try:
            res = self.compiled(values, functions)
        except:  # Handle any exceptions thrown by compiled version.
        except Exception:  # Handle any exceptions thrown by compiled version.
            res = self.interpret(values, functions)

        return res
@@ -597,7 +599,7 @@ class Template(object):
            for funcname in funcnames:
                args[FUNCTION_PREFIX + funcname] = functions[funcname]
            parts = func(**args)
            return u''.join(parts)
            return ''.join(parts)

        return wrapper_func

@@ -606,9 +608,9 @@ class Template(object):

if __name__ == '__main__':
    import timeit
    _tmpl = Template(u'foo $bar %baz{foozle $bar barzle} $bar')
    _tmpl = Template('foo $bar %baz{foozle $bar barzle} $bar')
    _vars = {'bar': 'qux'}
    _funcs = {'baz': six.text_type.upper}
    _funcs = {'baz': str.upper}
    interp_time = timeit.timeit('_tmpl.interpret(_vars, _funcs)',
                                'from __main__ import _tmpl, _vars, _funcs',
                                number=10000)
@@ -617,4 +619,4 @@ if __name__ == '__main__':
                                'from __main__ import _tmpl, _vars, _funcs',
                                number=10000)
    print(comp_time)
    print(u'Speedup:', interp_time / comp_time)
    print('Speedup:', interp_time / comp_time)

2
lib/beets/util/hidden.py
Executable file → Normal file
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -14,7 +13,6 @@
# included in all copies or substantial portions of the Software.

"""Simple library to work out if a file is hidden on different platforms."""
from __future__ import division, absolute_import, print_function

import os
import stat

58
lib/beets/util/pipeline.py
Executable file → Normal file
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -32,12 +31,10 @@ To do so, pass an iterable of coroutines to the Pipeline constructor
in place of any single coroutine.
"""

from __future__ import division, absolute_import, print_function

from six.moves import queue
import queue
from threading import Thread, Lock
import sys
import six

BUBBLE = '__PIPELINE_BUBBLE__'
POISON = '__PIPELINE_POISON__'
@@ -91,6 +88,7 @@ class CountedQueue(queue.Queue):
    still feeding into it. The queue is poisoned when all threads are
    finished with the queue.
    """

    def __init__(self, maxsize=0):
        queue.Queue.__init__(self, maxsize)
        self.nthreads = 0
@@ -135,10 +133,11 @@ class CountedQueue(queue.Queue):
            _invalidate_queue(self, POISON, False)


class MultiMessage(object):
class MultiMessage:
    """A message yielded by a pipeline stage encapsulating multiple
    values to be sent to the next stage.
    """

    def __init__(self, messages):
        self.messages = messages

@@ -210,8 +209,9 @@ def _allmsgs(obj):

class PipelineThread(Thread):
    """Abstract base class for pipeline-stage threads."""

    def __init__(self, all_threads):
        super(PipelineThread, self).__init__()
        super().__init__()
        self.abort_lock = Lock()
        self.abort_flag = False
        self.all_threads = all_threads
@@ -241,15 +241,13 @@ class FirstPipelineThread(PipelineThread):
    """The thread running the first stage in a parallel pipeline setup.
    The coroutine should just be a generator.
    """

    def __init__(self, coro, out_queue, all_threads):
        super(FirstPipelineThread, self).__init__(all_threads)
        super().__init__(all_threads)
        self.coro = coro
        self.out_queue = out_queue
        self.out_queue.acquire()

        self.abort_lock = Lock()
        self.abort_flag = False

    def run(self):
        try:
            while True:
@@ -270,7 +268,7 @@ class FirstPipelineThread(PipelineThread):
                    return
                self.out_queue.put(msg)

        except:
        except BaseException:
            self.abort_all(sys.exc_info())
            return

@@ -282,8 +280,9 @@ class MiddlePipelineThread(PipelineThread):
    """A thread running any stage in the pipeline except the first or
    last.
    """

    def __init__(self, coro, in_queue, out_queue, all_threads):
        super(MiddlePipelineThread, self).__init__(all_threads)
        super().__init__(all_threads)
        self.coro = coro
        self.in_queue = in_queue
        self.out_queue = out_queue
@@ -318,7 +317,7 @@ class MiddlePipelineThread(PipelineThread):
                    return
                self.out_queue.put(msg)

        except:
        except BaseException:
            self.abort_all(sys.exc_info())
            return

@@ -330,8 +329,9 @@ class LastPipelineThread(PipelineThread):
    """A thread running the last stage in a pipeline. The coroutine
    should yield nothing.
    """

    def __init__(self, coro, in_queue, all_threads):
        super(LastPipelineThread, self).__init__(all_threads)
        super().__init__(all_threads)
        self.coro = coro
        self.in_queue = in_queue

@@ -357,22 +357,23 @@ class LastPipelineThread(PipelineThread):
            # Send to consumer.
            self.coro.send(msg)

        except:
        except BaseException:
            self.abort_all(sys.exc_info())
            return


class Pipeline(object):
class Pipeline:
    """Represents a staged pattern of work. Each stage in the pipeline
    is a coroutine that receives messages from the previous stage and
    yields messages to be sent to the next stage.
    """

    def __init__(self, stages):
        """Makes a new pipeline from a list of coroutines. There must
        be at least two stages.
        """
        if len(stages) < 2:
            raise ValueError(u'pipeline must have at least two stages')
            raise ValueError('pipeline must have at least two stages')
        self.stages = []
        for stage in stages:
            if isinstance(stage, (list, tuple)):
@@ -425,7 +426,7 @@ class Pipeline(object):
            while threads[-1].is_alive():
                threads[-1].join(1)

        except:
        except BaseException:
            # Stop all the threads immediately.
            for thread in threads:
                thread.abort()
@@ -442,7 +443,7 @@ class Pipeline(object):
            exc_info = thread.exc_info
            if exc_info:
                # Make the exception appear as it was raised originally.
                six.reraise(exc_info[0], exc_info[1], exc_info[2])
                raise exc_info[1].with_traceback(exc_info[2])

    def pull(self):
        """Yield elements from the end of the pipeline. Runs the stages
@@ -469,6 +470,7 @@ class Pipeline(object):
        for msg in msgs:
            yield msg


# Smoke test.
if __name__ == '__main__':
    import time
@@ -477,14 +479,14 @@ if __name__ == '__main__':
    # in parallel.
    def produce():
        for i in range(5):
            print(u'generating %i' % i)
            print('generating %i' % i)
            time.sleep(1)
            yield i

    def work():
        num = yield
        while True:
            print(u'processing %i' % num)
            print('processing %i' % num)
            time.sleep(2)
            num = yield num * 2

@@ -492,7 +494,7 @@ if __name__ == '__main__':
        while True:
            num = yield
            time.sleep(1)
            print(u'received %i' % num)
            print('received %i' % num)

    ts_start = time.time()
    Pipeline([produce(), work(), consume()]).run_sequential()
@@ -501,22 +503,22 @@ if __name__ == '__main__':
    ts_par = time.time()
    Pipeline([produce(), (work(), work()), consume()]).run_parallel()
    ts_end = time.time()
    print(u'Sequential time:', ts_seq - ts_start)
    print(u'Parallel time:', ts_par - ts_seq)
    print(u'Multiply-parallel time:', ts_end - ts_par)
    print('Sequential time:', ts_seq - ts_start)
    print('Parallel time:', ts_par - ts_seq)
    print('Multiply-parallel time:', ts_end - ts_par)
    print()

    # Test a pipeline that raises an exception.
    def exc_produce():
        for i in range(10):
            print(u'generating %i' % i)
            print('generating %i' % i)
            time.sleep(1)
            yield i

    def exc_work():
        num = yield
        while True:
            print(u'processing %i' % num)
            print('processing %i' % num)
            time.sleep(3)
            if num == 3:
                raise Exception()
@@ -525,6 +527,6 @@ if __name__ == '__main__':
    def exc_consume():
        while True:
            num = yield
            print(u'received %i' % num)
            print('received %i' % num)

    Pipeline([exc_produce(), exc_work(), exc_consume()]).run_parallel(1)

2
lib/beets/vfs.py
Executable file → Normal file
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -16,7 +15,6 @@
"""A simple utility for constructing filesystem-like trees from beets
libraries.
"""
from __future__ import division, absolute_import, print_function

from collections import namedtuple
from beets import util

@@ -1,5 +1,5 @@
# This file is part of beets.
# Copyright 2013, Adrian Sampson.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
@@ -14,6 +14,7 @@

"""A namespace package for beets plugins."""


# Make this a namespace package.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)

196
lib/beetsplug/absubmit.py
Normal file
@@ -0,0 +1,196 @@
# This file is part of beets.
# Copyright 2016, Pieter Mulder.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Calculate acoustic information and submit to AcousticBrainz.
"""

import errno
import hashlib
import json
import os
import subprocess
import tempfile

from distutils.spawn import find_executable
import requests

from beets import plugins
from beets import util
from beets import ui

# We use this field to check whether AcousticBrainz info is present.
PROBE_FIELD = 'mood_acoustic'


class ABSubmitError(Exception):
    """Raised when failing to analyse file with extractor."""


def call(args):
    """Execute the command and return its output.

    Raise an ABSubmitError on failure.
    """
    try:
        return util.command_output(args).stdout
    except subprocess.CalledProcessError as e:
        raise ABSubmitError(
            '{} exited with status {}'.format(args[0], e.returncode)
        )


class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin):

    def __init__(self):
        super().__init__()

        self.config.add({
            'extractor': '',
            'force': False,
            'pretend': False
        })

        self.extractor = self.config['extractor'].as_str()
        if self.extractor:
            self.extractor = util.normpath(self.extractor)
            # Explicit path to extractor.
            if not os.path.isfile(self.extractor):
                raise ui.UserError(
                    'Extractor command does not exist: {0}.'.
                    format(self.extractor)
                )
        else:
            # Implicit path to extractor; search for it in the path.
            self.extractor = 'streaming_extractor_music'
            try:
                call([self.extractor])
            except OSError:
                raise ui.UserError(
                    'No extractor command found: please install the extractor'
                    ' binary from https://acousticbrainz.org/download'
                )
            except ABSubmitError:
                # Extractor found; it will exit with an error if not called
                # with the correct number of arguments.
                pass

            # Get the executable location on the system, which we need
            # to calculate the SHA-1 hash.
            self.extractor = find_executable(self.extractor)

        # Calculate extractor hash.
        self.extractor_sha = hashlib.sha1()
        with open(self.extractor, 'rb') as extractor:
            self.extractor_sha.update(extractor.read())
        self.extractor_sha = self.extractor_sha.hexdigest()

    base_url = 'https://acousticbrainz.org/api/v1/{mbid}/low-level'

    def commands(self):
        cmd = ui.Subcommand(
            'absubmit',
            help='calculate and submit AcousticBrainz analysis'
        )
        cmd.parser.add_option(
            '-f', '--force', dest='force_refetch',
            action='store_true', default=False,
            help='re-download data when already present'
        )
        cmd.parser.add_option(
            '-p', '--pretend', dest='pretend_fetch',
            action='store_true', default=False,
            help='pretend to perform action, but show \
only files which would be processed'
        )
        cmd.func = self.command
        return [cmd]

    def command(self, lib, opts, args):
        # Get items from arguments.
        items = lib.items(ui.decargs(args))
        self.opts = opts
        util.par_map(self.analyze_submit, items)

    def analyze_submit(self, item):
        analysis = self._get_analysis(item)
        if analysis:
            self._submit_data(item, analysis)

    def _get_analysis(self, item):
        mbid = item['mb_trackid']

        # Avoid re-analyzing files that already have AB data.
        if not self.opts.force_refetch and not self.config['force']:
            if item.get(PROBE_FIELD):
                return None

        # If the file has no MBID, skip it.
        if not mbid:
            self._log.info('Not analysing {}, missing '
                           'musicbrainz track id.', item)
            return None

        if self.opts.pretend_fetch or self.config['pretend']:
            self._log.info('pretend action - extract item: {}', item)
            return None

        # Temporary file to save extractor output to; the extractor only
        # works if an output file is given. Here we use a temporary file to
        # copy the data into a Python object and then remove the file from
        # the system.
        tmp_file, filename = tempfile.mkstemp(suffix='.json')
        try:
            # Close the file, so the extractor can overwrite it.
            os.close(tmp_file)
            try:
                call([self.extractor, util.syspath(item.path), filename])
            except ABSubmitError as e:
                self._log.warning(
                    'Failed to analyse {item} for AcousticBrainz: {error}',
                    item=item, error=e
                )
                return None
            with open(filename) as tmp_file:
                analysis = json.load(tmp_file)
            # Add the hash to the output.
            analysis['metadata']['version']['essentia_build_sha'] = \
                self.extractor_sha
            return analysis
        finally:
            try:
                os.remove(filename)
            except OSError as e:
                # ENOENT means the file does not exist; ignore this error.
                if e.errno != errno.ENOENT:
                    raise

    def _submit_data(self, item, data):
        mbid = item['mb_trackid']
        headers = {'Content-Type': 'application/json'}
        response = requests.post(self.base_url.format(mbid=mbid),
                                 json=data, headers=headers)
        # Test that the request was successful and raise an error on failure.
        if response.status_code != 200:
            try:
                message = response.json()['message']
            except (ValueError, KeyError) as e:
                message = f'unable to get error message: {e}'
            self._log.error(
                'Failed to submit AcousticBrainz analysis of {item}: '
                '{message}.', item=item, message=message
            )
        else:
            self._log.debug('Successfully submitted AcousticBrainz analysis '
                            'for {}.', item)

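The mkstemp/close/re-open pattern in `_get_analysis` is worth calling out: `tempfile.mkstemp` returns an open OS-level handle, which must be closed before an external process can reliably overwrite the file on all platforms. A minimal standalone sketch of the same pattern (the `analyze` command name is illustrative):

    import json
    import os
    import subprocess
    import tempfile

    fd, path = tempfile.mkstemp(suffix='.json')
    try:
        os.close(fd)  # let the external tool overwrite the file
        subprocess.check_call(['analyze', 'input.mp3', path])
        with open(path) as f:
            result = json.load(f)
    finally:
        os.remove(path)
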
334
lib/beetsplug/acousticbrainz.py
Normal file
@@ -0,0 +1,334 @@
|
||||
# This file is part of beets.
|
||||
# Copyright 2015-2016, Ohm Patel.
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining
|
||||
# a copy of this software and associated documentation files (the
|
||||
# "Software"), to deal in the Software without restriction, including
|
||||
# without limitation the rights to use, copy, modify, merge, publish,
|
||||
# distribute, sublicense, and/or sell copies of the Software, and to
|
||||
# permit persons to whom the Software is furnished to do so, subject to
|
||||
# the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be
|
||||
# included in all copies or substantial portions of the Software.
|
||||
|
||||
"""Fetch various AcousticBrainz metadata using MBID.
|
||||
"""
|
||||
|
||||
from collections import defaultdict
|
||||
|
||||
import requests
|
||||
|
||||
from beets import plugins, ui
|
||||
from beets.dbcore import types
|
||||
|
||||
ACOUSTIC_BASE = "https://acousticbrainz.org/"
|
||||
LEVELS = ["/low-level", "/high-level"]
|
||||
ABSCHEME = {
|
||||
'highlevel': {
|
||||
'danceability': {
|
||||
'all': {
|
||||
'danceable': 'danceable'
|
||||
}
|
||||
},
|
||||
'gender': {
|
||||
'value': 'gender'
|
||||
},
|
||||
'genre_rosamerica': {
|
||||
'value': 'genre_rosamerica'
|
||||
},
|
||||
'mood_acoustic': {
|
||||
'all': {
|
||||
'acoustic': 'mood_acoustic'
|
||||
}
|
||||
},
|
||||
'mood_aggressive': {
|
||||
'all': {
|
||||
'aggressive': 'mood_aggressive'
|
||||
}
|
||||
},
|
||||
'mood_electronic': {
|
||||
'all': {
|
||||
'electronic': 'mood_electronic'
|
||||
}
|
||||
},
|
||||
'mood_happy': {
|
||||
'all': {
|
||||
'happy': 'mood_happy'
|
||||
}
|
||||
},
|
||||
'mood_party': {
|
||||
'all': {
|
||||
'party': 'mood_party'
|
||||
}
|
||||
},
|
||||
'mood_relaxed': {
|
||||
'all': {
|
||||
'relaxed': 'mood_relaxed'
|
||||
}
|
||||
},
|
||||
'mood_sad': {
|
||||
'all': {
|
||||
'sad': 'mood_sad'
|
||||
}
|
||||
},
|
||||
'moods_mirex': {
|
||||
'value': 'moods_mirex'
|
||||
},
|
||||
'ismir04_rhythm': {
|
||||
'value': 'rhythm'
|
||||
},
|
||||
'tonal_atonal': {
|
||||
'all': {
|
||||
'tonal': 'tonal'
|
||||
}
|
||||
},
|
||||
'timbre': {
|
||||
'value': 'timbre'
|
||||
},
|
||||
'voice_instrumental': {
|
||||
'value': 'voice_instrumental'
|
||||
},
|
||||
},
|
||||
'lowlevel': {
|
||||
'average_loudness': 'average_loudness'
|
||||
},
|
||||
'rhythm': {
|
||||
'bpm': 'bpm'
|
||||
},
|
||||
'tonal': {
|
||||
'chords_changes_rate': 'chords_changes_rate',
|
||||
'chords_key': 'chords_key',
|
||||
'chords_number_rate': 'chords_number_rate',
|
||||
'chords_scale': 'chords_scale',
|
||||
'key_key': ('initial_key', 0),
|
||||
'key_scale': ('initial_key', 1),
|
||||
'key_strength': 'key_strength'
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class AcousticPlugin(plugins.BeetsPlugin):
|
||||
item_types = {
|
||||
'average_loudness': types.Float(6),
|
||||
'chords_changes_rate': types.Float(6),
|
||||
'chords_key': types.STRING,
|
||||
'chords_number_rate': types.Float(6),
|
||||
'chords_scale': types.STRING,
|
||||
'danceable': types.Float(6),
|
||||
'gender': types.STRING,
|
||||
'genre_rosamerica': types.STRING,
|
||||
'initial_key': types.STRING,
|
||||
'key_strength': types.Float(6),
|
||||
'mood_acoustic': types.Float(6),
|
||||
'mood_aggressive': types.Float(6),
|
||||
'mood_electronic': types.Float(6),
|
||||
'mood_happy': types.Float(6),
|
||||
'mood_party': types.Float(6),
|
||||
'mood_relaxed': types.Float(6),
|
||||
'mood_sad': types.Float(6),
|
||||
'moods_mirex': types.STRING,
|
||||
'rhythm': types.Float(6),
|
||||
'timbre': types.STRING,
|
||||
'tonal': types.Float(6),
|
||||
'voice_instrumental': types.STRING,
|
||||
}
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
self.config.add({
|
||||
'auto': True,
|
||||
'force': False,
|
||||
'tags': []
|
||||
})
|
||||
|
||||
if self.config['auto']:
|
||||
self.register_listener('import_task_files',
|
||||
self.import_task_files)
|
||||
|
||||
def commands(self):
|
||||
cmd = ui.Subcommand('acousticbrainz',
|
||||
help="fetch metadata from AcousticBrainz")
|
||||
cmd.parser.add_option(
|
||||
'-f', '--force', dest='force_refetch',
|
||||
action='store_true', default=False,
|
||||
help='re-download data when already present'
|
||||
)
|
||||
|
||||
def func(lib, opts, args):
|
||||
items = lib.items(ui.decargs(args))
|
||||
self._fetch_info(items, ui.should_write(),
|
||||
opts.force_refetch or self.config['force'])
|
||||
|
||||
cmd.func = func
|
||||
return [cmd]
|
||||
|
||||
def import_task_files(self, session, task):
|
||||
"""Function is called upon beet import.
|
||||
"""
|
||||
self._fetch_info(task.imported_items(), False, True)
|
||||
|
||||
def _get_data(self, mbid):
|
||||
data = {}
|
||||
for url in _generate_urls(mbid):
|
||||
self._log.debug('fetching URL: {}', url)
|
||||
|
||||
try:
|
||||
res = requests.get(url)
|
||||
except requests.RequestException as exc:
|
||||
self._log.info('request error: {}', exc)
|
||||
return {}
|
||||
|
||||
if res.status_code == 404:
|
||||
self._log.info('recording ID {} not found', mbid)
|
||||
return {}
|
||||
|
||||
try:
|
||||
data.update(res.json())
|
||||
except ValueError:
|
||||
self._log.debug('Invalid Response: {}', res.text)
|
||||
return {}
|
||||
|
||||
return data
|
||||
|
||||
def _fetch_info(self, items, write, force):
|
||||
"""Fetch additional information from AcousticBrainz for the `item`s.
|
||||
"""
|
||||
tags = self.config['tags'].as_str_seq()
|
||||
for item in items:
|
||||
# If we're not forcing re-downloading for all tracks, check
|
||||
# whether the data is already present. We use one
|
||||
# representative field name to check for previously fetched
|
||||
# data.
|
||||
if not force:
|
||||
mood_str = item.get('mood_acoustic', '')
|
||||
if mood_str:
|
||||
self._log.info('data already present for: {}', item)
|
||||
continue
|
||||
|
||||
# We can only fetch data for tracks with MBIDs.
|
||||
if not item.mb_trackid:
|
||||
continue
|
||||
|
||||
self._log.info('getting data for: {}', item)
|
||||
data = self._get_data(item.mb_trackid)
|
||||
if data:
|
||||
for attr, val in self._map_data_to_scheme(data, ABSCHEME):
|
||||
if not tags or attr in tags:
|
||||
self._log.debug('attribute {} of {} set to {}',
|
||||
attr,
|
||||
item,
|
||||
val)
|
||||
setattr(item, attr, val)
|
||||
else:
|
||||
self._log.debug('skipping attribute {} of {}'
|
||||
' (value {}) due to config',
|
||||
attr,
|
||||
item,
|
||||
val)
|
||||
item.store()
|
||||
if write:
|
||||
item.try_write()
|
||||
|
||||
def _map_data_to_scheme(self, data, scheme):
|
||||
"""Given `data` as a structure of nested dictionaries, and `scheme` as a
|
||||
structure of nested dictionaries , `yield` tuples `(attr, val)` where
|
||||
`attr` and `val` are corresponding leaf nodes in `scheme` and `data`.
|
||||
|
||||
As its name indicates, `scheme` defines how the data is structured,
|
||||
so this function tries to find leaf nodes in `data` that correspond
|
||||
to the leafs nodes of `scheme`, and not the other way around.
|
||||
Leaf nodes of `data` that do not exist in the `scheme` do not matter.
|
||||
If a leaf node of `scheme` is not present in `data`,
|
||||
no value is yielded for that attribute and a simple warning is issued.
|
||||
|
||||
Finally, to account for attributes of which the value is split between
|
||||
several leaf nodes in `data`, leaf nodes of `scheme` can be tuples
|
||||
`(attr, order)` where `attr` is the attribute to which the leaf node
|
||||
belongs, and `order` is the place at which it should appear in the
|
||||
value. The different `value`s belonging to the same `attr` are simply
|
||||
joined with `' '`. This is hardcoded and not very flexible, but it gets
|
||||
the job done.
|
||||
|
||||
For example:
|
||||
|
||||
>>> scheme = {
|
||||
'key1': 'attribute',
|
||||
'key group': {
|
||||
'subkey1': 'subattribute',
|
||||
'subkey2': ('composite attribute', 0)
|
||||
},
|
||||
'key2': ('composite attribute', 1)
|
||||
}
|
||||
>>> data = {
|
||||
'key1': 'value',
|
||||
'key group': {
|
||||
'subkey1': 'subvalue',
|
||||
'subkey2': 'part 1 of composite attr'
|
||||
},
|
||||
'key2': 'part 2'
|
||||
}
|
||||
>>> print(list(_map_data_to_scheme(data, scheme)))
|
||||
[('subattribute', 'subvalue'),
|
||||
('attribute', 'value'),
|
||||
('composite attribute', 'part 1 of composite attr part 2')]
|
||||
"""
|
||||
# First, we traverse `scheme` and `data`, `yield`ing all the non
|
||||
# composites attributes straight away and populating the dictionary
|
||||
# `composites` with the composite attributes.
|
||||
|
||||
# When we are finished traversing `scheme`, `composites` should
|
||||
# map each composite attribute to an ordered list of the values
|
||||
# belonging to the attribute, for example:
|
||||
# `composites = {'initial_key': ['B', 'minor']}`.
|
||||
|
||||
# The recursive traversal.
|
||||
composites = defaultdict(list)
|
||||
yield from self._data_to_scheme_child(data,
|
||||
scheme,
|
||||
composites)
|
||||
|
||||
# When composites has been populated, yield the composite attributes
|
||||
# by joining their parts.
|
||||
for composite_attr, value_parts in composites.items():
|
||||
yield composite_attr, ' '.join(value_parts)
|
||||
|
||||
def _data_to_scheme_child(self, subdata, subscheme, composites):
    """The recursive business logic of :meth:`_map_data_to_scheme`:
    Traverse two structures of nested dictionaries in parallel and `yield`
    tuples of corresponding leaf nodes.

    If a leaf node belongs to a composite attribute (is a `tuple`),
    populate `composites` rather than yielding straight away.
    All the child functions for a single traversal share the same
    `composites` instance, which is passed along.
    """
    for k, v in subscheme.items():
        if k in subdata:
            if isinstance(v, dict):
                yield from self._data_to_scheme_child(subdata[k],
                                                      v,
                                                      composites)
            elif isinstance(v, tuple):
                composite_attribute, part_number = v
                attribute_parts = composites[composite_attribute]
                # Parts are not guaranteed to be inserted in order
                while len(attribute_parts) <= part_number:
                    attribute_parts.append('')
                attribute_parts[part_number] = subdata[k]
            else:
                yield v, subdata[k]
        else:
            self._log.warning('AcousticBrainz did not provide info '
                              'about {}', k)
            self._log.debug('Data {} could not be mapped to scheme {} '
                            'because key {} was not found', subdata, v, k)

def _generate_urls(mbid):
    """Generate AcousticBrainz endpoint URLs for the given `mbid`."""
    for level in LEVELS:
        yield ACOUSTIC_BASE + mbid + level
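
# Illustrative sketch, not part of the diff: assuming the module-level
# constants defined earlier in this plugin are
# ACOUSTIC_BASE = "https://acousticbrainz.org/api/v1/" and
# LEVELS = ["/low-level", "/high-level"], the generator yields, e.g.:
#
#   >>> list(_generate_urls("<mbid>"))
#   ['https://acousticbrainz.org/api/v1/<mbid>/low-level',
#    'https://acousticbrainz.org/api/v1/<mbid>/high-level']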
65 lib/beetsplug/albumtypes.py Normal file
@@ -0,0 +1,65 @@
# This file is part of beets.
# Copyright 2021, Edgars Supe.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Adds an album template field for formatted album types."""


from beets.autotag.mb import VARIOUS_ARTISTS_ID
from beets.library import Album
from beets.plugins import BeetsPlugin


class AlbumTypesPlugin(BeetsPlugin):
    """Adds an album template field for formatted album types."""

    def __init__(self):
        """Init AlbumTypesPlugin."""
        super().__init__()
        self.album_template_fields['atypes'] = self._atypes
        self.config.add({
            'types': [
                ('ep', 'EP'),
                ('single', 'Single'),
                ('soundtrack', 'OST'),
                ('live', 'Live'),
                ('compilation', 'Anthology'),
                ('remix', 'Remix')
            ],
            'ignore_va': ['compilation'],
            'bracket': '[]'
        })

    def _atypes(self, item: Album):
        """Return a formatted string based on the album's types."""
        types = self.config['types'].as_pairs()
        ignore_va = self.config['ignore_va'].as_str_seq()
        bracket = self.config['bracket'].as_str()

        # Assign a left and right bracket, or leave both blank if the
        # configured value is not exactly two characters.
        if len(bracket) == 2:
            bracket_l = bracket[0]
            bracket_r = bracket[1]
        else:
            bracket_l = ''
            bracket_r = ''

        res = ''
        albumtypes = item.albumtypes.split('; ')
        is_va = item.mb_albumartistid == VARIOUS_ARTISTS_ID
        for type_key, type_name in types:
            if type_key in albumtypes and type_name:
                if not is_va or type_key not in ignore_va:
                    res += f'{bracket_l}{type_name}{bracket_r}'

        return res
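
# Illustrative sketch, not part of the diff: with the default config above,
# an album whose albumtypes field is "album; ep; live" is rendered by
# $atypes as "[EP][Live]" -- "album" has no mapping in `types`, while "ep"
# and "live" map to "EP" and "Live" inside the configured brackets. For a
# Various Artists release, "compilation" is in `ignore_va`, so no
# "[Anthology]" tag is emitted. A hypothetical paths config could then use:
#
#   paths:
#     default: $albumartist/$album $atypes/$track $title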
984 lib/beetsplug/aura.py Normal file
@@ -0,0 +1,984 @@
|
||||
# This file is part of beets.
|
||||
# Copyright 2020, Callum Brown.
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining
|
||||
# a copy of this software and associated documentation files (the
|
||||
# "Software"), to deal in the Software without restriction, including
|
||||
# without limitation the rights to use, copy, modify, merge, publish,
|
||||
# distribute, sublicense, and/or sell copies of the Software, and to
|
||||
# permit persons to whom the Software is furnished to do so, subject to
|
||||
# the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be
|
||||
# included in all copies or substantial portions of the Software.
|
||||
|
||||
"""An AURA server using Flask."""
|
||||
|
||||
|
||||
from mimetypes import guess_type
|
||||
import re
|
||||
import os.path
|
||||
from os.path import isfile, getsize
|
||||
|
||||
from beets.plugins import BeetsPlugin
|
||||
from beets.ui import Subcommand, _open_library
|
||||
from beets import config
|
||||
from beets.util import py3_path
|
||||
from beets.library import Item, Album
|
||||
from beets.dbcore.query import (
|
||||
MatchQuery,
|
||||
NotQuery,
|
||||
RegexpQuery,
|
||||
AndQuery,
|
||||
FixedFieldSort,
|
||||
SlowFieldSort,
|
||||
MultipleSort,
|
||||
)
|
||||
|
||||
from flask import (
|
||||
Blueprint,
|
||||
Flask,
|
||||
current_app,
|
||||
send_file,
|
||||
make_response,
|
||||
request,
|
||||
)
|
||||
|
||||
|
||||
# Constants
|
||||
|
||||
# AURA server information
|
||||
# TODO: Add version information
|
||||
SERVER_INFO = {
|
||||
"aura-version": "0",
|
||||
"server": "beets-aura",
|
||||
"server-version": "0.1",
|
||||
"auth-required": False,
|
||||
"features": ["albums", "artists", "images"],
|
||||
}
|
||||
|
||||
# Maps AURA Track attribute to beets Item attribute
|
||||
TRACK_ATTR_MAP = {
|
||||
# Required
|
||||
"title": "title",
|
||||
"artist": "artist",
|
||||
# Optional
|
||||
"album": "album",
|
||||
"track": "track", # Track number on album
|
||||
"tracktotal": "tracktotal",
|
||||
"disc": "disc",
|
||||
"disctotal": "disctotal",
|
||||
"year": "year",
|
||||
"month": "month",
|
||||
"day": "day",
|
||||
"bpm": "bpm",
|
||||
"genre": "genre",
|
||||
"recording-mbid": "mb_trackid", # beets trackid is MB recording
|
||||
"track-mbid": "mb_releasetrackid",
|
||||
"composer": "composer",
|
||||
"albumartist": "albumartist",
|
||||
"comments": "comments",
|
||||
# Optional for Audio Metadata
|
||||
# TODO: Support the mimetype attribute, format != mime type
|
||||
# "mimetype": track.format,
|
||||
"duration": "length",
|
||||
"framerate": "samplerate",
|
||||
# I don't think beets has a framecount field
|
||||
# "framecount": ???,
|
||||
"channels": "channels",
|
||||
"bitrate": "bitrate",
|
||||
"bitdepth": "bitdepth",
|
||||
"size": "filesize",
|
||||
}
|
||||
|
||||
# Maps AURA Album attribute to beets Album attribute
|
||||
ALBUM_ATTR_MAP = {
|
||||
# Required
|
||||
"title": "album",
|
||||
"artist": "albumartist",
|
||||
# Optional
|
||||
"tracktotal": "albumtotal",
|
||||
"disctotal": "disctotal",
|
||||
"year": "year",
|
||||
"month": "month",
|
||||
"day": "day",
|
||||
"genre": "genre",
|
||||
"release-mbid": "mb_albumid",
|
||||
"release-group-mbid": "mb_releasegroupid",
|
||||
}
|
||||
|
||||
# Maps AURA Artist attribute to beets Item field
|
||||
# Artists are not first-class in beets, so information is extracted from
|
||||
# beets Items.
|
||||
ARTIST_ATTR_MAP = {
|
||||
# Required
|
||||
"name": "artist",
|
||||
# Optional
|
||||
"artist-mbid": "mb_artistid",
|
||||
}
|
||||
|
||||
|
||||
class AURADocument:
|
||||
"""Base class for building AURA documents."""
|
||||
|
||||
@staticmethod
|
||||
def error(status, title, detail):
|
||||
"""Make a response for an error following the JSON:API spec.
|
||||
|
||||
Args:
|
||||
status: An HTTP status code string, e.g. "404 Not Found".
|
||||
title: A short, human-readable summary of the problem.
|
||||
detail: A human-readable explanation specific to this
|
||||
occurrence of the problem.
|
||||
"""
|
||||
document = {
|
||||
"errors": [{"status": status, "title": title, "detail": detail}]
|
||||
}
|
||||
return make_response(document, status)
|
||||
|
||||
def translate_filters(self):
|
||||
"""Translate filters from request arguments to a beets Query."""
|
||||
# The format of each filter key in the request parameter is:
|
||||
# filter[<attribute>]. This regex extracts <attribute>.
|
||||
pattern = re.compile(r"filter\[(?P<attribute>[a-zA-Z0-9_-]+)\]")
|
||||
queries = []
|
||||
for key, value in request.args.items():
|
||||
match = pattern.match(key)
|
||||
if match:
|
||||
# Extract attribute name from key
|
||||
aura_attr = match.group("attribute")
|
||||
# Get the beets version of the attribute name
|
||||
beets_attr = self.attribute_map.get(aura_attr, aura_attr)
|
||||
converter = self.get_attribute_converter(beets_attr)
|
||||
value = converter(value)
|
||||
# Add exact match query to list
|
||||
# Use a slow query so it works with all fields
|
||||
queries.append(MatchQuery(beets_attr, value, fast=False))
|
||||
# NOTE: AURA doesn't officially support multiple queries
|
||||
return AndQuery(queries)
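
# Illustrative sketch, not part of the diff: a request such as
#
#   GET /aura/tracks?filter[genre]=rock&filter[year]=1999
#
# is translated by translate_filters() into roughly
#
#   AndQuery([MatchQuery("genre", "rock", fast=False),
#             MatchQuery("year", 1999, fast=False)])
#
# where "1999" has been converted to an int by the converter that
# get_attribute_converter() derives from the beets field type.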
|
||||
|
||||
def translate_sorts(self, sort_arg):
|
||||
"""Translate an AURA sort parameter into a beets Sort.
|
||||
|
||||
Args:
|
||||
sort_arg: The value of the 'sort' query parameter; a comma
|
||||
separated list of fields to sort by, in order.
|
||||
E.g. "-year,title".
|
||||
"""
|
||||
# Change HTTP query parameter to a list
|
||||
aura_sorts = sort_arg.strip(",").split(",")
|
||||
sorts = []
|
||||
for aura_attr in aura_sorts:
|
||||
if aura_attr[0] == "-":
|
||||
ascending = False
|
||||
# Remove leading "-"
|
||||
aura_attr = aura_attr[1:]
|
||||
else:
|
||||
# JSON:API default
|
||||
ascending = True
|
||||
# Get the beets version of the attribute name
|
||||
beets_attr = self.attribute_map.get(aura_attr, aura_attr)
|
||||
# Use slow sort so it works with all fields (inc. computed)
|
||||
sorts.append(SlowFieldSort(beets_attr, ascending=ascending))
|
||||
return MultipleSort(sorts)
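
# Illustrative sketch, not part of the diff: sort_arg "-year,title" becomes
#
#   MultipleSort([SlowFieldSort("year", ascending=False),
#                 SlowFieldSort("title", ascending=True)])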
|
||||
|
||||
def paginate(self, collection):
|
||||
"""Get a page of the collection and the URL to the next page.
|
||||
|
||||
Args:
|
||||
collection: The raw data from which resource objects can be
|
||||
built. Could be an sqlite3.Cursor object (tracks and
|
||||
albums) or a list of strings (artists).
|
||||
"""
|
||||
# Pages start from zero
|
||||
page = request.args.get("page", 0, int)
|
||||
# Use page limit defined in config by default.
|
||||
default_limit = config["aura"]["page_limit"].get(int)
|
||||
limit = request.args.get("limit", default_limit, int)
|
||||
# start = offset of first item to return
|
||||
start = page * limit
|
||||
# end = offset of last item + 1
|
||||
end = start + limit
|
||||
if end > len(collection):
|
||||
end = len(collection)
|
||||
next_url = None
|
||||
else:
|
||||
# Not the last page so work out links.next url
|
||||
if not request.args:
|
||||
# No existing arguments, so current page is 0
|
||||
next_url = request.url + "?page=1"
|
||||
elif not request.args.get("page", None):
|
||||
# No existing page argument, so add one to the end
|
||||
next_url = request.url + "&page=1"
|
||||
else:
|
||||
# Increment page token by 1
|
||||
next_url = request.url.replace(
|
||||
f"page={page}", "page={}".format(page + 1)
|
||||
)
|
||||
# Get only the items in the page range
|
||||
data = [self.resource_object(collection[i]) for i in range(start, end)]
|
||||
return data, next_url
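
# Illustrative sketch, not part of the diff: with the default page_limit of
# 500, a request for ?page=2 returns items at offsets 1000..1499
# (start = 2 * 500, end = 1500) and links.next points at ?page=3; if the
# collection holds fewer than 1500 items, end is clamped to its length and
# next_url is None, marking the last page.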
|
||||
|
||||
def get_included(self, data, include_str):
|
||||
"""Build a list of resource objects for inclusion.
|
||||
|
||||
Args:
|
||||
data: An array of dicts in the form of resource objects.
|
||||
include_str: A comma separated list of resource types to
|
||||
include. E.g. "tracks,images".
|
||||
"""
|
||||
# Change HTTP query parameter to a list
|
||||
to_include = include_str.strip(",").split(",")
|
||||
# Build a list of unique type and id combinations
|
||||
# For each resource object in the primary data, iterate over its
|
||||
# relationships. If a relationship matches one of the types
|
||||
# requested for inclusion (e.g. "albums") then add each type-id pair
|
||||
# under the "data" key to unique_identifiers, checking first that
|
||||
# it has not already been added. This ensures that no resources are
|
||||
# included more than once.
|
||||
unique_identifiers = []
|
||||
for res_obj in data:
|
||||
for rel_name, rel_obj in res_obj["relationships"].items():
|
||||
if rel_name in to_include:
|
||||
# NOTE: Assumes relationship is to-many
|
||||
for identifier in rel_obj["data"]:
|
||||
if identifier not in unique_identifiers:
|
||||
unique_identifiers.append(identifier)
|
||||
# TODO: I think this could be improved
|
||||
included = []
|
||||
for identifier in unique_identifiers:
|
||||
res_type = identifier["type"]
|
||||
if res_type == "track":
|
||||
track_id = int(identifier["id"])
|
||||
track = current_app.config["lib"].get_item(track_id)
|
||||
included.append(TrackDocument.resource_object(track))
|
||||
elif res_type == "album":
|
||||
album_id = int(identifier["id"])
|
||||
album = current_app.config["lib"].get_album(album_id)
|
||||
included.append(AlbumDocument.resource_object(album))
|
||||
elif res_type == "artist":
|
||||
artist_id = identifier["id"]
|
||||
included.append(ArtistDocument.resource_object(artist_id))
|
||||
elif res_type == "image":
|
||||
image_id = identifier["id"]
|
||||
included.append(ImageDocument.resource_object(image_id))
|
||||
else:
|
||||
raise ValueError(f"Invalid resource type: {res_type}")
|
||||
return included
|
||||
|
||||
def all_resources(self):
|
||||
"""Build document for /tracks, /albums or /artists."""
|
||||
query = self.translate_filters()
|
||||
sort_arg = request.args.get("sort", None)
|
||||
if sort_arg:
|
||||
sort = self.translate_sorts(sort_arg)
|
||||
# For each sort field add a query which ensures all results
|
||||
# have a non-empty, non-zero value for that field.
|
||||
for s in sort.sorts:
|
||||
query.subqueries.append(
|
||||
NotQuery(
|
||||
# Match empty fields (^$) or zero fields, (^0$)
|
||||
RegexpQuery(s.field, "(^$|^0$)", fast=False)
|
||||
)
|
||||
)
|
||||
else:
|
||||
sort = None
|
||||
# Get information from the library
|
||||
collection = self.get_collection(query=query, sort=sort)
|
||||
# Convert info to AURA form and paginate it
|
||||
data, next_url = self.paginate(collection)
|
||||
document = {"data": data}
|
||||
# If there are more pages then provide a way to access them
|
||||
if next_url:
|
||||
document["links"] = {"next": next_url}
|
||||
# Include related resources for each element in "data"
|
||||
include_str = request.args.get("include", None)
|
||||
if include_str:
|
||||
document["included"] = self.get_included(data, include_str)
|
||||
return document
|
||||
|
||||
def single_resource_document(self, resource_object):
|
||||
"""Build document for a specific requested resource.
|
||||
|
||||
Args:
|
||||
resource_object: A dictionary in the form of a JSON:API
|
||||
resource object.
|
||||
"""
|
||||
document = {"data": resource_object}
|
||||
include_str = request.args.get("include", None)
|
||||
if include_str:
|
||||
# Wrap document["data"] in a list because get_included() expects a list
|
||||
document["included"] = self.get_included(
|
||||
[document["data"]], include_str
|
||||
)
|
||||
return document
|
||||
|
||||
|
||||
class TrackDocument(AURADocument):
|
||||
"""Class for building documents for /tracks endpoints."""
|
||||
|
||||
attribute_map = TRACK_ATTR_MAP
|
||||
|
||||
def get_collection(self, query=None, sort=None):
|
||||
"""Get Item objects from the library.
|
||||
|
||||
Args:
|
||||
query: A beets Query object or a beets query string.
|
||||
sort: A beets Sort object.
|
||||
"""
|
||||
return current_app.config["lib"].items(query, sort)
|
||||
|
||||
def get_attribute_converter(self, beets_attr):
|
||||
"""Work out what data type an attribute should be for beets.
|
||||
|
||||
Args:
|
||||
beets_attr: The name of the beets attribute, e.g. "title".
|
||||
"""
|
||||
# filesize is a special field (read from disk not db?)
|
||||
if beets_attr == "filesize":
|
||||
converter = int
|
||||
else:
|
||||
try:
|
||||
# Look for field in list of Item fields
|
||||
# and get python type of database type.
|
||||
# See beets.library.Item and beets.dbcore.types
|
||||
converter = Item._fields[beets_attr].model_type
|
||||
except KeyError:
|
||||
# Fall back to string (NOTE: probably not good)
|
||||
converter = str
|
||||
return converter
|
||||
|
||||
@staticmethod
|
||||
def resource_object(track):
|
||||
"""Construct a JSON:API resource object from a beets Item.
|
||||
|
||||
Args:
|
||||
track: A beets Item object.
|
||||
"""
|
||||
attributes = {}
|
||||
# Use aura => beets attribute map, e.g. size => filesize
|
||||
for aura_attr, beets_attr in TRACK_ATTR_MAP.items():
|
||||
a = getattr(track, beets_attr)
|
||||
# Only set attribute if it's not None, 0, "", etc.
|
||||
# NOTE: This could result in required attributes not being set
|
||||
if a:
|
||||
attributes[aura_attr] = a
|
||||
|
||||
# JSON:API one-to-many relationship to parent album
|
||||
relationships = {
|
||||
"artists": {"data": [{"type": "artist", "id": track.artist}]}
|
||||
}
|
||||
# Only add album relationship if not singleton
|
||||
if not track.singleton:
|
||||
relationships["albums"] = {
|
||||
"data": [{"type": "album", "id": str(track.album_id)}]
|
||||
}
|
||||
|
||||
return {
|
||||
"type": "track",
|
||||
"id": str(track.id),
|
||||
"attributes": attributes,
|
||||
"relationships": relationships,
|
||||
}
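
# Illustrative sketch, not part of the diff: for a hypothetical library
# item, resource_object() returns something like
#
#   {
#       "type": "track",
#       "id": "42",
#       "attributes": {"title": "Song", "artist": "Some Artist"},
#       "relationships": {
#           "artists": {"data": [{"type": "artist", "id": "Some Artist"}]},
#           "albums": {"data": [{"type": "album", "id": "7"}]},
#       },
#   }
#
# Note that artist ids are plain names, while track and album ids are
# stringified beets database ids.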
|
||||
|
||||
def single_resource(self, track_id):
|
||||
"""Get track from the library and build a document.
|
||||
|
||||
Args:
|
||||
track_id: The beets id of the track (integer).
|
||||
"""
|
||||
track = current_app.config["lib"].get_item(track_id)
|
||||
if not track:
|
||||
return self.error(
|
||||
"404 Not Found",
|
||||
"No track with the requested id.",
|
||||
"There is no track with an id of {} in the library.".format(
|
||||
track_id
|
||||
),
|
||||
)
|
||||
return self.single_resource_document(self.resource_object(track))
|
||||
|
||||
|
||||
class AlbumDocument(AURADocument):
|
||||
"""Class for building documents for /albums endpoints."""
|
||||
|
||||
attribute_map = ALBUM_ATTR_MAP
|
||||
|
||||
def get_collection(self, query=None, sort=None):
|
||||
"""Get Album objects from the library.
|
||||
|
||||
Args:
|
||||
query: A beets Query object or a beets query string.
|
||||
sort: A beets Sort object.
|
||||
"""
|
||||
return current_app.config["lib"].albums(query, sort)
|
||||
|
||||
def get_attribute_converter(self, beets_attr):
|
||||
"""Work out what data type an attribute should be for beets.
|
||||
|
||||
Args:
|
||||
beets_attr: The name of the beets attribute, e.g. "title".
|
||||
"""
|
||||
try:
|
||||
# Look for field in list of Album fields
|
||||
# and get python type of database type.
|
||||
# See beets.library.Album and beets.dbcore.types
|
||||
converter = Album._fields[beets_attr].model_type
|
||||
except KeyError:
|
||||
# Fall back to string (NOTE: probably not good)
|
||||
converter = str
|
||||
return converter
|
||||
|
||||
@staticmethod
|
||||
def resource_object(album):
|
||||
"""Construct a JSON:API resource object from a beets Album.
|
||||
|
||||
Args:
|
||||
album: A beets Album object.
|
||||
"""
|
||||
attributes = {}
|
||||
# Use aura => beets attribute name map
|
||||
for aura_attr, beets_attr in ALBUM_ATTR_MAP.items():
|
||||
a = getattr(album, beets_attr)
|
||||
# Only set attribute if it's not None, 0, "", etc.
|
||||
# NOTE: This could mean required attributes are not set
|
||||
if a:
|
||||
attributes[aura_attr] = a
|
||||
|
||||
# Get beets Item objects for all tracks in the album sorted by
|
||||
# track number. Sorting is not required but it's nice.
|
||||
query = MatchQuery("album_id", album.id)
|
||||
sort = FixedFieldSort("track", ascending=True)
|
||||
tracks = current_app.config["lib"].items(query, sort)
|
||||
# JSON:API one-to-many relationship to tracks on the album
|
||||
relationships = {
|
||||
"tracks": {
|
||||
"data": [{"type": "track", "id": str(t.id)} for t in tracks]
|
||||
}
|
||||
}
|
||||
# Add images relationship if album has associated images
|
||||
if album.artpath:
|
||||
path = py3_path(album.artpath)
|
||||
filename = path.split("/")[-1]
|
||||
image_id = f"album-{album.id}-{filename}"
|
||||
relationships["images"] = {
|
||||
"data": [{"type": "image", "id": image_id}]
|
||||
}
|
||||
# Add an artist relationship if the artist name is the same on the tracks.
# Tracks are used to define artists, so don't use albumartist directly.
# Check all tracks in case some have featured artists.
|
||||
if album.albumartist in [t.artist for t in tracks]:
|
||||
relationships["artists"] = {
|
||||
"data": [{"type": "artist", "id": album.albumartist}]
|
||||
}
|
||||
|
||||
return {
|
||||
"type": "album",
|
||||
"id": str(album.id),
|
||||
"attributes": attributes,
|
||||
"relationships": relationships,
|
||||
}
|
||||
|
||||
def single_resource(self, album_id):
|
||||
"""Get album from the library and build a document.
|
||||
|
||||
Args:
|
||||
album_id: The beets id of the album (integer).
|
||||
"""
|
||||
album = current_app.config["lib"].get_album(album_id)
|
||||
if not album:
|
||||
return self.error(
|
||||
"404 Not Found",
|
||||
"No album with the requested id.",
|
||||
"There is no album with an id of {} in the library.".format(
|
||||
album_id
|
||||
),
|
||||
)
|
||||
return self.single_resource_document(self.resource_object(album))
|
||||
|
||||
|
||||
class ArtistDocument(AURADocument):
|
||||
"""Class for building documents for /artists endpoints."""
|
||||
|
||||
attribute_map = ARTIST_ATTR_MAP
|
||||
|
||||
def get_collection(self, query=None, sort=None):
|
||||
"""Get a list of artist names from the library.
|
||||
|
||||
Args:
|
||||
query: A beets Query object or a beets query string.
|
||||
sort: A beets Sort object.
|
||||
"""
|
||||
# Gets only tracks with matching artist information
|
||||
tracks = current_app.config["lib"].items(query, sort)
|
||||
collection = []
|
||||
for track in tracks:
|
||||
# Do not add duplicates
|
||||
if track.artist not in collection:
|
||||
collection.append(track.artist)
|
||||
return collection
|
||||
|
||||
def get_attribute_converter(self, beets_attr):
|
||||
"""Work out what data type an attribute should be for beets.
|
||||
|
||||
Args:
|
||||
beets_attr: The name of the beets attribute, e.g. "artist".
|
||||
"""
|
||||
try:
|
||||
# Look for field in list of Item fields
|
||||
# and get python type of database type.
|
||||
# See beets.library.Item and beets.dbcore.types
|
||||
converter = Item._fields[beets_attr].model_type
|
||||
except KeyError:
|
||||
# Fall back to string (NOTE: probably not good)
|
||||
converter = str
|
||||
return converter
|
||||
|
||||
@staticmethod
|
||||
def resource_object(artist_id):
|
||||
"""Construct a JSON:API resource object for the given artist.
|
||||
|
||||
Args:
|
||||
artist_id: A string which is the artist's name.
|
||||
"""
|
||||
# Get tracks where artist field exactly matches artist_id
|
||||
query = MatchQuery("artist", artist_id)
|
||||
tracks = current_app.config["lib"].items(query)
|
||||
if not tracks:
|
||||
return None
|
||||
|
||||
# Get artist information from the first track
|
||||
# NOTE: It could be that the first track doesn't have a
|
||||
# MusicBrainz id but later tracks do, which isn't ideal.
|
||||
attributes = {}
|
||||
# Use aura => beets attribute map, e.g. artist => name
|
||||
for aura_attr, beets_attr in ARTIST_ATTR_MAP.items():
|
||||
a = getattr(tracks[0], beets_attr)
|
||||
# Only set attribute if it's not None, 0, "", etc.
|
||||
# NOTE: This could mean required attributes are not set
|
||||
if a:
|
||||
attributes[aura_attr] = a
|
||||
|
||||
relationships = {
|
||||
"tracks": {
|
||||
"data": [{"type": "track", "id": str(t.id)} for t in tracks]
|
||||
}
|
||||
}
|
||||
album_query = MatchQuery("albumartist", artist_id)
|
||||
albums = current_app.config["lib"].albums(query=album_query)
|
||||
if len(albums) != 0:
|
||||
relationships["albums"] = {
|
||||
"data": [{"type": "album", "id": str(a.id)} for a in albums]
|
||||
}
|
||||
|
||||
return {
|
||||
"type": "artist",
|
||||
"id": artist_id,
|
||||
"attributes": attributes,
|
||||
"relationships": relationships,
|
||||
}
|
||||
|
||||
def single_resource(self, artist_id):
|
||||
"""Get info for the requested artist and build a document.
|
||||
|
||||
Args:
|
||||
artist_id: A string which is the artist's name.
|
||||
"""
|
||||
artist_resource = self.resource_object(artist_id)
|
||||
if not artist_resource:
|
||||
return self.error(
|
||||
"404 Not Found",
|
||||
"No artist with the requested id.",
|
||||
"There is no artist with an id of {} in the library.".format(
|
||||
artist_id
|
||||
),
|
||||
)
|
||||
return self.single_resource_document(artist_resource)
|
||||
|
||||
|
||||
def safe_filename(fn):
|
||||
"""Check whether a string is a simple (non-path) filename.
|
||||
|
||||
For example, `foo.txt` is safe because it is a "plain" filename. But
`foo/bar.txt` and `../foo.txt` and `.` are all unsafe because they
can traverse to directories other than the current one.
|
||||
"""
|
||||
# Rule out any directories.
|
||||
if os.path.basename(fn) != fn:
|
||||
return False
|
||||
|
||||
# In single names, rule out Unix directory traversal names.
|
||||
if fn in ('.', '..'):
|
||||
return False
|
||||
|
||||
return True
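
# Illustrative sketch, not part of the diff:
#
#   safe_filename("cover.jpg")     -> True
#   safe_filename("a/b.jpg")       -> False  (contains a directory)
#   safe_filename("../cover.jpg")  -> False  (basename differs: traversal)
#   safe_filename(".")             -> False  (directory traversal name)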
|
||||
|
||||
|
||||
class ImageDocument(AURADocument):
|
||||
"""Class for building documents for /images/(id) endpoints."""
|
||||
|
||||
@staticmethod
|
||||
def get_image_path(image_id):
|
||||
"""Works out the full path to the image with the given id.
|
||||
|
||||
Returns None if there is no such image.
|
||||
|
||||
Args:
|
||||
image_id: A string in the form
|
||||
"<parent_type>-<parent_id>-<img_filename>".
|
||||
"""
|
||||
# Split image_id into its constituent parts
|
||||
id_split = image_id.split("-")
|
||||
if len(id_split) < 3:
|
||||
# image_id is not in the required format
|
||||
return None
|
||||
parent_type = id_split[0]
|
||||
parent_id = id_split[1]
|
||||
img_filename = "-".join(id_split[2:])
|
||||
if not safe_filename(img_filename):
|
||||
return None
|
||||
|
||||
# Get the path to the directory parent's images are in
|
||||
if parent_type == "album":
|
||||
album = current_app.config["lib"].get_album(int(parent_id))
|
||||
if not album or not album.artpath:
|
||||
return None
|
||||
# Cut the filename off of artpath
|
||||
# This is in preparation for supporting images in the same
|
||||
# directory that are not tracked by beets.
|
||||
artpath = py3_path(album.artpath)
|
||||
dir_path = "/".join(artpath.split("/")[:-1])
|
||||
else:
|
||||
# Images for other resource types are not supported
|
||||
return None
|
||||
|
||||
img_path = os.path.join(dir_path, img_filename)
|
||||
# Check the image actually exists
|
||||
if isfile(img_path):
|
||||
return img_path
|
||||
else:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def resource_object(image_id):
|
||||
"""Construct a JSON:API resource object for the given image.
|
||||
|
||||
Args:
|
||||
image_id: A string in the form
|
||||
"<parent_type>-<parent_id>-<img_filename>".
|
||||
"""
|
||||
# Could be called as a static method, so can't use
|
||||
# self.get_image_path()
|
||||
image_path = ImageDocument.get_image_path(image_id)
|
||||
if not image_path:
|
||||
return None
|
||||
|
||||
attributes = {
|
||||
"role": "cover",
|
||||
"mimetype": guess_type(image_path)[0],
|
||||
"size": getsize(image_path),
|
||||
}
|
||||
try:
|
||||
from PIL import Image
|
||||
except ImportError:
|
||||
pass
|
||||
else:
|
||||
im = Image.open(image_path)
|
||||
attributes["width"] = im.width
|
||||
attributes["height"] = im.height
|
||||
|
||||
relationships = {}
|
||||
# Split id into [parent_type, parent_id, filename]
|
||||
id_split = image_id.split("-")
|
||||
relationships[id_split[0] + "s"] = {
|
||||
"data": [{"type": id_split[0], "id": id_split[1]}]
|
||||
}
|
||||
|
||||
return {
|
||||
"id": image_id,
|
||||
"type": "image",
|
||||
# Remove attributes that are None, 0, "", etc.
|
||||
"attributes": {k: v for k, v in attributes.items() if v},
|
||||
"relationships": relationships,
|
||||
}
|
||||
|
||||
def single_resource(self, image_id):
|
||||
"""Get info for the requested image and build a document.
|
||||
|
||||
Args:
|
||||
image_id: A string in the form
|
||||
"<parent_type>-<parent_id>-<img_filename>".
|
||||
"""
|
||||
image_resource = self.resource_object(image_id)
|
||||
if not image_resource:
|
||||
return self.error(
|
||||
"404 Not Found",
|
||||
"No image with the requested id.",
|
||||
"There is no image with an id of {} in the library.".format(
|
||||
image_id
|
||||
),
|
||||
)
|
||||
return self.single_resource_document(image_resource)
|
||||
|
||||
|
||||
# Initialise flask blueprint
|
||||
aura_bp = Blueprint("aura_bp", __name__)
|
||||
|
||||
|
||||
@aura_bp.route("/server")
|
||||
def server_info():
|
||||
"""Respond with info about the server."""
|
||||
return {"data": {"type": "server", "id": "0", "attributes": SERVER_INFO}}
|
||||
|
||||
|
||||
# Track endpoints
|
||||
|
||||
|
||||
@aura_bp.route("/tracks")
|
||||
def all_tracks():
|
||||
"""Respond with a list of all tracks and related information."""
|
||||
doc = TrackDocument()
|
||||
return doc.all_resources()
|
||||
|
||||
|
||||
@aura_bp.route("/tracks/<int:track_id>")
|
||||
def single_track(track_id):
|
||||
"""Respond with info about the specified track.
|
||||
|
||||
Args:
|
||||
track_id: The id of the track provided in the URL (integer).
|
||||
"""
|
||||
doc = TrackDocument()
|
||||
return doc.single_resource(track_id)
|
||||
|
||||
|
||||
@aura_bp.route("/tracks/<int:track_id>/audio")
|
||||
def audio_file(track_id):
|
||||
"""Supply an audio file for the specified track.
|
||||
|
||||
Args:
|
||||
track_id: The id of the track provided in the URL (integer).
|
||||
"""
|
||||
track = current_app.config["lib"].get_item(track_id)
|
||||
if not track:
|
||||
return AURADocument.error(
|
||||
"404 Not Found",
|
||||
"No track with the requested id.",
|
||||
"There is no track with an id of {} in the library.".format(
|
||||
track_id
|
||||
),
|
||||
)
|
||||
|
||||
path = py3_path(track.path)
|
||||
if not isfile(path):
|
||||
return AURADocument.error(
|
||||
"404 Not Found",
|
||||
"No audio file for the requested track.",
|
||||
(
|
||||
"There is no audio file for track {} at the expected location"
|
||||
).format(track_id),
|
||||
)
|
||||
|
||||
file_mimetype = guess_type(path)[0]
|
||||
if not file_mimetype:
|
||||
return AURADocument.error(
|
||||
"500 Internal Server Error",
|
||||
"Requested audio file has an unknown mimetype.",
|
||||
(
|
||||
"The audio file for track {} has an unknown mimetype. "
|
||||
"Its file extension is {}."
|
||||
).format(track_id, path.split(".")[-1]),
|
||||
)
|
||||
|
||||
# Check that the Accept header contains the file's mimetype
|
||||
# Takes into account */* and audio/*
|
||||
# Adding support for the bitrate parameter would require some effort so I
|
||||
# left it out. This means the client could be sent an error even if the
|
||||
# audio doesn't need transcoding.
|
||||
if not request.accept_mimetypes.best_match([file_mimetype]):
|
||||
return AURADocument.error(
|
||||
"406 Not Acceptable",
|
||||
"Unsupported MIME type or bitrate parameter in Accept header.",
|
||||
(
|
||||
"The audio file for track {} is only available as {} and "
|
||||
"bitrate parameters are not supported."
|
||||
).format(track_id, file_mimetype),
|
||||
)
|
||||
|
||||
return send_file(
|
||||
path,
|
||||
mimetype=file_mimetype,
|
||||
# Handles filename in Content-Disposition header
|
||||
as_attachment=True,
|
||||
# Tries to upgrade the stream to support range requests
|
||||
conditional=True,
|
||||
)
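
# Illustrative sketch, not part of the diff: a client must send an Accept
# header matching the file's mimetype, e.g. using only the standard library
# against the default host and port:
#
#   import urllib.request
#   req = urllib.request.Request(
#       "http://127.0.0.1:8337/aura/tracks/1/audio",
#       headers={"Accept": "audio/*"},
#   )
#   audio_bytes = urllib.request.urlopen(req).read()
#
# An Accept of e.g. "application/json" would get a 406 Not Acceptable.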
|
||||
|
||||
|
||||
# Album endpoints
|
||||
|
||||
|
||||
@aura_bp.route("/albums")
|
||||
def all_albums():
|
||||
"""Respond with a list of all albums and related information."""
|
||||
doc = AlbumDocument()
|
||||
return doc.all_resources()
|
||||
|
||||
|
||||
@aura_bp.route("/albums/<int:album_id>")
|
||||
def single_album(album_id):
|
||||
"""Respond with info about the specified album.
|
||||
|
||||
Args:
|
||||
album_id: The id of the album provided in the URL (integer).
|
||||
"""
|
||||
doc = AlbumDocument()
|
||||
return doc.single_resource(album_id)
|
||||
|
||||
|
||||
# Artist endpoints
|
||||
# Artist ids are their names
|
||||
|
||||
|
||||
@aura_bp.route("/artists")
|
||||
def all_artists():
|
||||
"""Respond with a list of all artists and related information."""
|
||||
doc = ArtistDocument()
|
||||
return doc.all_resources()
|
||||
|
||||
|
||||
# Using the path converter allows slashes in artist_id
|
||||
@aura_bp.route("/artists/<path:artist_id>")
|
||||
def single_artist(artist_id):
|
||||
"""Respond with info about the specified artist.
|
||||
|
||||
Args:
|
||||
artist_id: The id of the artist provided in the URL. A string
|
||||
which is the artist's name.
|
||||
"""
|
||||
doc = ArtistDocument()
|
||||
return doc.single_resource(artist_id)
|
||||
|
||||
|
||||
# Image endpoints
|
||||
# Image ids are in the form <parent_type>-<parent_id>-<img_filename>
|
||||
# For example: album-13-cover.jpg
|
||||
|
||||
|
||||
@aura_bp.route("/images/<string:image_id>")
|
||||
def single_image(image_id):
|
||||
"""Respond with info about the specified image.
|
||||
|
||||
Args:
|
||||
image_id: The id of the image provided in the URL. A string in
|
||||
the form "<parent_type>-<parent_id>-<img_filename>".
|
||||
"""
|
||||
doc = ImageDocument()
|
||||
return doc.single_resource(image_id)
|
||||
|
||||
|
||||
@aura_bp.route("/images/<string:image_id>/file")
|
||||
def image_file(image_id):
|
||||
"""Supply an image file for the specified image.
|
||||
|
||||
Args:
|
||||
image_id: The id of the image provided in the URL. A string in
|
||||
the form "<parent_type>-<parent_id>-<img_filename>".
|
||||
"""
|
||||
img_path = ImageDocument.get_image_path(image_id)
|
||||
if not img_path:
|
||||
return AURADocument.error(
|
||||
"404 Not Found",
|
||||
"No image with the requested id.",
|
||||
"There is no image with an id of {} in the library".format(
|
||||
image_id
|
||||
),
|
||||
)
|
||||
return send_file(img_path)
|
||||
|
||||
|
||||
# WSGI app
|
||||
|
||||
|
||||
def create_app():
|
||||
"""An application factory for use by a WSGI server."""
|
||||
config["aura"].add(
|
||||
{
|
||||
"host": "127.0.0.1",
|
||||
"port": 8337,
|
||||
"cors": [],
|
||||
"cors_supports_credentials": False,
|
||||
"page_limit": 500,
|
||||
}
|
||||
)
|
||||
|
||||
app = Flask(__name__)
|
||||
# Register AURA blueprint view functions under a URL prefix
|
||||
app.register_blueprint(aura_bp, url_prefix="/aura")
|
||||
# AURA specifies mimetype MUST be this
|
||||
app.config["JSONIFY_MIMETYPE"] = "application/vnd.api+json"
|
||||
# Disable auto-sorting of JSON keys
|
||||
app.config["JSON_SORT_KEYS"] = False
|
||||
# Provide a way to access the beets library
|
||||
# The normal method of using the Library and config provided in the
|
||||
# command function is not used because create_app() could be called
|
||||
# by an external WSGI server.
|
||||
# NOTE: this uses a 'private' function from beets.ui.__init__
|
||||
app.config["lib"] = _open_library(config)
|
||||
|
||||
# Enable CORS if required
|
||||
cors = config["aura"]["cors"].as_str_seq(list)
|
||||
if cors:
|
||||
from flask_cors import CORS
|
||||
|
||||
# "Accept" is the only header clients use
|
||||
app.config["CORS_ALLOW_HEADERS"] = "Accept"
|
||||
app.config["CORS_RESOURCES"] = {r"/aura/*": {"origins": cors}}
|
||||
app.config["CORS_SUPPORTS_CREDENTIALS"] = config["aura"][
|
||||
"cors_supports_credentials"
|
||||
].get(bool)
|
||||
CORS(app)
|
||||
|
||||
return app
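
# Illustrative sketch, not part of the diff: because create_app() reads the
# beets config and opens the library itself, the app can be served by an
# external WSGI server as well as by the `beet aura` subcommand below.
# Assuming this lib/ directory is on the import path and gunicorn is
# installed:
#
#   gunicorn 'beetsplug.aura:create_app()'
#
# or, programmatically:
#
#   from beetsplug.aura import create_app
#   application = create_app()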
|
||||
|
||||
|
||||
# Beets Plugin Hook
|
||||
|
||||
|
||||
class AURAPlugin(BeetsPlugin):
|
||||
"""The BeetsPlugin subclass for the AURA server plugin."""
|
||||
|
||||
def __init__(self):
|
||||
"""Add configuration options for the AURA plugin."""
|
||||
super().__init__()
|
||||
|
||||
def commands(self):
|
||||
"""Add subcommand used to run the AURA server."""
|
||||
|
||||
def run_aura(lib, opts, args):
|
||||
"""Run the application using Flask's built in-server.
|
||||
|
||||
Args:
|
||||
lib: A beets Library object (not used).
|
||||
opts: Command line options. An optparse.Values object.
|
||||
args: The list of arguments to process (not used).
|
||||
"""
|
||||
app = create_app()
|
||||
# Start the built-in server (not intended for production)
|
||||
app.run(
|
||||
host=self.config["host"].get(str),
|
||||
port=self.config["port"].get(int),
|
||||
debug=opts.debug,
|
||||
threaded=True,
|
||||
)
|
||||
|
||||
run_aura_cmd = Subcommand("aura", help="run an AURA server")
|
||||
run_aura_cmd.parser.add_option(
|
||||
"-d",
|
||||
"--debug",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="use Flask debug mode",
|
||||
)
|
||||
run_aura_cmd.func = run_aura
|
||||
return [run_aura_cmd]
215 lib/beetsplug/badfiles.py Normal file
@@ -0,0 +1,215 @@
|
||||
# This file is part of beets.
|
||||
# Copyright 2016, François-Xavier Thomas.
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining
|
||||
# a copy of this software and associated documentation files (the
|
||||
# "Software"), to deal in the Software without restriction, including
|
||||
# without limitation the rights to use, copy, modify, merge, publish,
|
||||
# distribute, sublicense, and/or sell copies of the Software, and to
|
||||
# permit persons to whom the Software is furnished to do so, subject to
|
||||
# the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be
|
||||
# included in all copies or substantial portions of the Software.
|
||||
|
||||
"""Use command-line tools to check for audio file corruption.
|
||||
"""
|
||||
|
||||
|
||||
from subprocess import check_output, CalledProcessError, list2cmdline, STDOUT
|
||||
|
||||
import shlex
|
||||
import os
|
||||
import errno
|
||||
import sys
|
||||
import confuse
|
||||
from beets.plugins import BeetsPlugin
|
||||
from beets.ui import Subcommand
|
||||
from beets.util import displayable_path, par_map
|
||||
from beets import ui
|
||||
from beets import importer
|
||||
|
||||
|
||||
class CheckerCommandException(Exception):
|
||||
"""Raised when running a checker failed.
|
||||
|
||||
Attributes:
|
||||
checker: Checker command name.
|
||||
path: Path to the file being validated.
|
||||
errno: Error number from the checker execution error.
|
||||
msg: Message from the checker execution error.
|
||||
"""
|
||||
|
||||
def __init__(self, cmd, oserror):
|
||||
self.checker = cmd[0]
|
||||
self.path = cmd[-1]
|
||||
self.errno = oserror.errno
|
||||
self.msg = str(oserror)
|
||||
|
||||
|
||||
class BadFiles(BeetsPlugin):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.verbose = False
|
||||
|
||||
self.register_listener('import_task_start',
|
||||
self.on_import_task_start)
|
||||
self.register_listener('import_task_before_choice',
|
||||
self.on_import_task_before_choice)
|
||||
|
||||
def run_command(self, cmd):
|
||||
self._log.debug("running command: {}",
|
||||
displayable_path(list2cmdline(cmd)))
|
||||
try:
|
||||
output = check_output(cmd, stderr=STDOUT)
|
||||
errors = 0
|
||||
status = 0
|
||||
except CalledProcessError as e:
|
||||
output = e.output
|
||||
errors = 1
|
||||
status = e.returncode
|
||||
except OSError as e:
|
||||
raise CheckerCommandException(cmd, e)
|
||||
output = output.decode(sys.getdefaultencoding(), 'replace')
|
||||
return status, errors, [line for line in output.split("\n") if line]
|
||||
|
||||
def check_mp3val(self, path):
|
||||
status, errors, output = self.run_command(["mp3val", path])
|
||||
if status == 0:
|
||||
output = [line for line in output if line.startswith("WARNING:")]
|
||||
errors = len(output)
|
||||
return status, errors, output
|
||||
|
||||
def check_flac(self, path):
|
||||
return self.run_command(["flac", "-wst", path])
|
||||
|
||||
def check_custom(self, command):
|
||||
def checker(path):
|
||||
cmd = shlex.split(command)
|
||||
cmd.append(path)
|
||||
return self.run_command(cmd)
|
||||
return checker
|
||||
|
||||
def get_checker(self, ext):
|
||||
ext = ext.lower()
|
||||
try:
|
||||
command = self.config['commands'].get(dict).get(ext)
|
||||
except confuse.NotFoundError:
|
||||
command = None
|
||||
if command:
|
||||
return self.check_custom(command)
|
||||
if ext == "mp3":
|
||||
return self.check_mp3val
|
||||
if ext == "flac":
|
||||
return self.check_flac
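
# Illustrative sketch, not part of the diff: get_checker() prefers a
# user-defined command for the extension, so a config.yaml entry like
#
#   badfiles:
#     commands:
#       ogg: oggz-validate
#
# makes check_custom() run `oggz-validate <path>` for .ogg files, while
# .mp3 and .flac fall back to the built-in mp3val and flac checkers.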
|
||||
|
||||
def check_item(self, item):
|
||||
# First, check whether the path exists. If not, the user
# should probably run `beet update` to clean up the library.
|
||||
dpath = displayable_path(item.path)
|
||||
self._log.debug("checking path: {}", dpath)
|
||||
if not os.path.exists(item.path):
|
||||
ui.print_("{}: file does not exist".format(
|
||||
ui.colorize('text_error', dpath)))
|
||||
|
||||
# Run the checker against the file if one is found
|
||||
ext = os.path.splitext(item.path)[1][1:].decode('utf8', 'ignore')
|
||||
checker = self.get_checker(ext)
|
||||
if not checker:
|
||||
self._log.error("no checker specified in the config for {}",
|
||||
ext)
|
||||
return []
|
||||
path = item.path
|
||||
if not isinstance(path, str):
|
||||
path = item.path.decode(sys.getfilesystemencoding())
|
||||
try:
|
||||
status, errors, output = checker(path)
|
||||
except CheckerCommandException as e:
|
||||
if e.errno == errno.ENOENT:
|
||||
self._log.error(
|
||||
"command not found: {} when validating file: {}",
|
||||
e.checker,
|
||||
e.path
|
||||
)
|
||||
else:
|
||||
self._log.error("error invoking {}: {}", e.checker, e.msg)
|
||||
return []
|
||||
|
||||
error_lines = []
|
||||
|
||||
if status > 0:
|
||||
error_lines.append(
|
||||
"{}: checker exited with status {}"
|
||||
.format(ui.colorize('text_error', dpath), status))
|
||||
for line in output:
|
||||
error_lines.append(f" {line}")
|
||||
|
||||
elif errors > 0:
|
||||
error_lines.append(
|
||||
"{}: checker found {} errors or warnings"
|
||||
.format(ui.colorize('text_warning', dpath), errors))
|
||||
for line in output:
|
||||
error_lines.append(f" {line}")
|
||||
elif self.verbose:
|
||||
error_lines.append(
|
||||
"{}: ok".format(ui.colorize('text_success', dpath)))
|
||||
|
||||
return error_lines
|
||||
|
||||
def on_import_task_start(self, task, session):
|
||||
if not self.config['check_on_import'].get(False):
|
||||
return
|
||||
|
||||
checks_failed = []
|
||||
|
||||
for item in task.items:
|
||||
error_lines = self.check_item(item)
|
||||
if error_lines:
|
||||
checks_failed.append(error_lines)
|
||||
|
||||
if checks_failed:
|
||||
task._badfiles_checks_failed = checks_failed
|
||||
|
||||
def on_import_task_before_choice(self, task, session):
|
||||
if hasattr(task, '_badfiles_checks_failed'):
|
||||
ui.print_('{} one or more files failed checks:'
|
||||
.format(ui.colorize('text_warning', 'BAD')))
|
||||
for error in task._badfiles_checks_failed:
|
||||
for error_line in error:
|
||||
ui.print_(error_line)
|
||||
|
||||
ui.print_()
|
||||
ui.print_('What would you like to do?')
|
||||
|
||||
sel = ui.input_options(['aBort', 'skip', 'continue'])
|
||||
|
||||
if sel == 's':
|
||||
return importer.action.SKIP
|
||||
elif sel == 'c':
|
||||
return None
|
||||
elif sel == 'b':
|
||||
raise importer.ImportAbort()
|
||||
else:
|
||||
raise Exception(f'Unexpected selection: {sel}')
|
||||
|
||||
def command(self, lib, opts, args):
|
||||
# Get items from arguments
|
||||
items = lib.items(ui.decargs(args))
|
||||
self.verbose = opts.verbose
|
||||
|
||||
def check_and_print(item):
|
||||
for error_line in self.check_item(item):
|
||||
ui.print_(error_line)
|
||||
|
||||
par_map(check_and_print, items)
|
||||
|
||||
def commands(self):
|
||||
bad_command = Subcommand('bad',
|
||||
help='check for corrupt or missing files')
|
||||
bad_command.parser.add_option(
|
||||
'-v', '--verbose',
|
||||
action='store_true', default=False, dest='verbose',
|
||||
help='view results for both the bad and uncorrupted files'
|
||||
)
|
||||
bad_command.func = self.command
|
||||
return [bad_command]
|
||||
Some files were not shown because too many files have changed in this diff.