mirror of https://github.com/rembo10/headphones.git
use pkg_resources from setuptools-69.0.3
File diff suppressed because it is too large
@@ -1,608 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor

"""Utilities for determining application-specific dirs.

See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
#   http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html

__version_info__ = (1, 4, 3)
__version__ = '.'.join(map(str, __version_info__))


import sys
import os

PY3 = sys.version_info[0] == 3

if PY3:
    unicode = str

if sys.platform.startswith('java'):
    import platform
    os_name = platform.java_ver()[3][0]
    if os_name.startswith('Windows'):  # "Windows XP", "Windows 7", etc.
        system = 'win32'
    elif os_name.startswith('Mac'):  # "Mac OS X", etc.
        system = 'darwin'
    else:  # "Linux", "SunOS", "FreeBSD", etc.
        # Setting this to "linux2" is not ideal, but only Windows or Mac
        # are actually checked for and the rest of the module expects
        # *sys.platform* style strings.
        system = 'linux2'
else:
    system = sys.platform


def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user data directories are:
        Mac OS X:             ~/Library/Application Support/<AppName>
        Unix:                 ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
        Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
        Win XP (roaming):     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
        Win 7 (not roaming):  C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
        Win 7 (roaming):      C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>

    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
    That means, by default "~/.local/share/<AppName>".
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
        path = os.path.normpath(_get_win_folder(const))
        if appname:
            if appauthor is not False:
                path = os.path.join(path, appauthor, appname)
            else:
                path = os.path.join(path, appname)
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Application Support/')
        if appname:
            path = os.path.join(path, appname)
    else:
        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path


def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return full path to the user-shared data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "multipath" is an optional parameter only applicable to *nix
            which indicates that the entire list of data dirs should be
            returned. By default, the first item from XDG_DATA_DIRS is
            returned, or '/usr/local/share/<AppName>',
            if XDG_DATA_DIRS is not set

    Typical site data directories are:
        Mac OS X: /Library/Application Support/<AppName>
        Unix:     /usr/local/share/<AppName> or /usr/share/<AppName>
        Win XP:   C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
        Vista:    (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
        Win 7:    C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.

    For Unix, this is using the $XDG_DATA_DIRS[0] default.

    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        if appname:
            if appauthor is not False:
                path = os.path.join(path, appauthor, appname)
            else:
                path = os.path.join(path, appname)
    elif system == 'darwin':
        path = os.path.expanduser('/Library/Application Support')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG default for $XDG_DATA_DIRS
        # only first, if multipath is False
        path = os.getenv('XDG_DATA_DIRS',
                         os.pathsep.join(['/usr/local/share', '/usr/share']))
        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
        if appname:
            if version:
                appname = os.path.join(appname, version)
            pathlist = [os.sep.join([x, appname]) for x in pathlist]

        if multipath:
            path = os.pathsep.join(pathlist)
        else:
            path = pathlist[0]
        return path

    if appname and version:
        path = os.path.join(path, version)
    return path


def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific config dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user config directories are:
        Mac OS X: same as user_data_dir
        Unix:     ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
        Win *:    same as user_data_dir

    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
    """
    if system in ["win32", "darwin"]:
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path


def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return full path to the user-shared config dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "multipath" is an optional parameter only applicable to *nix
            which indicates that the entire list of config dirs should be
            returned. By default, the first item from XDG_CONFIG_DIRS is
            returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set

    Typical site config directories are:
        Mac OS X: same as site_data_dir
        Unix:     /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
                  $XDG_CONFIG_DIRS
        Win *:    same as site_data_dir
        Vista:    (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)

    For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False

    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
    """
    if system in ["win32", "darwin"]:
        path = site_data_dir(appname, appauthor)
        if appname and version:
            path = os.path.join(path, version)
    else:
        # XDG default for $XDG_CONFIG_DIRS
        # only first, if multipath is False
        path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
        if appname:
            if version:
                appname = os.path.join(appname, version)
            pathlist = [os.sep.join([x, appname]) for x in pathlist]

        if multipath:
            path = os.pathsep.join(pathlist)
        else:
            path = pathlist[0]
    return path


def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific cache dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "opinion" (boolean) can be False to disable the appending of
            "Cache" to the base app data dir for Windows. See
            discussion below.

    Typical user cache directories are:
        Mac OS X: ~/Library/Caches/<AppName>
        Unix:     ~/.cache/<AppName> (XDG default)
        Win XP:   C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
        Vista:    C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache

    On Windows the only suggestion in the MSDN docs is that local settings go in
    the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
    app data dir (the default returned by `user_data_dir` above). Apps typically
    put cache data somewhere *under* the given dir here. Some examples:
        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
        ...\Acme\SuperApp\Cache\1.0
    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
    This can be disabled with the `opinion=False` option.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        if appname:
            if appauthor is not False:
                path = os.path.join(path, appauthor, appname)
            else:
                path = os.path.join(path, appname)
            if opinion:
                path = os.path.join(path, "Cache")
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Caches')
        if appname:
            path = os.path.join(path, appname)
    else:
        path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path


def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific state dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user state directories are:
        Mac OS X: same as user_data_dir
        Unix:     ~/.local/state/<AppName>   # or in $XDG_STATE_HOME, if defined
        Win *:    same as user_data_dir

    For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
    to extend the XDG spec and support $XDG_STATE_HOME.

    That means, by default "~/.local/state/<AppName>".
    """
    if system in ["win32", "darwin"]:
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path


def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific log dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "opinion" (boolean) can be False to disable the appending of
            "Logs" to the base app data dir for Windows, and "log" to the
            base cache dir for Unix. See discussion below.

    Typical user log directories are:
        Mac OS X: ~/Library/Logs/<AppName>
        Unix:     ~/.cache/<AppName>/log    # or under $XDG_CACHE_HOME if defined
        Win XP:   C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
        Vista:    C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs

    On Windows the only suggestion in the MSDN docs is that local settings
    go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
    examples of what some windows apps use for a logs dir.)

    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
    value for Windows and appends "log" to the user cache dir for Unix.
    This can be disabled with the `opinion=False` option.
    """
    if system == "darwin":
        path = os.path.join(
            os.path.expanduser('~/Library/Logs'),
            appname)
    elif system == "win32":
        path = user_data_dir(appname, appauthor, version)
        version = False
        if opinion:
            path = os.path.join(path, "Logs")
    else:
        path = user_cache_dir(appname, appauthor, version)
        version = False
        if opinion:
            path = os.path.join(path, "log")
    if appname and version:
        path = os.path.join(path, version)
    return path


class AppDirs(object):
    """Convenience wrapper for getting application dirs."""
    def __init__(self, appname=None, appauthor=None, version=None,
                 roaming=False, multipath=False):
        self.appname = appname
        self.appauthor = appauthor
        self.version = version
        self.roaming = roaming
        self.multipath = multipath

    @property
    def user_data_dir(self):
        return user_data_dir(self.appname, self.appauthor,
                             version=self.version, roaming=self.roaming)

    @property
    def site_data_dir(self):
        return site_data_dir(self.appname, self.appauthor,
                             version=self.version, multipath=self.multipath)

    @property
    def user_config_dir(self):
        return user_config_dir(self.appname, self.appauthor,
                               version=self.version, roaming=self.roaming)

    @property
    def site_config_dir(self):
        return site_config_dir(self.appname, self.appauthor,
                               version=self.version, multipath=self.multipath)

    @property
    def user_cache_dir(self):
        return user_cache_dir(self.appname, self.appauthor,
                              version=self.version)

    @property
    def user_state_dir(self):
        return user_state_dir(self.appname, self.appauthor,
                              version=self.version)

    @property
    def user_log_dir(self):
        return user_log_dir(self.appname, self.appauthor,
                            version=self.version)


#---- internal support stuff

def _get_win_folder_from_registry(csidl_name):
    """This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    if PY3:
        import winreg as _winreg
    else:
        import _winreg

    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]

    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
    return dir


def _get_win_folder_with_pywin32(csidl_name):
    from win32com.shell import shellcon, shell
    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
        dir = unicode(dir)

        # Downgrade to short path name if have highbit chars. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        has_high_char = False
        for c in dir:
            if ord(c) > 255:
                has_high_char = True
                break
        if has_high_char:
            try:
                import win32api
                dir = win32api.GetShortPathName(dir)
            except ImportError:
                pass
    except UnicodeError:
        pass
    return dir


def _get_win_folder_with_ctypes(csidl_name):
    import ctypes

    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]

    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in buf:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf2 = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2

    return buf.value


def _get_win_folder_with_jna(csidl_name):
    import array
    from com.sun import jna
    from com.sun.jna.platform import win32

    buf_size = win32.WinDef.MAX_PATH * 2
    buf = array.zeros('c', buf_size)
    shell = win32.Shell32.INSTANCE
    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
    dir = jna.Native.toString(buf.tostring()).rstrip("\0")

    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in dir:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf = array.zeros('c', buf_size)
        kernel = win32.Kernel32.INSTANCE
        if kernel.GetShortPathName(dir, buf, buf_size):
            dir = jna.Native.toString(buf.tostring()).rstrip("\0")

    return dir


if system == "win32":
    try:
        import win32com.shell
        _get_win_folder = _get_win_folder_with_pywin32
    except ImportError:
        try:
            from ctypes import windll
            _get_win_folder = _get_win_folder_with_ctypes
        except ImportError:
            try:
                import com.sun.jna
                _get_win_folder = _get_win_folder_with_jna
            except ImportError:
                _get_win_folder = _get_win_folder_from_registry


#---- self test code

if __name__ == "__main__":
    appname = "MyApp"
    appauthor = "MyCompany"

    props = ("user_data_dir",
             "user_config_dir",
             "user_cache_dir",
             "user_state_dir",
             "user_log_dir",
             "site_data_dir",
             "site_config_dir")

    print("-- app dirs %s --" % __version__)

    print("-- app dirs (with optional 'version')")
    dirs = AppDirs(appname, appauthor, version="1.0")
    for prop in props:
        print("%s: %s" % (prop, getattr(dirs, prop)))

    print("\n-- app dirs (without optional 'version')")
    dirs = AppDirs(appname, appauthor)
    for prop in props:
        print("%s: %s" % (prop, getattr(dirs, prop)))

    print("\n-- app dirs (without optional 'appauthor')")
    dirs = AppDirs(appname)
    for prop in props:
        print("%s: %s" % (prop, getattr(dirs, prop)))

    print("\n-- app dirs (with disabled 'appauthor')")
    dirs = AppDirs(appname, appauthor=False)
    for prop in props:
        print("%s: %s" % (prop, getattr(dirs, prop)))
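
The file deleted above is the vendored appdirs 1.4.3 module. For context, a minimal sketch of the API this deletion removes (not part of the commit; assumes the module shown above is importable as appdirs):

import appdirs

dirs = appdirs.AppDirs("MyApp", "MyCompany", version="1.0")
print(dirs.user_data_dir)   # e.g. ~/.local/share/MyApp/1.0 on Linux
print(dirs.user_cache_dir)  # e.g. ~/.cache/MyApp/1.0 on Linux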
@@ -0,0 +1 @@
importlib_resources
36 lib/pkg_resources/_vendor/importlib_resources/__init__.py Normal file
@@ -0,0 +1,36 @@
"""Read resources contained within a package."""

from ._common import (
    as_file,
    files,
    Package,
)

from ._legacy import (
    contents,
    open_binary,
    read_binary,
    open_text,
    read_text,
    is_resource,
    path,
    Resource,
)

from .abc import ResourceReader


__all__ = [
    'Package',
    'Resource',
    'ResourceReader',
    'as_file',
    'contents',
    'files',
    'is_resource',
    'open_binary',
    'open_text',
    'path',
    'read_binary',
    'read_text',
]
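
The __init__.py above re-exports the modern files() API alongside the deprecated legacy helpers. A quick usage sketch (not part of the commit; assumes the vendored package is importable as importlib_resources):

import importlib_resources

# Traverse a package's resources and read one as text.
root = importlib_resources.files('importlib_resources')
first_line = (root / '__init__.py').read_text(encoding='utf-8').splitlines()[0]
print(first_line)  # the module docstring shown above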
170 lib/pkg_resources/_vendor/importlib_resources/_adapters.py Normal file
@@ -0,0 +1,170 @@
from contextlib import suppress
from io import TextIOWrapper

from . import abc


class SpecLoaderAdapter:
    """
    Adapt a package spec to adapt the underlying loader.
    """

    def __init__(self, spec, adapter=lambda spec: spec.loader):
        self.spec = spec
        self.loader = adapter(spec)

    def __getattr__(self, name):
        return getattr(self.spec, name)


class TraversableResourcesLoader:
    """
    Adapt a loader to provide TraversableResources.
    """

    def __init__(self, spec):
        self.spec = spec

    def get_resource_reader(self, name):
        return CompatibilityFiles(self.spec)._native()


def _io_wrapper(file, mode='r', *args, **kwargs):
    if mode == 'r':
        return TextIOWrapper(file, *args, **kwargs)
    elif mode == 'rb':
        return file
    raise ValueError(
        "Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode)
    )


class CompatibilityFiles:
    """
    Adapter for an existing or non-existent resource reader
    to provide a compatibility .files().
    """

    class SpecPath(abc.Traversable):
        """
        Path tied to a module spec.
        Can be read and exposes the resource reader children.
        """

        def __init__(self, spec, reader):
            self._spec = spec
            self._reader = reader

        def iterdir(self):
            if not self._reader:
                return iter(())
            return iter(
                CompatibilityFiles.ChildPath(self._reader, path)
                for path in self._reader.contents()
            )

        def is_file(self):
            return False

        is_dir = is_file

        def joinpath(self, other):
            if not self._reader:
                return CompatibilityFiles.OrphanPath(other)
            return CompatibilityFiles.ChildPath(self._reader, other)

        @property
        def name(self):
            return self._spec.name

        def open(self, mode='r', *args, **kwargs):
            return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)

    class ChildPath(abc.Traversable):
        """
        Path tied to a resource reader child.
        Can be read but doesn't expose any meaningful children.
        """

        def __init__(self, reader, name):
            self._reader = reader
            self._name = name

        def iterdir(self):
            return iter(())

        def is_file(self):
            return self._reader.is_resource(self.name)

        def is_dir(self):
            return not self.is_file()

        def joinpath(self, other):
            return CompatibilityFiles.OrphanPath(self.name, other)

        @property
        def name(self):
            return self._name

        def open(self, mode='r', *args, **kwargs):
            return _io_wrapper(
                self._reader.open_resource(self.name), mode, *args, **kwargs
            )

    class OrphanPath(abc.Traversable):
        """
        Orphan path, not tied to a module spec or resource reader.
        Can't be read and doesn't expose any meaningful children.
        """

        def __init__(self, *path_parts):
            if len(path_parts) < 1:
                raise ValueError('Need at least one path part to construct a path')
            self._path = path_parts

        def iterdir(self):
            return iter(())

        def is_file(self):
            return False

        is_dir = is_file

        def joinpath(self, other):
            return CompatibilityFiles.OrphanPath(*self._path, other)

        @property
        def name(self):
            return self._path[-1]

        def open(self, mode='r', *args, **kwargs):
            raise FileNotFoundError("Can't open orphan path")

    def __init__(self, spec):
        self.spec = spec

    @property
    def _reader(self):
        with suppress(AttributeError):
            return self.spec.loader.get_resource_reader(self.spec.name)

    def _native(self):
        """
        Return the native reader if it supports files().
        """
        reader = self._reader
        return reader if hasattr(reader, 'files') else self

    def __getattr__(self, attr):
        return getattr(self._reader, attr)

    def files(self):
        return CompatibilityFiles.SpecPath(self.spec, self._reader)


def wrap_spec(package):
    """
    Construct a package spec with traversable compatibility
    on the spec/loader/reader.
    """
    return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
207 lib/pkg_resources/_vendor/importlib_resources/_common.py Normal file
@@ -0,0 +1,207 @@
import os
import pathlib
import tempfile
import functools
import contextlib
import types
import importlib
import inspect
import warnings
import itertools

from typing import Union, Optional, cast
from .abc import ResourceReader, Traversable

from ._compat import wrap_spec

Package = Union[types.ModuleType, str]
Anchor = Package


def package_to_anchor(func):
    """
    Replace 'package' parameter as 'anchor' and warn about the change.

    Other errors should fall through.

    >>> files('a', 'b')
    Traceback (most recent call last):
    TypeError: files() takes from 0 to 1 positional arguments but 2 were given
    """
    undefined = object()

    @functools.wraps(func)
    def wrapper(anchor=undefined, package=undefined):
        if package is not undefined:
            if anchor is not undefined:
                return func(anchor, package)
            warnings.warn(
                "First parameter to files is renamed to 'anchor'",
                DeprecationWarning,
                stacklevel=2,
            )
            return func(package)
        elif anchor is undefined:
            return func()
        return func(anchor)

    return wrapper


@package_to_anchor
def files(anchor: Optional[Anchor] = None) -> Traversable:
    """
    Get a Traversable resource for an anchor.
    """
    return from_package(resolve(anchor))


def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]:
    """
    Return the package's loader if it's a ResourceReader.
    """
    # We can't use an issubclass() check here because apparently abc.'s
    # __subclasscheck__() hook wants to create a weak reference to the
    # object, but zipimport.zipimporter does not support weak references,
    # resulting in a TypeError. That seems terrible.
    spec = package.__spec__
    reader = getattr(spec.loader, 'get_resource_reader', None)  # type: ignore
    if reader is None:
        return None
    return reader(spec.name)  # type: ignore


@functools.singledispatch
def resolve(cand: Optional[Anchor]) -> types.ModuleType:
    return cast(types.ModuleType, cand)


@resolve.register
def _(cand: str) -> types.ModuleType:
    return importlib.import_module(cand)


@resolve.register
def _(cand: None) -> types.ModuleType:
    return resolve(_infer_caller().f_globals['__name__'])


def _infer_caller():
    """
    Walk the stack and find the frame of the first caller not in this module.
    """

    def is_this_file(frame_info):
        return frame_info.filename == __file__

    def is_wrapper(frame_info):
        return frame_info.function == 'wrapper'

    not_this_file = itertools.filterfalse(is_this_file, inspect.stack())
    # also exclude 'wrapper' due to singledispatch in the call stack
    callers = itertools.filterfalse(is_wrapper, not_this_file)
    return next(callers).frame


def from_package(package: types.ModuleType):
    """
    Return a Traversable object for the given package.
    """
    spec = wrap_spec(package)
    reader = spec.loader.get_resource_reader(spec.name)
    return reader.files()


@contextlib.contextmanager
def _tempfile(
    reader,
    suffix='',
    # gh-93353: Keep a reference to call os.remove() in late Python
    # finalization.
    *,
    _os_remove=os.remove,
):
    # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
    # blocks due to the need to close the temporary file to work on Windows
    # properly.
    fd, raw_path = tempfile.mkstemp(suffix=suffix)
    try:
        try:
            os.write(fd, reader())
        finally:
            os.close(fd)
        del reader
        yield pathlib.Path(raw_path)
    finally:
        try:
            _os_remove(raw_path)
        except FileNotFoundError:
            pass


def _temp_file(path):
    return _tempfile(path.read_bytes, suffix=path.name)


def _is_present_dir(path: Traversable) -> bool:
    """
    Some Traversables implement ``is_dir()`` to raise an
    exception (i.e. ``FileNotFoundError``) when the
    directory doesn't exist. This function wraps that call
    to always return a boolean and only return True
    if there's a dir and it exists.
    """
    with contextlib.suppress(FileNotFoundError):
        return path.is_dir()
    return False


@functools.singledispatch
def as_file(path):
    """
    Given a Traversable object, return that object as a
    path on the local file system in a context manager.
    """
    return _temp_dir(path) if _is_present_dir(path) else _temp_file(path)


@as_file.register(pathlib.Path)
@contextlib.contextmanager
def _(path):
    """
    Degenerate behavior for pathlib.Path objects.
    """
    yield path


@contextlib.contextmanager
def _temp_path(dir: tempfile.TemporaryDirectory):
    """
    Wrap tempfile.TemporaryDirectory to return a pathlib object.
    """
    with dir as result:
        yield pathlib.Path(result)


@contextlib.contextmanager
def _temp_dir(path):
    """
    Given a traversable dir, recursively replicate the whole tree
    to the file system in a context manager.
    """
    assert path.is_dir()
    with _temp_path(tempfile.TemporaryDirectory()) as temp_dir:
        yield _write_contents(temp_dir, path)


def _write_contents(target, source):
    child = target.joinpath(source.name)
    if source.is_dir():
        child.mkdir()
        for item in source.iterdir():
            _write_contents(child, item)
    else:
        child.write_bytes(source.read_bytes())
    return child
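
_common.py supplies the two core entry points: files() resolves an anchor (a module, a dotted name, or the caller when omitted) to a Traversable, and as_file() materializes a Traversable on the real file system. A sketch of how they combine (not part of the commit; assumes the package is importable as importlib_resources):

from importlib_resources import files, as_file

resource = files('importlib_resources') / 'abc.py'
with as_file(resource) as path:
    # path is a concrete pathlib.Path; a temporary copy is made only when
    # the resource is not already on the file system (e.g. inside a zip).
    print(path.read_bytes()[:20])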
108 lib/pkg_resources/_vendor/importlib_resources/_compat.py Normal file
@@ -0,0 +1,108 @@
# flake8: noqa

import abc
import os
import sys
import pathlib
from contextlib import suppress
from typing import Union


if sys.version_info >= (3, 10):
    from zipfile import Path as ZipPath  # type: ignore
else:
    from ..zipp import Path as ZipPath  # type: ignore


try:
    from typing import runtime_checkable  # type: ignore
except ImportError:

    def runtime_checkable(cls):  # type: ignore
        return cls


try:
    from typing import Protocol  # type: ignore
except ImportError:
    Protocol = abc.ABC  # type: ignore


class TraversableResourcesLoader:
    """
    Adapt loaders to provide TraversableResources and other
    compatibility.

    Used primarily for Python 3.9 and earlier where the native
    loaders do not yet implement TraversableResources.
    """

    def __init__(self, spec):
        self.spec = spec

    @property
    def path(self):
        return self.spec.origin

    def get_resource_reader(self, name):
        from . import readers, _adapters

        def _zip_reader(spec):
            with suppress(AttributeError):
                return readers.ZipReader(spec.loader, spec.name)

        def _namespace_reader(spec):
            with suppress(AttributeError, ValueError):
                return readers.NamespaceReader(spec.submodule_search_locations)

        def _available_reader(spec):
            with suppress(AttributeError):
                return spec.loader.get_resource_reader(spec.name)

        def _native_reader(spec):
            reader = _available_reader(spec)
            return reader if hasattr(reader, 'files') else None

        def _file_reader(spec):
            try:
                path = pathlib.Path(self.path)
            except TypeError:
                return None
            if path.exists():
                return readers.FileReader(self)

        return (
            # native reader if it supplies 'files'
            _native_reader(self.spec)
            or
            # local ZipReader if a zip module
            _zip_reader(self.spec)
            or
            # local NamespaceReader if a namespace module
            _namespace_reader(self.spec)
            or
            # local FileReader
            _file_reader(self.spec)
            # fallback - adapt the spec ResourceReader to TraversableReader
            or _adapters.CompatibilityFiles(self.spec)
        )


def wrap_spec(package):
    """
    Construct a package spec with traversable compatibility
    on the spec/loader/reader.

    Supersedes _adapters.wrap_spec to use TraversableResourcesLoader
    from above for older Python compatibility (<3.10).
    """
    from . import _adapters

    return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)


if sys.version_info >= (3, 9):
    StrPath = Union[str, os.PathLike[str]]
else:
    # PathLike is only subscriptable at runtime in 3.9+
    StrPath = Union[str, "os.PathLike[str]"]
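
The fallback chain in get_resource_reader() above tries a native reader first, then zip, namespace, and file readers, before adapting whatever remains via CompatibilityFiles. A sketch of exercising the shim (not part of the commit; assumes the package is importable as importlib_resources):

import email  # any ordinary file-system package works here
from importlib_resources._compat import wrap_spec

spec = wrap_spec(email)
reader = spec.loader.get_resource_reader(spec.name)
print(type(reader).__name__)  # typically FileReader on a file-system install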
35 lib/pkg_resources/_vendor/importlib_resources/_itertools.py Normal file
@@ -0,0 +1,35 @@
from itertools import filterfalse

from typing import (
    Callable,
    Iterable,
    Iterator,
    Optional,
    Set,
    TypeVar,
    Union,
)

# Type and type variable definitions
_T = TypeVar('_T')
_U = TypeVar('_U')


def unique_everseen(
    iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None
) -> Iterator[_T]:
    "List unique elements, preserving order. Remember all elements ever seen."
    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
    # unique_everseen('ABBCcAD', str.lower) --> A B C D
    seen: Set[Union[_T, _U]] = set()
    seen_add = seen.add
    if key is None:
        for element in filterfalse(seen.__contains__, iterable):
            seen_add(element)
            yield element
    else:
        for element in iterable:
            k = key(element)
            if k not in seen:
                seen_add(k)
                yield element
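
The comments in unique_everseen() double as its contract; a sketch (not part of the commit; assumes the module is importable as importlib_resources._itertools):

from importlib_resources._itertools import unique_everseen

print(list(unique_everseen('AAAABBBCCDAABBB')))     # ['A', 'B', 'C', 'D']
print(list(unique_everseen('ABBCcAD', str.lower)))  # ['A', 'B', 'C', 'D']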
120 lib/pkg_resources/_vendor/importlib_resources/_legacy.py Normal file
@@ -0,0 +1,120 @@
import functools
import os
import pathlib
import types
import warnings

from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any

from . import _common

Package = Union[types.ModuleType, str]
Resource = str


def deprecated(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        warnings.warn(
            f"{func.__name__} is deprecated. Use files() instead. "
            "Refer to https://importlib-resources.readthedocs.io"
            "/en/latest/using.html#migrating-from-legacy for migration advice.",
            DeprecationWarning,
            stacklevel=2,
        )
        return func(*args, **kwargs)

    return wrapper


def normalize_path(path: Any) -> str:
    """Normalize a path by ensuring it is a string.

    If the resulting string contains path separators, an exception is raised.
    """
    str_path = str(path)
    parent, file_name = os.path.split(str_path)
    if parent:
        raise ValueError(f'{path!r} must be only a file name')
    return file_name


@deprecated
def open_binary(package: Package, resource: Resource) -> BinaryIO:
    """Return a file-like object opened for binary reading of the resource."""
    return (_common.files(package) / normalize_path(resource)).open('rb')


@deprecated
def read_binary(package: Package, resource: Resource) -> bytes:
    """Return the binary contents of the resource."""
    return (_common.files(package) / normalize_path(resource)).read_bytes()


@deprecated
def open_text(
    package: Package,
    resource: Resource,
    encoding: str = 'utf-8',
    errors: str = 'strict',
) -> TextIO:
    """Return a file-like object opened for text reading of the resource."""
    return (_common.files(package) / normalize_path(resource)).open(
        'r', encoding=encoding, errors=errors
    )


@deprecated
def read_text(
    package: Package,
    resource: Resource,
    encoding: str = 'utf-8',
    errors: str = 'strict',
) -> str:
    """Return the decoded string of the resource.

    The decoding-related arguments have the same semantics as those of
    bytes.decode().
    """
    with open_text(package, resource, encoding, errors) as fp:
        return fp.read()


@deprecated
def contents(package: Package) -> Iterable[str]:
    """Return an iterable of entries in `package`.

    Note that not all entries are resources. Specifically, directories are
    not considered resources. Use `is_resource()` on each entry returned here
    to check if it is a resource or not.
    """
    return [path.name for path in _common.files(package).iterdir()]


@deprecated
def is_resource(package: Package, name: str) -> bool:
    """True if `name` is a resource inside `package`.

    Directories are *not* resources.
    """
    resource = normalize_path(name)
    return any(
        traversable.name == resource and traversable.is_file()
        for traversable in _common.files(package).iterdir()
    )


@deprecated
def path(
    package: Package,
    resource: Resource,
) -> ContextManager[pathlib.Path]:
    """A context manager providing a file path object to the resource.

    If the resource does not already exist on its own on the file system,
    a temporary file will be created. If the file was created, the file
    will be deleted upon exiting the context manager (no exception is
    raised if the file was deleted prior to the context manager
    exiting).
    """
    return _common.as_file(_common.files(package) / normalize_path(resource))
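
Every helper in _legacy.py warns and delegates to files(); the migration the warning points to looks like this (not part of the commit; 'pkg' and 'data.txt' are hypothetical names):

import importlib_resources as resources

# Legacy, emits DeprecationWarning:
#     text = resources.read_text('pkg', 'data.txt')
# Preferred replacement:
text = (resources.files('pkg') / 'data.txt').read_text(encoding='utf-8')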
170 lib/pkg_resources/_vendor/importlib_resources/abc.py Normal file
@@ -0,0 +1,170 @@
import abc
import io
import itertools
import pathlib
from typing import Any, BinaryIO, Iterable, Iterator, NoReturn, Text, Optional

from ._compat import runtime_checkable, Protocol, StrPath


__all__ = ["ResourceReader", "Traversable", "TraversableResources"]


class ResourceReader(metaclass=abc.ABCMeta):
    """Abstract base class for loaders to provide resource reading support."""

    @abc.abstractmethod
    def open_resource(self, resource: Text) -> BinaryIO:
        """Return an opened, file-like object for binary reading.

        The 'resource' argument is expected to represent only a file name.
        If the resource cannot be found, FileNotFoundError is raised.
        """
        # This deliberately raises FileNotFoundError instead of
        # NotImplementedError so that if this method is accidentally called,
        # it'll still do the right thing.
        raise FileNotFoundError

    @abc.abstractmethod
    def resource_path(self, resource: Text) -> Text:
        """Return the file system path to the specified resource.

        The 'resource' argument is expected to represent only a file name.
        If the resource does not exist on the file system, raise
        FileNotFoundError.
        """
        # This deliberately raises FileNotFoundError instead of
        # NotImplementedError so that if this method is accidentally called,
        # it'll still do the right thing.
        raise FileNotFoundError

    @abc.abstractmethod
    def is_resource(self, path: Text) -> bool:
        """Return True if the named 'path' is a resource.

        Files are resources, directories are not.
        """
        raise FileNotFoundError

    @abc.abstractmethod
    def contents(self) -> Iterable[str]:
        """Return an iterable of entries in `package`."""
        raise FileNotFoundError


class TraversalError(Exception):
    pass


@runtime_checkable
class Traversable(Protocol):
    """
    An object with a subset of pathlib.Path methods suitable for
    traversing directories and opening files.

    Any exceptions that occur when accessing the backing resource
    may propagate unaltered.
    """

    @abc.abstractmethod
    def iterdir(self) -> Iterator["Traversable"]:
        """
        Yield Traversable objects in self
        """

    def read_bytes(self) -> bytes:
        """
        Read contents of self as bytes
        """
        with self.open('rb') as strm:
            return strm.read()

    def read_text(self, encoding: Optional[str] = None) -> str:
        """
        Read contents of self as text
        """
        with self.open(encoding=encoding) as strm:
            return strm.read()

    @abc.abstractmethod
    def is_dir(self) -> bool:
        """
        Return True if self is a directory
        """

    @abc.abstractmethod
    def is_file(self) -> bool:
        """
        Return True if self is a file
        """

    def joinpath(self, *descendants: StrPath) -> "Traversable":
        """
        Return Traversable resolved with any descendants applied.

        Each descendant should be a path segment relative to self
        and each may contain multiple levels separated by
        ``posixpath.sep`` (``/``).
        """
        if not descendants:
            return self
        names = itertools.chain.from_iterable(
            path.parts for path in map(pathlib.PurePosixPath, descendants)
        )
        target = next(names)
        matches = (
            traversable for traversable in self.iterdir() if traversable.name == target
        )
        try:
            match = next(matches)
        except StopIteration:
            raise TraversalError(
                "Target not found during traversal.", target, list(names)
            )
        return match.joinpath(*names)

    def __truediv__(self, child: StrPath) -> "Traversable":
        """
        Return Traversable child in self
        """
        return self.joinpath(child)

    @abc.abstractmethod
    def open(self, mode='r', *args, **kwargs):
        """
        mode may be 'r' or 'rb' to open as text or binary. Return a handle
        suitable for reading (same as pathlib.Path.open).

        When opening as text, accepts encoding parameters such as those
        accepted by io.TextIOWrapper.
        """

    @property
    @abc.abstractmethod
    def name(self) -> str:
        """
        The base name of this object without any parent references.
        """


class TraversableResources(ResourceReader):
    """
    The required interface for providing traversable
    resources.
    """

    @abc.abstractmethod
    def files(self) -> "Traversable":
        """Return a Traversable object for the loaded package."""

    def open_resource(self, resource: StrPath) -> io.BufferedReader:
        return self.files().joinpath(resource).open('rb')

    def resource_path(self, resource: Any) -> NoReturn:
        raise FileNotFoundError(resource)

    def is_resource(self, path: StrPath) -> bool:
        return self.files().joinpath(path).is_file()

    def contents(self) -> Iterator[str]:
        return (item.name for item in self.files().iterdir())
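
Per the joinpath() docstring above, descendants may span multiple '/'-separated levels, so the two spellings below are equivalent (not part of the commit; assumes the package is importable as importlib_resources):

from importlib_resources import files

root = files('importlib_resources')
a = root.joinpath('tests', '_path.py')
b = root / 'tests' / '_path.py'
assert a.name == b.name == '_path.py'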
120 lib/pkg_resources/_vendor/importlib_resources/readers.py Normal file
@@ -0,0 +1,120 @@
import collections
import pathlib
import operator

from . import abc

from ._itertools import unique_everseen
from ._compat import ZipPath


def remove_duplicates(items):
    return iter(collections.OrderedDict.fromkeys(items))


class FileReader(abc.TraversableResources):
    def __init__(self, loader):
        self.path = pathlib.Path(loader.path).parent

    def resource_path(self, resource):
        """
        Return the file system path to prevent
        `resources.path()` from creating a temporary
        copy.
        """
        return str(self.path.joinpath(resource))

    def files(self):
        return self.path


class ZipReader(abc.TraversableResources):
    def __init__(self, loader, module):
        _, _, name = module.rpartition('.')
        self.prefix = loader.prefix.replace('\\', '/') + name + '/'
        self.archive = loader.archive

    def open_resource(self, resource):
        try:
            return super().open_resource(resource)
        except KeyError as exc:
            raise FileNotFoundError(exc.args[0])

    def is_resource(self, path):
        # workaround for `zipfile.Path.is_file` returning true
        # for non-existent paths.
        target = self.files().joinpath(path)
        return target.is_file() and target.exists()

    def files(self):
        return ZipPath(self.archive, self.prefix)


class MultiplexedPath(abc.Traversable):
    """
    Given a series of Traversable objects, implement a merged
    version of the interface across all objects. Useful for
    namespace packages which may be multihomed at a single
    name.
    """

    def __init__(self, *paths):
        self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
        if not self._paths:
            message = 'MultiplexedPath must contain at least one path'
            raise FileNotFoundError(message)
        if not all(path.is_dir() for path in self._paths):
            raise NotADirectoryError('MultiplexedPath only supports directories')

    def iterdir(self):
        files = (file for path in self._paths for file in path.iterdir())
        return unique_everseen(files, key=operator.attrgetter('name'))

    def read_bytes(self):
        raise FileNotFoundError(f'{self} is not a file')

    def read_text(self, *args, **kwargs):
        raise FileNotFoundError(f'{self} is not a file')

    def is_dir(self):
        return True

    def is_file(self):
        return False

    def joinpath(self, *descendants):
        try:
            return super().joinpath(*descendants)
        except abc.TraversalError:
            # One of the paths did not resolve (a directory does not exist).
            # Just return something that will not exist.
            return self._paths[0].joinpath(*descendants)

    def open(self, *args, **kwargs):
        raise FileNotFoundError(f'{self} is not a file')

    @property
    def name(self):
        return self._paths[0].name

    def __repr__(self):
        paths = ', '.join(f"'{path}'" for path in self._paths)
        return f'MultiplexedPath({paths})'


class NamespaceReader(abc.TraversableResources):
    def __init__(self, namespace_path):
        if 'NamespacePath' not in str(namespace_path):
            raise ValueError('Invalid path')
        self.path = MultiplexedPath(*list(namespace_path))

    def resource_path(self, resource):
        """
        Return the file system path to prevent
        `resources.path()` from creating a temporary
        copy.
        """
        return str(self.path.joinpath(resource))

    def files(self):
        return self.path
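
MultiplexedPath merges several directories under one Traversable, which is how namespace packages with multiple roots are served. A self-contained sketch (not part of the commit; assumes the package is importable as importlib_resources):

import pathlib
import tempfile
from importlib_resources.readers import MultiplexedPath

with tempfile.TemporaryDirectory() as a, tempfile.TemporaryDirectory() as b:
    pathlib.Path(a, 'one.txt').write_text('1')
    pathlib.Path(b, 'two.txt').write_text('2')
    merged = MultiplexedPath(a, b)
    print(sorted(p.name for p in merged.iterdir()))  # ['one.txt', 'two.txt']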
106 lib/pkg_resources/_vendor/importlib_resources/simple.py Normal file
@@ -0,0 +1,106 @@
"""
Interface adapters for low-level readers.
"""

import abc
import io
import itertools
from typing import BinaryIO, List

from .abc import Traversable, TraversableResources


class SimpleReader(abc.ABC):
    """
    The minimum, low-level interface required from a resource
    provider.
    """

    @property
    @abc.abstractmethod
    def package(self) -> str:
        """
        The name of the package for which this reader loads resources.
        """

    @abc.abstractmethod
    def children(self) -> List['SimpleReader']:
        """
        Obtain an iterable of SimpleReader for available
        child containers (e.g. directories).
        """

    @abc.abstractmethod
    def resources(self) -> List[str]:
        """
        Obtain available named resources for this virtual package.
        """

    @abc.abstractmethod
    def open_binary(self, resource: str) -> BinaryIO:
        """
        Obtain a File-like for a named resource.
        """

    @property
    def name(self):
        return self.package.split('.')[-1]


class ResourceContainer(Traversable):
    """
    Traversable container for a package's resources via its reader.
    """

    def __init__(self, reader: SimpleReader):
        self.reader = reader

    def is_dir(self):
        return True

    def is_file(self):
        return False

    def iterdir(self):
        files = (ResourceHandle(self, name) for name in self.reader.resources)
        dirs = map(ResourceContainer, self.reader.children())
        return itertools.chain(files, dirs)

    def open(self, *args, **kwargs):
        raise IsADirectoryError()


class ResourceHandle(Traversable):
    """
    Handle to a named resource in a ResourceReader.
    """

    def __init__(self, parent: ResourceContainer, name: str):
        self.parent = parent
        self.name = name  # type: ignore

    def is_file(self):
        return True

    def is_dir(self):
        return False

    def open(self, mode='r', *args, **kwargs):
        stream = self.parent.reader.open_binary(self.name)
        if 'b' not in mode:
            stream = io.TextIOWrapper(stream, *args, **kwargs)  # pass the binary stream; the original omitted it, so text mode could never work
        return stream

    def joinpath(self, name):
        raise RuntimeError("Cannot traverse into a resource")


class TraversableReader(TraversableResources, SimpleReader):
    """
    A TraversableResources based on SimpleReader. Resource providers
    may derive from this class to provide the TraversableResources
    interface by supplying the SimpleReader interface.
    """

    def files(self):
        return ResourceContainer(self)
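A hedged sketch of how these pieces compose (DictReader and its data are hypothetical): implement the SimpleReader members, mix in TraversableReader, and files() hands back a traversable tree backed by nothing but those callbacks. Note that ResourceContainer.iterdir reads reader.resources as an attribute, so the sketch defines it as a property.

import io

class DictReader(TraversableReader):
    """Hypothetical reader serving resources from an in-memory dict."""

    def __init__(self, package, data):
        self._package = package
        self._data = data

    @property
    def package(self):
        return self._package

    def children(self):
        return []  # flat: no child containers

    @property
    def resources(self):
        # iterdir() above accesses this as an attribute, hence a property
        return list(self._data)

    def open_binary(self, resource):
        return io.BytesIO(self._data[resource])

files = DictReader('demo', {'greeting.txt': b'hi'}).files()
print([item.name for item in files.iterdir()])  # expect ['greeting.txt']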
@@ -0,0 +1,32 @@
import os


try:
    from test.support import import_helper  # type: ignore
except ImportError:
    # Python 3.9 and earlier
    class import_helper:  # type: ignore
        from test.support import (
            modules_setup,
            modules_cleanup,
            DirsOnSysPath,
            CleanImport,
        )


try:
    from test.support import os_helper  # type: ignore
except ImportError:
    # Python 3.9 compat
    class os_helper:  # type:ignore
        from test.support import temp_dir


try:
    # Python 3.10
    from test.support.os_helper import unlink
except ImportError:
    from test.support import unlink as _unlink

    def unlink(target):
        return _unlink(os.fspath(target))
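The class-as-namespace trick above deserves a note: a class body may contain import statements, so a bare class can stand in for a helper module that only exists on newer Pythons. A generic hedged sketch of the idiom (module names hypothetical):

try:
    from somepkg import helpers  # hypothetical: present only in newer releases
except ImportError:
    class helpers:  # type: ignore  # shim: the class body namespaces the old locations
        from somepkg_legacy import setup, teardown

# Either way, callers use one spelling: helpers.setup(), helpers.teardown()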
50	lib/pkg_resources/_vendor/importlib_resources/tests/_path.py	Normal file
@@ -0,0 +1,50 @@
import pathlib
import functools


####
# from jaraco.path 3.4


def build(spec, prefix=pathlib.Path()):
    """
    Build a set of files/directories, as described by the spec.

    Each key represents a pathname, and the value represents
    the content. Content may be a nested directory.

    >>> spec = {
    ...     'README.txt': "A README file",
    ...     "foo": {
    ...         "__init__.py": "",
    ...         "bar": {
    ...             "__init__.py": "",
    ...         },
    ...         "baz.py": "# Some code",
    ...     }
    ... }
    >>> tmpdir = getfixture('tmpdir')
    >>> build(spec, tmpdir)
    """
    for name, contents in spec.items():
        create(contents, pathlib.Path(prefix) / name)


@functools.singledispatch
def create(content, path):
    path.mkdir(exist_ok=True)
    build(content, prefix=path)  # type: ignore


@create.register
def _(content: bytes, path):
    path.write_bytes(content)


@create.register
def _(content: str, path):
    path.write_text(content)


# end from jaraco.path
####
@@ -0,0 +1 @@
one resource
@@ -0,0 +1 @@
two resource
@@ -0,0 +1,102 @@
import io
import unittest

import importlib_resources as resources

from importlib_resources._adapters import (
    CompatibilityFiles,
    wrap_spec,
)

from . import util


class CompatibilityFilesTests(unittest.TestCase):
    @property
    def package(self):
        bytes_data = io.BytesIO(b'Hello, world!')
        return util.create_package(
            file=bytes_data,
            path='some_path',
            contents=('a', 'b', 'c'),
        )

    @property
    def files(self):
        return resources.files(self.package)

    def test_spec_path_iter(self):
        self.assertEqual(
            sorted(path.name for path in self.files.iterdir()),
            ['a', 'b', 'c'],
        )

    def test_child_path_iter(self):
        self.assertEqual(list((self.files / 'a').iterdir()), [])

    def test_orphan_path_iter(self):
        self.assertEqual(list((self.files / 'a' / 'a').iterdir()), [])
        self.assertEqual(list((self.files / 'a' / 'a' / 'a').iterdir()), [])

    def test_spec_path_is(self):
        self.assertFalse(self.files.is_file())
        self.assertFalse(self.files.is_dir())

    def test_child_path_is(self):
        self.assertTrue((self.files / 'a').is_file())
        self.assertFalse((self.files / 'a').is_dir())

    def test_orphan_path_is(self):
        self.assertFalse((self.files / 'a' / 'a').is_file())
        self.assertFalse((self.files / 'a' / 'a').is_dir())
        self.assertFalse((self.files / 'a' / 'a' / 'a').is_file())
        self.assertFalse((self.files / 'a' / 'a' / 'a').is_dir())

    def test_spec_path_name(self):
        self.assertEqual(self.files.name, 'testingpackage')

    def test_child_path_name(self):
        self.assertEqual((self.files / 'a').name, 'a')

    def test_orphan_path_name(self):
        self.assertEqual((self.files / 'a' / 'b').name, 'b')
        self.assertEqual((self.files / 'a' / 'b' / 'c').name, 'c')

    def test_spec_path_open(self):
        self.assertEqual(self.files.read_bytes(), b'Hello, world!')
        self.assertEqual(self.files.read_text(), 'Hello, world!')

    def test_child_path_open(self):
        self.assertEqual((self.files / 'a').read_bytes(), b'Hello, world!')
        self.assertEqual((self.files / 'a').read_text(), 'Hello, world!')

    def test_orphan_path_open(self):
        with self.assertRaises(FileNotFoundError):
            (self.files / 'a' / 'b').read_bytes()
        with self.assertRaises(FileNotFoundError):
            (self.files / 'a' / 'b' / 'c').read_bytes()

    def test_open_invalid_mode(self):
        with self.assertRaises(ValueError):
            self.files.open('0')

    def test_orphan_path_invalid(self):
        with self.assertRaises(ValueError):
            CompatibilityFiles.OrphanPath()

    def test_wrap_spec(self):
        spec = wrap_spec(self.package)
        self.assertIsInstance(spec.loader.get_resource_reader(None), CompatibilityFiles)


class CompatibilityFilesNoReaderTests(unittest.TestCase):
    @property
    def package(self):
        return util.create_package_from_loader(None)

    @property
    def files(self):
        return resources.files(self.package)

    def test_spec_path_joinpath(self):
        self.assertIsInstance(self.files / 'a', CompatibilityFiles.OrphanPath)
@@ -0,0 +1,43 @@
import unittest
import importlib_resources as resources

from . import data01
from . import util


class ContentsTests:
    expected = {
        '__init__.py',
        'binary.file',
        'subdirectory',
        'utf-16.file',
        'utf-8.file',
    }

    def test_contents(self):
        contents = {path.name for path in resources.files(self.data).iterdir()}
        assert self.expected <= contents


class ContentsDiskTests(ContentsTests, unittest.TestCase):
    def setUp(self):
        self.data = data01


class ContentsZipTests(ContentsTests, util.ZipSetup, unittest.TestCase):
    pass


class ContentsNamespaceTests(ContentsTests, unittest.TestCase):
    expected = {
        # no __init__ because of namespace design
        # no subdirectory as incidental difference in fixture
        'binary.file',
        'utf-16.file',
        'utf-8.file',
    }

    def setUp(self):
        from . import namespacedata01

        self.data = namespacedata01
@@ -0,0 +1,112 @@
import typing
import textwrap
import unittest
import warnings
import importlib
import contextlib

import importlib_resources as resources
from ..abc import Traversable
from . import data01
from . import util
from . import _path
from ._compat import os_helper, import_helper


@contextlib.contextmanager
def suppress_known_deprecation():
    with warnings.catch_warnings(record=True) as ctx:
        warnings.simplefilter('default', category=DeprecationWarning)
        yield ctx


class FilesTests:
    def test_read_bytes(self):
        files = resources.files(self.data)
        actual = files.joinpath('utf-8.file').read_bytes()
        assert actual == b'Hello, UTF-8 world!\n'

    def test_read_text(self):
        files = resources.files(self.data)
        actual = files.joinpath('utf-8.file').read_text(encoding='utf-8')
        assert actual == 'Hello, UTF-8 world!\n'

    @unittest.skipUnless(
        hasattr(typing, 'runtime_checkable'),
        "Only suitable when typing supports runtime_checkable",
    )
    def test_traversable(self):
        assert isinstance(resources.files(self.data), Traversable)

    def test_old_parameter(self):
        """
        Files used to take a 'package' parameter. Make sure anyone
        passing by name is still supported.
        """
        with suppress_known_deprecation():
            resources.files(package=self.data)


class OpenDiskTests(FilesTests, unittest.TestCase):
    def setUp(self):
        self.data = data01


class OpenZipTests(FilesTests, util.ZipSetup, unittest.TestCase):
    pass


class OpenNamespaceTests(FilesTests, unittest.TestCase):
    def setUp(self):
        from . import namespacedata01

        self.data = namespacedata01


class SiteDir:
    def setUp(self):
        self.fixtures = contextlib.ExitStack()
        self.addCleanup(self.fixtures.close)
        self.site_dir = self.fixtures.enter_context(os_helper.temp_dir())
        self.fixtures.enter_context(import_helper.DirsOnSysPath(self.site_dir))
        self.fixtures.enter_context(import_helper.CleanImport())


class ModulesFilesTests(SiteDir, unittest.TestCase):
    def test_module_resources(self):
        """
        A module can have resources found adjacent to the module.
        """
        spec = {
            'mod.py': '',
            'res.txt': 'resources are the best',
        }
        _path.build(spec, self.site_dir)
        import mod

        actual = resources.files(mod).joinpath('res.txt').read_text()
        assert actual == spec['res.txt']


class ImplicitContextFilesTests(SiteDir, unittest.TestCase):
    def test_implicit_files(self):
        """
        Without any parameter, files() will infer the location as the caller.
        """
        spec = {
            'somepkg': {
                '__init__.py': textwrap.dedent(
                    """
                    import importlib_resources as res
                    val = res.files().joinpath('res.txt').read_text()
                    """
                ),
                'res.txt': 'resources are the best',
            },
        }
        _path.build(spec, self.site_dir)
        assert importlib.import_module('somepkg').val == 'resources are the best'


if __name__ == '__main__':
    unittest.main()
@@ -0,0 +1,81 @@
import unittest

import importlib_resources as resources
from . import data01
from . import util


class CommonBinaryTests(util.CommonTests, unittest.TestCase):
    def execute(self, package, path):
        target = resources.files(package).joinpath(path)
        with target.open('rb'):
            pass


class CommonTextTests(util.CommonTests, unittest.TestCase):
    def execute(self, package, path):
        target = resources.files(package).joinpath(path)
        with target.open():
            pass


class OpenTests:
    def test_open_binary(self):
        target = resources.files(self.data) / 'binary.file'
        with target.open('rb') as fp:
            result = fp.read()
            self.assertEqual(result, b'\x00\x01\x02\x03')

    def test_open_text_default_encoding(self):
        target = resources.files(self.data) / 'utf-8.file'
        with target.open() as fp:
            result = fp.read()
            self.assertEqual(result, 'Hello, UTF-8 world!\n')

    def test_open_text_given_encoding(self):
        target = resources.files(self.data) / 'utf-16.file'
        with target.open(encoding='utf-16', errors='strict') as fp:
            result = fp.read()
        self.assertEqual(result, 'Hello, UTF-16 world!\n')

    def test_open_text_with_errors(self):
        # Raises UnicodeError without the 'errors' argument.
        target = resources.files(self.data) / 'utf-16.file'
        with target.open(encoding='utf-8', errors='strict') as fp:
            self.assertRaises(UnicodeError, fp.read)
        with target.open(encoding='utf-8', errors='ignore') as fp:
            result = fp.read()
        self.assertEqual(
            result,
            'H\x00e\x00l\x00l\x00o\x00,\x00 '
            '\x00U\x00T\x00F\x00-\x001\x006\x00 '
            '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00',
        )

    def test_open_binary_FileNotFoundError(self):
        target = resources.files(self.data) / 'does-not-exist'
        self.assertRaises(FileNotFoundError, target.open, 'rb')

    def test_open_text_FileNotFoundError(self):
        target = resources.files(self.data) / 'does-not-exist'
        self.assertRaises(FileNotFoundError, target.open)


class OpenDiskTests(OpenTests, unittest.TestCase):
    def setUp(self):
        self.data = data01


class OpenDiskNamespaceTests(OpenTests, unittest.TestCase):
    def setUp(self):
        from . import namespacedata01

        self.data = namespacedata01


class OpenZipTests(OpenTests, util.ZipSetup, unittest.TestCase):
    pass


if __name__ == '__main__':
    unittest.main()
@@ -0,0 +1,64 @@
import io
import unittest

import importlib_resources as resources
from . import data01
from . import util


class CommonTests(util.CommonTests, unittest.TestCase):
    def execute(self, package, path):
        with resources.as_file(resources.files(package).joinpath(path)):
            pass


class PathTests:
    def test_reading(self):
        # Path should be readable.
        # Test also implicitly verifies the returned object is a pathlib.Path
        # instance.
        target = resources.files(self.data) / 'utf-8.file'
        with resources.as_file(target) as path:
            self.assertTrue(path.name.endswith("utf-8.file"), repr(path))
            # pathlib.Path.read_text() was introduced in Python 3.5.
            with path.open('r', encoding='utf-8') as file:
                text = file.read()
            self.assertEqual('Hello, UTF-8 world!\n', text)


class PathDiskTests(PathTests, unittest.TestCase):
    data = data01

    def test_natural_path(self):
        """
        Guarantee the internal implementation detail that
        file-system-backed resources do not get the tempdir
        treatment.
        """
        target = resources.files(self.data) / 'utf-8.file'
        with resources.as_file(target) as path:
            assert 'data' in str(path)


class PathMemoryTests(PathTests, unittest.TestCase):
    def setUp(self):
        file = io.BytesIO(b'Hello, UTF-8 world!\n')
        self.addCleanup(file.close)
        self.data = util.create_package(
            file=file, path=FileNotFoundError("package exists only in memory")
        )
        self.data.__spec__.origin = None
        self.data.__spec__.has_location = False


class PathZipTests(PathTests, util.ZipSetup, unittest.TestCase):
    def test_remove_in_context_manager(self):
        # It is not an error if the file that was temporarily stashed on the
        # file system is removed inside the `with` stanza.
        target = resources.files(self.data) / 'utf-8.file'
        with resources.as_file(target) as path:
            path.unlink()


if __name__ == '__main__':
    unittest.main()
@@ -0,0 +1,76 @@
import unittest
import importlib_resources as resources

from . import data01
from . import util
from importlib import import_module


class CommonBinaryTests(util.CommonTests, unittest.TestCase):
    def execute(self, package, path):
        resources.files(package).joinpath(path).read_bytes()


class CommonTextTests(util.CommonTests, unittest.TestCase):
    def execute(self, package, path):
        resources.files(package).joinpath(path).read_text()


class ReadTests:
    def test_read_bytes(self):
        result = resources.files(self.data).joinpath('binary.file').read_bytes()
        self.assertEqual(result, b'\0\1\2\3')

    def test_read_text_default_encoding(self):
        result = resources.files(self.data).joinpath('utf-8.file').read_text()
        self.assertEqual(result, 'Hello, UTF-8 world!\n')

    def test_read_text_given_encoding(self):
        result = (
            resources.files(self.data)
            .joinpath('utf-16.file')
            .read_text(encoding='utf-16')
        )
        self.assertEqual(result, 'Hello, UTF-16 world!\n')

    def test_read_text_with_errors(self):
        # Raises UnicodeError without the 'errors' argument.
        target = resources.files(self.data) / 'utf-16.file'
        self.assertRaises(UnicodeError, target.read_text, encoding='utf-8')
        result = target.read_text(encoding='utf-8', errors='ignore')
        self.assertEqual(
            result,
            'H\x00e\x00l\x00l\x00o\x00,\x00 '
            '\x00U\x00T\x00F\x00-\x001\x006\x00 '
            '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00',
        )


class ReadDiskTests(ReadTests, unittest.TestCase):
    data = data01


class ReadZipTests(ReadTests, util.ZipSetup, unittest.TestCase):
    def test_read_submodule_resource(self):
        submodule = import_module('ziptestdata.subdirectory')
        result = resources.files(submodule).joinpath('binary.file').read_bytes()
        self.assertEqual(result, b'\0\1\2\3')

    def test_read_submodule_resource_by_name(self):
        result = (
            resources.files('ziptestdata.subdirectory')
            .joinpath('binary.file')
            .read_bytes()
        )
        self.assertEqual(result, b'\0\1\2\3')


class ReadNamespaceTests(ReadTests, unittest.TestCase):
    def setUp(self):
        from . import namespacedata01

        self.data = namespacedata01


if __name__ == '__main__':
    unittest.main()
@@ -0,0 +1,133 @@
import os.path
import sys
import pathlib
import unittest

from importlib import import_module
from importlib_resources.readers import MultiplexedPath, NamespaceReader


class MultiplexedPathTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        path = pathlib.Path(__file__).parent / 'namespacedata01'
        cls.folder = str(path)

    def test_init_no_paths(self):
        with self.assertRaises(FileNotFoundError):
            MultiplexedPath()

    def test_init_file(self):
        with self.assertRaises(NotADirectoryError):
            MultiplexedPath(os.path.join(self.folder, 'binary.file'))

    def test_iterdir(self):
        contents = {path.name for path in MultiplexedPath(self.folder).iterdir()}
        try:
            contents.remove('__pycache__')
        except (KeyError, ValueError):
            pass
        self.assertEqual(contents, {'binary.file', 'utf-16.file', 'utf-8.file'})

    def test_iterdir_duplicate(self):
        data01 = os.path.abspath(os.path.join(__file__, '..', 'data01'))
        contents = {
            path.name for path in MultiplexedPath(self.folder, data01).iterdir()
        }
        for remove in ('__pycache__', '__init__.pyc'):
            try:
                contents.remove(remove)
            except (KeyError, ValueError):
                pass
        self.assertEqual(
            contents,
            {'__init__.py', 'binary.file', 'subdirectory', 'utf-16.file', 'utf-8.file'},
        )

    def test_is_dir(self):
        self.assertEqual(MultiplexedPath(self.folder).is_dir(), True)

    def test_is_file(self):
        self.assertEqual(MultiplexedPath(self.folder).is_file(), False)

    def test_open_file(self):
        path = MultiplexedPath(self.folder)
        with self.assertRaises(FileNotFoundError):
            path.read_bytes()
        with self.assertRaises(FileNotFoundError):
            path.read_text()
        with self.assertRaises(FileNotFoundError):
            path.open()

    def test_join_path(self):
        prefix = os.path.abspath(os.path.join(__file__, '..'))
        data01 = os.path.join(prefix, 'data01')
        path = MultiplexedPath(self.folder, data01)
        self.assertEqual(
            str(path.joinpath('binary.file'))[len(prefix) + 1 :],
            os.path.join('namespacedata01', 'binary.file'),
        )
        self.assertEqual(
            str(path.joinpath('subdirectory'))[len(prefix) + 1 :],
            os.path.join('data01', 'subdirectory'),
        )
        self.assertEqual(
            str(path.joinpath('imaginary'))[len(prefix) + 1 :],
            os.path.join('namespacedata01', 'imaginary'),
        )
        self.assertEqual(path.joinpath(), path)

    def test_join_path_compound(self):
        path = MultiplexedPath(self.folder)
        assert not path.joinpath('imaginary/foo.py').exists()

    def test_repr(self):
        self.assertEqual(
            repr(MultiplexedPath(self.folder)),
            f"MultiplexedPath('{self.folder}')",
        )

    def test_name(self):
        self.assertEqual(
            MultiplexedPath(self.folder).name,
            os.path.basename(self.folder),
        )


class NamespaceReaderTest(unittest.TestCase):
    site_dir = str(pathlib.Path(__file__).parent)

    @classmethod
    def setUpClass(cls):
        sys.path.append(cls.site_dir)

    @classmethod
    def tearDownClass(cls):
        sys.path.remove(cls.site_dir)

    def test_init_error(self):
        with self.assertRaises(ValueError):
            NamespaceReader(['path1', 'path2'])

    def test_resource_path(self):
        namespacedata01 = import_module('namespacedata01')
        reader = NamespaceReader(namespacedata01.__spec__.submodule_search_locations)

        root = os.path.abspath(os.path.join(__file__, '..', 'namespacedata01'))
        self.assertEqual(
            reader.resource_path('binary.file'), os.path.join(root, 'binary.file')
        )
        self.assertEqual(
            reader.resource_path('imaginary'), os.path.join(root, 'imaginary')
        )

    def test_files(self):
        namespacedata01 = import_module('namespacedata01')
        reader = NamespaceReader(namespacedata01.__spec__.submodule_search_locations)
        root = os.path.abspath(os.path.join(__file__, '..', 'namespacedata01'))
        self.assertIsInstance(reader.files(), MultiplexedPath)
        self.assertEqual(repr(reader.files()), f"MultiplexedPath('{root}')")


if __name__ == '__main__':
    unittest.main()
@@ -0,0 +1,260 @@
import sys
import unittest
import importlib_resources as resources
import uuid
import pathlib

from . import data01
from . import zipdata01, zipdata02
from . import util
from importlib import import_module
from ._compat import import_helper, unlink


class ResourceTests:
    # Subclasses are expected to set the `data` attribute.

    def test_is_file_exists(self):
        target = resources.files(self.data) / 'binary.file'
        self.assertTrue(target.is_file())

    def test_is_file_missing(self):
        target = resources.files(self.data) / 'not-a-file'
        self.assertFalse(target.is_file())

    def test_is_dir(self):
        target = resources.files(self.data) / 'subdirectory'
        self.assertFalse(target.is_file())
        self.assertTrue(target.is_dir())


class ResourceDiskTests(ResourceTests, unittest.TestCase):
    def setUp(self):
        self.data = data01


class ResourceZipTests(ResourceTests, util.ZipSetup, unittest.TestCase):
    pass


def names(traversable):
    return {item.name for item in traversable.iterdir()}


class ResourceLoaderTests(unittest.TestCase):
    def test_resource_contents(self):
        package = util.create_package(
            file=data01, path=data01.__file__, contents=['A', 'B', 'C']
        )
        self.assertEqual(names(resources.files(package)), {'A', 'B', 'C'})

    def test_is_file(self):
        package = util.create_package(
            file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F']
        )
        self.assertTrue(resources.files(package).joinpath('B').is_file())

    def test_is_dir(self):
        package = util.create_package(
            file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F']
        )
        self.assertTrue(resources.files(package).joinpath('D').is_dir())

    def test_resource_missing(self):
        package = util.create_package(
            file=data01, path=data01.__file__, contents=['A', 'B', 'C', 'D/E', 'D/F']
        )
        self.assertFalse(resources.files(package).joinpath('Z').is_file())


class ResourceCornerCaseTests(unittest.TestCase):
    def test_package_has_no_reader_fallback(self):
        # Test odd ball packages which:
        # 1. Do not have a ResourceReader as a loader
        # 2. Are not on the file system
        # 3. Are not in a zip file
        module = util.create_package(
            file=data01, path=data01.__file__, contents=['A', 'B', 'C']
        )
        # Give the module a dummy loader.
        module.__loader__ = object()
        # Give the module a dummy origin.
        module.__file__ = '/path/which/shall/not/be/named'
        module.__spec__.loader = module.__loader__
        module.__spec__.origin = module.__file__
        self.assertFalse(resources.files(module).joinpath('A').is_file())


class ResourceFromZipsTest01(util.ZipSetupBase, unittest.TestCase):
    ZIP_MODULE = zipdata01  # type: ignore

    def test_is_submodule_resource(self):
        submodule = import_module('ziptestdata.subdirectory')
        self.assertTrue(resources.files(submodule).joinpath('binary.file').is_file())

    def test_read_submodule_resource_by_name(self):
        self.assertTrue(
            resources.files('ziptestdata.subdirectory')
            .joinpath('binary.file')
            .is_file()
        )

    def test_submodule_contents(self):
        submodule = import_module('ziptestdata.subdirectory')
        self.assertEqual(
            names(resources.files(submodule)), {'__init__.py', 'binary.file'}
        )

    def test_submodule_contents_by_name(self):
        self.assertEqual(
            names(resources.files('ziptestdata.subdirectory')),
            {'__init__.py', 'binary.file'},
        )

    def test_as_file_directory(self):
        with resources.as_file(resources.files('ziptestdata')) as data:
            assert data.name == 'ziptestdata'
            assert data.is_dir()
            assert data.joinpath('subdirectory').is_dir()
            assert len(list(data.iterdir()))
        assert not data.parent.exists()


class ResourceFromZipsTest02(util.ZipSetupBase, unittest.TestCase):
    ZIP_MODULE = zipdata02  # type: ignore

    def test_unrelated_contents(self):
        """
        Test that a zip with two unrelated subpackages returns
        distinct resources. Ref python/importlib_resources#44.
        """
        self.assertEqual(
            names(resources.files('ziptestdata.one')),
            {'__init__.py', 'resource1.txt'},
        )
        self.assertEqual(
            names(resources.files('ziptestdata.two')),
            {'__init__.py', 'resource2.txt'},
        )


class DeletingZipsTest(unittest.TestCase):
    """Having accessed resources in a zip file should not keep an open
    reference to the zip.
    """

    ZIP_MODULE = zipdata01

    def setUp(self):
        modules = import_helper.modules_setup()
        self.addCleanup(import_helper.modules_cleanup, *modules)

        data_path = pathlib.Path(self.ZIP_MODULE.__file__)
        data_dir = data_path.parent
        self.source_zip_path = data_dir / 'ziptestdata.zip'
        self.zip_path = pathlib.Path(f'{uuid.uuid4()}.zip').absolute()
        self.zip_path.write_bytes(self.source_zip_path.read_bytes())
        sys.path.append(str(self.zip_path))
        self.data = import_module('ziptestdata')

    def tearDown(self):
        try:
            sys.path.remove(str(self.zip_path))
        except ValueError:
            pass

        try:
            del sys.path_importer_cache[str(self.zip_path)]
            del sys.modules[self.data.__name__]
        except KeyError:
            pass

        try:
            unlink(self.zip_path)
        except OSError:
            # If the test fails, this will probably fail too
            pass

    def test_iterdir_does_not_keep_open(self):
        c = [item.name for item in resources.files('ziptestdata').iterdir()]
        self.zip_path.unlink()
        del c

    def test_is_file_does_not_keep_open(self):
        c = resources.files('ziptestdata').joinpath('binary.file').is_file()
        self.zip_path.unlink()
        del c

    def test_is_file_failure_does_not_keep_open(self):
        c = resources.files('ziptestdata').joinpath('not-present').is_file()
        self.zip_path.unlink()
        del c

    @unittest.skip("Desired but not supported.")
    def test_as_file_does_not_keep_open(self):  # pragma: no cover
        c = resources.as_file(resources.files('ziptestdata') / 'binary.file')
        self.zip_path.unlink()
        del c

    def test_entered_path_does_not_keep_open(self):
        # This is what certifi does on import to make its bundle
        # available for the process duration.
        c = resources.as_file(
            resources.files('ziptestdata') / 'binary.file'
        ).__enter__()
        self.zip_path.unlink()
        del c

    def test_read_binary_does_not_keep_open(self):
        c = resources.files('ziptestdata').joinpath('binary.file').read_bytes()
        self.zip_path.unlink()
        del c

    def test_read_text_does_not_keep_open(self):
        c = resources.files('ziptestdata').joinpath('utf-8.file').read_text()
        self.zip_path.unlink()
        del c


class ResourceFromNamespaceTest01(unittest.TestCase):
    site_dir = str(pathlib.Path(__file__).parent)

    @classmethod
    def setUpClass(cls):
        sys.path.append(cls.site_dir)

    @classmethod
    def tearDownClass(cls):
        sys.path.remove(cls.site_dir)

    def test_is_submodule_resource(self):
        self.assertTrue(
            resources.files(import_module('namespacedata01'))
            .joinpath('binary.file')
            .is_file()
        )

    def test_read_submodule_resource_by_name(self):
        self.assertTrue(
            resources.files('namespacedata01').joinpath('binary.file').is_file()
        )

    def test_submodule_contents(self):
        contents = names(resources.files(import_module('namespacedata01')))
        try:
            contents.remove('__pycache__')
        except KeyError:
            pass
        self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'})

    def test_submodule_contents_by_name(self):
        contents = names(resources.files('namespacedata01'))
        try:
            contents.remove('__pycache__')
        except KeyError:
            pass
        self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'})


if __name__ == '__main__':
    unittest.main()
@@ -0,0 +1,53 @@
"""
Generate the zip test data files.

Run to build the tests/zipdataNN/ziptestdata.zip files from
files in tests/dataNN.

Replaces the file with the working copy, but does not commit anything
to the source repo.
"""

import contextlib
import os
import pathlib
import zipfile


def main():
    """
    >>> from unittest import mock
    >>> monkeypatch = getfixture('monkeypatch')
    >>> monkeypatch.setattr(zipfile, 'ZipFile', mock.MagicMock())
    >>> print(); main()  # print workaround for bpo-32509
    <BLANKLINE>
    ...data01... -> ziptestdata/...
    ...
    ...data02... -> ziptestdata/...
    ...
    """
    suffixes = '01', '02'
    tuple(map(generate, suffixes))


def generate(suffix):
    root = pathlib.Path(__file__).parent.relative_to(os.getcwd())
    zfpath = root / f'zipdata{suffix}/ziptestdata.zip'
    with zipfile.ZipFile(zfpath, 'w') as zf:
        for src, rel in walk(root / f'data{suffix}'):
            dst = 'ziptestdata' / pathlib.PurePosixPath(rel.as_posix())
            print(src, '->', dst)
            zf.write(src, dst)


def walk(datapath):
    for dirpath, dirnames, filenames in os.walk(datapath):
        with contextlib.suppress(ValueError):
            dirnames.remove('__pycache__')
        for filename in filenames:
            res = pathlib.Path(dirpath) / filename
            rel = res.relative_to(datapath)
            yield res, rel


__name__ == '__main__' and main()
167	lib/pkg_resources/_vendor/importlib_resources/tests/util.py	Normal file
@@ -0,0 +1,167 @@
import abc
import importlib
import io
import sys
import types
import pathlib

from . import data01
from . import zipdata01
from ..abc import ResourceReader
from ._compat import import_helper


from importlib.machinery import ModuleSpec


class Reader(ResourceReader):
    def __init__(self, **kwargs):
        vars(self).update(kwargs)

    def get_resource_reader(self, package):
        return self

    def open_resource(self, path):
        self._path = path
        if isinstance(self.file, Exception):
            raise self.file
        return self.file

    def resource_path(self, path_):
        self._path = path_
        if isinstance(self.path, Exception):
            raise self.path
        return self.path

    def is_resource(self, path_):
        self._path = path_
        if isinstance(self.path, Exception):
            raise self.path

        def part(entry):
            return entry.split('/')

        return any(
            len(parts) == 1 and parts[0] == path_ for parts in map(part, self._contents)
        )

    def contents(self):
        if isinstance(self.path, Exception):
            raise self.path
        yield from self._contents


def create_package_from_loader(loader, is_package=True):
    name = 'testingpackage'
    module = types.ModuleType(name)
    spec = ModuleSpec(name, loader, origin='does-not-exist', is_package=is_package)
    module.__spec__ = spec
    module.__loader__ = loader
    return module


def create_package(file=None, path=None, is_package=True, contents=()):
    return create_package_from_loader(
        Reader(file=file, path=path, _contents=contents),
        is_package,
    )


class CommonTests(metaclass=abc.ABCMeta):
    """
    Tests shared by test_open, test_path, and test_read.
    """

    @abc.abstractmethod
    def execute(self, package, path):
        """
        Call the pertinent legacy API function (e.g. open_text, path)
        on package and path.
        """

    def test_package_name(self):
        # Passing in the package name should succeed.
        self.execute(data01.__name__, 'utf-8.file')

    def test_package_object(self):
        # Passing in the package itself should succeed.
        self.execute(data01, 'utf-8.file')

    def test_string_path(self):
        # Passing in a string for the path should succeed.
        path = 'utf-8.file'
        self.execute(data01, path)

    def test_pathlib_path(self):
        # Passing in a pathlib.PurePath object for the path should succeed.
        path = pathlib.PurePath('utf-8.file')
        self.execute(data01, path)

    def test_importing_module_as_side_effect(self):
        # The anchor package can already be imported.
        del sys.modules[data01.__name__]
        self.execute(data01.__name__, 'utf-8.file')

    def test_missing_path(self):
        # Attempting to open or read or request the path for a
        # non-existent path should succeed if open_resource
        # can return a viable data stream.
        bytes_data = io.BytesIO(b'Hello, world!')
        package = create_package(file=bytes_data, path=FileNotFoundError())
        self.execute(package, 'utf-8.file')
        self.assertEqual(package.__loader__._path, 'utf-8.file')

    def test_extant_path(self):
        # Attempting to open or read or request the path when the
        # path does exist should still succeed. Does not assert
        # anything about the result.
        bytes_data = io.BytesIO(b'Hello, world!')
        # any path that exists
        path = __file__
        package = create_package(file=bytes_data, path=path)
        self.execute(package, 'utf-8.file')
        self.assertEqual(package.__loader__._path, 'utf-8.file')

    def test_useless_loader(self):
        package = create_package(file=FileNotFoundError(), path=FileNotFoundError())
        with self.assertRaises(FileNotFoundError):
            self.execute(package, 'utf-8.file')


class ZipSetupBase:
    ZIP_MODULE = None

    @classmethod
    def setUpClass(cls):
        data_path = pathlib.Path(cls.ZIP_MODULE.__file__)
        data_dir = data_path.parent
        cls._zip_path = str(data_dir / 'ziptestdata.zip')
        sys.path.append(cls._zip_path)
        cls.data = importlib.import_module('ziptestdata')

    @classmethod
    def tearDownClass(cls):
        try:
            sys.path.remove(cls._zip_path)
        except ValueError:
            pass

        try:
            del sys.path_importer_cache[cls._zip_path]
            del sys.modules[cls.data.__name__]
        except KeyError:
            pass

        try:
            del cls.data
            del cls._zip_path
        except AttributeError:
            pass

    def setUp(self):
        modules = import_helper.modules_setup()
        self.addCleanup(import_helper.modules_cleanup, *modules)


class ZipSetup(ZipSetupBase):
    ZIP_MODULE = zipdata01  # type: ignore
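ZipSetupBase boils down to: append a zip archive to sys.path, import a package from inside it, then undo everything. A standalone hedged sketch of the same mechanism (file and package names hypothetical):

import sys
import importlib
import zipfile

# Build a tiny zip containing a package.
with zipfile.ZipFile('demo.zip', 'w') as zf:
    zf.writestr('demopkg/__init__.py', '')
    zf.writestr('demopkg/data.txt', 'hello')

sys.path.append('demo.zip')            # zipimport makes the archive importable
pkg = importlib.import_module('demopkg')
print(pkg.__file__)                    # points inside demo.zip
sys.path.remove('demo.zip')            # mirror the tearDownClass cleanup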
@@ -0,0 +1 @@
jaraco
@@ -0,0 +1 @@
jaraco
@@ -0,0 +1 @@
jaraco
0	lib/pkg_resources/_vendor/jaraco/__init__.py	Normal file
288	lib/pkg_resources/_vendor/jaraco/context.py	Normal file
@@ -0,0 +1,288 @@
import os
import subprocess
import contextlib
import functools
import tempfile
import shutil
import operator
import warnings


@contextlib.contextmanager
def pushd(dir):
    """
    >>> tmp_path = getfixture('tmp_path')
    >>> with pushd(tmp_path):
    ...     assert os.getcwd() == os.fspath(tmp_path)
    >>> assert os.getcwd() != os.fspath(tmp_path)
    """

    orig = os.getcwd()
    os.chdir(dir)
    try:
        yield dir
    finally:
        os.chdir(orig)


@contextlib.contextmanager
def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
    """
    Get a tarball, extract it, change to that directory, yield, then
    clean up.
    `runner` is the function to invoke commands.
    `pushd` is a context manager for changing the directory.
    """
    if target_dir is None:
        target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '')
    if runner is None:
        runner = functools.partial(subprocess.check_call, shell=True)
    else:
        warnings.warn("runner parameter is deprecated", DeprecationWarning)
    # In the tar command, use --strip-components=1 to strip the first path
    # and then use -C to cause the files to be extracted to {target_dir}.
    # This ensures that we always know where the files were extracted.
    runner('mkdir {target_dir}'.format(**vars()))
    try:
        getter = 'wget {url} -O -'
        extract = 'tar x{compression} --strip-components=1 -C {target_dir}'
        cmd = ' | '.join((getter, extract))
        runner(cmd.format(compression=infer_compression(url), **vars()))
        with pushd(target_dir):
            yield target_dir
    finally:
        runner('rm -Rf {target_dir}'.format(**vars()))
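A hedged usage sketch for tarball_context (URL hypothetical; the implementation shells out to wget and tar, so it assumes a POSIX environment and real network access):

# Downloads, extracts, and cds into the tree, then removes it on exit.
with tarball_context('https://example.com/project-1.0.tar.gz') as dir:
    print('working in', dir)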


def infer_compression(url):
    """
    Given a URL or filename, infer the compression code for tar.

    >>> infer_compression('http://foo/bar.tar.gz')
    'z'
    >>> infer_compression('http://foo/bar.tgz')
    'z'
    >>> infer_compression('file.bz')
    'j'
    >>> infer_compression('file.xz')
    'J'
    """
    # cheat and just assume it's the last two characters
    compression_indicator = url[-2:]
    mapping = dict(gz='z', bz='j', xz='J')
    # Assume 'z' (gzip) if no match
    return mapping.get(compression_indicator, 'z')


@contextlib.contextmanager
def temp_dir(remover=shutil.rmtree):
    """
    Create a temporary directory context. Pass a custom remover
    to override the removal behavior.

    >>> import pathlib
    >>> with temp_dir() as the_dir:
    ...     assert os.path.isdir(the_dir)
    ...     _ = pathlib.Path(the_dir).joinpath('somefile').write_text('contents')
    >>> assert not os.path.exists(the_dir)
    """
    temp_dir = tempfile.mkdtemp()
    try:
        yield temp_dir
    finally:
        remover(temp_dir)


@contextlib.contextmanager
def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir):
    """
    Check out the repo indicated by url.

    If dest_ctx is supplied, it should be a context manager
    to yield the target directory for the check out.
    """
    exe = 'git' if 'git' in url else 'hg'
    with dest_ctx() as repo_dir:
        cmd = [exe, 'clone', url, repo_dir]
        if branch:
            cmd.extend(['--branch', branch])
        devnull = open(os.path.devnull, 'w')
        stdout = devnull if quiet else None
        subprocess.check_call(cmd, stdout=stdout)
        yield repo_dir
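A similar hedged sketch for repo_context (the clone really runs git over the network; the default dest_ctx=temp_dir removes the checkout on exit):

# 'git' appears in the URL, so git is chosen as the executable.
with repo_context('https://github.com/pypa/setuptools', branch='main') as repo:
    print('checked out into', repo)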


@contextlib.contextmanager
def null():
    """
    A null context suitable to stand in for a meaningful context.

    >>> with null() as value:
    ...     assert value is None
    """
    yield


class ExceptionTrap:
    """
    A context manager that will catch certain exceptions and provide an
    indication they occurred.

    >>> with ExceptionTrap() as trap:
    ...     raise Exception()
    >>> bool(trap)
    True

    >>> with ExceptionTrap() as trap:
    ...     pass
    >>> bool(trap)
    False

    >>> with ExceptionTrap(ValueError) as trap:
    ...     raise ValueError("1 + 1 is not 3")
    >>> bool(trap)
    True
    >>> trap.value
    ValueError('1 + 1 is not 3')
    >>> trap.tb
    <traceback object at ...>

    >>> with ExceptionTrap(ValueError) as trap:
    ...     raise Exception()
    Traceback (most recent call last):
    ...
    Exception

    >>> bool(trap)
    False
    """

    exc_info = None, None, None

    def __init__(self, exceptions=(Exception,)):
        self.exceptions = exceptions

    def __enter__(self):
        return self

    @property
    def type(self):
        return self.exc_info[0]

    @property
    def value(self):
        return self.exc_info[1]

    @property
    def tb(self):
        return self.exc_info[2]

    def __exit__(self, *exc_info):
        type = exc_info[0]
        matches = type and issubclass(type, self.exceptions)
        if matches:
            self.exc_info = exc_info
        return matches

    def __bool__(self):
        return bool(self.type)

    def raises(self, func, *, _test=bool):
        """
        Wrap func and replace the result with the truth
        value of the trap (True if an exception occurred).

        First, give the decorator an alias to support Python 3.8
        syntax.

        >>> raises = ExceptionTrap(ValueError).raises

        Now decorate a function that always fails.

        >>> @raises
        ... def fail():
        ...     raise ValueError('failed')
        >>> fail()
        True
        """

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with ExceptionTrap(self.exceptions) as trap:
                func(*args, **kwargs)
            return _test(trap)

        return wrapper

    def passes(self, func):
        """
        Wrap func and replace the result with the truth
        value of the trap (True if no exception).

        First, give the decorator an alias to support Python 3.8
        syntax.

        >>> passes = ExceptionTrap(ValueError).passes

        Now decorate a function that always fails.

        >>> @passes
        ... def fail():
        ...     raise ValueError('failed')

        >>> fail()
        False
        """
        return self.raises(func, _test=operator.not_)


class suppress(contextlib.suppress, contextlib.ContextDecorator):
    """
    A version of contextlib.suppress with decorator support.

    >>> @suppress(KeyError)
    ... def key_error():
    ...     {}['']
    >>> key_error()
    """


class on_interrupt(contextlib.ContextDecorator):
    """
    Replace a KeyboardInterrupt with SystemExit(1)

    >>> def do_interrupt():
    ...     raise KeyboardInterrupt()
    >>> on_interrupt('error')(do_interrupt)()
    Traceback (most recent call last):
    ...
    SystemExit: 1
    >>> on_interrupt('error', code=255)(do_interrupt)()
    Traceback (most recent call last):
    ...
    SystemExit: 255
    >>> on_interrupt('suppress')(do_interrupt)()
    >>> with __import__('pytest').raises(KeyboardInterrupt):
    ...     on_interrupt('ignore')(do_interrupt)()
    """

    def __init__(
        self,
        action='error',
        # py3.7 compat
        # /,
        code=1,
    ):
        self.action = action
        self.code = code

    def __enter__(self):
        return self

    def __exit__(self, exctype, excinst, exctb):
        if exctype is not KeyboardInterrupt or self.action == 'ignore':
            return
        elif self.action == 'error':
            raise SystemExit(self.code) from excinst
        return self.action == 'suppress'
556	lib/pkg_resources/_vendor/jaraco/functools.py	Normal file
@@ -0,0 +1,556 @@
import functools
import time
import inspect
import collections
import types
import itertools
import warnings

import pkg_resources.extern.more_itertools

from typing import Callable, TypeVar


CallableT = TypeVar("CallableT", bound=Callable[..., object])


def compose(*funcs):
    """
    Compose any number of unary functions into a single unary function.

    >>> import textwrap
    >>> expected = str.strip(textwrap.dedent(compose.__doc__))
    >>> strip_and_dedent = compose(str.strip, textwrap.dedent)
    >>> strip_and_dedent(compose.__doc__) == expected
    True

    Compose also allows the innermost function to take arbitrary arguments.

    >>> round_three = lambda x: round(x, ndigits=3)
    >>> f = compose(round_three, int.__truediv__)
    >>> [f(3*x, x+1) for x in range(1,10)]
    [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
    """

    def compose_two(f1, f2):
        return lambda *args, **kwargs: f1(f2(*args, **kwargs))

    return functools.reduce(compose_two, funcs)


def method_caller(method_name, *args, **kwargs):
    """
    Return a function that will call a named method on the
    target object with optional positional and keyword
    arguments.

    >>> lower = method_caller('lower')
    >>> lower('MyString')
    'mystring'
    """

    def call_method(target):
        func = getattr(target, method_name)
        return func(*args, **kwargs)

    return call_method


def once(func):
    """
    Decorate func so it's only ever called the first time.

    This decorator can ensure that an expensive or non-idempotent function
    will not be expensive on subsequent calls and is idempotent.

    >>> add_three = once(lambda a: a+3)
    >>> add_three(3)
    6
    >>> add_three(9)
    6
    >>> add_three('12')
    6

    To reset the stored value, simply clear the property ``saved_result``.

    >>> del add_three.saved_result
    >>> add_three(9)
    12
    >>> add_three(8)
    12

    Or invoke 'reset()' on it.

    >>> add_three.reset()
    >>> add_three(-3)
    0
    >>> add_three(0)
    0
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if not hasattr(wrapper, 'saved_result'):
            wrapper.saved_result = func(*args, **kwargs)
        return wrapper.saved_result

    wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result')
    return wrapper


def method_cache(
    method: CallableT,
    cache_wrapper: Callable[
        [CallableT], CallableT
    ] = functools.lru_cache(),  # type: ignore[assignment]
) -> CallableT:
    """
    Wrap lru_cache to support storing the cache data in the object instances.

    Abstracts the common paradigm where the method explicitly saves an
    underscore-prefixed protected property on first call and returns that
    subsequently.

    >>> class MyClass:
    ...     calls = 0
    ...
    ...     @method_cache
    ...     def method(self, value):
    ...         self.calls += 1
    ...         return value

    >>> a = MyClass()
    >>> a.method(3)
    3
    >>> for x in range(75):
    ...     res = a.method(x)
    >>> a.calls
    75

    Note that the apparent behavior will be exactly like that of lru_cache
    except that the cache is stored on each instance, so values in one
    instance will not flush values from another, and when an instance is
    deleted, so are the cached values for that instance.

    >>> b = MyClass()
    >>> for x in range(35):
    ...     res = b.method(x)
    >>> b.calls
    35
    >>> a.method(0)
    0
    >>> a.calls
    75

    Note that if method had been decorated with ``functools.lru_cache()``,
    a.calls would have been 76 (due to the cached value of 0 having been
    flushed by the 'b' instance).

    Clear the cache with ``.cache_clear()``

    >>> a.method.cache_clear()

    Same for a method that hasn't yet been called.

    >>> c = MyClass()
    >>> c.method.cache_clear()

    Another cache wrapper may be supplied:

    >>> cache = functools.lru_cache(maxsize=2)
    >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
    >>> a = MyClass()
    >>> a.method2()
    3

    Caution - do not subsequently wrap the method with another decorator, such
    as ``@property``, which changes the semantics of the function.

    See also
    http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
    for another implementation and additional justification.
    """

    def wrapper(self: object, *args: object, **kwargs: object) -> object:
        # it's the first call, replace the method with a cached, bound method
        bound_method: CallableT = types.MethodType(  # type: ignore[assignment]
            method, self
        )
        cached_method = cache_wrapper(bound_method)
        setattr(self, method.__name__, cached_method)
        return cached_method(*args, **kwargs)

    # Support cache clear even before cache has been created.
    wrapper.cache_clear = lambda: None  # type: ignore[attr-defined]

    return (  # type: ignore[return-value]
        _special_method_cache(method, cache_wrapper) or wrapper
    )


def _special_method_cache(method, cache_wrapper):
    """
    Because Python treats special methods differently, it's not
    possible to use instance attributes to implement the cached
    methods.

    Instead, install the wrapper method under a different name
    and return a simple proxy to that wrapper.

    https://github.com/jaraco/jaraco.functools/issues/5
    """
    name = method.__name__
    special_names = '__getattr__', '__getitem__'
    if name not in special_names:
        return

    wrapper_name = '__cached' + name

    def proxy(self, *args, **kwargs):
        if wrapper_name not in vars(self):
            bound = types.MethodType(method, self)
            cache = cache_wrapper(bound)
            setattr(self, wrapper_name, cache)
        else:
            cache = getattr(self, wrapper_name)
        return cache(*args, **kwargs)

    return proxy
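Why the special-method path exists: Python looks dunders up on the type, not the instance, so the setattr trick in method_cache cannot intercept them; the proxy stores the per-instance cache under a mangled attribute instead. A short hedged sketch (class name hypothetical):

class Lookup:
    """Hypothetical class whose __getitem__ is expensive."""

    def __init__(self, data):
        self.data = data
        self.calls = 0

    @method_cache  # routed through _special_method_cache because of the name
    def __getitem__(self, key):
        self.calls += 1
        return self.data[key]

table = Lookup({'a': 1})
table['a']; table['a']
print(table.calls)  # 1: the second lookup hit the per-instance cache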
|
||||
|
||||
def apply(transform):
|
||||
"""
|
||||
Decorate a function with a transform function that is
|
||||
invoked on results returned from the decorated function.
|
||||
|
||||
>>> @apply(reversed)
|
||||
... def get_numbers(start):
|
||||
... "doc for get_numbers"
|
||||
... return range(start, start+3)
|
||||
>>> list(get_numbers(4))
|
||||
[6, 5, 4]
|
||||
>>> get_numbers.__doc__
|
||||
'doc for get_numbers'
|
||||
"""
|
||||
|
||||
def wrap(func):
|
||||
return functools.wraps(func)(compose(transform, func))
|
||||
|
||||
return wrap
|
||||
|
||||
|
||||
def result_invoke(action):
|
||||
r"""
|
||||
Decorate a function with an action function that is
|
||||
invoked on the results returned from the decorated
|
||||
function (for its side-effect), then return the original
|
||||
result.
|
||||
|
||||
>>> @result_invoke(print)
|
||||
... def add_two(a, b):
|
||||
... return a + b
|
||||
>>> x = add_two(2, 3)
|
||||
5
|
||||
>>> x
|
||||
5
|
||||
"""
|
||||
|
||||
def wrap(func):
|
||||
@functools.wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
result = func(*args, **kwargs)
|
||||
action(result)
|
||||
return result
|
||||
|
||||
return wrapper
|
||||
|
||||
return wrap
|
||||
|
||||
|
||||
def invoke(f, *args, **kwargs):
|
||||
"""
|
||||
Call a function for its side effect after initialization.
|
||||
|
||||
The benefit of using the decorator instead of simply invoking a function
|
||||
after defining it is that it makes explicit the author's intent for the
|
||||
function to be called immediately. Whereas if one simply calls the
|
||||
function immediately, it's less obvious if that was intentional or
|
||||
incidental. It also avoids repeating the name - the two actions, defining
|
||||
the function and calling it immediately are modeled separately, but linked
|
||||
by the decorator construct.
|
||||
|
||||
The benefit of having a function construct (opposed to just invoking some
|
||||
behavior inline) is to serve as a scope in which the behavior occurs. It
|
||||
avoids polluting the global namespace with local variables, provides an
|
||||
anchor on which to attach documentation (docstring), keeps the behavior
|
||||
logically separated (instead of conceptually separated or not separated at
|
||||
all), and provides potential to re-use the behavior for testing or other
|
||||
purposes.
|
||||
|
||||
This function is named as a pithy way to communicate, "call this function
|
||||
primarily for its side effect", or "while defining this function, also
|
||||
take it aside and call it". It exists because there's no Python construct
|
||||
for "define and call" (nor should there be, as decorators serve this need
|
||||
just fine). The behavior happens immediately and synchronously.
|
||||
|
||||
>>> @invoke
|
||||
... def func(): print("called")
|
||||
called
|
||||
>>> func()
|
||||
called
|
||||
|
||||
Use functools.partial to pass parameters to the initial call
|
||||
|
||||
>>> @functools.partial(invoke, name='bingo')
|
||||
... def func(name): print("called with", name)
|
||||
called with bingo
|
||||
"""
|
||||
f(*args, **kwargs)
|
||||
return f
|
||||
|
||||
|
||||
def call_aside(*args, **kwargs):
|
||||
"""
|
||||
Deprecated name for invoke.
|
||||
"""
|
||||
warnings.warn("call_aside is deprecated, use invoke", DeprecationWarning)
|
||||
return invoke(*args, **kwargs)
|
||||
|
||||
|
||||
class Throttler:
|
||||
"""
|
||||
Rate-limit a function (or other callable)
|
||||
"""
|
||||
|
||||
def __init__(self, func, max_rate=float('Inf')):
|
||||
if isinstance(func, Throttler):
|
||||
func = func.func
|
||||
self.func = func
|
||||
self.max_rate = max_rate
|
||||
self.reset()
|
||||
|
||||
def reset(self):
|
||||
self.last_called = 0
|
||||
|
||||
def __call__(self, *args, **kwargs):
|
||||
self._wait()
|
||||
return self.func(*args, **kwargs)
|
||||
|
||||
def _wait(self):
|
||||
"ensure at least 1/max_rate seconds from last call"
|
||||
elapsed = time.time() - self.last_called
|
||||
must_wait = 1 / self.max_rate - elapsed
|
||||
time.sleep(max(0, must_wait))
|
||||
self.last_called = time.time()
|
||||
|
||||
def __get__(self, obj, type=None):
|
||||
return first_invoke(self._wait, functools.partial(self.func, obj))
|
||||
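# Usage sketch (illustration only): allow at most two calls per second; the
# first call is immediate, and each later call sleeps ~0.5s in _wait().
throttled_echo = Throttler(print, max_rate=2)
_start = time.time()
for _i in range(3):
    throttled_echo(_i)
assert time.time() - _start >= 1.0  # two enforced 0.5-second gaps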
|
||||
|
||||
def first_invoke(func1, func2):
|
||||
"""
|
||||
Return a function that when invoked will invoke func1 without
|
||||
any parameters (for its side-effect) and then invoke func2
|
||||
with whatever parameters were passed, returning its result.
|
||||
"""
|
||||
|
||||
def wrapper(*args, **kwargs):
|
||||
func1()
|
||||
return func2(*args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
|
||||
"""
|
||||
Given a callable func, trap the indicated exceptions
|
||||
for up to 'retries' times, invoking cleanup on the
|
||||
exception. On the final attempt, allow any exceptions
|
||||
to propagate.
|
||||
"""
|
||||
attempts = itertools.count() if retries == float('inf') else range(retries)
|
||||
for attempt in attempts:
|
||||
try:
|
||||
return func()
|
||||
except trap:
|
||||
cleanup()
|
||||
|
||||
return func()
|
||||
|
||||
|
||||
def retry(*r_args, **r_kwargs):
|
||||
"""
|
||||
Decorator wrapper for retry_call. Accepts arguments to retry_call
|
||||
except func and then returns a decorator for the decorated function.
|
||||
|
||||
Ex:
|
||||
|
||||
>>> @retry(retries=3)
|
||||
... def my_func(a, b):
|
||||
... "this is my funk"
|
||||
... print(a, b)
|
||||
>>> my_func.__doc__
|
||||
'this is my funk'
|
||||
"""
|
||||
|
||||
def decorate(func):
|
||||
@functools.wraps(func)
|
||||
def wrapper(*f_args, **f_kwargs):
|
||||
bound = functools.partial(func, *f_args, **f_kwargs)
|
||||
return retry_call(bound, *r_args, **r_kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
return decorate
|
||||
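# Sketch of retry_call with a flaky callable (illustration only): two
# trapped attempts are retried, then the final attempt outside the loop
# succeeds.
_attempts = []

def _flaky():
    _attempts.append(1)
    if len(_attempts) < 3:
        raise OSError('transient failure')
    return 'ok'

assert retry_call(_flaky, retries=2, trap=OSError) == 'ok'
assert len(_attempts) == 3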
|
||||
|
||||
def print_yielded(func):
|
||||
"""
|
||||
Convert a generator into a function that prints all yielded elements
|
||||
|
||||
>>> @print_yielded
|
||||
... def x():
|
||||
... yield 3; yield None
|
||||
>>> x()
|
||||
3
|
||||
None
|
||||
"""
|
||||
print_all = functools.partial(map, print)
|
||||
print_results = compose(more_itertools.consume, print_all, func)
|
||||
return functools.wraps(func)(print_results)
|
||||
|
||||
|
||||
def pass_none(func):
|
||||
"""
|
||||
Wrap func so it's not called if its first param is None
|
||||
|
||||
>>> print_text = pass_none(print)
|
||||
>>> print_text('text')
|
||||
text
|
||||
>>> print_text(None)
|
||||
"""
|
||||
|
||||
@functools.wraps(func)
|
||||
def wrapper(param, *args, **kwargs):
|
||||
if param is not None:
|
||||
return func(param, *args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
def assign_params(func, namespace):
|
||||
"""
|
||||
Assign parameters from namespace where func solicits them.
|
||||
|
||||
>>> def func(x, y=3):
|
||||
... print(x, y)
|
||||
>>> assigned = assign_params(func, dict(x=2, z=4))
|
||||
>>> assigned()
|
||||
2 3
|
||||
|
||||
The usual errors are raised if a function doesn't receive
|
||||
its required parameters:
|
||||
|
||||
>>> assigned = assign_params(func, dict(y=3, z=4))
|
||||
>>> assigned()
|
||||
Traceback (most recent call last):
|
||||
TypeError: func() ...argument...
|
||||
|
||||
It even works on methods:
|
||||
|
||||
>>> class Handler:
|
||||
... def meth(self, arg):
|
||||
... print(arg)
|
||||
>>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
|
||||
crystal
|
||||
"""
|
||||
sig = inspect.signature(func)
|
||||
params = sig.parameters.keys()
|
||||
call_ns = {k: namespace[k] for k in params if k in namespace}
|
||||
return functools.partial(func, **call_ns)
|
||||
|
||||
|
||||
def save_method_args(method):
|
||||
"""
|
||||
Wrap a method such that when it is called, the args and kwargs are
|
||||
saved on the method.
|
||||
|
||||
>>> class MyClass:
|
||||
... @save_method_args
|
||||
... def method(self, a, b):
|
||||
... print(a, b)
|
||||
>>> my_ob = MyClass()
|
||||
>>> my_ob.method(1, 2)
|
||||
1 2
|
||||
>>> my_ob._saved_method.args
|
||||
(1, 2)
|
||||
>>> my_ob._saved_method.kwargs
|
||||
{}
|
||||
>>> my_ob.method(a=3, b='foo')
|
||||
3 foo
|
||||
>>> my_ob._saved_method.args
|
||||
()
|
||||
>>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
|
||||
True
|
||||
|
||||
The arguments are stored on the instance, allowing for
different instances to save different args.
|
||||
|
||||
>>> your_ob = MyClass()
|
||||
>>> your_ob.method({str('x'): 3}, b=[4])
|
||||
{'x': 3} [4]
|
||||
>>> your_ob._saved_method.args
|
||||
({'x': 3},)
|
||||
>>> my_ob._saved_method.args
|
||||
()
|
||||
"""
|
||||
args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')
|
||||
|
||||
@functools.wraps(method)
|
||||
def wrapper(self, *args, **kwargs):
|
||||
attr_name = '_saved_' + method.__name__
|
||||
attr = args_and_kwargs(args, kwargs)
|
||||
setattr(self, attr_name, attr)
|
||||
return method(self, *args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
def except_(*exceptions, replace=None, use=None):
|
||||
"""
|
||||
Replace the indicated exceptions, if raised, with the indicated
|
||||
literal replacement or evaluated expression (if present).
|
||||
|
||||
>>> safe_int = except_(ValueError)(int)
|
||||
>>> safe_int('five')
|
||||
>>> safe_int('5')
|
||||
5
|
||||
|
||||
Specify a literal replacement with ``replace``.
|
||||
|
||||
>>> safe_int_r = except_(ValueError, replace=0)(int)
|
||||
>>> safe_int_r('five')
|
||||
0
|
||||
|
||||
Provide an expression to ``use`` to pass through particular parameters.
|
||||
|
||||
>>> safe_int_pt = except_(ValueError, use='args[0]')(int)
|
||||
>>> safe_int_pt('five')
|
||||
'five'
|
||||
|
||||
"""
|
||||
|
||||
def decorate(func):
|
||||
@functools.wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
try:
|
||||
return func(*args, **kwargs)
|
||||
except exceptions:
|
||||
try:
|
||||
return eval(use)
|
||||
except TypeError:
|
||||
return replace
|
||||
|
||||
return wrapper
|
||||
|
||||
return decorate
|
||||
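# How the use/replace fallback above works (a sketch, not part of the
# vendored file): ``use`` is passed to eval() inside the wrapper, where
# ``args`` and ``kwargs`` are in scope; when ``use`` is None, eval(None)
# raises TypeError and the literal ``replace`` value is returned instead.
_first = except_(IndexError, use='args[0] or None')(lambda seq: seq[0])
assert _first([1, 2]) == 1
assert _first([]) is None  # IndexError trapped; 'args[0] or None' evaluated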
2  lib/pkg_resources/_vendor/jaraco/text/Lorem ipsum.txt  Normal file
@@ -0,0 +1,2 @@
|
||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
|
||||
Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus magna felis sollicitudin mauris. Integer in mauris eu nibh euismod gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue, eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis, neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis, molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
|
||||
599  lib/pkg_resources/_vendor/jaraco/text/__init__.py  Normal file
@@ -0,0 +1,599 @@
|
||||
import re
|
||||
import itertools
|
||||
import textwrap
|
||||
import functools
|
||||
|
||||
try:
|
||||
from importlib.resources import files # type: ignore
|
||||
except ImportError: # pragma: nocover
|
||||
from pkg_resources.extern.importlib_resources import files # type: ignore
|
||||
|
||||
from pkg_resources.extern.jaraco.functools import compose, method_cache
|
||||
from pkg_resources.extern.jaraco.context import ExceptionTrap
|
||||
|
||||
|
||||
def substitution(old, new):
|
||||
"""
|
||||
Return a function that will perform a substitution on a string
|
||||
"""
|
||||
return lambda s: s.replace(old, new)
|
||||
|
||||
|
||||
def multi_substitution(*substitutions):
|
||||
"""
|
||||
Take a sequence of pairs specifying substitutions, and create
|
||||
a function that performs those substitutions.
|
||||
|
||||
>>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
|
||||
'baz'
|
||||
"""
|
||||
substitutions = itertools.starmap(substitution, substitutions)
|
||||
# compose function applies last function first, so reverse the
|
||||
# substitutions to get the expected order.
|
||||
substitutions = reversed(tuple(substitutions))
|
||||
return compose(*substitutions)
|
||||
|
||||
|
||||
class FoldedCase(str):
|
||||
"""
|
||||
A case-insensitive string class; behaves just like str
|
||||
except compares equal when the only variation is case.
|
||||
|
||||
>>> s = FoldedCase('hello world')
|
||||
|
||||
>>> s == 'Hello World'
|
||||
True
|
||||
|
||||
>>> 'Hello World' == s
|
||||
True
|
||||
|
||||
>>> s != 'Hello World'
|
||||
False
|
||||
|
||||
>>> s.index('O')
|
||||
4
|
||||
|
||||
>>> s.split('O')
|
||||
['hell', ' w', 'rld']
|
||||
|
||||
>>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
|
||||
['alpha', 'Beta', 'GAMMA']
|
||||
|
||||
Sequence membership is straightforward.
|
||||
|
||||
>>> "Hello World" in [s]
|
||||
True
|
||||
>>> s in ["Hello World"]
|
||||
True
|
||||
|
||||
You may test for set inclusion, but candidate and elements
|
||||
must both be folded.
|
||||
|
||||
>>> FoldedCase("Hello World") in {s}
|
||||
True
|
||||
>>> s in {FoldedCase("Hello World")}
|
||||
True
|
||||
|
||||
String inclusion works as long as the FoldedCase object
|
||||
is on the right.
|
||||
|
||||
>>> "hello" in FoldedCase("Hello World")
|
||||
True
|
||||
|
||||
But not if the FoldedCase object is on the left:
|
||||
|
||||
>>> FoldedCase('hello') in 'Hello World'
|
||||
False
|
||||
|
||||
In that case, use ``in_``:
|
||||
|
||||
>>> FoldedCase('hello').in_('Hello World')
|
||||
True
|
||||
|
||||
>>> FoldedCase('hello') > FoldedCase('Hello')
|
||||
False
|
||||
"""
|
||||
|
||||
def __lt__(self, other):
|
||||
return self.lower() < other.lower()
|
||||
|
||||
def __gt__(self, other):
|
||||
return self.lower() > other.lower()
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.lower() == other.lower()
|
||||
|
||||
def __ne__(self, other):
|
||||
return self.lower() != other.lower()
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.lower())
|
||||
|
||||
def __contains__(self, other):
|
||||
return super().lower().__contains__(other.lower())
|
||||
|
||||
def in_(self, other):
|
||||
"Does self appear in other?"
|
||||
return self in FoldedCase(other)
|
||||
|
||||
# cache lower since it's likely to be called frequently.
|
||||
@method_cache
|
||||
def lower(self):
|
||||
return super().lower()
|
||||
|
||||
def index(self, sub):
|
||||
return self.lower().index(sub.lower())
|
||||
|
||||
def split(self, splitter=' ', maxsplit=0):
|
||||
pattern = re.compile(re.escape(splitter), re.I)
|
||||
return pattern.split(self, maxsplit)
|
||||
|
||||
|
||||
# Python 3.8 compatibility
|
||||
_unicode_trap = ExceptionTrap(UnicodeDecodeError)
|
||||
|
||||
|
||||
@_unicode_trap.passes
|
||||
def is_decodable(value):
|
||||
r"""
|
||||
Return True if the supplied value is decodable (using the default
|
||||
encoding).
|
||||
|
||||
>>> is_decodable(b'\xff')
|
||||
False
|
||||
>>> is_decodable(b'\x32')
|
||||
True
|
||||
"""
|
||||
value.decode()
|
||||
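# The ``passes`` decorator above turns the body into a predicate: the call
# returns True when value.decode() completes and False when the trapped
# UnicodeDecodeError is raised; the body's own return value is discarded.
# A minimal re-implementation of that idea (an assumption about the
# vendored ExceptionTrap, for illustration only):
def _passes(exc_type):
    def decorate(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                func(*args, **kwargs)
            except exc_type:
                return False
            return True
        return wrapper
    return decorate

assert _passes(UnicodeDecodeError)(bytes.decode)(b'\x32') is True
assert _passes(UnicodeDecodeError)(bytes.decode)(b'\xff') is False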
|
||||
|
||||
def is_binary(value):
|
||||
r"""
|
||||
Return True if the value appears to be binary (that is, it's a byte
|
||||
string and isn't decodable).
|
||||
|
||||
>>> is_binary(b'\xff')
|
||||
True
|
||||
>>> is_binary('\xff')
|
||||
False
|
||||
"""
|
||||
return isinstance(value, bytes) and not is_decodable(value)
|
||||
|
||||
|
||||
def trim(s):
|
||||
r"""
|
||||
Trim something like a docstring to remove the whitespace that
|
||||
is common due to indentation and formatting.
|
||||
|
||||
>>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
|
||||
'foo = bar\n\tbar = baz'
|
||||
"""
|
||||
return textwrap.dedent(s).strip()
|
||||
|
||||
|
||||
def wrap(s):
|
||||
"""
|
||||
Wrap lines of text, retaining existing newlines as
|
||||
paragraph markers.
|
||||
|
||||
>>> print(wrap(lorem_ipsum))
|
||||
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
|
||||
eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad
|
||||
minim veniam, quis nostrud exercitation ullamco laboris nisi ut
|
||||
aliquip ex ea commodo consequat. Duis aute irure dolor in
|
||||
reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
|
||||
pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
|
||||
culpa qui officia deserunt mollit anim id est laborum.
|
||||
<BLANKLINE>
|
||||
Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam
|
||||
varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus
|
||||
magna felis sollicitudin mauris. Integer in mauris eu nibh euismod
|
||||
gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis
|
||||
risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue,
|
||||
eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas
|
||||
fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla
|
||||
a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis,
|
||||
neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing
|
||||
sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque
|
||||
nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus
|
||||
quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis,
|
||||
molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
|
||||
"""
|
||||
paragraphs = s.splitlines()
|
||||
wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs)
|
||||
return '\n\n'.join(wrapped)
|
||||
|
||||
|
||||
def unwrap(s):
|
||||
r"""
|
||||
Given a multi-line string, return an unwrapped version.
|
||||
|
||||
>>> wrapped = wrap(lorem_ipsum)
|
||||
>>> wrapped.count('\n')
|
||||
20
|
||||
>>> unwrapped = unwrap(wrapped)
|
||||
>>> unwrapped.count('\n')
|
||||
1
|
||||
>>> print(unwrapped)
|
||||
Lorem ipsum dolor sit amet, consectetur adipiscing ...
|
||||
Curabitur pretium tincidunt lacus. Nulla gravida orci ...
|
||||
|
||||
"""
|
||||
paragraphs = re.split(r'\n\n+', s)
|
||||
cleaned = (para.replace('\n', ' ') for para in paragraphs)
|
||||
return '\n'.join(cleaned)


class Splitter(object):
|
||||
"""object that will split a string with the given arguments for each call
|
||||
|
||||
>>> s = Splitter(',')
|
||||
>>> s('hello, world, this is your, master calling')
|
||||
['hello', ' world', ' this is your', ' master calling']
|
||||
"""
|
||||
|
||||
def __init__(self, *args):
|
||||
self.args = args
|
||||
|
||||
def __call__(self, s):
|
||||
return s.split(*self.args)
|
||||
|
||||
|
||||
def indent(string, prefix=' ' * 4):
|
||||
"""
|
||||
>>> indent('foo')
|
||||
' foo'
|
||||
"""
|
||||
return prefix + string
|
||||
|
||||
|
||||
class WordSet(tuple):
|
||||
"""
|
||||
Given an identifier, return the words that identifier represents,
|
||||
whether in camel case, underscore-separated, etc.
|
||||
|
||||
>>> WordSet.parse("camelCase")
|
||||
('camel', 'Case')
|
||||
|
||||
>>> WordSet.parse("under_sep")
|
||||
('under', 'sep')
|
||||
|
||||
Acronyms should be retained.
|
||||
|
||||
>>> WordSet.parse("firstSNL")
|
||||
('first', 'SNL')
|
||||
|
||||
>>> WordSet.parse("you_and_I")
|
||||
('you', 'and', 'I')
|
||||
|
||||
>>> WordSet.parse("A simple test")
|
||||
('A', 'simple', 'test')
|
||||
|
||||
Multiple caps should not interfere with the first cap of another word.
|
||||
|
||||
>>> WordSet.parse("myABCClass")
|
||||
('my', 'ABC', 'Class')
|
||||
|
||||
The result is a WordSet, so you can get the form you need.
|
||||
|
||||
>>> WordSet.parse("myABCClass").underscore_separated()
|
||||
'my_ABC_Class'
|
||||
|
||||
>>> WordSet.parse('a-command').camel_case()
|
||||
'ACommand'
|
||||
|
||||
>>> WordSet.parse('someIdentifier').lowered().space_separated()
|
||||
'some identifier'
|
||||
|
||||
Slices of the result should return another WordSet.
|
||||
|
||||
>>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
|
||||
'out_of_context'
|
||||
|
||||
>>> WordSet.from_class_name(WordSet()).lowered().space_separated()
|
||||
'word set'
|
||||
|
||||
>>> example = WordSet.parse('figured it out')
|
||||
>>> example.headless_camel_case()
|
||||
'figuredItOut'
|
||||
>>> example.dash_separated()
|
||||
'figured-it-out'
|
||||
|
||||
"""
|
||||
|
||||
_pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
|
||||
|
||||
def capitalized(self):
|
||||
return WordSet(word.capitalize() for word in self)
|
||||
|
||||
def lowered(self):
|
||||
return WordSet(word.lower() for word in self)
|
||||
|
||||
def camel_case(self):
|
||||
return ''.join(self.capitalized())
|
||||
|
||||
def headless_camel_case(self):
|
||||
words = iter(self)
|
||||
first = next(words).lower()
|
||||
new_words = itertools.chain((first,), WordSet(words).camel_case())
|
||||
return ''.join(new_words)
|
||||
|
||||
def underscore_separated(self):
|
||||
return '_'.join(self)
|
||||
|
||||
def dash_separated(self):
|
||||
return '-'.join(self)
|
||||
|
||||
def space_separated(self):
|
||||
return ' '.join(self)
|
||||
|
||||
def trim_right(self, item):
|
||||
"""
|
||||
Remove the item from the end of the set.
|
||||
|
||||
>>> WordSet.parse('foo bar').trim_right('foo')
|
||||
('foo', 'bar')
|
||||
>>> WordSet.parse('foo bar').trim_right('bar')
|
||||
('foo',)
|
||||
>>> WordSet.parse('').trim_right('bar')
|
||||
()
|
||||
"""
|
||||
return self[:-1] if self and self[-1] == item else self
|
||||
|
||||
def trim_left(self, item):
|
||||
"""
|
||||
Remove the item from the beginning of the set.
|
||||
|
||||
>>> WordSet.parse('foo bar').trim_left('foo')
|
||||
('bar',)
|
||||
>>> WordSet.parse('foo bar').trim_left('bar')
|
||||
('foo', 'bar')
|
||||
>>> WordSet.parse('').trim_left('bar')
|
||||
()
|
||||
"""
|
||||
return self[1:] if self and self[0] == item else self
|
||||
|
||||
def trim(self, item):
|
||||
"""
|
||||
>>> WordSet.parse('foo bar').trim('foo')
|
||||
('bar',)
|
||||
"""
|
||||
return self.trim_left(item).trim_right(item)
|
||||
|
||||
def __getitem__(self, item):
|
||||
result = super(WordSet, self).__getitem__(item)
|
||||
if isinstance(item, slice):
|
||||
result = WordSet(result)
|
||||
return result
|
||||
|
||||
@classmethod
|
||||
def parse(cls, identifier):
|
||||
matches = cls._pattern.finditer(identifier)
|
||||
return WordSet(match.group(0) for match in matches)
|
||||
|
||||
@classmethod
|
||||
def from_class_name(cls, subject):
|
||||
return cls.parse(subject.__class__.__name__)
|
||||
|
||||
|
||||
# for backward compatibility
|
||||
words = WordSet.parse
|
||||
|
||||
|
||||
def simple_html_strip(s):
|
||||
r"""
|
||||
Remove HTML from the string `s`.
|
||||
|
||||
>>> str(simple_html_strip(''))
|
||||
''
|
||||
|
||||
>>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
|
||||
A stormy day in paradise
|
||||
|
||||
>>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
|
||||
Somebody tell the truth.
|
||||
|
||||
>>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
|
||||
What about
|
||||
multiple lines?
|
||||
"""
|
||||
html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
|
||||
texts = (match.group(3) or '' for match in html_stripper.finditer(s))
|
||||
return ''.join(texts)
|
||||
|
||||
|
||||
class SeparatedValues(str):
|
||||
"""
|
||||
A string separated by a separator. Overrides __iter__ for getting
|
||||
the values.
|
||||
|
||||
>>> list(SeparatedValues('a,b,c'))
|
||||
['a', 'b', 'c']
|
||||
|
||||
Whitespace is stripped and empty values are discarded.
|
||||
|
||||
>>> list(SeparatedValues(' a, b , c, '))
|
||||
['a', 'b', 'c']
|
||||
"""
|
||||
|
||||
separator = ','
|
||||
|
||||
def __iter__(self):
|
||||
parts = self.split(self.separator)
|
||||
return filter(None, (part.strip() for part in parts))
|
||||
|
||||
|
||||
class Stripper:
|
||||
r"""
|
||||
Given a series of lines, find the common prefix and strip it from them.
|
||||
|
||||
>>> lines = [
|
||||
... 'abcdefg\n',
|
||||
... 'abc\n',
|
||||
... 'abcde\n',
|
||||
... ]
|
||||
>>> res = Stripper.strip_prefix(lines)
|
||||
>>> res.prefix
|
||||
'abc'
|
||||
>>> list(res.lines)
|
||||
['defg\n', '\n', 'de\n']
|
||||
|
||||
If no prefix is common, nothing should be stripped.
|
||||
|
||||
>>> lines = [
|
||||
... 'abcd\n',
|
||||
... '1234\n',
|
||||
... ]
|
||||
>>> res = Stripper.strip_prefix(lines)
|
||||
>>> res.prefix
''
|
||||
>>> list(res.lines)
|
||||
['abcd\n', '1234\n']
|
||||
"""
|
||||
|
||||
def __init__(self, prefix, lines):
|
||||
self.prefix = prefix
|
||||
self.lines = map(self, lines)
|
||||
|
||||
@classmethod
|
||||
def strip_prefix(cls, lines):
|
||||
prefix_lines, lines = itertools.tee(lines)
|
||||
prefix = functools.reduce(cls.common_prefix, prefix_lines)
|
||||
return cls(prefix, lines)
|
||||
|
||||
def __call__(self, line):
|
||||
if not self.prefix:
|
||||
return line
|
||||
null, prefix, rest = line.partition(self.prefix)
|
||||
return rest
|
||||
|
||||
@staticmethod
|
||||
def common_prefix(s1, s2):
|
||||
"""
|
||||
Return the common prefix of two lines.
|
||||
"""
|
||||
index = min(len(s1), len(s2))
|
||||
while s1[:index] != s2[:index]:
|
||||
index -= 1
|
||||
return s1[:index]
|
||||
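# Sketch of the prefix-shrinking loop above (illustration only): start at
# the shorter length and back off one character until both slices agree.
assert Stripper.common_prefix('abcdefg\n', 'abcde\n') == 'abcde'
assert Stripper.common_prefix('abcd\n', '1234\n') == ''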
|
||||
|
||||
def remove_prefix(text, prefix):
|
||||
"""
|
||||
Remove the prefix from the text if it exists.
|
||||
|
||||
>>> remove_prefix('underwhelming performance', 'underwhelming ')
|
||||
'performance'
|
||||
|
||||
>>> remove_prefix('something special', 'sample')
|
||||
'something special'
|
||||
"""
|
||||
null, prefix, rest = text.rpartition(prefix)
|
||||
return rest
|
||||
|
||||
|
||||
def remove_suffix(text, suffix):
|
||||
"""
|
||||
Remove the suffix from the text if it exists.
|
||||
|
||||
>>> remove_suffix('name.git', '.git')
|
||||
'name'
|
||||
|
||||
>>> remove_suffix('something special', 'sample')
|
||||
'something special'
|
||||
"""
|
||||
rest, suffix, null = text.partition(suffix)
|
||||
return rest
|
||||
|
||||
|
||||
def normalize_newlines(text):
|
||||
r"""
|
||||
Replace alternate newlines with the canonical newline.
|
||||
|
||||
>>> normalize_newlines('Lorem Ipsum\u2029')
|
||||
'Lorem Ipsum\n'
|
||||
>>> normalize_newlines('Lorem Ipsum\r\n')
|
||||
'Lorem Ipsum\n'
|
||||
>>> normalize_newlines('Lorem Ipsum\x85')
|
||||
'Lorem Ipsum\n'
|
||||
"""
|
||||
newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
|
||||
pattern = '|'.join(newlines)
|
||||
return re.sub(pattern, '\n', text)
|
||||
|
||||
|
||||
def _nonblank(str):
|
||||
return str and not str.startswith('#')
|
||||
|
||||
|
||||
@functools.singledispatch
|
||||
def yield_lines(iterable):
|
||||
r"""
|
||||
Yield valid lines of a string or iterable.
|
||||
|
||||
>>> list(yield_lines(''))
|
||||
[]
|
||||
>>> list(yield_lines(['foo', 'bar']))
|
||||
['foo', 'bar']
|
||||
>>> list(yield_lines('foo\nbar'))
|
||||
['foo', 'bar']
|
||||
>>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
|
||||
['foo', 'baz #comment']
|
||||
>>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
|
||||
['foo', 'bar', 'baz', 'bing']
|
||||
"""
|
||||
return itertools.chain.from_iterable(map(yield_lines, iterable))
|
||||
|
||||
|
||||
@yield_lines.register(str)
|
||||
def _(text):
|
||||
return filter(_nonblank, map(str.strip, text.splitlines()))
|
||||
|
||||
|
||||
def drop_comment(line):
|
||||
"""
|
||||
Drop comments.
|
||||
|
||||
>>> drop_comment('foo # bar')
|
||||
'foo'
|
||||
|
||||
A hash without a space may be in a URL.
|
||||
|
||||
>>> drop_comment('http://example.com/foo#bar')
|
||||
'http://example.com/foo#bar'
|
||||
"""
|
||||
return line.partition(' #')[0]
|
||||
|
||||
|
||||
def join_continuation(lines):
|
||||
r"""
|
||||
Join lines continued by a trailing backslash.
|
||||
|
||||
>>> list(join_continuation(['foo \\', 'bar', 'baz']))
|
||||
['foobar', 'baz']
|
||||
>>> list(join_continuation(['foo \\', 'bar', 'baz']))
|
||||
['foobar', 'baz']
|
||||
>>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
|
||||
['foobarbaz']
|
||||
|
||||
The character preceding the backslash is also elided, because
``item[:-2]`` strips both the backslash and the character before it.
|
||||
|
||||
>>> list(join_continuation(['goo\\', 'dly']))
|
||||
['godly']
|
||||
|
||||
A terrible idea, but...
|
||||
If no line is available to continue, suppress the lines.
|
||||
|
||||
>>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
|
||||
['foo']
|
||||
"""
|
||||
lines = iter(lines)
|
||||
for item in lines:
|
||||
while item.endswith('\\'):
|
||||
try:
|
||||
item = item[:-2].strip() + next(lines)
|
||||
except StopIteration:
|
||||
return
|
||||
yield item
|
||||
6  lib/pkg_resources/_vendor/more_itertools/__init__.py  Normal file
@@ -0,0 +1,6 @@
|
||||
"""More routines for operating on iterables, beyond itertools"""
|
||||
|
||||
from .more import * # noqa
|
||||
from .recipes import * # noqa
|
||||
|
||||
__version__ = '9.1.0'
|
||||
2  lib/pkg_resources/_vendor/more_itertools/__init__.pyi  Normal file
@@ -0,0 +1,2 @@
|
||||
from .more import *
|
||||
from .recipes import *
|
||||
4391  lib/pkg_resources/_vendor/more_itertools/more.py  Executable file
File diff suppressed because it is too large
666  lib/pkg_resources/_vendor/more_itertools/more.pyi  Normal file
@@ -0,0 +1,666 @@
|
||||
"""Stubs for more_itertools.more"""
|
||||
from __future__ import annotations
|
||||
|
||||
from types import TracebackType
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Container,
|
||||
ContextManager,
|
||||
Generic,
|
||||
Hashable,
|
||||
Iterable,
|
||||
Iterator,
|
||||
overload,
|
||||
Reversible,
|
||||
Sequence,
|
||||
Sized,
|
||||
Type,
|
||||
TypeVar,
|
||||
type_check_only,
|
||||
)
|
||||
from typing_extensions import Protocol
|
||||
|
||||
# Type and type variable definitions
|
||||
_T = TypeVar('_T')
|
||||
_T1 = TypeVar('_T1')
|
||||
_T2 = TypeVar('_T2')
|
||||
_U = TypeVar('_U')
|
||||
_V = TypeVar('_V')
|
||||
_W = TypeVar('_W')
|
||||
_T_co = TypeVar('_T_co', covariant=True)
|
||||
_GenFn = TypeVar('_GenFn', bound=Callable[..., Iterator[object]])
|
||||
_Raisable = BaseException | Type[BaseException]
|
||||
|
||||
@type_check_only
|
||||
class _SizedIterable(Protocol[_T_co], Sized, Iterable[_T_co]): ...
|
||||
|
||||
@type_check_only
|
||||
class _SizedReversible(Protocol[_T_co], Sized, Reversible[_T_co]): ...
|
||||
|
||||
@type_check_only
|
||||
class _SupportsSlicing(Protocol[_T_co]):
|
||||
def __getitem__(self, __k: slice) -> _T_co: ...
|
||||
|
||||
def chunked(
|
||||
iterable: Iterable[_T], n: int | None, strict: bool = ...
|
||||
) -> Iterator[list[_T]]: ...
|
||||
@overload
|
||||
def first(iterable: Iterable[_T]) -> _T: ...
|
||||
@overload
|
||||
def first(iterable: Iterable[_T], default: _U) -> _T | _U: ...
|
||||
@overload
|
||||
def last(iterable: Iterable[_T]) -> _T: ...
|
||||
@overload
|
||||
def last(iterable: Iterable[_T], default: _U) -> _T | _U: ...
|
||||
@overload
|
||||
def nth_or_last(iterable: Iterable[_T], n: int) -> _T: ...
|
||||
@overload
|
||||
def nth_or_last(iterable: Iterable[_T], n: int, default: _U) -> _T | _U: ...
|
||||
|
||||
class peekable(Generic[_T], Iterator[_T]):
|
||||
def __init__(self, iterable: Iterable[_T]) -> None: ...
|
||||
def __iter__(self) -> peekable[_T]: ...
|
||||
def __bool__(self) -> bool: ...
|
||||
@overload
|
||||
def peek(self) -> _T: ...
|
||||
@overload
|
||||
def peek(self, default: _U) -> _T | _U: ...
|
||||
def prepend(self, *items: _T) -> None: ...
|
||||
def __next__(self) -> _T: ...
|
||||
@overload
|
||||
def __getitem__(self, index: int) -> _T: ...
|
||||
@overload
|
||||
def __getitem__(self, index: slice) -> list[_T]: ...
|
||||
|
||||
def consumer(func: _GenFn) -> _GenFn: ...
|
||||
def ilen(iterable: Iterable[object]) -> int: ...
|
||||
def iterate(func: Callable[[_T], _T], start: _T) -> Iterator[_T]: ...
|
||||
def with_iter(
|
||||
context_manager: ContextManager[Iterable[_T]],
|
||||
) -> Iterator[_T]: ...
|
||||
def one(
|
||||
iterable: Iterable[_T],
|
||||
too_short: _Raisable | None = ...,
|
||||
too_long: _Raisable | None = ...,
|
||||
) -> _T: ...
|
||||
def raise_(exception: _Raisable, *args: Any) -> None: ...
|
||||
def strictly_n(
|
||||
iterable: Iterable[_T],
|
||||
n: int,
|
||||
too_short: _GenFn | None = ...,
|
||||
too_long: _GenFn | None = ...,
|
||||
) -> list[_T]: ...
|
||||
def distinct_permutations(
|
||||
iterable: Iterable[_T], r: int | None = ...
|
||||
) -> Iterator[tuple[_T, ...]]: ...
|
||||
def intersperse(
|
||||
e: _U, iterable: Iterable[_T], n: int = ...
|
||||
) -> Iterator[_T | _U]: ...
|
||||
def unique_to_each(*iterables: Iterable[_T]) -> list[list[_T]]: ...
|
||||
@overload
|
||||
def windowed(
|
||||
seq: Iterable[_T], n: int, *, step: int = ...
|
||||
) -> Iterator[tuple[_T | None, ...]]: ...
|
||||
@overload
|
||||
def windowed(
|
||||
seq: Iterable[_T], n: int, fillvalue: _U, step: int = ...
|
||||
) -> Iterator[tuple[_T | _U, ...]]: ...
|
||||
def substrings(iterable: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
|
||||
def substrings_indexes(
|
||||
seq: Sequence[_T], reverse: bool = ...
|
||||
) -> Iterator[tuple[Sequence[_T], int, int]]: ...
|
||||
|
||||
class bucket(Generic[_T, _U], Container[_U]):
|
||||
def __init__(
|
||||
self,
|
||||
iterable: Iterable[_T],
|
||||
key: Callable[[_T], _U],
|
||||
validator: Callable[[object], object] | None = ...,
|
||||
) -> None: ...
|
||||
def __contains__(self, value: object) -> bool: ...
|
||||
def __iter__(self) -> Iterator[_U]: ...
|
||||
def __getitem__(self, value: object) -> Iterator[_T]: ...
|
||||
|
||||
def spy(
|
||||
iterable: Iterable[_T], n: int = ...
|
||||
) -> tuple[list[_T], Iterator[_T]]: ...
|
||||
def interleave(*iterables: Iterable[_T]) -> Iterator[_T]: ...
|
||||
def interleave_longest(*iterables: Iterable[_T]) -> Iterator[_T]: ...
|
||||
def interleave_evenly(
|
||||
iterables: list[Iterable[_T]], lengths: list[int] | None = ...
|
||||
) -> Iterator[_T]: ...
|
||||
def collapse(
|
||||
iterable: Iterable[Any],
|
||||
base_type: type | None = ...,
|
||||
levels: int | None = ...,
|
||||
) -> Iterator[Any]: ...
|
||||
@overload
|
||||
def side_effect(
|
||||
func: Callable[[_T], object],
|
||||
iterable: Iterable[_T],
|
||||
chunk_size: None = ...,
|
||||
before: Callable[[], object] | None = ...,
|
||||
after: Callable[[], object] | None = ...,
|
||||
) -> Iterator[_T]: ...
|
||||
@overload
|
||||
def side_effect(
|
||||
func: Callable[[list[_T]], object],
|
||||
iterable: Iterable[_T],
|
||||
chunk_size: int,
|
||||
before: Callable[[], object] | None = ...,
|
||||
after: Callable[[], object] | None = ...,
|
||||
) -> Iterator[_T]: ...
|
||||
def sliced(
|
||||
seq: _SupportsSlicing[_T], n: int, strict: bool = ...
|
||||
) -> Iterator[_T]: ...
|
||||
def split_at(
|
||||
iterable: Iterable[_T],
|
||||
pred: Callable[[_T], object],
|
||||
maxsplit: int = ...,
|
||||
keep_separator: bool = ...,
|
||||
) -> Iterator[list[_T]]: ...
|
||||
def split_before(
|
||||
iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ...
|
||||
) -> Iterator[list[_T]]: ...
|
||||
def split_after(
|
||||
iterable: Iterable[_T], pred: Callable[[_T], object], maxsplit: int = ...
|
||||
) -> Iterator[list[_T]]: ...
|
||||
def split_when(
|
||||
iterable: Iterable[_T],
|
||||
pred: Callable[[_T, _T], object],
|
||||
maxsplit: int = ...,
|
||||
) -> Iterator[list[_T]]: ...
|
||||
def split_into(
|
||||
iterable: Iterable[_T], sizes: Iterable[int | None]
|
||||
) -> Iterator[list[_T]]: ...
|
||||
@overload
|
||||
def padded(
|
||||
iterable: Iterable[_T],
|
||||
*,
|
||||
n: int | None = ...,
|
||||
next_multiple: bool = ...,
|
||||
) -> Iterator[_T | None]: ...
|
||||
@overload
|
||||
def padded(
|
||||
iterable: Iterable[_T],
|
||||
fillvalue: _U,
|
||||
n: int | None = ...,
|
||||
next_multiple: bool = ...,
|
||||
) -> Iterator[_T | _U]: ...
|
||||
@overload
|
||||
def repeat_last(iterable: Iterable[_T]) -> Iterator[_T]: ...
|
||||
@overload
|
||||
def repeat_last(iterable: Iterable[_T], default: _U) -> Iterator[_T | _U]: ...
|
||||
def distribute(n: int, iterable: Iterable[_T]) -> list[Iterator[_T]]: ...
|
||||
@overload
|
||||
def stagger(
|
||||
iterable: Iterable[_T],
|
||||
offsets: _SizedIterable[int] = ...,
|
||||
longest: bool = ...,
|
||||
) -> Iterator[tuple[_T | None, ...]]: ...
|
||||
@overload
|
||||
def stagger(
|
||||
iterable: Iterable[_T],
|
||||
offsets: _SizedIterable[int] = ...,
|
||||
longest: bool = ...,
|
||||
fillvalue: _U = ...,
|
||||
) -> Iterator[tuple[_T | _U, ...]]: ...
|
||||
|
||||
class UnequalIterablesError(ValueError):
|
||||
def __init__(self, details: tuple[int, int, int] | None = ...) -> None: ...
|
||||
|
||||
@overload
|
||||
def zip_equal(__iter1: Iterable[_T1]) -> Iterator[tuple[_T1]]: ...
|
||||
@overload
|
||||
def zip_equal(
|
||||
__iter1: Iterable[_T1], __iter2: Iterable[_T2]
|
||||
) -> Iterator[tuple[_T1, _T2]]: ...
|
||||
@overload
|
||||
def zip_equal(
|
||||
__iter1: Iterable[_T],
|
||||
__iter2: Iterable[_T],
|
||||
__iter3: Iterable[_T],
|
||||
*iterables: Iterable[_T],
|
||||
) -> Iterator[tuple[_T, ...]]: ...
|
||||
@overload
|
||||
def zip_offset(
|
||||
__iter1: Iterable[_T1],
|
||||
*,
|
||||
offsets: _SizedIterable[int],
|
||||
longest: bool = ...,
|
||||
fillvalue: None = None,
|
||||
) -> Iterator[tuple[_T1 | None]]: ...
|
||||
@overload
|
||||
def zip_offset(
|
||||
__iter1: Iterable[_T1],
|
||||
__iter2: Iterable[_T2],
|
||||
*,
|
||||
offsets: _SizedIterable[int],
|
||||
longest: bool = ...,
|
||||
fillvalue: None = None,
|
||||
) -> Iterator[tuple[_T1 | None, _T2 | None]]: ...
|
||||
@overload
|
||||
def zip_offset(
|
||||
__iter1: Iterable[_T],
|
||||
__iter2: Iterable[_T],
|
||||
__iter3: Iterable[_T],
|
||||
*iterables: Iterable[_T],
|
||||
offsets: _SizedIterable[int],
|
||||
longest: bool = ...,
|
||||
fillvalue: None = None,
|
||||
) -> Iterator[tuple[_T | None, ...]]: ...
|
||||
@overload
|
||||
def zip_offset(
|
||||
__iter1: Iterable[_T1],
|
||||
*,
|
||||
offsets: _SizedIterable[int],
|
||||
longest: bool = ...,
|
||||
fillvalue: _U,
|
||||
) -> Iterator[tuple[_T1 | _U]]: ...
|
||||
@overload
|
||||
def zip_offset(
|
||||
__iter1: Iterable[_T1],
|
||||
__iter2: Iterable[_T2],
|
||||
*,
|
||||
offsets: _SizedIterable[int],
|
||||
longest: bool = ...,
|
||||
fillvalue: _U,
|
||||
) -> Iterator[tuple[_T1 | _U, _T2 | _U]]: ...
|
||||
@overload
|
||||
def zip_offset(
|
||||
__iter1: Iterable[_T],
|
||||
__iter2: Iterable[_T],
|
||||
__iter3: Iterable[_T],
|
||||
*iterables: Iterable[_T],
|
||||
offsets: _SizedIterable[int],
|
||||
longest: bool = ...,
|
||||
fillvalue: _U,
|
||||
) -> Iterator[tuple[_T | _U, ...]]: ...
|
||||
def sort_together(
|
||||
iterables: Iterable[Iterable[_T]],
|
||||
key_list: Iterable[int] = ...,
|
||||
key: Callable[..., Any] | None = ...,
|
||||
reverse: bool = ...,
|
||||
) -> list[tuple[_T, ...]]: ...
|
||||
def unzip(iterable: Iterable[Sequence[_T]]) -> tuple[Iterator[_T], ...]: ...
|
||||
def divide(n: int, iterable: Iterable[_T]) -> list[Iterator[_T]]: ...
|
||||
def always_iterable(
|
||||
obj: object,
|
||||
base_type: type | tuple[type | tuple[Any, ...], ...] | None = ...,
|
||||
) -> Iterator[Any]: ...
|
||||
def adjacent(
|
||||
predicate: Callable[[_T], bool],
|
||||
iterable: Iterable[_T],
|
||||
distance: int = ...,
|
||||
) -> Iterator[tuple[bool, _T]]: ...
|
||||
@overload
|
||||
def groupby_transform(
|
||||
iterable: Iterable[_T],
|
||||
keyfunc: None = None,
|
||||
valuefunc: None = None,
|
||||
reducefunc: None = None,
|
||||
) -> Iterator[tuple[_T, Iterator[_T]]]: ...
|
||||
@overload
|
||||
def groupby_transform(
|
||||
iterable: Iterable[_T],
|
||||
keyfunc: Callable[[_T], _U],
|
||||
valuefunc: None,
|
||||
reducefunc: None,
|
||||
) -> Iterator[tuple[_U, Iterator[_T]]]: ...
|
||||
@overload
|
||||
def groupby_transform(
|
||||
iterable: Iterable[_T],
|
||||
keyfunc: None,
|
||||
valuefunc: Callable[[_T], _V],
|
||||
reducefunc: None,
|
||||
) -> Iterable[tuple[_T, Iterable[_V]]]: ...
|
||||
@overload
|
||||
def groupby_transform(
|
||||
iterable: Iterable[_T],
|
||||
keyfunc: Callable[[_T], _U],
|
||||
valuefunc: Callable[[_T], _V],
|
||||
reducefunc: None,
|
||||
) -> Iterable[tuple[_U, Iterator[_V]]]: ...
|
||||
@overload
|
||||
def groupby_transform(
|
||||
iterable: Iterable[_T],
|
||||
keyfunc: None,
|
||||
valuefunc: None,
|
||||
reducefunc: Callable[[Iterator[_T]], _W],
|
||||
) -> Iterable[tuple[_T, _W]]: ...
|
||||
@overload
|
||||
def groupby_transform(
|
||||
iterable: Iterable[_T],
|
||||
keyfunc: Callable[[_T], _U],
|
||||
valuefunc: None,
|
||||
reducefunc: Callable[[Iterator[_T]], _W],
|
||||
) -> Iterable[tuple[_U, _W]]: ...
|
||||
@overload
|
||||
def groupby_transform(
|
||||
iterable: Iterable[_T],
|
||||
keyfunc: None,
|
||||
valuefunc: Callable[[_T], _V],
|
||||
reducefunc: Callable[[Iterable[_V]], _W],
|
||||
) -> Iterable[tuple[_T, _W]]: ...
|
||||
@overload
|
||||
def groupby_transform(
|
||||
iterable: Iterable[_T],
|
||||
keyfunc: Callable[[_T], _U],
|
||||
valuefunc: Callable[[_T], _V],
|
||||
reducefunc: Callable[[Iterable[_V]], _W],
|
||||
) -> Iterable[tuple[_U, _W]]: ...
|
||||
|
||||
class numeric_range(Generic[_T, _U], Sequence[_T], Hashable, Reversible[_T]):
|
||||
@overload
|
||||
def __init__(self, __stop: _T) -> None: ...
|
||||
@overload
|
||||
def __init__(self, __start: _T, __stop: _T) -> None: ...
|
||||
@overload
|
||||
def __init__(self, __start: _T, __stop: _T, __step: _U) -> None: ...
|
||||
def __bool__(self) -> bool: ...
|
||||
def __contains__(self, elem: object) -> bool: ...
|
||||
def __eq__(self, other: object) -> bool: ...
|
||||
@overload
|
||||
def __getitem__(self, key: int) -> _T: ...
|
||||
@overload
|
||||
def __getitem__(self, key: slice) -> numeric_range[_T, _U]: ...
|
||||
def __hash__(self) -> int: ...
|
||||
def __iter__(self) -> Iterator[_T]: ...
|
||||
def __len__(self) -> int: ...
|
||||
def __reduce__(
|
||||
self,
|
||||
) -> tuple[Type[numeric_range[_T, _U]], tuple[_T, _T, _U]]: ...
|
||||
def __repr__(self) -> str: ...
|
||||
def __reversed__(self) -> Iterator[_T]: ...
|
||||
def count(self, value: _T) -> int: ...
|
||||
def index(self, value: _T) -> int: ... # type: ignore
|
||||
|
||||
def count_cycle(
|
||||
iterable: Iterable[_T], n: int | None = ...
|
||||
) -> Iterable[tuple[int, _T]]: ...
|
||||
def mark_ends(
|
||||
iterable: Iterable[_T],
|
||||
) -> Iterable[tuple[bool, bool, _T]]: ...
|
||||
def locate(
|
||||
iterable: Iterable[object],
|
||||
pred: Callable[..., Any] = ...,
|
||||
window_size: int | None = ...,
|
||||
) -> Iterator[int]: ...
|
||||
def lstrip(
|
||||
iterable: Iterable[_T], pred: Callable[[_T], object]
|
||||
) -> Iterator[_T]: ...
|
||||
def rstrip(
|
||||
iterable: Iterable[_T], pred: Callable[[_T], object]
|
||||
) -> Iterator[_T]: ...
|
||||
def strip(
|
||||
iterable: Iterable[_T], pred: Callable[[_T], object]
|
||||
) -> Iterator[_T]: ...
|
||||
|
||||
class islice_extended(Generic[_T], Iterator[_T]):
|
||||
def __init__(self, iterable: Iterable[_T], *args: int | None) -> None: ...
|
||||
def __iter__(self) -> islice_extended[_T]: ...
|
||||
def __next__(self) -> _T: ...
|
||||
def __getitem__(self, index: slice) -> islice_extended[_T]: ...
|
||||
|
||||
def always_reversible(iterable: Iterable[_T]) -> Iterator[_T]: ...
|
||||
def consecutive_groups(
|
||||
iterable: Iterable[_T], ordering: Callable[[_T], int] = ...
|
||||
) -> Iterator[Iterator[_T]]: ...
|
||||
@overload
|
||||
def difference(
|
||||
iterable: Iterable[_T],
|
||||
func: Callable[[_T, _T], _U] = ...,
|
||||
*,
|
||||
initial: None = ...,
|
||||
) -> Iterator[_T | _U]: ...
|
||||
@overload
|
||||
def difference(
|
||||
iterable: Iterable[_T], func: Callable[[_T, _T], _U] = ..., *, initial: _U
|
||||
) -> Iterator[_U]: ...
|
||||
|
||||
class SequenceView(Generic[_T], Sequence[_T]):
|
||||
def __init__(self, target: Sequence[_T]) -> None: ...
|
||||
@overload
|
||||
def __getitem__(self, index: int) -> _T: ...
|
||||
@overload
|
||||
def __getitem__(self, index: slice) -> Sequence[_T]: ...
|
||||
def __len__(self) -> int: ...
|
||||
|
||||
class seekable(Generic[_T], Iterator[_T]):
|
||||
def __init__(
|
||||
self, iterable: Iterable[_T], maxlen: int | None = ...
|
||||
) -> None: ...
|
||||
def __iter__(self) -> seekable[_T]: ...
|
||||
def __next__(self) -> _T: ...
|
||||
def __bool__(self) -> bool: ...
|
||||
@overload
|
||||
def peek(self) -> _T: ...
|
||||
@overload
|
||||
def peek(self, default: _U) -> _T | _U: ...
|
||||
def elements(self) -> SequenceView[_T]: ...
|
||||
def seek(self, index: int) -> None: ...
|
||||
|
||||
class run_length:
|
||||
@staticmethod
|
||||
def encode(iterable: Iterable[_T]) -> Iterator[tuple[_T, int]]: ...
|
||||
@staticmethod
|
||||
def decode(iterable: Iterable[tuple[_T, int]]) -> Iterator[_T]: ...
|
||||
|
||||
def exactly_n(
|
||||
iterable: Iterable[_T], n: int, predicate: Callable[[_T], object] = ...
|
||||
) -> bool: ...
|
||||
def circular_shifts(iterable: Iterable[_T]) -> list[tuple[_T, ...]]: ...
|
||||
def make_decorator(
|
||||
wrapping_func: Callable[..., _U], result_index: int = ...
|
||||
) -> Callable[..., Callable[[Callable[..., Any]], Callable[..., _U]]]: ...
|
||||
@overload
|
||||
def map_reduce(
|
||||
iterable: Iterable[_T],
|
||||
keyfunc: Callable[[_T], _U],
|
||||
valuefunc: None = ...,
|
||||
reducefunc: None = ...,
|
||||
) -> dict[_U, list[_T]]: ...
|
||||
@overload
|
||||
def map_reduce(
|
||||
iterable: Iterable[_T],
|
||||
keyfunc: Callable[[_T], _U],
|
||||
valuefunc: Callable[[_T], _V],
|
||||
reducefunc: None = ...,
|
||||
) -> dict[_U, list[_V]]: ...
|
||||
@overload
|
||||
def map_reduce(
|
||||
iterable: Iterable[_T],
|
||||
keyfunc: Callable[[_T], _U],
|
||||
valuefunc: None = ...,
|
||||
reducefunc: Callable[[list[_T]], _W] = ...,
|
||||
) -> dict[_U, _W]: ...
|
||||
@overload
|
||||
def map_reduce(
|
||||
iterable: Iterable[_T],
|
||||
keyfunc: Callable[[_T], _U],
|
||||
valuefunc: Callable[[_T], _V],
|
||||
reducefunc: Callable[[list[_V]], _W],
|
||||
) -> dict[_U, _W]: ...
|
||||
def rlocate(
|
||||
iterable: Iterable[_T],
|
||||
pred: Callable[..., object] = ...,
|
||||
window_size: int | None = ...,
|
||||
) -> Iterator[int]: ...
|
||||
def replace(
|
||||
iterable: Iterable[_T],
|
||||
pred: Callable[..., object],
|
||||
substitutes: Iterable[_U],
|
||||
count: int | None = ...,
|
||||
window_size: int = ...,
|
||||
) -> Iterator[_T | _U]: ...
|
||||
def partitions(iterable: Iterable[_T]) -> Iterator[list[list[_T]]]: ...
|
||||
def set_partitions(
|
||||
iterable: Iterable[_T], k: int | None = ...
|
||||
) -> Iterator[list[list[_T]]]: ...
|
||||
|
||||
class time_limited(Generic[_T], Iterator[_T]):
|
||||
def __init__(
|
||||
self, limit_seconds: float, iterable: Iterable[_T]
|
||||
) -> None: ...
|
||||
def __iter__(self) -> islice_extended[_T]: ...
|
||||
def __next__(self) -> _T: ...
|
||||
|
||||
@overload
|
||||
def only(
|
||||
iterable: Iterable[_T], *, too_long: _Raisable | None = ...
|
||||
) -> _T | None: ...
|
||||
@overload
|
||||
def only(
|
||||
iterable: Iterable[_T], default: _U, too_long: _Raisable | None = ...
|
||||
) -> _T | _U: ...
|
||||
def ichunked(iterable: Iterable[_T], n: int) -> Iterator[Iterator[_T]]: ...
|
||||
def distinct_combinations(
|
||||
iterable: Iterable[_T], r: int
|
||||
) -> Iterator[tuple[_T, ...]]: ...
|
||||
def filter_except(
|
||||
validator: Callable[[Any], object],
|
||||
iterable: Iterable[_T],
|
||||
*exceptions: Type[BaseException],
|
||||
) -> Iterator[_T]: ...
|
||||
def map_except(
|
||||
function: Callable[[Any], _U],
|
||||
iterable: Iterable[_T],
|
||||
*exceptions: Type[BaseException],
|
||||
) -> Iterator[_U]: ...
|
||||
def map_if(
|
||||
iterable: Iterable[Any],
|
||||
pred: Callable[[Any], bool],
|
||||
func: Callable[[Any], Any],
|
||||
func_else: Callable[[Any], Any] | None = ...,
|
||||
) -> Iterator[Any]: ...
|
||||
def sample(
|
||||
iterable: Iterable[_T],
|
||||
k: int,
|
||||
weights: Iterable[float] | None = ...,
|
||||
) -> list[_T]: ...
|
||||
def is_sorted(
|
||||
iterable: Iterable[_T],
|
||||
key: Callable[[_T], _U] | None = ...,
|
||||
reverse: bool = False,
|
||||
strict: bool = False,
|
||||
) -> bool: ...
|
||||
|
||||
class AbortThread(BaseException):
|
||||
pass
|
||||
|
||||
class callback_iter(Generic[_T], Iterator[_T]):
|
||||
def __init__(
|
||||
self,
|
||||
func: Callable[..., Any],
|
||||
callback_kwd: str = ...,
|
||||
wait_seconds: float = ...,
|
||||
) -> None: ...
|
||||
def __enter__(self) -> callback_iter[_T]: ...
|
||||
def __exit__(
|
||||
self,
|
||||
exc_type: Type[BaseException] | None,
|
||||
exc_value: BaseException | None,
|
||||
traceback: TracebackType | None,
|
||||
) -> bool | None: ...
|
||||
def __iter__(self) -> callback_iter[_T]: ...
|
||||
def __next__(self) -> _T: ...
|
||||
def _reader(self) -> Iterator[_T]: ...
|
||||
@property
|
||||
def done(self) -> bool: ...
|
||||
@property
|
||||
def result(self) -> Any: ...
|
||||
|
||||
def windowed_complete(
|
||||
iterable: Iterable[_T], n: int
|
||||
) -> Iterator[tuple[_T, ...]]: ...
|
||||
def all_unique(
|
||||
iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
|
||||
) -> bool: ...
|
||||
def nth_product(index: int, *args: Iterable[_T]) -> tuple[_T, ...]: ...
|
||||
def nth_permutation(
|
||||
iterable: Iterable[_T], r: int, index: int
|
||||
) -> tuple[_T, ...]: ...
|
||||
def value_chain(*args: _T | Iterable[_T]) -> Iterable[_T]: ...
|
||||
def product_index(element: Iterable[_T], *args: Iterable[_T]) -> int: ...
|
||||
def combination_index(
|
||||
element: Iterable[_T], iterable: Iterable[_T]
|
||||
) -> int: ...
|
||||
def permutation_index(
|
||||
element: Iterable[_T], iterable: Iterable[_T]
|
||||
) -> int: ...
|
||||
def repeat_each(iterable: Iterable[_T], n: int = ...) -> Iterator[_T]: ...
|
||||
|
||||
class countable(Generic[_T], Iterator[_T]):
|
||||
def __init__(self, iterable: Iterable[_T]) -> None: ...
|
||||
def __iter__(self) -> countable[_T]: ...
|
||||
def __next__(self) -> _T: ...
|
||||
|
||||
def chunked_even(iterable: Iterable[_T], n: int) -> Iterator[list[_T]]: ...
|
||||
def zip_broadcast(
|
||||
*objects: _T | Iterable[_T],
|
||||
scalar_types: type | tuple[type | tuple[Any, ...], ...] | None = ...,
|
||||
strict: bool = ...,
|
||||
) -> Iterable[tuple[_T, ...]]: ...
|
||||
def unique_in_window(
|
||||
iterable: Iterable[_T], n: int, key: Callable[[_T], _U] | None = ...
|
||||
) -> Iterator[_T]: ...
|
||||
def duplicates_everseen(
|
||||
iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
|
||||
) -> Iterator[_T]: ...
|
||||
def duplicates_justseen(
|
||||
iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
|
||||
) -> Iterator[_T]: ...
|
||||
|
||||
class _SupportsLessThan(Protocol):
|
||||
def __lt__(self, __other: Any) -> bool: ...
|
||||
|
||||
_SupportsLessThanT = TypeVar("_SupportsLessThanT", bound=_SupportsLessThan)
|
||||
|
||||
@overload
|
||||
def minmax(
|
||||
iterable_or_value: Iterable[_SupportsLessThanT], *, key: None = None
|
||||
) -> tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
|
||||
@overload
|
||||
def minmax(
|
||||
iterable_or_value: Iterable[_T], *, key: Callable[[_T], _SupportsLessThan]
|
||||
) -> tuple[_T, _T]: ...
|
||||
@overload
|
||||
def minmax(
|
||||
iterable_or_value: Iterable[_SupportsLessThanT],
|
||||
*,
|
||||
key: None = None,
|
||||
default: _U,
|
||||
) -> _U | tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
|
||||
@overload
|
||||
def minmax(
|
||||
iterable_or_value: Iterable[_T],
|
||||
*,
|
||||
key: Callable[[_T], _SupportsLessThan],
|
||||
default: _U,
|
||||
) -> _U | tuple[_T, _T]: ...
|
||||
@overload
|
||||
def minmax(
|
||||
iterable_or_value: _SupportsLessThanT,
|
||||
__other: _SupportsLessThanT,
|
||||
*others: _SupportsLessThanT,
|
||||
) -> tuple[_SupportsLessThanT, _SupportsLessThanT]: ...
|
||||
@overload
|
||||
def minmax(
|
||||
iterable_or_value: _T,
|
||||
__other: _T,
|
||||
*others: _T,
|
||||
key: Callable[[_T], _SupportsLessThan],
|
||||
) -> tuple[_T, _T]: ...
|
||||
def longest_common_prefix(
|
||||
iterables: Iterable[Iterable[_T]],
|
||||
) -> Iterator[_T]: ...
|
||||
def iequals(*iterables: Iterable[object]) -> bool: ...
|
||||
def constrained_batches(
|
||||
iterable: Iterable[object],
|
||||
max_size: int,
|
||||
max_count: int | None = ...,
|
||||
get_len: Callable[[_T], object] = ...,
|
||||
strict: bool = ...,
|
||||
) -> Iterator[tuple[_T]]: ...
|
||||
def gray_product(*iterables: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
|
||||
0  lib/pkg_resources/_vendor/more_itertools/py.typed  Normal file
930  lib/pkg_resources/_vendor/more_itertools/recipes.py  Normal file
@@ -0,0 +1,930 @@
|
||||
"""Imported from the recipes section of the itertools documentation.
|
||||
|
||||
All functions taken from the recipes section of the itertools library docs
|
||||
[1]_.
|
||||
Some backward-compatible usability improvements have been made.
|
||||
|
||||
.. [1] http://docs.python.org/library/itertools.html#recipes
|
||||
|
||||
"""
|
||||
import math
|
||||
import operator
|
||||
import warnings
|
||||
|
||||
from collections import deque
|
||||
from collections.abc import Sized
|
||||
from functools import reduce
|
||||
from itertools import (
|
||||
chain,
|
||||
combinations,
|
||||
compress,
|
||||
count,
|
||||
cycle,
|
||||
groupby,
|
||||
islice,
|
||||
product,
|
||||
repeat,
|
||||
starmap,
|
||||
tee,
|
||||
zip_longest,
|
||||
)
|
||||
from random import randrange, sample, choice
|
||||
from sys import hexversion
|
||||
|
||||
__all__ = [
|
||||
'all_equal',
|
||||
'batched',
|
||||
'before_and_after',
|
||||
'consume',
|
||||
'convolve',
|
||||
'dotproduct',
|
||||
'first_true',
|
||||
'factor',
|
||||
'flatten',
|
||||
'grouper',
|
||||
'iter_except',
|
||||
'iter_index',
|
||||
'matmul',
|
||||
'ncycles',
|
||||
'nth',
|
||||
'nth_combination',
|
||||
'padnone',
|
||||
'pad_none',
|
||||
'pairwise',
|
||||
'partition',
|
||||
'polynomial_from_roots',
|
||||
'powerset',
|
||||
'prepend',
|
||||
'quantify',
|
||||
'random_combination_with_replacement',
|
||||
'random_combination',
|
||||
'random_permutation',
|
||||
'random_product',
|
||||
'repeatfunc',
|
||||
'roundrobin',
|
||||
'sieve',
|
||||
'sliding_window',
|
||||
'subslices',
|
||||
'tabulate',
|
||||
'tail',
|
||||
'take',
|
||||
'transpose',
|
||||
'triplewise',
|
||||
'unique_everseen',
|
||||
'unique_justseen',
|
||||
]
|
||||
|
||||
_marker = object()
|
||||
|
||||
|
||||
def take(n, iterable):
|
||||
"""Return first *n* items of the iterable as a list.
|
||||
|
||||
>>> take(3, range(10))
|
||||
[0, 1, 2]
|
||||
|
||||
If there are fewer than *n* items in the iterable, all of them are
|
||||
returned.
|
||||
|
||||
>>> take(10, range(3))
|
||||
[0, 1, 2]
|
||||
|
||||
"""
|
||||
return list(islice(iterable, n))
|
||||
|
||||
|
||||
def tabulate(function, start=0):
|
||||
"""Return an iterator over the results of ``func(start)``,
|
||||
``func(start + 1)``, ``func(start + 2)``...
|
||||
|
||||
*func* should be a function that accepts one integer argument.
|
||||
|
||||
If *start* is not specified it defaults to 0. It will be incremented each
|
||||
time the iterator is advanced.
|
||||
|
||||
>>> square = lambda x: x ** 2
|
||||
>>> iterator = tabulate(square, -3)
|
||||
>>> take(4, iterator)
|
||||
[9, 4, 1, 0]
|
||||
|
||||
"""
|
||||
return map(function, count(start))
|
||||
|
||||
|
||||
def tail(n, iterable):
|
||||
"""Return an iterator over the last *n* items of *iterable*.
|
||||
|
||||
>>> t = tail(3, 'ABCDEFG')
|
||||
>>> list(t)
|
||||
['E', 'F', 'G']
|
||||
|
||||
"""
|
||||
# If the given iterable has a length, then we can use islice to get its
|
||||
# final elements. Note that if the iterable is not actually Iterable,
|
||||
# either islice or deque will throw a TypeError. This is why we don't
|
||||
# check if it is Iterable.
|
||||
if isinstance(iterable, Sized):
|
||||
yield from islice(iterable, max(0, len(iterable) - n), None)
|
||||
else:
|
||||
yield from iter(deque(iterable, maxlen=n))
|
||||
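# Both branches above agree (illustration only): a plain iterator takes the
# deque(maxlen=n) path, while a Sized input like str uses islice directly.
assert list(tail(2, iter('ABCDE'))) == ['D', 'E']  # deque path
assert list(tail(2, 'ABCDE')) == ['D', 'E']        # islice path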
|
||||
|
||||
def consume(iterator, n=None):
|
||||
"""Advance *iterable* by *n* steps. If *n* is ``None``, consume it
|
||||
entirely.
|
||||
|
||||
Efficiently exhausts an iterator without returning values. Defaults to
|
||||
consuming the whole iterator, but an optional second argument may be
|
||||
provided to limit consumption.
|
||||
|
||||
>>> i = (x for x in range(10))
|
||||
>>> next(i)
|
||||
0
|
||||
>>> consume(i, 3)
|
||||
>>> next(i)
|
||||
4
|
||||
>>> consume(i)
|
||||
>>> next(i)
|
||||
Traceback (most recent call last):
|
||||
File "<stdin>", line 1, in <module>
|
||||
StopIteration
|
||||
|
||||
If the iterator has fewer items remaining than the provided limit, the
|
||||
whole iterator will be consumed.
|
||||
|
||||
>>> i = (x for x in range(3))
|
||||
>>> consume(i, 5)
|
||||
>>> next(i)
|
||||
Traceback (most recent call last):
|
||||
File "<stdin>", line 1, in <module>
|
||||
StopIteration
|
||||
|
||||
"""
|
||||
# Use functions that consume iterators at C speed.
|
||||
if n is None:
|
||||
# feed the entire iterator into a zero-length deque
|
||||
deque(iterator, maxlen=0)
|
||||
else:
|
||||
# advance to the empty slice starting at position n
|
||||
next(islice(iterator, n, n), None)
|
||||
|
||||
|
||||
def nth(iterable, n, default=None):
|
||||
"""Returns the nth item or a default value.
|
||||
|
||||
>>> l = range(10)
|
||||
>>> nth(l, 3)
|
||||
3
|
||||
>>> nth(l, 20, "zebra")
|
||||
'zebra'
|
||||
|
||||
"""
|
||||
return next(islice(iterable, n, None), default)
|
||||
|
||||
|
||||
def all_equal(iterable):
|
||||
"""
|
||||
Returns ``True`` if all the elements are equal to each other.
|
||||
|
||||
>>> all_equal('aaaa')
|
||||
True
|
||||
>>> all_equal('aaab')
|
||||
False
|
||||
|
||||
"""
|
||||
g = groupby(iterable)
|
||||
return next(g, True) and not next(g, False)
|
||||
|
||||
|
||||
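# groupby() collapses runs of equal values, so an all-equal iterable produces
# at most one group: the first next() fetches it (or True when empty), and
# the second next() must then come up empty. A minimal sketch (demo helper,
# not part of the vendored module):
def _all_equal_demo():
    assert all_equal([])          # the empty iterable counts as all-equal
    assert all_equal('aaaa')
    assert not all_equal('aaab')  # a second group appears, so False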
def quantify(iterable, pred=bool):
|
||||
"""Return the how many times the predicate is true.
|
||||
|
||||
>>> quantify([True, False, True])
|
||||
2
|
||||
|
||||
"""
|
||||
return sum(map(pred, iterable))
|
||||
|
||||
|
||||
def pad_none(iterable):
|
||||
"""Returns the sequence of elements and then returns ``None`` indefinitely.
|
||||
|
||||
>>> take(5, pad_none(range(3)))
|
||||
[0, 1, 2, None, None]
|
||||
|
||||
Useful for emulating the behavior of the built-in :func:`map` function.
|
||||
|
||||
See also :func:`padded`.
|
||||
|
||||
"""
|
||||
return chain(iterable, repeat(None))
|
||||
|
||||
|
||||
padnone = pad_none
|
||||
|
||||
|
||||
def ncycles(iterable, n):
|
||||
"""Returns the sequence elements *n* times
|
||||
|
||||
>>> list(ncycles(["a", "b"], 3))
|
||||
['a', 'b', 'a', 'b', 'a', 'b']
|
||||
|
||||
"""
|
||||
return chain.from_iterable(repeat(tuple(iterable), n))
|
||||
|
||||
|
||||
def dotproduct(vec1, vec2):
|
||||
"""Returns the dot product of the two iterables.
|
||||
|
||||
>>> dotproduct([10, 10], [20, 20])
|
||||
400
|
||||
|
||||
"""
|
||||
return sum(map(operator.mul, vec1, vec2))
|
||||
|
||||
|
||||
def flatten(listOfLists):
|
||||
"""Return an iterator flattening one level of nesting in a list of lists.
|
||||
|
||||
>>> list(flatten([[0, 1], [2, 3]]))
|
||||
[0, 1, 2, 3]
|
||||
|
||||
See also :func:`collapse`, which can flatten multiple levels of nesting.
|
||||
|
||||
"""
|
||||
return chain.from_iterable(listOfLists)
|
||||
|
||||
|
||||
def repeatfunc(func, times=None, *args):
|
||||
"""Call *func* with *args* repeatedly, returning an iterable over the
|
||||
results.
|
||||
|
||||
If *times* is specified, the iterable will terminate after that many
|
||||
repetitions:
|
||||
|
||||
>>> from operator import add
|
||||
>>> times = 4
|
||||
>>> args = 3, 5
|
||||
>>> list(repeatfunc(add, times, *args))
|
||||
[8, 8, 8, 8]
|
||||
|
||||
If *times* is ``None`` the iterable will not terminate:
|
||||
|
||||
>>> from random import randrange
|
||||
>>> times = None
|
||||
>>> args = 1, 11
|
||||
>>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP
|
||||
[2, 4, 8, 1, 8, 4]
|
||||
|
||||
"""
|
||||
if times is None:
|
||||
return starmap(func, repeat(args))
|
||||
return starmap(func, repeat(args, times))
|
||||
|
||||
|
||||
def _pairwise(iterable):
|
||||
"""Returns an iterator of paired items, overlapping, from the original
|
||||
|
||||
>>> take(4, pairwise(count()))
|
||||
[(0, 1), (1, 2), (2, 3), (3, 4)]
|
||||
|
||||
On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
|
||||
|
||||
"""
|
||||
a, b = tee(iterable)
|
||||
next(b, None)
|
||||
yield from zip(a, b)
|
||||
|
||||
|
||||
try:
|
||||
from itertools import pairwise as itertools_pairwise
|
||||
except ImportError:
|
||||
pairwise = _pairwise
|
||||
else:
|
||||
|
||||
def pairwise(iterable):
|
||||
yield from itertools_pairwise(iterable)
|
||||
|
||||
pairwise.__doc__ = _pairwise.__doc__
|
||||
|
||||
|
||||
class UnequalIterablesError(ValueError):
|
||||
def __init__(self, details=None):
|
||||
msg = 'Iterables have different lengths'
|
||||
if details is not None:
|
||||
msg += (': index 0 has length {}; index {} has length {}').format(
|
||||
*details
|
||||
)
|
||||
|
||||
super().__init__(msg)
|
||||
|
||||
|
||||
def _zip_equal_generator(iterables):
|
||||
for combo in zip_longest(*iterables, fillvalue=_marker):
|
||||
for val in combo:
|
||||
if val is _marker:
|
||||
raise UnequalIterablesError()
|
||||
yield combo
|
||||
|
||||
|
||||
def _zip_equal(*iterables):
|
||||
# Check whether the iterables are all the same size.
|
||||
try:
|
||||
first_size = len(iterables[0])
|
||||
for i, it in enumerate(iterables[1:], 1):
|
||||
size = len(it)
|
||||
if size != first_size:
|
||||
break
|
||||
else:
|
||||
# If we didn't break out, we can use the built-in zip.
|
||||
return zip(*iterables)
|
||||
|
||||
# If we did break out, there was a mismatch.
|
||||
raise UnequalIterablesError(details=(first_size, i, size))
|
||||
# If any one of the iterables didn't have a length, start reading
|
||||
# them until one runs out.
|
||||
except TypeError:
|
||||
return _zip_equal_generator(iterables)
|
||||
|
||||
|
||||
def grouper(iterable, n, incomplete='fill', fillvalue=None):
|
||||
"""Group elements from *iterable* into fixed-length groups of length *n*.
|
||||
|
||||
>>> list(grouper('ABCDEF', 3))
|
||||
[('A', 'B', 'C'), ('D', 'E', 'F')]
|
||||
|
||||
The keyword arguments *incomplete* and *fillvalue* control what happens for
|
||||
iterables whose length is not a multiple of *n*.
|
||||
|
||||
When *incomplete* is `'fill'`, the last group will contain instances of
|
||||
*fillvalue*.
|
||||
|
||||
>>> list(grouper('ABCDEFG', 3, incomplete='fill', fillvalue='x'))
|
||||
[('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
|
||||
|
||||
When *incomplete* is `'ignore'`, the last group will not be emitted.
|
||||
|
||||
>>> list(grouper('ABCDEFG', 3, incomplete='ignore', fillvalue='x'))
|
||||
[('A', 'B', 'C'), ('D', 'E', 'F')]
|
||||
|
||||
When *incomplete* is `'strict'`, a subclass of `ValueError` will be raised.
|
||||
|
||||
>>> it = grouper('ABCDEFG', 3, incomplete='strict')
|
||||
>>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL
|
||||
Traceback (most recent call last):
|
||||
...
|
||||
UnequalIterablesError
|
||||
|
||||
"""
|
||||
args = [iter(iterable)] * n
|
||||
if incomplete == 'fill':
|
||||
return zip_longest(*args, fillvalue=fillvalue)
|
||||
if incomplete == 'strict':
|
||||
return _zip_equal(*args)
|
||||
if incomplete == 'ignore':
|
||||
return zip(*args)
|
||||
else:
|
||||
raise ValueError('Expected fill, strict, or ignore')
|
||||
|
||||
|
||||
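# The three *incomplete* modes side by side. _zip_equal() is what gives
# 'strict' its fail-fast behavior: sized inputs are len()-checked up front,
# everything else falls back to the sentinel-scanning generator. A minimal
# sketch (demo helper, not part of the vendored module):
def _grouper_demo():
    assert list(grouper('ABCDEFG', 3, incomplete='fill', fillvalue='-')) == [
        ('A', 'B', 'C'), ('D', 'E', 'F'), ('G', '-', '-')
    ]
    assert list(grouper('ABCDEFG', 3, incomplete='ignore')) == [
        ('A', 'B', 'C'), ('D', 'E', 'F')
    ]
    try:
        list(grouper('ABCDEFG', 3, incomplete='strict'))
    except UnequalIterablesError:
        pass
    else:
        raise AssertionError('expected UnequalIterablesError')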
def roundrobin(*iterables):
|
||||
"""Yields an item from each iterable, alternating between them.
|
||||
|
||||
>>> list(roundrobin('ABC', 'D', 'EF'))
|
||||
['A', 'D', 'E', 'B', 'F', 'C']
|
||||
|
||||
This function produces the same output as :func:`interleave_longest`, but
|
||||
may perform better for some inputs (in particular when the number of
|
||||
iterables is small).
|
||||
|
||||
"""
|
||||
# Recipe credited to George Sakkis
|
||||
pending = len(iterables)
|
||||
nexts = cycle(iter(it).__next__ for it in iterables)
|
||||
while pending:
|
||||
try:
|
||||
for next in nexts:
|
||||
yield next()
|
||||
except StopIteration:
|
||||
pending -= 1
|
||||
nexts = cycle(islice(nexts, pending))
|
||||
|
||||
|
||||
def partition(pred, iterable):
|
||||
"""
|
||||
Returns a 2-tuple of iterables derived from the input iterable.
|
||||
The first yields the items that have ``pred(item) == False``.
|
||||
The second yields the items that have ``pred(item) == True``.
|
||||
|
||||
>>> is_odd = lambda x: x % 2 != 0
|
||||
>>> iterable = range(10)
|
||||
>>> even_items, odd_items = partition(is_odd, iterable)
|
||||
>>> list(even_items), list(odd_items)
|
||||
([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
|
||||
|
||||
If *pred* is None, :func:`bool` is used.
|
||||
|
||||
>>> iterable = [0, 1, False, True, '', ' ']
|
||||
>>> false_items, true_items = partition(None, iterable)
|
||||
>>> list(false_items), list(true_items)
|
||||
([0, False, ''], [1, True, ' '])
|
||||
|
||||
"""
|
||||
if pred is None:
|
||||
pred = bool
|
||||
|
||||
evaluations = ((pred(x), x) for x in iterable)
|
||||
t1, t2 = tee(evaluations)
|
||||
return (
|
||||
(x for (cond, x) in t1 if not cond),
|
||||
(x for (cond, x) in t2 if cond),
|
||||
)
|
||||
|
||||
|
||||
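# Each item's predicate result is computed once and shared between the two
# outputs through tee(), so partition() is safe even when *pred* is expensive
# or the input is a one-shot iterator. A minimal sketch (demo helper, not
# part of the vendored module):
def _partition_demo():
    calls = []

    def is_odd(x):
        calls.append(x)
        return x % 2 != 0

    evens, odds = partition(is_odd, range(6))
    assert (list(evens), list(odds)) == ([0, 2, 4], [1, 3, 5])
    assert calls == [0, 1, 2, 3, 4, 5]  # each element evaluated exactly once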
def powerset(iterable):
|
||||
"""Yields all possible subsets of the iterable.
|
||||
|
||||
>>> list(powerset([1, 2, 3]))
|
||||
[(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
|
||||
|
||||
:func:`powerset` will operate on iterables that aren't :class:`set`
|
||||
instances, so repeated elements in the input will produce repeated elements
|
||||
in the output. Use :func:`unique_everseen` on the input to avoid generating
|
||||
duplicates:
|
||||
|
||||
>>> seq = [1, 1, 0]
|
||||
>>> list(powerset(seq))
|
||||
[(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
|
||||
>>> from more_itertools import unique_everseen
|
||||
>>> list(powerset(unique_everseen(seq)))
|
||||
[(), (1,), (0,), (1, 0)]
|
||||
|
||||
"""
|
||||
s = list(iterable)
|
||||
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
|
||||
|
||||
|
||||
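# The output size is exactly 2 ** len(s), so materializing the power set is
# only practical for small inputs. A minimal sketch (demo helper, not part of
# the vendored module):
def _powerset_demo():
    subsets = list(powerset(range(4)))
    assert len(subsets) == 2 ** 4
    assert subsets[0] == () and subsets[-1] == (0, 1, 2, 3)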
def unique_everseen(iterable, key=None):
|
||||
"""
|
||||
Yield unique elements, preserving order.
|
||||
|
||||
>>> list(unique_everseen('AAAABBBCCDAABBB'))
|
||||
['A', 'B', 'C', 'D']
|
||||
>>> list(unique_everseen('ABBCcAD', str.lower))
|
||||
['A', 'B', 'C', 'D']
|
||||
|
||||
Sequences with a mix of hashable and unhashable items can be used.
|
||||
The function will be slower (i.e., `O(n^2)`) for unhashable items.
|
||||
|
||||
Remember that ``list`` objects are unhashable - you can use the *key*
|
||||
parameter to transform the list to a tuple (which is hashable) to
|
||||
avoid a slowdown.
|
||||
|
||||
>>> iterable = ([1, 2], [2, 3], [1, 2])
|
||||
>>> list(unique_everseen(iterable)) # Slow
|
||||
[[1, 2], [2, 3]]
|
||||
>>> list(unique_everseen(iterable, key=tuple)) # Faster
|
||||
[[1, 2], [2, 3]]
|
||||
|
||||
Similarly, you may want to convert unhashable ``set`` objects with
|
||||
``key=frozenset``. For ``dict`` objects,
|
||||
``key=lambda x: frozenset(x.items())`` can be used.
|
||||
|
||||
"""
|
||||
seenset = set()
|
||||
seenset_add = seenset.add
|
||||
seenlist = []
|
||||
seenlist_add = seenlist.append
|
||||
use_key = key is not None
|
||||
|
||||
for element in iterable:
|
||||
k = key(element) if use_key else element
|
||||
try:
|
||||
if k not in seenset:
|
||||
seenset_add(k)
|
||||
yield element
|
||||
except TypeError:
|
||||
if k not in seenlist:
|
||||
seenlist_add(k)
|
||||
yield element
|
||||
|
||||
|
||||
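# The set/list split above is what makes mixed inputs work: hashable keys go
# into seenset, and the TypeError branch reroutes unhashable keys to the
# slower list scan. A minimal sketch (demo helper, not part of the vendored
# module):
def _unique_everseen_demo():
    mixed = [[1, 2], (1, 2), [1, 2], 'a', 'a']  # lists are unhashable
    assert list(unique_everseen(mixed)) == [[1, 2], (1, 2), 'a']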
def unique_justseen(iterable, key=None):
|
||||
"""Yields elements in order, ignoring serial duplicates
|
||||
|
||||
>>> list(unique_justseen('AAAABBBCCDAABBB'))
|
||||
['A', 'B', 'C', 'D', 'A', 'B']
|
||||
>>> list(unique_justseen('ABBCcAD', str.lower))
|
||||
['A', 'B', 'C', 'A', 'D']
|
||||
|
||||
"""
|
||||
return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
|
||||
|
||||
|
||||
def iter_except(func, exception, first=None):
|
||||
"""Yields results from a function repeatedly until an exception is raised.
|
||||
|
||||
Converts a call-until-exception interface to an iterator interface.
|
||||
Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
|
||||
to end the loop.
|
||||
|
||||
>>> l = [0, 1, 2]
|
||||
>>> list(iter_except(l.pop, IndexError))
|
||||
[2, 1, 0]
|
||||
|
||||
Multiple exceptions can be specified as a stopping condition:
|
||||
|
||||
>>> l = [1, 2, 3, '...', 4, 5, 6]
|
||||
>>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
|
||||
[7, 6, 5]
|
||||
>>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
|
||||
[4, 3, 2]
|
||||
>>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError)))
|
||||
[]
|
||||
|
||||
"""
|
||||
try:
|
||||
if first is not None:
|
||||
yield first()
|
||||
while 1:
|
||||
yield func()
|
||||
except exception:
|
||||
pass
|
||||
|
||||
|
||||
def first_true(iterable, default=None, pred=None):
|
||||
"""
|
||||
Returns the first true value in the iterable.
|
||||
|
||||
If no true value is found, returns *default*
|
||||
|
||||
If *pred* is not None, returns the first item for which
|
||||
``pred(item) == True`` .
|
||||
|
||||
>>> first_true(range(10))
|
||||
1
|
||||
>>> first_true(range(10), pred=lambda x: x > 5)
|
||||
6
|
||||
>>> first_true(range(10), default='missing', pred=lambda x: x > 9)
|
||||
'missing'
|
||||
|
||||
"""
|
||||
return next(filter(pred, iterable), default)
|
||||
|
||||
|
||||
def random_product(*args, repeat=1):
|
||||
"""Draw an item at random from each of the input iterables.
|
||||
|
||||
>>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
|
||||
('c', 3, 'Z')
|
||||
|
||||
If *repeat* is provided as a keyword argument, that many items will be
|
||||
drawn from each iterable.
|
||||
|
||||
>>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
|
||||
('a', 2, 'd', 3)
|
||||
|
||||
This is equivalent to taking a random selection from
``itertools.product(*args, repeat=repeat)``.
|
||||
|
||||
"""
|
||||
pools = [tuple(pool) for pool in args] * repeat
|
||||
return tuple(choice(pool) for pool in pools)
|
||||
|
||||
|
||||
def random_permutation(iterable, r=None):
|
||||
"""Return a random *r* length permutation of the elements in *iterable*.
|
||||
|
||||
If *r* is not specified or is ``None``, then *r* defaults to the length of
|
||||
*iterable*.
|
||||
|
||||
>>> random_permutation(range(5)) # doctest:+SKIP
|
||||
(3, 4, 0, 1, 2)
|
||||
|
||||
This is equivalent to taking a random selection from
``itertools.permutations(iterable, r)``.
|
||||
|
||||
"""
|
||||
pool = tuple(iterable)
|
||||
r = len(pool) if r is None else r
|
||||
return tuple(sample(pool, r))
|
||||
|
||||
|
||||
def random_combination(iterable, r):
|
||||
"""Return a random *r* length subsequence of the elements in *iterable*.
|
||||
|
||||
>>> random_combination(range(5), 3) # doctest:+SKIP
|
||||
(2, 3, 4)
|
||||
|
||||
This is equivalent to taking a random selection from
``itertools.combinations(iterable, r)``.
|
||||
|
||||
"""
|
||||
pool = tuple(iterable)
|
||||
n = len(pool)
|
||||
indices = sorted(sample(range(n), r))
|
||||
return tuple(pool[i] for i in indices)
|
||||
|
||||
|
||||
def random_combination_with_replacement(iterable, r):
|
||||
"""Return a random *r* length subsequence of elements in *iterable*,
|
||||
allowing individual elements to be repeated.
|
||||
|
||||
>>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
|
||||
(0, 0, 1, 2, 2)
|
||||
|
||||
This is equivalent to taking a random selection from
``itertools.combinations_with_replacement(iterable, r)``.
|
||||
|
||||
"""
|
||||
pool = tuple(iterable)
|
||||
n = len(pool)
|
||||
indices = sorted(randrange(n) for i in range(r))
|
||||
return tuple(pool[i] for i in indices)
|
||||
|
||||
|
||||
def nth_combination(iterable, r, index):
|
||||
"""Equivalent to ``list(combinations(iterable, r))[index]``.
|
||||
|
||||
The subsequences of *iterable* that are of length *r* can be ordered
|
||||
lexicographically. :func:`nth_combination` computes the subsequence at
|
||||
sort position *index* directly, without computing the previous
|
||||
subsequences.
|
||||
|
||||
>>> nth_combination(range(5), 3, 5)
|
||||
(0, 3, 4)
|
||||
|
||||
``ValueError`` will be raised if *r* is negative or greater than the length
|
||||
of *iterable*.
|
||||
``IndexError`` will be raised if the given *index* is invalid.
|
||||
"""
|
||||
pool = tuple(iterable)
|
||||
n = len(pool)
|
||||
if (r < 0) or (r > n):
|
||||
raise ValueError
|
||||
|
||||
c = 1
|
||||
k = min(r, n - r)
|
||||
for i in range(1, k + 1):
|
||||
c = c * (n - k + i) // i
|
||||
|
||||
if index < 0:
|
||||
index += c
|
||||
|
||||
if (index < 0) or (index >= c):
|
||||
raise IndexError
|
||||
|
||||
result = []
|
||||
while r:
|
||||
c, n, r = c * r // n, n - 1, r - 1
|
||||
while index >= c:
|
||||
index -= c
|
||||
c, n = c * (n - r) // n, n - 1
|
||||
result.append(pool[-1 - n])
|
||||
|
||||
return tuple(result)
|
||||
|
||||
|
||||
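# A spot-check against the brute-force definition: the loop above walks the
# lexicographic order directly, using C(n, r) counts to skip whole blocks of
# combinations. A minimal sketch (demo helper, not part of the vendored
# module):
def _nth_combination_demo():
    pool, r = range(5), 3
    expected = list(combinations(pool, r))
    assert all(
        nth_combination(pool, r, i) == expected[i]
        for i in range(len(expected))
    )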
def prepend(value, iterator):
|
||||
"""Yield *value*, followed by the elements in *iterator*.
|
||||
|
||||
>>> value = '0'
|
||||
>>> iterator = ['1', '2', '3']
|
||||
>>> list(prepend(value, iterator))
|
||||
['0', '1', '2', '3']
|
||||
|
||||
To prepend multiple values, see :func:`itertools.chain`
|
||||
or :func:`value_chain`.
|
||||
|
||||
"""
|
||||
return chain([value], iterator)
|
||||
|
||||
|
||||
def convolve(signal, kernel):
|
||||
"""Convolve the iterable *signal* with the iterable *kernel*.
|
||||
|
||||
>>> signal = (1, 2, 3, 4, 5)
|
||||
>>> kernel = [3, 2, 1]
|
||||
>>> list(convolve(signal, kernel))
|
||||
[3, 8, 14, 20, 26, 14, 5]
|
||||
|
||||
Note: the input arguments are not interchangeable, as the *kernel*
|
||||
is immediately consumed and stored.
|
||||
|
||||
"""
|
||||
kernel = tuple(kernel)[::-1]
|
||||
n = len(kernel)
|
||||
window = deque([0], maxlen=n) * n
|
||||
for x in chain(signal, repeat(0, n - 1)):
|
||||
window.append(x)
|
||||
yield sum(map(operator.mul, kernel, window))
|
||||
|
||||
|
||||
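# With a two-tap kernel of 0.5 this is a moving average; the zero padding in
# chain(signal, repeat(0, n - 1)) produces the tapering edge values. A
# minimal sketch (demo helper, not part of the vendored module):
def _convolve_demo():
    smoothed = list(convolve([1.0, 2.0, 3.0, 4.0], [0.5, 0.5]))
    assert smoothed == [0.5, 1.5, 2.5, 3.5, 2.0]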
def before_and_after(predicate, it):
|
||||
"""A variant of :func:`takewhile` that allows complete access to the
|
||||
remainder of the iterator.
|
||||
|
||||
>>> it = iter('ABCdEfGhI')
|
||||
>>> all_upper, remainder = before_and_after(str.isupper, it)
|
||||
>>> ''.join(all_upper)
|
||||
'ABC'
|
||||
>>> ''.join(remainder) # takewhile() would lose the 'd'
|
||||
'dEfGhI'
|
||||
|
||||
Note that the first iterator must be fully consumed before the second
|
||||
iterator can generate valid results.
|
||||
"""
|
||||
it = iter(it)
|
||||
transition = []
|
||||
|
||||
def true_iterator():
|
||||
for elem in it:
|
||||
if predicate(elem):
|
||||
yield elem
|
||||
else:
|
||||
transition.append(elem)
|
||||
return
|
||||
|
||||
# Note: this is different from itertools recipes to allow nesting
|
||||
# before_and_after remainders into before_and_after again. See tests
|
||||
# for an example.
|
||||
remainder_iterator = chain(transition, it)
|
||||
|
||||
return true_iterator(), remainder_iterator
|
||||
|
||||
|
||||
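# The consumption order matters in practice: the transition element is only
# stashed once true_iterator() has seen it fail the predicate, so drain the
# first iterator before touching the remainder. A minimal sketch (demo
# helper, not part of the vendored module):
def _before_and_after_demo():
    heads, rest = before_and_after(str.isupper, iter('ABCdEF'))
    assert ''.join(heads) == 'ABC'   # drain the predicate-true prefix first
    assert ''.join(rest) == 'dEF'    # the transition element 'd' is kept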
def triplewise(iterable):
|
||||
"""Return overlapping triplets from *iterable*.
|
||||
|
||||
>>> list(triplewise('ABCDE'))
|
||||
[('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')]
|
||||
|
||||
"""
|
||||
for (a, _), (b, c) in pairwise(pairwise(iterable)):
|
||||
yield a, b, c
|
||||
|
||||
|
||||
def sliding_window(iterable, n):
|
||||
"""Return a sliding window of width *n* over *iterable*.
|
||||
|
||||
>>> list(sliding_window(range(6), 4))
|
||||
[(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)]
|
||||
|
||||
If *iterable* has fewer than *n* items, then nothing is yielded:
|
||||
|
||||
>>> list(sliding_window(range(3), 4))
|
||||
[]
|
||||
|
||||
For a variant with more features, see :func:`windowed`.
|
||||
"""
|
||||
it = iter(iterable)
|
||||
window = deque(islice(it, n), maxlen=n)
|
||||
if len(window) == n:
|
||||
yield tuple(window)
|
||||
for x in it:
|
||||
window.append(x)
|
||||
yield tuple(window)
|
||||
|
||||
|
||||
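# Because the deque is bounded at n items, the window slides over arbitrarily
# long (even infinite) inputs in constant memory. A minimal sketch (demo
# helper, not part of the vendored module):
def _sliding_window_demo():
    windows = sliding_window(count(), 3)
    assert next(windows) == (0, 1, 2)
    assert next(windows) == (1, 2, 3)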
def subslices(iterable):
|
||||
"""Return all contiguous non-empty subslices of *iterable*.
|
||||
|
||||
>>> list(subslices('ABC'))
|
||||
[['A'], ['A', 'B'], ['A', 'B', 'C'], ['B'], ['B', 'C'], ['C']]
|
||||
|
||||
This is similar to :func:`substrings`, but emits items in a different
|
||||
order.
|
||||
"""
|
||||
seq = list(iterable)
|
||||
slices = starmap(slice, combinations(range(len(seq) + 1), 2))
|
||||
return map(operator.getitem, repeat(seq), slices)
|
||||
|
||||
|
||||
def polynomial_from_roots(roots):
|
||||
"""Compute a polynomial's coefficients from its roots.
|
||||
|
||||
>>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3)
|
||||
>>> polynomial_from_roots(roots) # x^3 - 4 * x^2 - 17 * x + 60
|
||||
[1, -4, -17, 60]
|
||||
"""
|
||||
# Use math.prod on Python 3.8+; fall back to functools.reduce on older versions.
|
||||
prod = getattr(math, 'prod', lambda x: reduce(operator.mul, x, 1))
|
||||
roots = list(map(operator.neg, roots))
|
||||
return [
|
||||
sum(map(prod, combinations(roots, k))) for k in range(len(roots) + 1)
|
||||
]
|
||||
|
||||
|
||||
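# The comprehension implements Vieta's formulas: the coefficient of
# x^(n - k) is the sum of all k-fold products of the negated roots. A quick
# consistency check (demo helper, not part of the vendored module):
def _polynomial_from_roots_demo():
    roots = [5, -4, 3]
    coeffs = polynomial_from_roots(roots)
    assert coeffs == [1, -4, -17, 60]
    degree = len(coeffs) - 1

    def evaluate(x):
        return sum(c * x ** (degree - i) for i, c in enumerate(coeffs))

    assert all(evaluate(r) == 0 for r in roots)  # every root is a zero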
def iter_index(iterable, value, start=0):
|
||||
"""Yield the index of each place in *iterable* that *value* occurs,
|
||||
beginning with index *start*.
|
||||
|
||||
See :func:`locate` for a more general means of finding the indexes
|
||||
associated with particular values.
|
||||
|
||||
>>> list(iter_index('AABCADEAF', 'A'))
|
||||
[0, 1, 4, 7]
|
||||
"""
|
||||
try:
|
||||
seq_index = iterable.index
|
||||
except AttributeError:
|
||||
# Slow path for general iterables
|
||||
it = islice(iterable, start, None)
|
||||
for i, element in enumerate(it, start):
|
||||
if element is value or element == value:
|
||||
yield i
|
||||
else:
|
||||
# Fast path for sequences
|
||||
i = start - 1
|
||||
try:
|
||||
while True:
|
||||
i = seq_index(value, i + 1)
|
||||
yield i
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
|
||||
def sieve(n):
|
||||
"""Yield the primes less than n.
|
||||
|
||||
>>> list(sieve(30))
|
||||
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
|
||||
"""
|
||||
isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x)))
|
||||
data = bytearray((0, 1)) * (n // 2)
|
||||
data[:3] = 0, 0, 0
|
||||
limit = isqrt(n) + 1
|
||||
for p in compress(range(limit), data):
|
||||
data[p * p : n : p + p] = bytes(len(range(p * p, n, p + p)))
|
||||
data[2] = 1
|
||||
return iter_index(data, 1) if n > 2 else iter([])
|
||||
|
||||
|
||||
def batched(iterable, n):
|
||||
"""Batch data into lists of length *n*. The last batch may be shorter.
|
||||
|
||||
>>> list(batched('ABCDEFG', 3))
|
||||
[['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
|
||||
|
||||
This recipe is from the ``itertools`` docs. This library also provides
|
||||
:func:`chunked`, which has a different implementation.
|
||||
"""
|
||||
if hexversion >= 0x30C00A0: # Python 3.12.0a0
|
||||
warnings.warn(
|
||||
(
|
||||
'batched will be removed in a future version of '
|
||||
'more-itertools. Use the standard library '
|
||||
'itertools.batched function instead'
|
||||
),
|
||||
DeprecationWarning,
|
||||
)
|
||||
|
||||
it = iter(iterable)
|
||||
while True:
|
||||
batch = list(islice(it, n))
|
||||
if not batch:
|
||||
break
|
||||
yield batch
|
||||
|
||||
|
||||
def transpose(it):
|
||||
"""Swap the rows and columns of the input.
|
||||
|
||||
>>> list(transpose([(1, 2, 3), (11, 22, 33)]))
|
||||
[(1, 11), (2, 22), (3, 33)]
|
||||
|
||||
The caller should ensure that the dimensions of the input are compatible.
|
||||
"""
|
||||
# TODO: when 3.9 goes end-of-life, add strict=True to this.
|
||||
return zip(*it)
|
||||
|
||||
|
||||
def matmul(m1, m2):
|
||||
"""Multiply two matrices.
|
||||
>>> list(matmul([(7, 5), (3, 5)], [(2, 5), (7, 9)]))
|
||||
[[49, 80], [41, 60]]
|
||||
|
||||
The caller should ensure that the dimensions of the input matrices are
|
||||
compatible with each other.
|
||||
"""
|
||||
n = len(m2[0])
|
||||
return batched(starmap(dotproduct, product(m1, transpose(m2))), n)
|
||||
|
||||
|
||||
def factor(n):
|
||||
"""Yield the prime factors of n.
|
||||
>>> list(factor(360))
|
||||
[2, 2, 2, 3, 3, 5]
|
||||
"""
|
||||
isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x)))
|
||||
for prime in sieve(isqrt(n) + 1):
|
||||
while True:
|
||||
quotient, remainder = divmod(n, prime)
|
||||
if remainder:
|
||||
break
|
||||
yield prime
|
||||
n = quotient
|
||||
if n == 1:
|
||||
return
|
||||
if n >= 2:
|
||||
yield n
|
||||
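# factor() only sieves up to isqrt(n); any residue left after the trial
# divisions is itself prime and is emitted last. A minimal cross-check of the
# two helpers (demo helper, not part of the vendored module):
def _factor_demo():
    prod = getattr(math, 'prod', lambda xs: reduce(operator.mul, xs, 1))
    for n in (2, 97, 360, 1024):
        fs = list(factor(n))
        assert prod(fs) == n
        assert all(f in list(sieve(f + 1)) for f in fs)  # each factor is prime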
119
lib/pkg_resources/_vendor/more_itertools/recipes.pyi
Normal file
@@ -0,0 +1,119 @@
|
||||
"""Stubs for more_itertools.recipes"""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Iterable,
|
||||
Iterator,
|
||||
overload,
|
||||
Sequence,
|
||||
Type,
|
||||
TypeVar,
|
||||
)
|
||||
|
||||
# Type and type variable definitions
|
||||
_T = TypeVar('_T')
|
||||
_U = TypeVar('_U')
|
||||
|
||||
def take(n: int, iterable: Iterable[_T]) -> list[_T]: ...
|
||||
def tabulate(
|
||||
function: Callable[[int], _T], start: int = ...
|
||||
) -> Iterator[_T]: ...
|
||||
def tail(n: int, iterable: Iterable[_T]) -> Iterator[_T]: ...
|
||||
def consume(iterator: Iterable[object], n: int | None = ...) -> None: ...
|
||||
@overload
|
||||
def nth(iterable: Iterable[_T], n: int) -> _T | None: ...
|
||||
@overload
|
||||
def nth(iterable: Iterable[_T], n: int, default: _U) -> _T | _U: ...
|
||||
def all_equal(iterable: Iterable[object]) -> bool: ...
|
||||
def quantify(
|
||||
iterable: Iterable[_T], pred: Callable[[_T], bool] = ...
|
||||
) -> int: ...
|
||||
def pad_none(iterable: Iterable[_T]) -> Iterator[_T | None]: ...
|
||||
def padnone(iterable: Iterable[_T]) -> Iterator[_T | None]: ...
|
||||
def ncycles(iterable: Iterable[_T], n: int) -> Iterator[_T]: ...
|
||||
def dotproduct(vec1: Iterable[object], vec2: Iterable[object]) -> object: ...
|
||||
def flatten(listOfLists: Iterable[Iterable[_T]]) -> Iterator[_T]: ...
|
||||
def repeatfunc(
|
||||
func: Callable[..., _U], times: int | None = ..., *args: Any
|
||||
) -> Iterator[_U]: ...
|
||||
def pairwise(iterable: Iterable[_T]) -> Iterator[tuple[_T, _T]]: ...
|
||||
def grouper(
|
||||
iterable: Iterable[_T],
|
||||
n: int,
|
||||
incomplete: str = ...,
|
||||
fillvalue: _U = ...,
|
||||
) -> Iterator[tuple[_T | _U, ...]]: ...
|
||||
def roundrobin(*iterables: Iterable[_T]) -> Iterator[_T]: ...
|
||||
def partition(
|
||||
pred: Callable[[_T], object] | None, iterable: Iterable[_T]
|
||||
) -> tuple[Iterator[_T], Iterator[_T]]: ...
|
||||
def powerset(iterable: Iterable[_T]) -> Iterator[tuple[_T, ...]]: ...
|
||||
def unique_everseen(
|
||||
iterable: Iterable[_T], key: Callable[[_T], _U] | None = ...
|
||||
) -> Iterator[_T]: ...
|
||||
def unique_justseen(
|
||||
iterable: Iterable[_T], key: Callable[[_T], object] | None = ...
|
||||
) -> Iterator[_T]: ...
|
||||
@overload
|
||||
def iter_except(
|
||||
func: Callable[[], _T],
|
||||
exception: Type[BaseException] | tuple[Type[BaseException], ...],
|
||||
first: None = ...,
|
||||
) -> Iterator[_T]: ...
|
||||
@overload
|
||||
def iter_except(
|
||||
func: Callable[[], _T],
|
||||
exception: Type[BaseException] | tuple[Type[BaseException], ...],
|
||||
first: Callable[[], _U],
|
||||
) -> Iterator[_T | _U]: ...
|
||||
@overload
|
||||
def first_true(
|
||||
iterable: Iterable[_T], *, pred: Callable[[_T], object] | None = ...
|
||||
) -> _T | None: ...
|
||||
@overload
|
||||
def first_true(
|
||||
iterable: Iterable[_T],
|
||||
default: _U,
|
||||
pred: Callable[[_T], object] | None = ...,
|
||||
) -> _T | _U: ...
|
||||
def random_product(
|
||||
*args: Iterable[_T], repeat: int = ...
|
||||
) -> tuple[_T, ...]: ...
|
||||
def random_permutation(
|
||||
iterable: Iterable[_T], r: int | None = ...
|
||||
) -> tuple[_T, ...]: ...
|
||||
def random_combination(iterable: Iterable[_T], r: int) -> tuple[_T, ...]: ...
|
||||
def random_combination_with_replacement(
|
||||
iterable: Iterable[_T], r: int
|
||||
) -> tuple[_T, ...]: ...
|
||||
def nth_combination(
|
||||
iterable: Iterable[_T], r: int, index: int
|
||||
) -> tuple[_T, ...]: ...
|
||||
def prepend(value: _T, iterator: Iterable[_U]) -> Iterator[_T | _U]: ...
|
||||
def convolve(signal: Iterable[_T], kernel: Iterable[_T]) -> Iterator[_T]: ...
|
||||
def before_and_after(
|
||||
predicate: Callable[[_T], bool], it: Iterable[_T]
|
||||
) -> tuple[Iterator[_T], Iterator[_T]]: ...
|
||||
def triplewise(iterable: Iterable[_T]) -> Iterator[tuple[_T, _T, _T]]: ...
|
||||
def sliding_window(
|
||||
iterable: Iterable[_T], n: int
|
||||
) -> Iterator[tuple[_T, ...]]: ...
|
||||
def subslices(iterable: Iterable[_T]) -> Iterator[list[_T]]: ...
|
||||
def polynomial_from_roots(roots: Sequence[int]) -> list[int]: ...
|
||||
def iter_index(
|
||||
iterable: Iterable[object],
|
||||
value: Any,
|
||||
start: int | None = ...,
|
||||
) -> Iterator[int]: ...
|
||||
def sieve(n: int) -> Iterator[int]: ...
|
||||
def batched(
|
||||
iterable: Iterable[_T],
|
||||
n: int,
|
||||
) -> Iterator[list[_T]]: ...
|
||||
def transpose(
|
||||
it: Iterable[Iterable[_T]],
|
||||
) -> tuple[Iterator[_T], ...]: ...
|
||||
def matmul(m1: Sequence[_T], m2: Sequence[_T]) -> Iterator[list[_T]]: ...
|
||||
def factor(n: int) -> Iterator[int]: ...
|
||||
@@ -1,3 +0,0 @@
|
||||
This software is made available under the terms of *either* of the licenses
|
||||
found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made
|
||||
under the terms of *both* these licenses.
|
||||
@@ -1,177 +0,0 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
@@ -1,23 +0,0 @@
|
||||
Copyright (c) Donald Stufft and individual contributors.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
@@ -1,26 +0,0 @@
|
||||
# This file is dual licensed under the terms of the Apache License, Version
|
||||
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
||||
# for complete details.
|
||||
|
||||
__all__ = [
|
||||
"__title__",
|
||||
"__summary__",
|
||||
"__uri__",
|
||||
"__version__",
|
||||
"__author__",
|
||||
"__email__",
|
||||
"__license__",
|
||||
"__copyright__",
|
||||
]
|
||||
|
||||
__title__ = "packaging"
|
||||
__summary__ = "Core utilities for Python packages"
|
||||
__uri__ = "https://github.com/pypa/packaging"
|
||||
|
||||
__version__ = "21.2"
|
||||
|
||||
__author__ = "Donald Stufft and individual contributors"
|
||||
__email__ = "donald@stufft.io"
|
||||
|
||||
__license__ = "BSD-2-Clause or Apache-2.0"
|
||||
__copyright__ = "2014-2019 %s" % __author__
|
||||
@@ -2,24 +2,14 @@
|
||||
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
|
||||
# for complete details.
|
||||
|
||||
from .__about__ import (
|
||||
__author__,
|
||||
__copyright__,
|
||||
__email__,
|
||||
__license__,
|
||||
__summary__,
|
||||
__title__,
|
||||
__uri__,
|
||||
__version__,
|
||||
)
|
||||
__title__ = "packaging"
|
||||
__summary__ = "Core utilities for Python packages"
|
||||
__uri__ = "https://github.com/pypa/packaging"
|
||||
|
||||
__all__ = [
|
||||
"__title__",
|
||||
"__summary__",
|
||||
"__uri__",
|
||||
"__version__",
|
||||
"__author__",
|
||||
"__email__",
|
||||
"__license__",
|
||||
"__copyright__",
|
||||
]
|
||||
__version__ = "23.1"
|
||||
|
||||
__author__ = "Donald Stufft and individual contributors"
|
||||
__email__ = "donald@stufft.io"
|
||||
|
||||
__license__ = "BSD-2-Clause or Apache-2.0"
|
||||
__copyright__ = "2014-2019 %s" % __author__
|
||||
|
||||
108
lib/pkg_resources/_vendor/packaging/_elffile.py
Normal file
@@ -0,0 +1,108 @@
|
||||
"""
|
||||
ELF file parser.
|
||||
|
||||
This provides a class ``ELFFile`` that parses an ELF executable in a similar
|
||||
interface to ``ZipFile``. Only the read interface is implemented.
|
||||
|
||||
Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
|
||||
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
|
||||
"""
|
||||
|
||||
import enum
|
||||
import os
|
||||
import struct
|
||||
from typing import IO, Optional, Tuple
|
||||
|
||||
|
||||
class ELFInvalid(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
class EIClass(enum.IntEnum):
|
||||
C32 = 1
|
||||
C64 = 2
|
||||
|
||||
|
||||
class EIData(enum.IntEnum):
|
||||
Lsb = 1
|
||||
Msb = 2
|
||||
|
||||
|
||||
class EMachine(enum.IntEnum):
|
||||
I386 = 3
|
||||
S390 = 22
|
||||
Arm = 40
|
||||
X8664 = 62
|
||||
AArc64 = 183
|
||||
|
||||
|
||||
class ELFFile:
|
||||
"""
|
||||
Representation of an ELF executable.
|
||||
"""
|
||||
|
||||
def __init__(self, f: IO[bytes]) -> None:
|
||||
self._f = f
|
||||
|
||||
try:
|
||||
ident = self._read("16B")
|
||||
except struct.error:
|
||||
raise ELFInvalid("unable to parse identification")
|
||||
magic = bytes(ident[:4])
|
||||
if magic != b"\x7fELF":
|
||||
raise ELFInvalid(f"invalid magic: {magic!r}")
|
||||
|
||||
self.capacity = ident[4] # Format for program header (bitness).
|
||||
self.encoding = ident[5] # Data structure encoding (endianness).
|
||||
|
||||
try:
|
||||
# e_fmt: Format for program header.
|
||||
# p_fmt: Format for section header.
|
||||
# p_idx: Indexes to find p_type, p_offset, and p_filesz.
|
||||
e_fmt, self._p_fmt, self._p_idx = {
|
||||
(1, 1): ("<HHIIIIIHHH", "<IIIIIIII", (0, 1, 4)), # 32-bit LSB.
|
||||
(1, 2): (">HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)), # 32-bit MSB.
|
||||
(2, 1): ("<HHIQQQIHHH", "<IIQQQQQQ", (0, 2, 5)), # 64-bit LSB.
|
||||
(2, 2): (">HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)), # 64-bit MSB.
|
||||
}[(self.capacity, self.encoding)]
|
||||
except KeyError:
|
||||
raise ELFInvalid(
|
||||
f"unrecognized capacity ({self.capacity}) or "
|
||||
f"encoding ({self.encoding})"
|
||||
)
|
||||
|
||||
try:
|
||||
(
|
||||
_,
|
||||
self.machine, # Architecture type.
|
||||
_,
|
||||
_,
|
||||
self._e_phoff, # Offset of program header.
|
||||
_,
|
||||
self.flags, # Processor-specific flags.
|
||||
_,
|
||||
self._e_phentsize, # Size of section.
|
||||
self._e_phnum, # Number of sections.
|
||||
) = self._read(e_fmt)
|
||||
except struct.error as e:
|
||||
raise ELFInvalid("unable to parse machine and section information") from e
|
||||
|
||||
def _read(self, fmt: str) -> Tuple[int, ...]:
|
||||
return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))
|
||||
|
||||
@property
|
||||
def interpreter(self) -> Optional[str]:
|
||||
"""
|
||||
The path recorded in the ``PT_INTERP`` section header.
|
||||
"""
|
||||
for index in range(self._e_phnum):
|
||||
self._f.seek(self._e_phoff + self._e_phentsize * index)
|
||||
try:
|
||||
data = self._read(self._p_fmt)
|
||||
except struct.error:
|
||||
continue
|
||||
if data[self._p_idx[0]] != 3: # Not PT_INTERP.
|
||||
continue
|
||||
self._f.seek(data[self._p_idx[1]])
|
||||
return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")
|
||||
return None
|
||||
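# Typical use of ELFFile: inspect the interpreter recorded in the running
# Python binary. A minimal sketch (demo helper, not part of packaging);
# assumes a Linux ELF executable, with the error branches covering the rest.
def _interpreter_demo() -> None:
    import sys

    try:
        with open(sys.executable, "rb") as f:
            print(ELFFile(f).interpreter)  # e.g. "/lib64/ld-linux-x86-64.so.2"
    except (OSError, ELFInvalid):
        print("not an ELF executable")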
@@ -1,121 +1,60 @@
|
||||
import collections
|
||||
import contextlib
|
||||
import functools
|
||||
import os
|
||||
import re
|
||||
import struct
|
||||
import sys
|
||||
import warnings
|
||||
from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple
|
||||
from typing import Dict, Generator, Iterator, NamedTuple, Optional, Tuple
|
||||
|
||||
from ._elffile import EIClass, EIData, ELFFile, EMachine
|
||||
|
||||
EF_ARM_ABIMASK = 0xFF000000
|
||||
EF_ARM_ABI_VER5 = 0x05000000
|
||||
EF_ARM_ABI_FLOAT_HARD = 0x00000400
|
||||
|
||||
|
||||
# Python does not provide platform information at sufficient granularity to
|
||||
# identify the architecture of the running executable in some cases, so we
|
||||
# determine it dynamically by reading the information from the running
|
||||
# process. This only applies on Linux, which uses the ELF format.
|
||||
class _ELFFileHeader:
|
||||
# https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
|
||||
class _InvalidELFFileHeader(ValueError):
|
||||
"""
|
||||
An invalid ELF file header was found.
|
||||
"""
|
||||
|
||||
ELF_MAGIC_NUMBER = 0x7F454C46
|
||||
ELFCLASS32 = 1
|
||||
ELFCLASS64 = 2
|
||||
ELFDATA2LSB = 1
|
||||
ELFDATA2MSB = 2
|
||||
EM_386 = 3
|
||||
EM_S390 = 22
|
||||
EM_ARM = 40
|
||||
EM_X86_64 = 62
|
||||
EF_ARM_ABIMASK = 0xFF000000
|
||||
EF_ARM_ABI_VER5 = 0x05000000
|
||||
EF_ARM_ABI_FLOAT_HARD = 0x00000400
|
||||
|
||||
def __init__(self, file: IO[bytes]) -> None:
|
||||
def unpack(fmt: str) -> int:
|
||||
try:
|
||||
data = file.read(struct.calcsize(fmt))
|
||||
result: Tuple[int, ...] = struct.unpack(fmt, data)
|
||||
except struct.error:
|
||||
raise _ELFFileHeader._InvalidELFFileHeader()
|
||||
return result[0]
|
||||
|
||||
self.e_ident_magic = unpack(">I")
|
||||
if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
|
||||
raise _ELFFileHeader._InvalidELFFileHeader()
|
||||
self.e_ident_class = unpack("B")
|
||||
if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
|
||||
raise _ELFFileHeader._InvalidELFFileHeader()
|
||||
self.e_ident_data = unpack("B")
|
||||
if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
|
||||
raise _ELFFileHeader._InvalidELFFileHeader()
|
||||
self.e_ident_version = unpack("B")
|
||||
self.e_ident_osabi = unpack("B")
|
||||
self.e_ident_abiversion = unpack("B")
|
||||
self.e_ident_pad = file.read(7)
|
||||
format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
|
||||
format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
|
||||
format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
|
||||
format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
|
||||
self.e_type = unpack(format_h)
|
||||
self.e_machine = unpack(format_h)
|
||||
self.e_version = unpack(format_i)
|
||||
self.e_entry = unpack(format_p)
|
||||
self.e_phoff = unpack(format_p)
|
||||
self.e_shoff = unpack(format_p)
|
||||
self.e_flags = unpack(format_i)
|
||||
self.e_ehsize = unpack(format_h)
|
||||
self.e_phentsize = unpack(format_h)
|
||||
self.e_phnum = unpack(format_h)
|
||||
self.e_shentsize = unpack(format_h)
|
||||
self.e_shnum = unpack(format_h)
|
||||
self.e_shstrndx = unpack(format_h)
|
||||
|
||||
|
||||
def _get_elf_header() -> Optional[_ELFFileHeader]:
|
||||
# `os.PathLike` not a generic type until Python 3.9, so sticking with `str`
|
||||
# as the type for `path` until then.
|
||||
@contextlib.contextmanager
|
||||
def _parse_elf(path: str) -> Generator[Optional[ELFFile], None, None]:
|
||||
try:
|
||||
with open(sys.executable, "rb") as f:
|
||||
elf_header = _ELFFileHeader(f)
|
||||
except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
|
||||
return None
|
||||
return elf_header
|
||||
with open(path, "rb") as f:
|
||||
yield ELFFile(f)
|
||||
except (OSError, TypeError, ValueError):
|
||||
yield None
|
||||
|
||||
|
||||
def _is_linux_armhf() -> bool:
|
||||
def _is_linux_armhf(executable: str) -> bool:
|
||||
# hard-float ABI can be detected from the ELF header of the running
|
||||
# process
|
||||
# https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
|
||||
elf_header = _get_elf_header()
|
||||
if elf_header is None:
|
||||
return False
|
||||
result = elf_header.e_ident_class == elf_header.ELFCLASS32
|
||||
result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
|
||||
result &= elf_header.e_machine == elf_header.EM_ARM
|
||||
result &= (
|
||||
elf_header.e_flags & elf_header.EF_ARM_ABIMASK
|
||||
) == elf_header.EF_ARM_ABI_VER5
|
||||
result &= (
|
||||
elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
|
||||
) == elf_header.EF_ARM_ABI_FLOAT_HARD
|
||||
return result
|
||||
with _parse_elf(executable) as f:
|
||||
return (
|
||||
f is not None
|
||||
and f.capacity == EIClass.C32
|
||||
and f.encoding == EIData.Lsb
|
||||
and f.machine == EMachine.Arm
|
||||
and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5
|
||||
and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD
|
||||
)
|
||||
|
||||
|
||||
def _is_linux_i686() -> bool:
|
||||
elf_header = _get_elf_header()
|
||||
if elf_header is None:
|
||||
return False
|
||||
result = elf_header.e_ident_class == elf_header.ELFCLASS32
|
||||
result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
|
||||
result &= elf_header.e_machine == elf_header.EM_386
|
||||
return result
|
||||
def _is_linux_i686(executable: str) -> bool:
|
||||
with _parse_elf(executable) as f:
|
||||
return (
|
||||
f is not None
|
||||
and f.capacity == EIClass.C32
|
||||
and f.encoding == EIData.Lsb
|
||||
and f.machine == EMachine.I386
|
||||
)
|
||||
|
||||
|
||||
def _have_compatible_abi(arch: str) -> bool:
|
||||
def _have_compatible_abi(executable: str, arch: str) -> bool:
|
||||
if arch == "armv7l":
|
||||
return _is_linux_armhf()
|
||||
return _is_linux_armhf(executable)
|
||||
if arch == "i686":
|
||||
return _is_linux_i686()
|
||||
return _is_linux_i686(executable)
|
||||
return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
|
||||
|
||||
|
||||
@@ -141,10 +80,10 @@ def _glibc_version_string_confstr() -> Optional[str]:
|
||||
# platform module.
|
||||
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
|
||||
try:
|
||||
# os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
|
||||
version_string = os.confstr("CS_GNU_LIBC_VERSION")
|
||||
# Should be a string like "glibc 2.17".
|
||||
version_string: str = getattr(os, "confstr")("CS_GNU_LIBC_VERSION")
|
||||
assert version_string is not None
|
||||
_, version = version_string.split()
|
||||
_, version = version_string.rsplit()
|
||||
except (AssertionError, AttributeError, OSError, ValueError):
|
||||
# os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
|
||||
return None
|
||||
@@ -211,8 +150,8 @@ def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
|
||||
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
|
||||
if not m:
|
||||
warnings.warn(
|
||||
"Expected glibc version with 2 components major.minor,"
|
||||
" got: %s" % version_str,
|
||||
f"Expected glibc version with 2 components major.minor,"
|
||||
f" got: {version_str}",
|
||||
RuntimeWarning,
|
||||
)
|
||||
return -1, -1
|
||||
@@ -265,7 +204,7 @@ _LEGACY_MANYLINUX_MAP = {
|
||||
|
||||
|
||||
def platform_tags(linux: str, arch: str) -> Iterator[str]:
|
||||
if not _have_compatible_abi(arch):
|
||||
if not _have_compatible_abi(sys.executable, arch):
|
||||
return
|
||||
# Oldest glibc to be supported regardless of architecture is (2, 17).
|
||||
too_old_glibc2 = _GLibCVersion(2, 16)
|
||||
|
||||
@@ -4,68 +4,13 @@ This module implements logic to detect if the currently running Python is
|
||||
linked against musl, and what musl version is used.
|
||||
"""
|
||||
|
||||
import contextlib
|
||||
import functools
|
||||
import operator
|
||||
import os
|
||||
import re
|
||||
import struct
|
||||
import subprocess
|
||||
import sys
|
||||
from typing import IO, Iterator, NamedTuple, Optional, Tuple
|
||||
from typing import Iterator, NamedTuple, Optional
|
||||
|
||||
|
||||
def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
|
||||
return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
|
||||
|
||||
|
||||
def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
|
||||
"""Detect musl libc location by parsing the Python executable.
|
||||
|
||||
Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
|
||||
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
|
||||
"""
|
||||
f.seek(0)
|
||||
try:
|
||||
ident = _read_unpacked(f, "16B")
|
||||
except struct.error:
|
||||
return None
|
||||
if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF.
|
||||
return None
|
||||
f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version.
|
||||
|
||||
try:
|
||||
# e_fmt: Format for program header.
|
||||
# p_fmt: Format for section header.
|
||||
# p_idx: Indexes to find p_type, p_offset, and p_filesz.
|
||||
e_fmt, p_fmt, p_idx = {
|
||||
1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit.
|
||||
2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit.
|
||||
}[ident[4]]
|
||||
except KeyError:
|
||||
return None
|
||||
else:
|
||||
p_get = operator.itemgetter(*p_idx)
|
||||
|
||||
# Find the interpreter section and return its content.
|
||||
try:
|
||||
_, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
|
||||
except struct.error:
|
||||
return None
|
||||
for i in range(e_phnum + 1):
|
||||
f.seek(e_phoff + e_phentsize * i)
|
||||
try:
|
||||
p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
|
||||
except struct.error:
|
||||
return None
|
||||
if p_type != 3: # Not PT_INTERP.
|
||||
continue
|
||||
f.seek(p_offset)
|
||||
interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
|
||||
if "musl" not in interpreter:
|
||||
return None
|
||||
return interpreter
|
||||
return None
|
||||
from ._elffile import ELFFile
|
||||
|
||||
|
||||
class _MuslVersion(NamedTuple):
|
||||
@@ -95,13 +40,12 @@ def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
|
||||
Version 1.2.2
|
||||
Dynamic Program Loader
|
||||
"""
|
||||
with contextlib.ExitStack() as stack:
|
||||
try:
|
||||
f = stack.enter_context(open(executable, "rb"))
|
||||
except IOError:
|
||||
return None
|
||||
ld = _parse_ld_musl_from_elf(f)
|
||||
if not ld:
|
||||
try:
|
||||
with open(executable, "rb") as f:
|
||||
ld = ELFFile(f).interpreter
|
||||
except (OSError, TypeError, ValueError):
|
||||
return None
|
||||
if ld is None or "musl" not in ld:
|
||||
return None
|
||||
proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
|
||||
return _parse_musl_version(proc.stderr)
|
||||
|
||||
353
lib/pkg_resources/_vendor/packaging/_parser.py
Normal file
@@ -0,0 +1,353 @@
|
||||
"""Handwritten parser of dependency specifiers.
|
||||
|
||||
The docstring for each __parse_* function contains EBNF-inspired grammar representing
|
||||
the implementation.
|
||||
"""
|
||||
|
||||
import ast
|
||||
from typing import Any, List, NamedTuple, Optional, Tuple, Union
|
||||
|
||||
from ._tokenizer import DEFAULT_RULES, Tokenizer
|
||||
|
||||
|
||||
class Node:
|
||||
def __init__(self, value: str) -> None:
|
||||
self.value = value
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self.value
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"<{self.__class__.__name__}('{self}')>"
|
||||
|
||||
def serialize(self) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class Variable(Node):
|
||||
def serialize(self) -> str:
|
||||
return str(self)
|
||||
|
||||
|
||||
class Value(Node):
|
||||
def serialize(self) -> str:
|
||||
return f'"{self}"'
|
||||
|
||||
|
||||
class Op(Node):
|
||||
def serialize(self) -> str:
|
||||
return str(self)
|
||||
|
||||
|
||||
MarkerVar = Union[Variable, Value]
|
||||
MarkerItem = Tuple[MarkerVar, Op, MarkerVar]
|
||||
# MarkerAtom = Union[MarkerItem, List["MarkerAtom"]]
|
||||
# MarkerList = List[Union["MarkerList", MarkerAtom, str]]
|
||||
# mypy does not support recursive type definition
|
||||
# https://github.com/python/mypy/issues/731
|
||||
MarkerAtom = Any
|
||||
MarkerList = List[Any]
|
||||
|
||||
|
||||
class ParsedRequirement(NamedTuple):
|
||||
name: str
|
||||
url: str
|
||||
extras: List[str]
|
||||
specifier: str
|
||||
marker: Optional[MarkerList]
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------------------
|
||||
# Recursive descent parser for dependency specifier
|
||||
# --------------------------------------------------------------------------------------
|
||||
def parse_requirement(source: str) -> ParsedRequirement:
|
||||
return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
|
||||
|
||||
|
||||
def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
|
||||
"""
|
||||
requirement = WS? IDENTIFIER WS? extras WS? requirement_details
|
||||
"""
|
||||
tokenizer.consume("WS")
|
||||
|
||||
name_token = tokenizer.expect(
|
||||
"IDENTIFIER", expected="package name at the start of dependency specifier"
|
||||
)
|
||||
name = name_token.text
|
||||
tokenizer.consume("WS")
|
||||
|
||||
extras = _parse_extras(tokenizer)
|
||||
tokenizer.consume("WS")
|
||||
|
||||
url, specifier, marker = _parse_requirement_details(tokenizer)
|
||||
tokenizer.expect("END", expected="end of dependency specifier")
|
||||
|
||||
return ParsedRequirement(name, url, extras, specifier, marker)
|
||||
|
||||
|
||||
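For orientation, a sketch of what this entry point produces; the package name and versions below are made up for illustration:

# Illustrative sketch (not part of the diff).
parsed = parse_requirement("example[extra1]>=1.0,<2.0 ; python_version > '3.8'")
# parsed.name      == "example"
# parsed.extras    == ["extra1"]
# parsed.specifier == ">=1.0,<2.0"
# parsed.marker    == [(Variable("python_version"), Op(">"), Value("3.8"))]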
def _parse_requirement_details(
    tokenizer: Tokenizer,
) -> Tuple[str, str, Optional[MarkerList]]:
    """
    requirement_details = AT URL (WS requirement_marker?)?
                        | specifier WS? (requirement_marker)?
    """

    specifier = ""
    url = ""
    marker = None

    if tokenizer.check("AT"):
        tokenizer.read()
        tokenizer.consume("WS")

        url_start = tokenizer.position
        url = tokenizer.expect("URL", expected="URL after @").text
        if tokenizer.check("END", peek=True):
            return (url, specifier, marker)

        tokenizer.expect("WS", expected="whitespace after URL")

        # The input might end after whitespace.
        if tokenizer.check("END", peek=True):
            return (url, specifier, marker)

        marker = _parse_requirement_marker(
            tokenizer, span_start=url_start, after="URL and whitespace"
        )
    else:
        specifier_start = tokenizer.position
        specifier = _parse_specifier(tokenizer)
        tokenizer.consume("WS")

        if tokenizer.check("END", peek=True):
            return (url, specifier, marker)

        marker = _parse_requirement_marker(
            tokenizer,
            span_start=specifier_start,
            after=(
                "version specifier"
                if specifier
                else "name and no valid version specifier"
            ),
        )

    return (url, specifier, marker)


def _parse_requirement_marker(
    tokenizer: Tokenizer, *, span_start: int, after: str
) -> MarkerList:
    """
    requirement_marker = SEMICOLON marker WS?
    """

    if not tokenizer.check("SEMICOLON"):
        tokenizer.raise_syntax_error(
            f"Expected end or semicolon (after {after})",
            span_start=span_start,
        )
    tokenizer.read()

    marker = _parse_marker(tokenizer)
    tokenizer.consume("WS")

    return marker
def _parse_extras(tokenizer: Tokenizer) -> List[str]:
    """
    extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?
    """
    if not tokenizer.check("LEFT_BRACKET", peek=True):
        return []

    with tokenizer.enclosing_tokens(
        "LEFT_BRACKET",
        "RIGHT_BRACKET",
        around="extras",
    ):
        tokenizer.consume("WS")
        extras = _parse_extras_list(tokenizer)
        tokenizer.consume("WS")

    return extras


def _parse_extras_list(tokenizer: Tokenizer) -> List[str]:
    """
    extras_list = identifier (wsp* ',' wsp* identifier)*
    """
    extras: List[str] = []

    if not tokenizer.check("IDENTIFIER"):
        return extras

    extras.append(tokenizer.read().text)

    while True:
        tokenizer.consume("WS")
        if tokenizer.check("IDENTIFIER", peek=True):
            tokenizer.raise_syntax_error("Expected comma between extra names")
        elif not tokenizer.check("COMMA"):
            break

        tokenizer.read()
        tokenizer.consume("WS")

        extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma")
        extras.append(extra_token.text)

    return extras


def _parse_specifier(tokenizer: Tokenizer) -> str:
    """
    specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
              | WS? version_many WS?
    """
    with tokenizer.enclosing_tokens(
        "LEFT_PARENTHESIS",
        "RIGHT_PARENTHESIS",
        around="version specifier",
    ):
        tokenizer.consume("WS")
        parsed_specifiers = _parse_version_many(tokenizer)
        tokenizer.consume("WS")

    return parsed_specifiers


def _parse_version_many(tokenizer: Tokenizer) -> str:
    """
    version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?
    """
    parsed_specifiers = ""
    while tokenizer.check("SPECIFIER"):
        span_start = tokenizer.position
        parsed_specifiers += tokenizer.read().text
        if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):
            tokenizer.raise_syntax_error(
                ".* suffix can only be used with `==` or `!=` operators",
                span_start=span_start,
                span_end=tokenizer.position + 1,
            )
        if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):
            tokenizer.raise_syntax_error(
                "Local version label can only be used with `==` or `!=` operators",
                span_start=span_start,
                span_end=tokenizer.position,
            )
        tokenizer.consume("WS")
        if not tokenizer.check("COMMA"):
            break
        parsed_specifiers += tokenizer.read().text
        tokenizer.consume("WS")

    return parsed_specifiers
# --------------------------------------------------------------------------------------
# Recursive descent parser for marker expression
# --------------------------------------------------------------------------------------
def parse_marker(source: str) -> MarkerList:
    return _parse_marker(Tokenizer(source, rules=DEFAULT_RULES))


def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
    """
    marker = marker_atom (BOOLOP marker_atom)+
    """
    expression = [_parse_marker_atom(tokenizer)]
    while tokenizer.check("BOOLOP"):
        token = tokenizer.read()
        expr_right = _parse_marker_atom(tokenizer)
        expression.extend((token.text, expr_right))
    return expression
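As a sketch of the resulting structure (the marker string is illustrative), note that _parse_marker flattens "A and B" into [A, "and", B] rather than building a binary tree:

# Illustrative sketch (not part of the diff).
result = parse_marker("os_name == 'posix' and python_version > '3.8'")
# result == [
#     (Variable("os_name"), Op("=="), Value("posix")),
#     "and",
#     (Variable("python_version"), Op(">"), Value("3.8")),
# ]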
def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
    """
    marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS?
                | WS? marker_item WS?
    """

    tokenizer.consume("WS")
    if tokenizer.check("LEFT_PARENTHESIS", peek=True):
        with tokenizer.enclosing_tokens(
            "LEFT_PARENTHESIS",
            "RIGHT_PARENTHESIS",
            around="marker expression",
        ):
            tokenizer.consume("WS")
            marker: MarkerAtom = _parse_marker(tokenizer)
            tokenizer.consume("WS")
    else:
        marker = _parse_marker_item(tokenizer)
    tokenizer.consume("WS")
    return marker


def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:
    """
    marker_item = WS? marker_var WS? marker_op WS? marker_var WS?
    """
    tokenizer.consume("WS")
    marker_var_left = _parse_marker_var(tokenizer)
    tokenizer.consume("WS")
    marker_op = _parse_marker_op(tokenizer)
    tokenizer.consume("WS")
    marker_var_right = _parse_marker_var(tokenizer)
    tokenizer.consume("WS")
    return (marker_var_left, marker_op, marker_var_right)


def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar:
    """
    marker_var = VARIABLE | QUOTED_STRING
    """
    if tokenizer.check("VARIABLE"):
        return process_env_var(tokenizer.read().text.replace(".", "_"))
    elif tokenizer.check("QUOTED_STRING"):
        return process_python_str(tokenizer.read().text)
    else:
        tokenizer.raise_syntax_error(
            message="Expected a marker variable or quoted string"
        )


def process_env_var(env_var: str) -> Variable:
    if (
        env_var == "platform_python_implementation"
        or env_var == "python_implementation"
    ):
        return Variable("platform_python_implementation")
    else:
        return Variable(env_var)


def process_python_str(python_str: str) -> Value:
    value = ast.literal_eval(python_str)
    return Value(str(value))


def _parse_marker_op(tokenizer: Tokenizer) -> Op:
    """
    marker_op = IN | NOT IN | OP
    """
    if tokenizer.check("IN"):
        tokenizer.read()
        return Op("in")
    elif tokenizer.check("NOT"):
        tokenizer.read()
        tokenizer.expect("WS", expected="whitespace after 'not'")
        tokenizer.expect("IN", expected="'in' after 'not'")
        return Op("not in")
    elif tokenizer.check("OP"):
        return Op(tokenizer.read().text)
    else:
        return tokenizer.raise_syntax_error(
            "Expected marker operator, one of "
            "<=, <, !=, ==, >=, >, ~=, ===, in, not in"
        )
@@ -19,9 +19,6 @@ class InfinityType:
    def __eq__(self, other: object) -> bool:
        return isinstance(other, self.__class__)

    def __ne__(self, other: object) -> bool:
        return not isinstance(other, self.__class__)

    def __gt__(self, other: object) -> bool:
        return True

@@ -51,9 +48,6 @@ class NegativeInfinityType:
    def __eq__(self, other: object) -> bool:
        return isinstance(other, self.__class__)

    def __ne__(self, other: object) -> bool:
        return not isinstance(other, self.__class__)

    def __gt__(self, other: object) -> bool:
        return False
192 lib/pkg_resources/_vendor/packaging/_tokenizer.py Normal file
@@ -0,0 +1,192 @@
import contextlib
import re
from dataclasses import dataclass
from typing import Dict, Iterator, NoReturn, Optional, Tuple, Union

from .specifiers import Specifier


@dataclass
class Token:
    name: str
    text: str
    position: int


class ParserSyntaxError(Exception):
    """The provided source text could not be parsed correctly."""

    def __init__(
        self,
        message: str,
        *,
        source: str,
        span: Tuple[int, int],
    ) -> None:
        self.span = span
        self.message = message
        self.source = source

        super().__init__()

    def __str__(self) -> str:
        marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^"
        return "\n    ".join([self.message, self.source, marker])


DEFAULT_RULES: "Dict[str, Union[str, re.Pattern[str]]]" = {
    "LEFT_PARENTHESIS": r"\(",
    "RIGHT_PARENTHESIS": r"\)",
    "LEFT_BRACKET": r"\[",
    "RIGHT_BRACKET": r"\]",
    "SEMICOLON": r";",
    "COMMA": r",",
    "QUOTED_STRING": re.compile(
        r"""
            (
                ('[^']*')
                |
                ("[^"]*")
            )
        """,
        re.VERBOSE,
    ),
    "OP": r"(===|==|~=|!=|<=|>=|<|>)",
    "BOOLOP": r"\b(or|and)\b",
    "IN": r"\bin\b",
    "NOT": r"\bnot\b",
    "VARIABLE": re.compile(
        r"""
            \b(
                python_version
                |python_full_version
                |os[._]name
                |sys[._]platform
                |platform_(release|system)
                |platform[._](version|machine|python_implementation)
                |python_implementation
                |implementation_(name|version)
                |extra
            )\b
        """,
        re.VERBOSE,
    ),
    "SPECIFIER": re.compile(
        Specifier._operator_regex_str + Specifier._version_regex_str,
        re.VERBOSE | re.IGNORECASE,
    ),
    "AT": r"\@",
    "URL": r"[^ \t]+",
    "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
    "VERSION_PREFIX_TRAIL": r"\.\*",
    "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",
    "WS": r"[ \t]+",
    "END": r"$",
}
class Tokenizer:
    """Context-sensitive token parsing.

    Provides methods to examine the input stream to check whether the next token
    matches.
    """

    def __init__(
        self,
        source: str,
        *,
        rules: "Dict[str, Union[str, re.Pattern[str]]]",
    ) -> None:
        self.source = source
        self.rules: Dict[str, re.Pattern[str]] = {
            name: re.compile(pattern) for name, pattern in rules.items()
        }
        self.next_token: Optional[Token] = None
        self.position = 0

    def consume(self, name: str) -> None:
        """Move beyond provided token name, if at current position."""
        if self.check(name):
            self.read()

    def check(self, name: str, *, peek: bool = False) -> bool:
        """Check whether the next token has the provided name.

        By default, if the check succeeds, the token *must* be read before
        another check. If `peek` is set to `True`, the token is not loaded and
        would need to be checked again.
        """
        assert (
            self.next_token is None
        ), f"Cannot check for {name!r}, already have {self.next_token!r}"
        assert name in self.rules, f"Unknown token name: {name!r}"

        expression = self.rules[name]

        match = expression.match(self.source, self.position)
        if match is None:
            return False
        if not peek:
            self.next_token = Token(name, match[0], self.position)
        return True

    def expect(self, name: str, *, expected: str) -> Token:
        """Expect a certain token name next, failing with a syntax error otherwise.

        The token is *not* read.
        """
        if not self.check(name):
            raise self.raise_syntax_error(f"Expected {expected}")
        return self.read()

    def read(self) -> Token:
        """Consume the next token and return it."""
        token = self.next_token
        assert token is not None

        self.position += len(token.text)
        self.next_token = None

        return token

    def raise_syntax_error(
        self,
        message: str,
        *,
        span_start: Optional[int] = None,
        span_end: Optional[int] = None,
    ) -> NoReturn:
        """Raise ParserSyntaxError at the given position."""
        span = (
            self.position if span_start is None else span_start,
            self.position if span_end is None else span_end,
        )
        raise ParserSyntaxError(
            message,
            source=self.source,
            span=span,
        )

    @contextlib.contextmanager
    def enclosing_tokens(
        self, open_token: str, close_token: str, *, around: str
    ) -> Iterator[None]:
        if self.check(open_token):
            open_position = self.position
            self.read()
        else:
            open_position = None

        yield

        if open_position is None:
            return

        if not self.check(close_token):
            self.raise_syntax_error(
                f"Expected matching {close_token} for {open_token}, after {around}",
                span_start=open_position,
            )

        self.read()
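A minimal sketch of the check()/read() protocol the docstrings above describe (the source string is illustrative):

# Illustrative sketch (not part of the diff).
tok = Tokenizer("foo >= 1.0", rules=DEFAULT_RULES)
assert tok.check("IDENTIFIER")  # loads the token; it must now be read
name = tok.read()               # Token(name="IDENTIFIER", text="foo", position=0)
tok.consume("WS")               # read the whitespace, if present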
@@ -8,19 +8,17 @@ import platform
import sys
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

from pkg_resources.extern.pyparsing import (  # noqa: N817
    Forward,
    Group,
    Literal as L,
    ParseException,
    ParseResults,
    QuotedString,
    ZeroOrMore,
    stringEnd,
    stringStart,
from ._parser import (
    MarkerAtom,
    MarkerList,
    Op,
    Value,
    Variable,
    parse_marker as _parse_marker,
)

from ._tokenizer import ParserSyntaxError
from .specifiers import InvalidSpecifier, Specifier
from .utils import canonicalize_name

__all__ = [
    "InvalidMarker",
@@ -52,101 +50,24 @@ class UndefinedEnvironmentName(ValueError):
    """


class Node:
    def __init__(self, value: Any) -> None:
        self.value = value

    def __str__(self) -> str:
        return str(self.value)

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}('{self}')>"

    def serialize(self) -> str:
        raise NotImplementedError


class Variable(Node):
    def serialize(self) -> str:
        return str(self)


class Value(Node):
    def serialize(self) -> str:
        return f'"{self}"'


class Op(Node):
    def serialize(self) -> str:
        return str(self)


VARIABLE = (
    L("implementation_version")
    | L("platform_python_implementation")
    | L("implementation_name")
    | L("python_full_version")
    | L("platform_release")
    | L("platform_version")
    | L("platform_machine")
    | L("platform_system")
    | L("python_version")
    | L("sys_platform")
    | L("os_name")
    | L("os.name")  # PEP-345
    | L("sys.platform")  # PEP-345
    | L("platform.version")  # PEP-345
    | L("platform.machine")  # PEP-345
    | L("platform.python_implementation")  # PEP-345
    | L("python_implementation")  # undocumented setuptools legacy
    | L("extra")  # PEP-508
)
ALIASES = {
    "os.name": "os_name",
    "sys.platform": "sys_platform",
    "platform.version": "platform_version",
    "platform.machine": "platform_machine",
    "platform.python_implementation": "platform_python_implementation",
    "python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))

VERSION_CMP = (
    L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)

MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))

MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))

BOOLOP = L("and") | L("or")

MARKER_VAR = VARIABLE | MARKER_VALUE

MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))

LPAREN = L("(").suppress()
RPAREN = L(")").suppress()

MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)

MARKER = stringStart + MARKER_EXPR + stringEnd


def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
    if isinstance(results, ParseResults):
        return [_coerce_parse_result(i) for i in results]
    else:
        return results
def _normalize_extra_values(results: Any) -> Any:
    """
    Normalize extra values.
    """
    if isinstance(results[0], tuple):
        lhs, op, rhs = results[0]
        if isinstance(lhs, Variable) and lhs.value == "extra":
            normalized_extra = canonicalize_name(rhs.value)
            rhs = Value(normalized_extra)
        elif isinstance(rhs, Variable) and rhs.value == "extra":
            normalized_extra = canonicalize_name(lhs.value)
            lhs = Value(normalized_extra)
        results[0] = lhs, op, rhs
    return results


def _format_marker(
    marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
    marker: Union[List[str], MarkerAtom, str], first: Optional[bool] = True
) -> str:

    assert isinstance(marker, (list, tuple, str))
@@ -192,7 +113,7 @@ def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs)
        return spec.contains(lhs, prereleases=True)

    oper: Optional[Operator] = _operators.get(op.serialize())
    if oper is None:
@@ -201,25 +122,19 @@ def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
    return oper(lhs, rhs)


class Undefined:
    pass
def _normalize(*values: str, key: str) -> Tuple[str, ...]:
    # PEP 685 – Comparison of extra names for optional distribution dependencies
    # https://peps.python.org/pep-0685/
    # > When comparing extra names, tools MUST normalize the names being
    # > compared using the semantics outlined in PEP 503 for names
    if key == "extra":
        return tuple(canonicalize_name(v) for v in values)

    # other environment markers don't have such standards
    return values


_undefined = Undefined()


def _get_env(environment: Dict[str, str], name: str) -> str:
    value: Union[str, Undefined] = environment.get(name, _undefined)

    if isinstance(value, Undefined):
        raise UndefinedEnvironmentName(
            f"{name!r} does not exist in evaluation environment."
        )

    return value


def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
def _evaluate_markers(markers: MarkerList, environment: Dict[str, str]) -> bool:
    groups: List[List[bool]] = [[]]

    for marker in markers:
@@ -231,12 +146,15 @@ def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
            lhs, op, rhs = marker

            if isinstance(lhs, Variable):
                lhs_value = _get_env(environment, lhs.value)
                environment_key = lhs.value
                lhs_value = environment[environment_key]
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                rhs_value = _get_env(environment, rhs.value)
                environment_key = rhs.value
                rhs_value = environment[environment_key]

            lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key)
            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert marker in ["and", "or"]
@@ -274,13 +192,29 @@ def default_environment() -> Dict[str, str]:

class Marker:
    def __init__(self, marker: str) -> None:
        # Note: We create a Marker object without calling this constructor in
        #       packaging.requirements.Requirement. If any additional logic is
        #       added here, make sure to mirror/adapt Requirement.
        try:
            self._markers = _coerce_parse_result(MARKER.parseString(marker))
        except ParseException as e:
            raise InvalidMarker(
                f"Invalid marker: {marker!r}, parse error at "
                f"{marker[e.loc : e.loc + 8]!r}"
            )
            self._markers = _normalize_extra_values(_parse_marker(marker))
            # The attribute `_markers` can be described in terms of a recursive type:
            # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
            #
            # For example, the following expression:
            # python_version > "3.6" or (python_version == "3.6" and os_name == "unix")
            #
            # is parsed into:
            # [
            #     (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),
            #     'or',
            #     [
            #         (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),
            #         'and',
            #         (<Variable('os_name')>, <Op('==')>, <Value('unix')>)
            #     ]
            # ]
        except ParserSyntaxError as e:
            raise InvalidMarker(str(e)) from e

    def __str__(self) -> str:
        return _format_marker(self._markers)
@@ -288,6 +222,15 @@ class Marker:
    def __repr__(self) -> str:
        return f"<Marker('{self}')>"

    def __hash__(self) -> int:
        return hash((self.__class__.__name__, str(self)))

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Marker):
            return NotImplemented

        return str(self) == str(other)

    def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
        """Evaluate a marker.

@@ -298,7 +241,12 @@ class Marker:
        The environment is determined from the current Python process.
        """
        current_environment = default_environment()
        current_environment["extra"] = ""
        if environment is not None:
            current_environment.update(environment)
            # The API used to allow setting extra to None. We need to handle this
            # case for backwards compatibility.
            if current_environment["extra"] is None:
                current_environment["extra"] = ""

        return _evaluate_markers(self._markers, current_environment)
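To make the new default concrete, a small sketch (the marker string is illustrative): with "extra" pre-set to "", markers that mention it no longer raise UndefinedEnvironmentName when no extra is active.

# Illustrative sketch (not part of the diff).
m = Marker('extra == "socks"')
print(m.evaluate({"extra": "socks"}))  # True
print(m.evaluate())                    # False, because extra now defaults to ""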
408 lib/pkg_resources/_vendor/packaging/metadata.py Normal file
@@ -0,0 +1,408 @@
import email.feedparser
import email.header
import email.message
import email.parser
import email.policy
import sys
import typing
from typing import Dict, List, Optional, Tuple, Union, cast

if sys.version_info >= (3, 8):  # pragma: no cover
    from typing import TypedDict
else:  # pragma: no cover
    if typing.TYPE_CHECKING:
        from typing_extensions import TypedDict
    else:
        try:
            from typing_extensions import TypedDict
        except ImportError:

            class TypedDict:
                def __init_subclass__(*_args, **_kwargs):
                    pass


# The RawMetadata class attempts to make as few assumptions about the underlying
# serialization formats as possible. The idea is that as long as a serialization
# format offers some very basic primitives in *some* way then we can support
# serializing to and from that format.
class RawMetadata(TypedDict, total=False):
    """A dictionary of raw core metadata.

    Each field in core metadata maps to a key of this dictionary (when data is
    provided). The key is lower-case and underscores are used instead of dashes
    compared to the equivalent core metadata field. Any core metadata field that
    can be specified multiple times or can hold multiple values in a single
    field has a key with a plural name.

    Core metadata fields that can be specified multiple times are stored as a
    list or dict depending on which is appropriate for the field. Any fields
    which hold multiple values in a single field are stored as a list.

    """

    # Metadata 1.0 - PEP 241
    metadata_version: str
    name: str
    version: str
    platforms: List[str]
    summary: str
    description: str
    keywords: List[str]
    home_page: str
    author: str
    author_email: str
    license: str

    # Metadata 1.1 - PEP 314
    supported_platforms: List[str]
    download_url: str
    classifiers: List[str]
    requires: List[str]
    provides: List[str]
    obsoletes: List[str]

    # Metadata 1.2 - PEP 345
    maintainer: str
    maintainer_email: str
    requires_dist: List[str]
    provides_dist: List[str]
    obsoletes_dist: List[str]
    requires_python: str
    requires_external: List[str]
    project_urls: Dict[str, str]

    # Metadata 2.0
    # PEP 426 attempted to completely revamp the metadata format
    # but got stuck without ever being able to build consensus on
    # it and ultimately ended up withdrawn.
    #
    # However, a number of tools had started emitting METADATA with
    # `2.0` Metadata-Version, so for historical reasons, this version
    # was skipped.

    # Metadata 2.1 - PEP 566
    description_content_type: str
    provides_extra: List[str]

    # Metadata 2.2 - PEP 643
    dynamic: List[str]

    # Metadata 2.3 - PEP 685
    # No new fields were added in PEP 685, just some edge cases were
    # tightened up to provide better interoperability.


_STRING_FIELDS = {
    "author",
    "author_email",
    "description",
    "description_content_type",
    "download_url",
    "home_page",
    "license",
    "maintainer",
    "maintainer_email",
    "metadata_version",
    "name",
    "requires_python",
    "summary",
    "version",
}

_LIST_STRING_FIELDS = {
    "classifiers",
    "dynamic",
    "obsoletes",
    "obsoletes_dist",
    "platforms",
    "provides",
    "provides_dist",
    "provides_extra",
    "requires",
    "requires_dist",
    "requires_external",
    "supported_platforms",
}


def _parse_keywords(data: str) -> List[str]:
    """Split a string of comma-separated keywords into a list of keywords."""
    return [k.strip() for k in data.split(",")]


def _parse_project_urls(data: List[str]) -> Dict[str, str]:
    """Parse a list of label/URL string pairings separated by a comma."""
    urls = {}
    for pair in data:
        # Our logic is slightly tricky here as we want to try and do
        # *something* reasonable with malformed data.
        #
        # The main thing that we have to worry about, is data that does
        # not have a ',' at all to split the label from the Value. There
        # isn't a singular right answer here, and we will fail validation
        # later on (if the caller is validating) so it doesn't *really*
        # matter, but since the missing value has to be an empty str
        # and our return value is dict[str, str], if we let the key
        # be the missing value, then they'd have multiple '' values that
        # overwrite each other in an accumulating dict.
        #
        # The other potential issue is that it's possible to have the
        # same label multiple times in the metadata, with no solid "right"
        # answer with what to do in that case. As such, we'll do the only
        # thing we can, which is treat the field as unparseable and add it
        # to our list of unparsed fields.
        parts = [p.strip() for p in pair.split(",", 1)]
        parts.extend([""] * (max(0, 2 - len(parts))))  # Ensure 2 items

        # TODO: The spec doesn't say anything about if the keys should be
        #       considered case sensitive or not... logically they should
        #       be case-preserving and case-insensitive, but doing that
        #       would open up more cases where we might have duplicate
        #       entries.
        label, url = parts
        if label in urls:
            # The label already exists in our set of urls, so this field
            # is unparseable, and we can just add the whole thing to our
            # unparseable data and stop processing it.
            raise KeyError("duplicate labels in project urls")
        urls[label] = url

    return urls
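A short sketch of the malformed-data handling described in the comments above (all inputs are made up for illustration):

# Illustrative sketch (not part of the diff).
print(_parse_project_urls(["Homepage, https://example.com"]))
# {'Homepage': 'https://example.com'}
print(_parse_project_urls(["https://example.com"]))
# {'https://example.com': ''} — no comma, so the whole string becomes the label
try:
    _parse_project_urls(["Docs, https://a.example", "Docs, https://b.example"])
except KeyError:
    print("duplicate label, so the field is treated as unparsed")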
def _get_payload(msg: email.message.Message, source: Union[bytes, str]) -> str:
    """Get the body of the message."""
    # If our source is a str, then our caller has managed encodings for us,
    # and we don't need to deal with it.
    if isinstance(source, str):
        payload: str = msg.get_payload()
        return payload
    # If our source is a bytes, then we're managing the encoding and we need
    # to deal with it.
    else:
        bpayload: bytes = msg.get_payload(decode=True)
        try:
            return bpayload.decode("utf8", "strict")
        except UnicodeDecodeError:
            raise ValueError("payload in an invalid encoding")


# The various parse_FORMAT functions here are intended to be as lenient as
# possible in their parsing, while still returning a correctly typed
# RawMetadata.
#
# To aid in this, we also generally want to do as little touching of the
# data as possible, except where there are possibly some historic holdovers
# that make valid data awkward to work with.
#
# While this is a lower level, intermediate format than our ``Metadata``
# class, some light touch ups can make a massive difference in usability.

# Map METADATA fields to RawMetadata.
_EMAIL_TO_RAW_MAPPING = {
    "author": "author",
    "author-email": "author_email",
    "classifier": "classifiers",
    "description": "description",
    "description-content-type": "description_content_type",
    "download-url": "download_url",
    "dynamic": "dynamic",
    "home-page": "home_page",
    "keywords": "keywords",
    "license": "license",
    "maintainer": "maintainer",
    "maintainer-email": "maintainer_email",
    "metadata-version": "metadata_version",
    "name": "name",
    "obsoletes": "obsoletes",
    "obsoletes-dist": "obsoletes_dist",
    "platform": "platforms",
    "project-url": "project_urls",
    "provides": "provides",
    "provides-dist": "provides_dist",
    "provides-extra": "provides_extra",
    "requires": "requires",
    "requires-dist": "requires_dist",
    "requires-external": "requires_external",
    "requires-python": "requires_python",
    "summary": "summary",
    "supported-platform": "supported_platforms",
    "version": "version",
}


def parse_email(data: Union[bytes, str]) -> Tuple[RawMetadata, Dict[str, List[str]]]:
    """Parse a distribution's metadata.

    This function returns a two-item tuple of dicts. The first dict is of
    recognized fields from the core metadata specification. Fields that can be
    parsed and translated into Python's built-in types are converted
    appropriately. All other fields are left as-is. Fields that are allowed to
    appear multiple times are stored as lists.

    The second dict contains all other fields from the metadata. This includes
    any unrecognized fields. It also includes any fields which are expected to
    be parsed into a built-in type but were not formatted appropriately. Finally,
    any fields that are expected to appear only once but are repeated are
    included in this dict.

    """
    raw: Dict[str, Union[str, List[str], Dict[str, str]]] = {}
    unparsed: Dict[str, List[str]] = {}

    if isinstance(data, str):
        parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data)
    else:
        parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data)

    # We have to wrap parsed.keys() in a set, because in the case of multiple
    # values for a key (a list), the key will appear multiple times in the
    # list of keys, but we're avoiding that by using get_all().
    for name in frozenset(parsed.keys()):
        # Header names in RFC are case insensitive, so we'll normalize to all
        # lower case to make comparisons easier.
        name = name.lower()

        # We use get_all() here, even for fields that aren't multiple use,
        # because otherwise someone could have e.g. two Name fields, and we
        # would just silently ignore it rather than doing something about it.
        headers = parsed.get_all(name)

        # The way the email module works when parsing bytes is that it
        # unconditionally decodes the bytes as ascii using the surrogateescape
        # handler. When you pull that data back out (such as with get_all() ),
        # it looks to see if the str has any surrogate escapes, and if it does
        # it wraps it in a Header object instead of returning the string.
        #
        # As such, we'll look for those Header objects, and fix up the encoding.
        value = []
        # Flag if we have run into any issues processing the headers, thus
        # signalling that the data belongs in 'unparsed'.
        valid_encoding = True
        for h in headers:
            # It's unclear if this can return more types than just a Header or
            # a str, so we'll just assert here to make sure.
            assert isinstance(h, (email.header.Header, str))

            # If it's a header object, we need to do our little dance to get
            # the real data out of it. In cases where there is invalid data
            # we're going to end up with mojibake, but there's no obvious, good
            # way around that without reimplementing parts of the Header object
            # ourselves.
            #
            # That should be fine since, if mojibake happens, this key is
            # going into the unparsed dict anyways.
            if isinstance(h, email.header.Header):
                # The Header object stores its data as chunks, and each chunk
                # can be independently encoded, so we'll need to check each
                # of them.
                chunks: List[Tuple[bytes, Optional[str]]] = []
                for bin, encoding in email.header.decode_header(h):
                    try:
                        bin.decode("utf8", "strict")
                    except UnicodeDecodeError:
                        # Enable mojibake.
                        encoding = "latin1"
                        valid_encoding = False
                    else:
                        encoding = "utf8"
                    chunks.append((bin, encoding))

                # Turn our chunks back into a Header object, then let that
                # Header object do the right thing to turn them into a
                # string for us.
                value.append(str(email.header.make_header(chunks)))
            # This is already a string, so just add it.
            else:
                value.append(h)

        # We've processed all of our values to get them into a list of str,
        # but we may have mojibake data, in which case this is an unparsed
        # field.
        if not valid_encoding:
            unparsed[name] = value
            continue

        raw_name = _EMAIL_TO_RAW_MAPPING.get(name)
        if raw_name is None:
            # This is a bit of a weird situation, we've encountered a key that
            # we don't know what it means, so we don't know whether it's meant
            # to be a list or not.
            #
            # Since we can't really tell one way or another, we'll just leave it
            # as a list, even though it may be a single item list, because that's
            # what makes the most sense for email headers.
            unparsed[name] = value
            continue

        # If this is one of our string fields, then we'll check to see if our
        # value is a list of a single item. If it is then we'll assume that
        # it was emitted as a single string, and unwrap the str from inside
        # the list.
        #
        # If it's any other kind of data, then we haven't the faintest clue
        # what we should parse it as, and we have to just add it to our list
        # of unparsed stuff.
        if raw_name in _STRING_FIELDS and len(value) == 1:
            raw[raw_name] = value[0]
        # If this is one of our list of string fields, then we can just assign
        # the value, since email *only* has strings, and our get_all() call
        # above ensures that this is a list.
        elif raw_name in _LIST_STRING_FIELDS:
            raw[raw_name] = value
        # Special Case: Keywords
        # The keywords field is implemented in the metadata spec as a str,
        # but it conceptually is a list of strings, and is serialized using
        # ", ".join(keywords), so we'll do some light data massaging to turn
        # this into what it logically is.
        elif raw_name == "keywords" and len(value) == 1:
            raw[raw_name] = _parse_keywords(value[0])
        # Special Case: Project-URL
        # The project urls is implemented in the metadata spec as a list of
        # specially-formatted strings that represent a key and a value, which
        # is fundamentally a mapping, however the email format doesn't support
        # mappings in a sane way, so it was crammed into a list of strings
        # instead.
        #
        # We will do a little light data massaging to turn this into a map as
        # it logically should be.
        elif raw_name == "project_urls":
            try:
                raw[raw_name] = _parse_project_urls(value)
            except KeyError:
                unparsed[name] = value
        # Nothing that we've done has managed to parse this, so it'll just
        # throw it in our unparseable data and move on.
        else:
            unparsed[name] = value

    # We need to support getting the Description from the message payload in
    # addition to getting it from the headers. This does mean, though, there
    # is the possibility of it being set both ways, in which case we put both
    # in 'unparsed' since we don't know which is right.
    try:
        payload = _get_payload(parsed, data)
    except ValueError:
        unparsed.setdefault("description", []).append(
            parsed.get_payload(decode=isinstance(data, bytes))
        )
    else:
        if payload:
            # Check to see if we've already got a description, if so then both
            # it, and this body move to unparseable.
            if "description" in raw:
                description_header = cast(str, raw.pop("description"))
                unparsed.setdefault("description", []).extend(
                    [description_header, payload]
                )
            elif "description" in unparsed:
                unparsed["description"].append(payload)
            else:
                raw["description"] = payload

    # We need to cast our `raw` to a RawMetadata, because a TypedDict only
    # supports literal key names while we're computing our key names on
    # purpose; the way this function is implemented, though, `raw` can only
    # end up with valid key names.
    return cast(RawMetadata, raw), unparsed
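For orientation, a sketch of what this function returns; the METADATA text below is made up for the example:

# Illustrative sketch (not part of the diff).
raw, unparsed = parse_email(
    "Metadata-Version: 2.1\n"
    "Name: example\n"
    "Version: 1.0\n"
    "Keywords: music, metadata\n"
    "\n"
    "A longer description lives in the body.\n"
)
# raw["name"] == "example", raw["version"] == "1.0"
# raw["keywords"] == ["music", "metadata"]
# raw["description"] == "A longer description lives in the body.\n"
# unparsed == {}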
@@ -2,26 +2,13 @@
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

import re
import string
import urllib.parse
from typing import List, Optional as TOptional, Set
from typing import Any, List, Optional, Set

from pkg_resources.extern.pyparsing import (  # noqa
    Combine,
    Literal as L,
    Optional,
    ParseException,
    Regex,
    Word,
    ZeroOrMore,
    originalTextFor,
    stringEnd,
    stringStart,
)

from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet
from ._parser import parse_requirement as _parse_requirement
from ._tokenizer import ParserSyntaxError
from .markers import Marker, _normalize_extra_values
from .specifiers import SpecifierSet


class InvalidRequirement(ValueError):
@@ -30,60 +17,6 @@ class InvalidRequirement(ValueError):
    """


ALPHANUM = Word(string.ascii_letters + string.digits)

LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()

PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))

NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER

URI = Regex(r"[^ ]+")("url")
URL = AT + URI

EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")

VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)

VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(
    VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
)("_raw_spec")
_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")

VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])

MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
    lambda s, l, t: Marker(s[t._original_start : t._original_end])
)
MARKER_SEPARATOR = SEMICOLON
MARKER = MARKER_SEPARATOR + MARKER_EXPR

VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)

NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)

REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
# pkg_resources.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see
# issue #104
REQUIREMENT.parseString("x[]")


class Requirement:
    """Parse a requirement.

@@ -99,28 +32,29 @@ class Requirement:

    def __init__(self, requirement_string: str) -> None:
        try:
            req = REQUIREMENT.parseString(requirement_string)
        except ParseException as e:
            raise InvalidRequirement(
                f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
            )
            parsed = _parse_requirement(requirement_string)
        except ParserSyntaxError as e:
            raise InvalidRequirement(str(e)) from e

        self.name: str = req.name
        if req.url:
            parsed_url = urllib.parse.urlparse(req.url)
        self.name: str = parsed.name
        if parsed.url:
            parsed_url = urllib.parse.urlparse(parsed.url)
            if parsed_url.scheme == "file":
                if urllib.parse.urlunparse(parsed_url) != req.url:
                if urllib.parse.urlunparse(parsed_url) != parsed.url:
                    raise InvalidRequirement("Invalid URL given")
            elif not (parsed_url.scheme and parsed_url.netloc) or (
                not parsed_url.scheme and not parsed_url.netloc
            ):
                raise InvalidRequirement(f"Invalid URL: {req.url}")
        self.url: TOptional[str] = req.url
                raise InvalidRequirement(f"Invalid URL: {parsed.url}")
            self.url: Optional[str] = parsed.url
        else:
            self.url = None
        self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
        self.specifier: SpecifierSet = SpecifierSet(req.specifier)
        self.marker: TOptional[Marker] = req.marker if req.marker else None
        self.extras: Set[str] = set(parsed.extras if parsed.extras else [])
        self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
        self.marker: Optional[Marker] = None
        if parsed.marker is not None:
            self.marker = Marker.__new__(Marker)
            self.marker._markers = _normalize_extra_values(parsed.marker)

    def __str__(self) -> str:
        parts: List[str] = [self.name]
@@ -144,3 +78,18 @@ class Requirement:

    def __repr__(self) -> str:
        return f"<Requirement('{self}')>"

    def __hash__(self) -> int:
        return hash((self.__class__.__name__, str(self)))

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Requirement):
            return NotImplemented

        return (
            self.name == other.name
            and self.extras == other.extras
            and self.specifier == other.specifier
            and self.url == other.url
            and self.marker == other.marker
        )
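A quick sketch of the rewritten constructor in use (the requirement string is made up for illustration):

# Illustrative sketch (not part of the diff).
req = Requirement("example[socks]>=1.0,<2.0; python_version > '3.8'")
print(req.name)                        # example
print(sorted(req.extras))              # ['socks']
print(req.specifier.contains("1.5"))   # True
print(req.marker.evaluate({"python_version": "3.11"}))  # True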
File diff suppressed because it is too large
@@ -4,6 +4,7 @@

import logging
import platform
import subprocess
import sys
import sysconfig
from importlib.machinery import EXTENSION_SUFFIXES
@@ -36,7 +37,7 @@ INTERPRETER_SHORT_NAMES: Dict[str, str] = {
}


_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
_32_BIT_INTERPRETER = sys.maxsize <= 2**32


class Tag:
@@ -90,7 +91,7 @@ class Tag:
        return f"{self._interpreter}-{self._abi}-{self._platform}"

    def __repr__(self) -> str:
        return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
        return f"<{self} @ {id(self)}>"


def parse_tag(tag: str) -> FrozenSet[Tag]:
@@ -110,7 +111,7 @@ def parse_tag(tag: str) -> FrozenSet[Tag]:


def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
    value = sysconfig.get_config_var(name)
    value: Union[int, str, None] = sysconfig.get_config_var(name)
    if value is None and warn:
        logger.debug(
            "Config variable '%s' is unset, Python ABI tag may be incorrect", name
@@ -119,7 +120,7 @@ def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:


def _normalize_string(string: str) -> str:
    return string.replace(".", "_").replace("-", "_")
    return string.replace(".", "_").replace("-", "_").replace(" ", "_")


def _abi3_applies(python_version: PythonVersion) -> bool:
@@ -192,7 +193,7 @@ def cpython_tags(
    if not python_version:
        python_version = sys.version_info[:2]

    interpreter = "cp{}".format(_version_nodot(python_version[:2]))
    interpreter = f"cp{_version_nodot(python_version[:2])}"

    if abis is None:
        if len(python_version) > 1:
@@ -224,10 +225,45 @@ def cpython_tags(
            yield Tag(interpreter, "abi3", platform_)


def _generic_abi() -> Iterator[str]:
    abi = sysconfig.get_config_var("SOABI")
    if abi:
        yield _normalize_string(abi)
def _generic_abi() -> List[str]:
    """
    Return the ABI tag based on EXT_SUFFIX.
    """
    # The following are examples of `EXT_SUFFIX`.
    # We want to keep the parts which are related to the ABI and remove the
    # parts which are related to the platform:
    # - linux:   '.cpython-310-x86_64-linux-gnu.so' => cp310
    # - mac:     '.cpython-310-darwin.so'           => cp310
    # - win:     '.cp310-win_amd64.pyd'             => cp310
    # - win:     '.pyd'                             => cp37 (uses _cpython_abis())
    # - pypy:    '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73
    # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib'
    #            => graalpy_38_native

    ext_suffix = _get_config_var("EXT_SUFFIX", warn=True)
    if not isinstance(ext_suffix, str) or ext_suffix[0] != ".":
        raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')")
    parts = ext_suffix.split(".")
    if len(parts) < 3:
        # CPython3.7 and earlier uses ".pyd" on Windows.
        return _cpython_abis(sys.version_info[:2])
    soabi = parts[1]
    if soabi.startswith("cpython"):
        # non-windows
        abi = "cp" + soabi.split("-")[1]
    elif soabi.startswith("cp"):
        # windows
        abi = soabi.split("-")[0]
    elif soabi.startswith("pypy"):
        abi = "-".join(soabi.split("-")[:2])
    elif soabi.startswith("graalpy"):
        abi = "-".join(soabi.split("-")[:3])
    elif soabi:
        # pyston, ironpython, others?
        abi = soabi
    else:
        return []
    return [_normalize_string(abi)]
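To make the EXT_SUFFIX derivation concrete, a sketch with an assumed CPython 3.10 Linux suffix (the value is illustrative, not taken from the diff):

# Illustrative sketch (not part of the diff).
ext_suffix = ".cpython-310-x86_64-linux-gnu.so"  # example EXT_SUFFIX value
soabi = ext_suffix.split(".")[1]                 # "cpython-310-x86_64-linux-gnu"
abi = "cp" + soabi.split("-")[1]                 # "cp310"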
def generic_tags(
@@ -251,8 +287,9 @@ def generic_tags(
    interpreter = "".join([interp_name, interp_version])
    if abis is None:
        abis = _generic_abi()
    else:
        abis = list(abis)
    platforms = list(platforms or platform_tags())
    abis = list(abis)
    if "none" not in abis:
        abis.append("none")
    for abi in abis:
@@ -268,11 +305,11 @@ def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
    all previous versions of that major version.
    """
    if len(py_version) > 1:
        yield "py{version}".format(version=_version_nodot(py_version[:2]))
    yield "py{major}".format(major=py_version[0])
        yield f"py{_version_nodot(py_version[:2])}"
    yield f"py{py_version[0]}"
    if len(py_version) > 1:
        for minor in range(py_version[1] - 1, -1, -1):
            yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
            yield f"py{_version_nodot((py_version[0], minor))}"


def compatible_tags(
@@ -356,6 +393,22 @@ def mac_platforms(
    version_str, _, cpu_arch = platform.mac_ver()
    if version is None:
        version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
        if version == (10, 16):
            # When built against an older macOS SDK, Python will report macOS 10.16
            # instead of the real version.
            version_str = subprocess.run(
                [
                    sys.executable,
                    "-sS",
                    "-c",
                    "import platform; print(platform.mac_ver()[0])",
                ],
                check=True,
                env={"SYSTEM_VERSION_COMPAT": "0"},
                stdout=subprocess.PIPE,
                universal_newlines=True,
            ).stdout
            version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
    else:
        version = version
    if arch is None:
@@ -446,6 +499,9 @@ def platform_tags() -> Iterator[str]:
def interpreter_name() -> str:
    """
    Returns the name of the running interpreter.

    Some implementations have a reserved, two-letter abbreviation which will
    be returned when appropriate.
    """
    name = sys.implementation.name
    return INTERPRETER_SHORT_NAMES.get(name) or name
@@ -481,4 +537,10 @@ def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
    else:
        yield from generic_tags()

    yield from compatible_tags()
    if interp_name == "pp":
        interp = "pp3"
    elif interp_name == "cp":
        interp = "cp" + interpreter_version(warn=warn)
    else:
        interp = None
    yield from compatible_tags(interpreter=interp)
@@ -35,7 +35,9 @@ def canonicalize_name(name: str) -> NormalizedName:
    return cast(NormalizedName, value)


def canonicalize_version(version: Union[Version, str]) -> str:
def canonicalize_version(
    version: Union[Version, str], *, strip_trailing_zero: bool = True
) -> str:
    """
    This is very similar to Version.__str__, but has one subtle difference
    with the way it handles the release segment.
@@ -56,8 +58,11 @@ def canonicalize_version(version: Union[Version, str]) -> str:
        parts.append(f"{parsed.epoch}!")

    # Release segment
    # NB: This strips trailing '.0's to normalize
    parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release)))
    release_segment = ".".join(str(x) for x in parsed.release)
    if strip_trailing_zero:
        # NB: This strips trailing '.0's to normalize
        release_segment = re.sub(r"(\.0)+$", "", release_segment)
    parts.append(release_segment)

    # Pre-release
    if parsed.pre is not None:
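The effect of the new keyword, as a two-line sketch:

# Illustrative sketch (not part of the diff).
print(canonicalize_version("1.2.0"))                             # 1.2
print(canonicalize_version("1.2.0", strip_trailing_zero=False))  # 1.2.0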
@@ -1,16 +1,20 @@
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
"""
.. testsetup::

    from packaging.version import parse, Version
"""

import collections
import itertools
import re
import warnings
from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
from typing import Any, Callable, Optional, SupportsInt, Tuple, Union

from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType

__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"]

InfiniteTypes = Union[InfinityType, NegativeInfinityType]
PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
@@ -29,36 +33,37 @@ LocalType = Union[
CmpKey = Tuple[
    int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
]
LegacyCmpKey = Tuple[int, Tuple[str, ...]]
VersionComparisonMethod = Callable[
    [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
]
VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]

_Version = collections.namedtuple(
    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
)


def parse(version: str) -> Union["LegacyVersion", "Version"]:
def parse(version: str) -> "Version":
    """Parse the given version string.

    >>> parse('1.0.dev1')
    <Version('1.0.dev1')>

    :param version: The version string to parse.
    :raises InvalidVersion: When the version string is not a valid version.
    """
    Parse the given version string and return either a :class:`Version` object
    or a :class:`LegacyVersion` object depending on if the given version is
    a valid PEP 440 version or a legacy version.
    """
    try:
        return Version(version)
    except InvalidVersion:
        return LegacyVersion(version)
    return Version(version)


class InvalidVersion(ValueError):
    """
    An invalid version was found, users should refer to PEP 440.
    """Raised when a version string is not a valid version.

    >>> Version("invalid")
    Traceback (most recent call last):
        ...
    packaging.version.InvalidVersion: Invalid version: 'invalid'
    """


class _BaseVersion:
    _key: Union[CmpKey, LegacyCmpKey]
    _key: Tuple[Any, ...]

    def __hash__(self) -> int:
        return hash(self._key)
@@ -103,126 +108,9 @@ class _BaseVersion:
        return self._key != other._key


class LegacyVersion(_BaseVersion):
    def __init__(self, version: str) -> None:
        self._version = str(version)
        self._key = _legacy_cmpkey(self._version)

        warnings.warn(
            "Creating a LegacyVersion has been deprecated and will be "
            "removed in the next major release",
            DeprecationWarning,
        )

    def __str__(self) -> str:
        return self._version

    def __repr__(self) -> str:
        return f"<LegacyVersion('{self}')>"

    @property
    def public(self) -> str:
        return self._version

    @property
    def base_version(self) -> str:
        return self._version

    @property
    def epoch(self) -> int:
        return -1

    @property
    def release(self) -> None:
        return None

    @property
    def pre(self) -> None:
        return None

    @property
    def post(self) -> None:
        return None

    @property
    def dev(self) -> None:
        return None

    @property
    def local(self) -> None:
        return None

    @property
    def is_prerelease(self) -> bool:
        return False

    @property
    def is_postrelease(self) -> bool:
        return False

    @property
    def is_devrelease(self) -> bool:
        return False


_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)

_legacy_version_replacement_map = {
    "pre": "c",
    "preview": "c",
    "-": "final-",
    "rc": "c",
    "dev": "@",
}


def _parse_version_parts(s: str) -> Iterator[str]:
    for part in _legacy_version_component_re.split(s):
        part = _legacy_version_replacement_map.get(part, part)

        if not part or part == ".":
            continue

        if part[:1] in "0123456789":
            # pad for numeric comparison
            yield part.zfill(8)
        else:
            yield "*" + part

    # ensure that alpha/beta/candidate are before final
    yield "*final"


def _legacy_cmpkey(version: str) -> LegacyCmpKey:

    # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch
    # greater than or equal to 0. This will effectively put the LegacyVersion,
    # which uses the defacto standard originally implemented by setuptools,
    # as before all PEP 440 versions.
    epoch = -1

    # This scheme is taken from pkg_resources.parse_version setuptools prior to
    # it's adoption of the packaging library.
    parts: List[str] = []
    for part in _parse_version_parts(version.lower()):
        if part.startswith("*"):
            # remove "-" before a prerelease tag
            if part < "*final":
                while parts and parts[-1] == "*final-":
                    parts.pop()

            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1] == "00000000":
                parts.pop()

        parts.append(part)

    return epoch, tuple(parts)


# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
_VERSION_PATTERN = r"""
    v?
    (?:
        (?:(?P<epoch>[0-9]+)!)?                           # epoch
@@ -253,12 +141,56 @@ VERSION_PATTERN = r"""
    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
"""

VERSION_PATTERN = _VERSION_PATTERN
"""
A string containing the regular expression used to match a valid version.

The pattern is not anchored at either end, and is intended for embedding in larger
expressions (for example, matching a version number as part of a file name). The
regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
flags set.

:meta hide-value:
"""


class Version(_BaseVersion):
    """This class abstracts handling of a project's versions.

    A :class:`Version` instance is comparison aware and can be compared and
    sorted using the standard Python interfaces.

    >>> v1 = Version("1.0a5")
    >>> v2 = Version("1.0")
    >>> v1
    <Version('1.0a5')>
    >>> v2
    <Version('1.0')>
    >>> v1 < v2
    True
    >>> v1 == v2
    False
    >>> v1 > v2
    False
    >>> v1 >= v2
    False
    >>> v1 <= v2
    True
    """

    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
    _key: CmpKey

    def __init__(self, version: str) -> None:
        """Initialize a Version object.

        :param version:
            The string representation of a version which will be parsed and normalized
            before use.
        :raises InvalidVersion:
            If the ``version`` does not conform to PEP 440 in any way then this
            exception will be raised.
        """

        # Validate the version and parse it into pieces
        match = self._regex.search(version)
@@ -288,9 +220,19 @@ class Version(_BaseVersion):
        )

    def __repr__(self) -> str:
        """A representation of the Version that shows all internal state.

        >>> Version('1.0.0')
        <Version('1.0.0')>
        """
        return f"<Version('{self}')>"

    def __str__(self) -> str:
        """A string representation of the version that can be rounded-tripped.

        >>> str(Version("1.0a5"))
        '1.0a5'
        """
        parts = []

        # Epoch
@@ -320,29 +262,80 @@ class Version(_BaseVersion):

    @property
    def epoch(self) -> int:
        """The epoch of the version.

        >>> Version("2.0.0").epoch
        0
        >>> Version("1!2.0.0").epoch
        1
        """
        _epoch: int = self._version.epoch
        return _epoch

    @property
    def release(self) -> Tuple[int, ...]:
        """The components of the "release" segment of the version.

        >>> Version("1.2.3").release
        (1, 2, 3)
        >>> Version("2.0.0").release
        (2, 0, 0)
        >>> Version("1!2.0.0.post0").release
        (2, 0, 0)

        Includes trailing zeroes but not the epoch or any pre-release / development /
        post-release suffixes.
        """
        _release: Tuple[int, ...] = self._version.release
        return _release

    @property
    def pre(self) -> Optional[Tuple[str, int]]:
        """The pre-release segment of the version.

        >>> print(Version("1.2.3").pre)
        None
        >>> Version("1.2.3a1").pre
        ('a', 1)
        >>> Version("1.2.3b1").pre
        ('b', 1)
        >>> Version("1.2.3rc1").pre
        ('rc', 1)
        """
        _pre: Optional[Tuple[str, int]] = self._version.pre
        return _pre

    @property
    def post(self) -> Optional[int]:
        """The post-release number of the version.

        >>> print(Version("1.2.3").post)
        None
        >>> Version("1.2.3.post1").post
        1
        """
        return self._version.post[1] if self._version.post else None

    @property
    def dev(self) -> Optional[int]:
        """The development number of the version.

        >>> print(Version("1.2.3").dev)
        None
        >>> Version("1.2.3.dev1").dev
        1
        """
        return self._version.dev[1] if self._version.dev else None

    @property
    def local(self) -> Optional[str]:
        """The local version segment of the version.

        >>> print(Version("1.2.3").local)
        None
        >>> Version("1.2.3+abc").local
        'abc'
        """
        if self._version.local:
            return ".".join(str(x) for x in self._version.local)
        else:
@@ -350,10 +343,31 @@ class Version(_BaseVersion):

    @property
    def public(self) -> str:
        """The public portion of the version.

        >>> Version("1.2.3").public
        '1.2.3'
        >>> Version("1.2.3+abc").public
        '1.2.3'
        >>> Version("1.2.3+abc.dev1").public
        '1.2.3'
        """
        return str(self).split("+", 1)[0]

    @property
    def base_version(self) -> str:
        """The "base version" of the version.

        >>> Version("1.2.3").base_version
        '1.2.3'
        >>> Version("1.2.3+abc").base_version
        '1.2.3'
        >>> Version("1!1.2.3+abc.dev1").base_version
        '1!1.2.3'

        The "base version" is the public version of the project without any pre or post
        release markers.
        """
        parts = []

        # Epoch
@@ -367,26 +381,72 @@ class Version(_BaseVersion):

    @property
    def is_prerelease(self) -> bool:
        """Whether this version is a pre-release.

        >>> Version("1.2.3").is_prerelease
        False
        >>> Version("1.2.3a1").is_prerelease
        True
        >>> Version("1.2.3b1").is_prerelease
        True
        >>> Version("1.2.3rc1").is_prerelease
        True
        >>> Version("1.2.3dev1").is_prerelease
        True
        """
        return self.dev is not None or self.pre is not None

    @property
    def is_postrelease(self) -> bool:
        """Whether this version is a post-release.

        >>> Version("1.2.3").is_postrelease
        False
        >>> Version("1.2.3.post1").is_postrelease
        True
        """
        return self.post is not None

    @property
    def is_devrelease(self) -> bool:
        """Whether this version is a development release.

        >>> Version("1.2.3").is_devrelease
        False
        >>> Version("1.2.3.dev1").is_devrelease
        True
        """
        return self.dev is not None

    @property
    def major(self) -> int:
        """The first item of :attr:`release` or ``0`` if unavailable.

        >>> Version("1.2.3").major
        1
        """
        return self.release[0] if len(self.release) >= 1 else 0

    @property
    def minor(self) -> int:
        """The second item of :attr:`release` or ``0`` if unavailable.

        >>> Version("1.2.3").minor
        2
        >>> Version("1").minor
        0
        """
        return self.release[1] if len(self.release) >= 2 else 0

    @property
    def micro(self) -> int:
        """The third item of :attr:`release` or ``0`` if unavailable.

        >>> Version("1.2.3").micro
        3
        >>> Version("1").micro
        0
        """
        return self.release[2] if len(self.release) >= 3 else 0
342	lib/pkg_resources/_vendor/platformdirs/__init__.py	Normal file
@@ -0,0 +1,342 @@
"""
Utilities for determining application-specific dirs. See <https://github.com/platformdirs/platformdirs> for details and
usage.
"""
from __future__ import annotations

import os
import sys
from pathlib import Path

if sys.version_info >= (3, 8):  # pragma: no cover (py38+)
    from typing import Literal
else:  # pragma: no cover (py38+)
    from ..typing_extensions import Literal

from .api import PlatformDirsABC
from .version import __version__
from .version import __version_tuple__ as __version_info__


def _set_platform_dir_class() -> type[PlatformDirsABC]:
    if sys.platform == "win32":
        from .windows import Windows as Result
    elif sys.platform == "darwin":
        from .macos import MacOS as Result
    else:
        from .unix import Unix as Result

    if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system":

        if os.getenv("SHELL") or os.getenv("PREFIX"):
            return Result

        from .android import _android_folder

        if _android_folder() is not None:
            from .android import Android

            return Android  # return to avoid redefinition of result

    return Result


PlatformDirs = _set_platform_dir_class()  #: Currently active platform
AppDirs = PlatformDirs  #: Backwards compatibility with appdirs


def user_data_dir(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    roaming: bool = False,
) -> str:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
    :returns: data directory tied to the user
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_dir


def site_data_dir(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    multipath: bool = False,
) -> str:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
    :returns: data directory shared by users
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_dir


def user_config_dir(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    roaming: bool = False,
) -> str:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
    :returns: config directory tied to the user
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_dir


def site_config_dir(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    multipath: bool = False,
) -> str:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
    :returns: config directory shared by the users
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_dir


def user_cache_dir(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    opinion: bool = True,
) -> str:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
    :returns: cache directory tied to the user
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_dir


def user_state_dir(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    roaming: bool = False,
) -> str:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
    :returns: state directory tied to the user
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_dir


def user_log_dir(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    opinion: bool = True,
) -> str:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
    :returns: log directory tied to the user
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_dir


def user_documents_dir() -> str:
    """
    :returns: documents directory tied to the user
    """
    return PlatformDirs().user_documents_dir


def user_runtime_dir(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    opinion: bool = True,
) -> str:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
    :returns: runtime directory tied to the user
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_dir


def user_data_path(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    roaming: bool = False,
) -> Path:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
    :returns: data path tied to the user
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_path


def site_data_path(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    multipath: bool = False,
) -> Path:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
    :returns: data path shared by users
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_path


def user_config_path(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    roaming: bool = False,
) -> Path:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
    :returns: config path tied to the user
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_path


def site_config_path(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    multipath: bool = False,
) -> Path:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
    :returns: config path shared by the users
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_path


def user_cache_path(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    opinion: bool = True,
) -> Path:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
    :returns: cache path tied to the user
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_path


def user_state_path(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    roaming: bool = False,
) -> Path:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
    :returns: state path tied to the user
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_path


def user_log_path(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    opinion: bool = True,
) -> Path:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
    :returns: log path tied to the user
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_path


def user_documents_path() -> Path:
    """
    :returns: documents path tied to the user
    """
    return PlatformDirs().user_documents_path


def user_runtime_path(
    appname: str | None = None,
    appauthor: str | None | Literal[False] = None,
    version: str | None = None,
    opinion: bool = True,
) -> Path:
    """
    :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
    :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
    :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
    :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
    :returns: runtime path tied to the user
    """
    return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_path


__all__ = [
    "__version__",
    "__version_info__",
    "PlatformDirs",
    "AppDirs",
    "PlatformDirsABC",
    "user_data_dir",
    "user_config_dir",
    "user_cache_dir",
    "user_state_dir",
    "user_log_dir",
    "user_documents_dir",
    "user_runtime_dir",
    "site_data_dir",
    "site_config_dir",
    "user_data_path",
    "user_config_path",
    "user_cache_path",
    "user_state_path",
    "user_log_path",
    "user_documents_path",
    "user_runtime_path",
    "site_data_path",
    "site_config_path",
]
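A minimal usage sketch of the module-level helpers defined above. The public package name is used here; inside setuptools the module is reachable as pkg_resources._vendor.platformdirs. "Headphones" is a hypothetical app name and the commented paths are illustrative Linux output:

from platformdirs import PlatformDirs, user_cache_dir, user_data_dir

print(user_data_dir("Headphones"))   # e.g. ~/.local/share/Headphones
print(user_cache_dir("Headphones"))  # e.g. ~/.cache/Headphones

# The class form bundles the parameters once and exposes every directory.
dirs = PlatformDirs("Headphones", version="1.0")
print(dirs.user_config_dir)          # e.g. ~/.config/Headphones/1.0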
46	lib/pkg_resources/_vendor/platformdirs/__main__.py	Normal file
@@ -0,0 +1,46 @@
from __future__ import annotations

from platformdirs import PlatformDirs, __version__

PROPS = (
    "user_data_dir",
    "user_config_dir",
    "user_cache_dir",
    "user_state_dir",
    "user_log_dir",
    "user_documents_dir",
    "user_runtime_dir",
    "site_data_dir",
    "site_config_dir",
)


def main() -> None:
    app_name = "MyApp"
    app_author = "MyCompany"

    print(f"-- platformdirs {__version__} --")

    print("-- app dirs (with optional 'version')")
    dirs = PlatformDirs(app_name, app_author, version="1.0")
    for prop in PROPS:
        print(f"{prop}: {getattr(dirs, prop)}")

    print("\n-- app dirs (without optional 'version')")
    dirs = PlatformDirs(app_name, app_author)
    for prop in PROPS:
        print(f"{prop}: {getattr(dirs, prop)}")

    print("\n-- app dirs (without optional 'appauthor')")
    dirs = PlatformDirs(app_name)
    for prop in PROPS:
        print(f"{prop}: {getattr(dirs, prop)}")

    print("\n-- app dirs (with disabled 'appauthor')")
    dirs = PlatformDirs(app_name, appauthor=False)
    for prop in PROPS:
        print(f"{prop}: {getattr(dirs, prop)}")


if __name__ == "__main__":
    main()
120	lib/pkg_resources/_vendor/platformdirs/android.py	Normal file
@@ -0,0 +1,120 @@
from __future__ import annotations

import os
import re
import sys
from functools import lru_cache
from typing import cast

from .api import PlatformDirsABC


class Android(PlatformDirsABC):
    """
    Follows the guidance `from here <https://android.stackexchange.com/a/216132>`_. Makes use of the
    `appname <platformdirs.api.PlatformDirsABC.appname>` and
    `version <platformdirs.api.PlatformDirsABC.version>`.
    """

    @property
    def user_data_dir(self) -> str:
        """:return: data directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/files/<AppName>``"""
        return self._append_app_name_and_version(cast(str, _android_folder()), "files")

    @property
    def site_data_dir(self) -> str:
        """:return: data directory shared by users, same as `user_data_dir`"""
        return self.user_data_dir

    @property
    def user_config_dir(self) -> str:
        """
        :return: config directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/shared_prefs/<AppName>``
        """
        return self._append_app_name_and_version(cast(str, _android_folder()), "shared_prefs")

    @property
    def site_config_dir(self) -> str:
        """:return: config directory shared by the users, same as `user_config_dir`"""
        return self.user_config_dir

    @property
    def user_cache_dir(self) -> str:
        """:return: cache directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>``"""
        return self._append_app_name_and_version(cast(str, _android_folder()), "cache")

    @property
    def user_state_dir(self) -> str:
        """:return: state directory tied to the user, same as `user_data_dir`"""
        return self.user_data_dir

    @property
    def user_log_dir(self) -> str:
        """
        :return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it,
          e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/log``
        """
        path = self.user_cache_dir
        if self.opinion:
            path = os.path.join(path, "log")
        return path

    @property
    def user_documents_dir(self) -> str:
        """
        :return: documents directory tied to the user, e.g. ``/storage/emulated/0/Documents``
        """
        return _android_documents_folder()

    @property
    def user_runtime_dir(self) -> str:
        """
        :return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it,
          e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/tmp``
        """
        path = self.user_cache_dir
        if self.opinion:
            path = os.path.join(path, "tmp")
        return path


@lru_cache(maxsize=1)
def _android_folder() -> str | None:
    """:return: base folder for the Android OS, or None if it cannot be found"""
    try:
        # First try to get the path to the android app via pyjnius
        from jnius import autoclass

        Context = autoclass("android.content.Context")  # noqa: N806
        result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath()
    except Exception:
        # if that fails, find an android folder by matching a path on sys.path
        pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
        for path in sys.path:
            if pattern.match(path):
                result = path.split("/files")[0]
                break
        else:
            result = None
    return result


@lru_cache(maxsize=1)
def _android_documents_folder() -> str:
    """:return: documents folder for the Android OS"""
    # Get directories with pyjnius
    try:
        from jnius import autoclass

        Context = autoclass("android.content.Context")  # noqa: N806
        Environment = autoclass("android.os.Environment")  # noqa: N806
        documents_dir: str = Context.getExternalFilesDir(Environment.DIRECTORY_DOCUMENTS).getAbsolutePath()
    except Exception:
        documents_dir = "/storage/emulated/0/Documents"

    return documents_dir


__all__ = [
    "Android",
]
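The sys.path fallback in _android_folder can be exercised without pyjnius; a small sketch of the pattern match it performs (the sample paths are made up):

import re

pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
for path in ["/data/user/0/org.example.app/files", "/usr/lib/python3.10"]:
    match = pattern.match(path)
    # The app's base folder is everything before the "/files" suffix.
    print(path, "->", path.split("/files")[0] if match else None)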
156	lib/pkg_resources/_vendor/platformdirs/api.py	Normal file
@@ -0,0 +1,156 @@
from __future__ import annotations

import os
import sys
from abc import ABC, abstractmethod
from pathlib import Path

if sys.version_info >= (3, 8):  # pragma: no branch
    from typing import Literal  # pragma: no cover


class PlatformDirsABC(ABC):
    """
    Abstract base class for platform directories.
    """

    def __init__(
        self,
        appname: str | None = None,
        appauthor: str | None | Literal[False] = None,
        version: str | None = None,
        roaming: bool = False,
        multipath: bool = False,
        opinion: bool = True,
    ):
        """
        Create a new platform directory.

        :param appname: See `appname`.
        :param appauthor: See `appauthor`.
        :param version: See `version`.
        :param roaming: See `roaming`.
        :param multipath: See `multipath`.
        :param opinion: See `opinion`.
        """
        self.appname = appname  #: The name of the application.
        self.appauthor = appauthor
        """
        The name of the app author or distributing body for this application. Typically, it is the owning company name.
        Defaults to `appname`. You may pass ``False`` to disable it.
        """
        self.version = version
        """
        An optional version path element to append to the path. You might want to use this if you want multiple versions
        of your app to be able to run independently. If used, this would typically be ``<major>.<minor>``.
        """
        self.roaming = roaming
        """
        Whether to use the roaming appdata directory on Windows. That means that for users on a Windows network setup
        for roaming profiles, this user data will be synced on login (see
        `here <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>`_).
        """
        self.multipath = multipath
        """
        An optional parameter only applicable to Unix/Linux which indicates that the entire list of data dirs should be
        returned. By default, only the first item is returned.
        """
        self.opinion = opinion  #: A flag indicating whether to use opinionated values.

    def _append_app_name_and_version(self, *base: str) -> str:
        params = list(base[1:])
        if self.appname:
            params.append(self.appname)
            if self.version:
                params.append(self.version)
        return os.path.join(base[0], *params)

    @property
    @abstractmethod
    def user_data_dir(self) -> str:
        """:return: data directory tied to the user"""

    @property
    @abstractmethod
    def site_data_dir(self) -> str:
        """:return: data directory shared by users"""

    @property
    @abstractmethod
    def user_config_dir(self) -> str:
        """:return: config directory tied to the user"""

    @property
    @abstractmethod
    def site_config_dir(self) -> str:
        """:return: config directory shared by the users"""

    @property
    @abstractmethod
    def user_cache_dir(self) -> str:
        """:return: cache directory tied to the user"""

    @property
    @abstractmethod
    def user_state_dir(self) -> str:
        """:return: state directory tied to the user"""

    @property
    @abstractmethod
    def user_log_dir(self) -> str:
        """:return: log directory tied to the user"""

    @property
    @abstractmethod
    def user_documents_dir(self) -> str:
        """:return: documents directory tied to the user"""

    @property
    @abstractmethod
    def user_runtime_dir(self) -> str:
        """:return: runtime directory tied to the user"""

    @property
    def user_data_path(self) -> Path:
        """:return: data path tied to the user"""
        return Path(self.user_data_dir)

    @property
    def site_data_path(self) -> Path:
        """:return: data path shared by users"""
        return Path(self.site_data_dir)

    @property
    def user_config_path(self) -> Path:
        """:return: config path tied to the user"""
        return Path(self.user_config_dir)

    @property
    def site_config_path(self) -> Path:
        """:return: config path shared by the users"""
        return Path(self.site_config_dir)

    @property
    def user_cache_path(self) -> Path:
        """:return: cache path tied to the user"""
        return Path(self.user_cache_dir)

    @property
    def user_state_path(self) -> Path:
        """:return: state path tied to the user"""
        return Path(self.user_state_dir)

    @property
    def user_log_path(self) -> Path:
        """:return: log path tied to the user"""
        return Path(self.user_log_dir)

    @property
    def user_documents_path(self) -> Path:
        """:return: documents path tied to the user"""
        return Path(self.user_documents_dir)

    @property
    def user_runtime_path(self) -> Path:
        """:return: runtime path tied to the user"""
        return Path(self.user_runtime_dir)
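A sketch of how _append_app_name_and_version composes paths for any concrete subclass; it joins base + appname + version and skips any part that is unset. The base path and names here are illustrative, and the call reaches into a private helper purely for demonstration:

from platformdirs.unix import Unix  # any PlatformDirsABC subclass works

dirs = Unix(appname="MyApp", version="1.0")
print(dirs._append_app_name_and_version("/tmp/base"))  # /tmp/base/MyApp/1.0
print(Unix()._append_app_name_and_version("/tmp/base"))  # /tmp/base (nothing appended)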
64	lib/pkg_resources/_vendor/platformdirs/macos.py	Normal file
@@ -0,0 +1,64 @@
from __future__ import annotations

import os

from .api import PlatformDirsABC


class MacOS(PlatformDirsABC):
    """
    Platform directories for the macOS operating system. Follows the guidance from `Apple documentation
    <https://developer.apple.com/library/archive/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/MacOSXDirectories/MacOSXDirectories.html>`_.
    Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>` and
    `version <platformdirs.api.PlatformDirsABC.version>`.
    """

    @property
    def user_data_dir(self) -> str:
        """:return: data directory tied to the user, e.g. ``~/Library/Application Support/$appname/$version``"""
        return self._append_app_name_and_version(os.path.expanduser("~/Library/Application Support/"))

    @property
    def site_data_dir(self) -> str:
        """:return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``"""
        return self._append_app_name_and_version("/Library/Application Support")

    @property
    def user_config_dir(self) -> str:
        """:return: config directory tied to the user, e.g. ``~/Library/Preferences/$appname/$version``"""
        return self._append_app_name_and_version(os.path.expanduser("~/Library/Preferences/"))

    @property
    def site_config_dir(self) -> str:
        """:return: config directory shared by the users, e.g. ``/Library/Preferences/$appname``"""
        return self._append_app_name_and_version("/Library/Preferences")

    @property
    def user_cache_dir(self) -> str:
        """:return: cache directory tied to the user, e.g. ``~/Library/Caches/$appname/$version``"""
        return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches"))

    @property
    def user_state_dir(self) -> str:
        """:return: state directory tied to the user, same as `user_data_dir`"""
        return self.user_data_dir

    @property
    def user_log_dir(self) -> str:
        """:return: log directory tied to the user, e.g. ``~/Library/Logs/$appname/$version``"""
        return self._append_app_name_and_version(os.path.expanduser("~/Library/Logs"))

    @property
    def user_documents_dir(self) -> str:
        """:return: documents directory tied to the user, e.g. ``~/Documents``"""
        return os.path.expanduser("~/Documents")

    @property
    def user_runtime_dir(self) -> str:
        """:return: runtime directory tied to the user, e.g. ``~/Library/Caches/TemporaryItems/$appname/$version``"""
        return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches/TemporaryItems"))


__all__ = [
    "MacOS",
]
0	lib/pkg_resources/_vendor/platformdirs/py.typed	Normal file
181	lib/pkg_resources/_vendor/platformdirs/unix.py	Normal file
@@ -0,0 +1,181 @@
from __future__ import annotations

import os
import sys
from configparser import ConfigParser
from pathlib import Path

from .api import PlatformDirsABC

if sys.platform.startswith("linux"):  # pragma: no branch # no op check, only to please the type checker
    from os import getuid
else:

    def getuid() -> int:
        raise RuntimeError("should only be used on Linux")


class Unix(PlatformDirsABC):
    """
    On Unix/Linux, we follow the
    `XDG Basedir Spec <https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_. The spec allows
    overriding directories with environment variables. The examples shown are the default values, alongside the name of
    the environment variable that overrides them. Makes use of the
    `appname <platformdirs.api.PlatformDirsABC.appname>`,
    `version <platformdirs.api.PlatformDirsABC.version>`,
    `multipath <platformdirs.api.PlatformDirsABC.multipath>`,
    `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
    """

    @property
    def user_data_dir(self) -> str:
        """
        :return: data directory tied to the user, e.g. ``~/.local/share/$appname/$version`` or
         ``$XDG_DATA_HOME/$appname/$version``
        """
        path = os.environ.get("XDG_DATA_HOME", "")
        if not path.strip():
            path = os.path.expanduser("~/.local/share")
        return self._append_app_name_and_version(path)

    @property
    def site_data_dir(self) -> str:
        """
        :return: data directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>` is
         enabled and ``XDG_DATA_DIRS`` is set and a multi path the response is also a multi path separated by the OS
         path separator), e.g. ``/usr/local/share/$appname/$version`` or ``/usr/share/$appname/$version``
        """
        # XDG default for $XDG_DATA_DIRS; only first, if multipath is False
        path = os.environ.get("XDG_DATA_DIRS", "")
        if not path.strip():
            path = f"/usr/local/share{os.pathsep}/usr/share"
        return self._with_multi_path(path)

    def _with_multi_path(self, path: str) -> str:
        path_list = path.split(os.pathsep)
        if not self.multipath:
            path_list = path_list[0:1]
        path_list = [self._append_app_name_and_version(os.path.expanduser(p)) for p in path_list]
        return os.pathsep.join(path_list)

    @property
    def user_config_dir(self) -> str:
        """
        :return: config directory tied to the user, e.g. ``~/.config/$appname/$version`` or
         ``$XDG_CONFIG_HOME/$appname/$version``
        """
        path = os.environ.get("XDG_CONFIG_HOME", "")
        if not path.strip():
            path = os.path.expanduser("~/.config")
        return self._append_app_name_and_version(path)

    @property
    def site_config_dir(self) -> str:
        """
        :return: config directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>`
         is enabled and ``XDG_CONFIG_DIRS`` is set and a multi path the response is also a multi path separated by the
         OS path separator), e.g. ``/etc/xdg/$appname/$version``
        """
        # XDG default for $XDG_CONFIG_DIRS; only first, if multipath is False
        path = os.environ.get("XDG_CONFIG_DIRS", "")
        if not path.strip():
            path = "/etc/xdg"
        return self._with_multi_path(path)

    @property
    def user_cache_dir(self) -> str:
        """
        :return: cache directory tied to the user, e.g. ``~/.cache/$appname/$version`` or
         ``$XDG_CACHE_HOME/$appname/$version``
        """
        path = os.environ.get("XDG_CACHE_HOME", "")
        if not path.strip():
            path = os.path.expanduser("~/.cache")
        return self._append_app_name_and_version(path)

    @property
    def user_state_dir(self) -> str:
        """
        :return: state directory tied to the user, e.g. ``~/.local/state/$appname/$version`` or
         ``$XDG_STATE_HOME/$appname/$version``
        """
        path = os.environ.get("XDG_STATE_HOME", "")
        if not path.strip():
            path = os.path.expanduser("~/.local/state")
        return self._append_app_name_and_version(path)

    @property
    def user_log_dir(self) -> str:
        """
        :return: log directory tied to the user, same as `user_state_dir` if not opinionated else ``log`` in it
        """
        path = self.user_state_dir
        if self.opinion:
            path = os.path.join(path, "log")
        return path

    @property
    def user_documents_dir(self) -> str:
        """
        :return: documents directory tied to the user, e.g. ``~/Documents``
        """
        documents_dir = _get_user_dirs_folder("XDG_DOCUMENTS_DIR")
        if documents_dir is None:
            documents_dir = os.environ.get("XDG_DOCUMENTS_DIR", "").strip()
            if not documents_dir:
                documents_dir = os.path.expanduser("~/Documents")

        return documents_dir

    @property
    def user_runtime_dir(self) -> str:
        """
        :return: runtime directory tied to the user, e.g. ``/run/user/$(id -u)/$appname/$version`` or
         ``$XDG_RUNTIME_DIR/$appname/$version``
        """
        path = os.environ.get("XDG_RUNTIME_DIR", "")
        if not path.strip():
            path = f"/run/user/{getuid()}"
        return self._append_app_name_and_version(path)

    @property
    def site_data_path(self) -> Path:
        """:return: data path shared by users. Only return first item, even if ``multipath`` is set to ``True``"""
        return self._first_item_as_path_if_multipath(self.site_data_dir)

    @property
    def site_config_path(self) -> Path:
        """:return: config path shared by the users. Only return first item, even if ``multipath`` is set to ``True``"""
        return self._first_item_as_path_if_multipath(self.site_config_dir)

    def _first_item_as_path_if_multipath(self, directory: str) -> Path:
        if self.multipath:
            # If multipath is True, the first path is returned.
            directory = directory.split(os.pathsep)[0]
        return Path(directory)


def _get_user_dirs_folder(key: str) -> str | None:
    """Return directory from user-dirs.dirs config file. See https://freedesktop.org/wiki/Software/xdg-user-dirs/"""
    user_dirs_config_path = os.path.join(Unix().user_config_dir, "user-dirs.dirs")
    if os.path.exists(user_dirs_config_path):
        parser = ConfigParser()

        with open(user_dirs_config_path) as stream:
            # Add fake section header, so ConfigParser doesn't complain
            parser.read_string(f"[top]\n{stream.read()}")

        if key not in parser["top"]:
            return None

        path = parser["top"][key].strip('"')
        # Handle relative home paths
        path = path.replace("$HOME", os.path.expanduser("~"))
        return path

    return None


__all__ = [
    "Unix",
]
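The XDG override behavior above can be checked directly; a short sketch (the directory values are illustrative):

import os

from platformdirs.unix import Unix

os.environ["XDG_DATA_HOME"] = "/srv/xdg-data"
print(Unix(appname="MyApp").user_data_dir)   # /srv/xdg-data/MyApp

del os.environ["XDG_DATA_HOME"]              # falls back to the spec default
print(Unix(appname="MyApp").user_data_dir)   # e.g. ~/.local/share/MyApp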
4	lib/pkg_resources/_vendor/platformdirs/version.py	Normal file
@@ -0,0 +1,4 @@
# file generated by setuptools_scm
# don't change, don't track in version control
__version__ = version = '2.6.2'
__version_tuple__ = version_tuple = (2, 6, 2)
184	lib/pkg_resources/_vendor/platformdirs/windows.py	Normal file
@@ -0,0 +1,184 @@
from __future__ import annotations

import ctypes
import os
import sys
from functools import lru_cache
from typing import Callable

from .api import PlatformDirsABC


class Windows(PlatformDirsABC):
    """`MSDN on where to store app data files
    <http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120>`_.
    Makes use of the
    `appname <platformdirs.api.PlatformDirsABC.appname>`,
    `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`,
    `version <platformdirs.api.PlatformDirsABC.version>`,
    `roaming <platformdirs.api.PlatformDirsABC.roaming>`,
    `opinion <platformdirs.api.PlatformDirsABC.opinion>`."""

    @property
    def user_data_dir(self) -> str:
        """
        :return: data directory tied to the user, e.g.
         ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname`` (not roaming) or
         ``%USERPROFILE%\\AppData\\Roaming\\$appauthor\\$appname`` (roaming)
        """
        const = "CSIDL_APPDATA" if self.roaming else "CSIDL_LOCAL_APPDATA"
        path = os.path.normpath(get_win_folder(const))
        return self._append_parts(path)

    def _append_parts(self, path: str, *, opinion_value: str | None = None) -> str:
        params = []
        if self.appname:
            if self.appauthor is not False:
                author = self.appauthor or self.appname
                params.append(author)
            params.append(self.appname)
            if opinion_value is not None and self.opinion:
                params.append(opinion_value)
            if self.version:
                params.append(self.version)
        return os.path.join(path, *params)

    @property
    def site_data_dir(self) -> str:
        """:return: data directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname``"""
        path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA"))
        return self._append_parts(path)

    @property
    def user_config_dir(self) -> str:
        """:return: config directory tied to the user, same as `user_data_dir`"""
        return self.user_data_dir

    @property
    def site_config_dir(self) -> str:
        """:return: config directory shared by the users, same as `site_data_dir`"""
        return self.site_data_dir

    @property
    def user_cache_dir(self) -> str:
        """
        :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g.
         ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version``
        """
        path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA"))
        return self._append_parts(path, opinion_value="Cache")

    @property
    def user_state_dir(self) -> str:
        """:return: state directory tied to the user, same as `user_data_dir`"""
        return self.user_data_dir

    @property
    def user_log_dir(self) -> str:
        """
        :return: log directory tied to the user, same as `user_data_dir` if not opinionated else ``Logs`` in it
        """
        path = self.user_data_dir
        if self.opinion:
            path = os.path.join(path, "Logs")
        return path

    @property
    def user_documents_dir(self) -> str:
        """
        :return: documents directory tied to the user, e.g. ``%USERPROFILE%\\Documents``
        """
        return os.path.normpath(get_win_folder("CSIDL_PERSONAL"))

    @property
    def user_runtime_dir(self) -> str:
        """
        :return: runtime directory tied to the user, e.g.
         ``%USERPROFILE%\\AppData\\Local\\Temp\\$appauthor\\$appname``
        """
        path = os.path.normpath(os.path.join(get_win_folder("CSIDL_LOCAL_APPDATA"), "Temp"))
        return self._append_parts(path)


def get_win_folder_from_env_vars(csidl_name: str) -> str:
    """Get folder from environment variables."""
    if csidl_name == "CSIDL_PERSONAL":  # does not have an environment name
        return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents")

    env_var_name = {
        "CSIDL_APPDATA": "APPDATA",
        "CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE",
        "CSIDL_LOCAL_APPDATA": "LOCALAPPDATA",
    }.get(csidl_name)
    if env_var_name is None:
        raise ValueError(f"Unknown CSIDL name: {csidl_name}")
    result = os.environ.get(env_var_name)
    if result is None:
        raise ValueError(f"Unset environment variable: {env_var_name}")
    return result


def get_win_folder_from_registry(csidl_name: str) -> str:
    """Get folder from the registry.

    This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
        "CSIDL_PERSONAL": "Personal",
    }.get(csidl_name)
    if shell_folder_name is None:
        raise ValueError(f"Unknown CSIDL name: {csidl_name}")
    if sys.platform != "win32":  # only needed for mypy type checker to know that this code runs only on Windows
        raise NotImplementedError
    import winreg

    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
    directory, _ = winreg.QueryValueEx(key, shell_folder_name)
    return str(directory)


def get_win_folder_via_ctypes(csidl_name: str) -> str:
    """Get folder with ctypes."""
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
        "CSIDL_PERSONAL": 5,
    }.get(csidl_name)
    if csidl_const is None:
        raise ValueError(f"Unknown CSIDL name: {csidl_name}")

    buf = ctypes.create_unicode_buffer(1024)
    windll = getattr(ctypes, "windll")  # noqa: B009 # using getattr to avoid false positive with mypy type checker
    windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to short path name if it has highbit chars.
    if any(ord(c) > 255 for c in buf):
        buf2 = ctypes.create_unicode_buffer(1024)
        if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2

    return buf.value


def _pick_get_win_folder() -> Callable[[str], str]:
    if hasattr(ctypes, "windll"):
        return get_win_folder_via_ctypes
    try:
        import winreg  # noqa: F401
    except ImportError:
        return get_win_folder_from_env_vars
    else:
        return get_win_folder_from_registry


get_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder())

__all__ = [
    "Windows",
]
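The resolver chain at the bottom of windows.py prefers ctypes, then the registry, then environment variables. A sketch of probing the same decision on any platform (on non-Windows the env-var fallback is selected, and it raises for unset variables):

import ctypes

# Mirror _pick_get_win_folder's decision without calling the helpers.
if hasattr(ctypes, "windll"):
    strategy = "ctypes (SHGetFolderPathW)"
else:
    try:
        import winreg  # noqa: F401
        strategy = "winreg (Shell Folders key)"
    except ImportError:
        strategy = "environment variables (APPDATA, LOCALAPPDATA, ...)"
print("get_win_folder strategy:", strategy)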
@@ -1,18 +0,0 @@
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
2209
lib/pkg_resources/_vendor/typing_extensions.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,3 +1,11 @@
-packaging==21.2
-pyparsing==2.2.1
-appdirs==1.4.3
+packaging==23.1
+
+platformdirs==2.6.2
+# required for platformdirs on Python < 3.8
+typing_extensions==4.4.0
+
+jaraco.text==3.7.0
+# required for jaraco.text on older Pythons
+importlib_resources==5.10.2
+# required for importlib_resources on older Pythons
+zipp==3.7.0
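The comments in the new pin list mark ``typing_extensions``, ``importlib_resources``, and ``zipp`` as backports needed only on older Pythons. A sketch of the guarded-import pattern such pins usually back (``Protocol`` is just one example symbol):

import sys

if sys.version_info >= (3, 8):
    from typing import Protocol  # stdlib from 3.8 onward
else:
    from typing_extensions import Protocol  # backport pinned above
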
@@ -0,0 +1 @@
zipp
329
lib/pkg_resources/_vendor/zipp.py
Normal file
@@ -0,0 +1,329 @@
import io
import posixpath
import zipfile
import itertools
import contextlib
import sys
import pathlib

if sys.version_info < (3, 7):
    from collections import OrderedDict
else:
    OrderedDict = dict


__all__ = ['Path']


def _parents(path):
    """
    Given a path with elements separated by
    posixpath.sep, generate all parents of that path.

    >>> list(_parents('b/d'))
    ['b']
    >>> list(_parents('/b/d/'))
    ['/b']
    >>> list(_parents('b/d/f/'))
    ['b/d', 'b']
    >>> list(_parents('b'))
    []
    >>> list(_parents(''))
    []
    """
    return itertools.islice(_ancestry(path), 1, None)


def _ancestry(path):
    """
    Given a path with elements separated by
    posixpath.sep, generate all elements of that path

    >>> list(_ancestry('b/d'))
    ['b/d', 'b']
    >>> list(_ancestry('/b/d/'))
    ['/b/d', '/b']
    >>> list(_ancestry('b/d/f/'))
    ['b/d/f', 'b/d', 'b']
    >>> list(_ancestry('b'))
    ['b']
    >>> list(_ancestry(''))
    []
    """
    path = path.rstrip(posixpath.sep)
    while path and path != posixpath.sep:
        yield path
        path, tail = posixpath.split(path)


_dedupe = OrderedDict.fromkeys
"""Deduplicate an iterable in original order"""


def _difference(minuend, subtrahend):
    """
    Return items in minuend not in subtrahend, retaining order
    with O(1) lookup.
    """
    return itertools.filterfalse(set(subtrahend).__contains__, minuend)


class CompleteDirs(zipfile.ZipFile):
    """
    A ZipFile subclass that ensures that implied directories
    are always included in the namelist.
    """

    @staticmethod
    def _implied_dirs(names):
        parents = itertools.chain.from_iterable(map(_parents, names))
        as_dirs = (p + posixpath.sep for p in parents)
        return _dedupe(_difference(as_dirs, names))

    def namelist(self):
        names = super(CompleteDirs, self).namelist()
        return names + list(self._implied_dirs(names))

    def _name_set(self):
        return set(self.namelist())

    def resolve_dir(self, name):
        """
        If the name represents a directory, return that name
        as a directory (with the trailing slash).
        """
        names = self._name_set()
        dirname = name + '/'
        dir_match = name not in names and dirname in names
        return dirname if dir_match else name

    @classmethod
    def make(cls, source):
        """
        Given a source (filename or zipfile), return an
        appropriate CompleteDirs subclass.
        """
        if isinstance(source, CompleteDirs):
            return source

        if not isinstance(source, zipfile.ZipFile):
            return cls(_pathlib_compat(source))

        # Only allow for FastLookup when supplied zipfile is read-only
        if 'r' not in source.mode:
            cls = CompleteDirs

        source.__class__ = cls
        return source


class FastLookup(CompleteDirs):
    """
    ZipFile subclass to ensure implicit
    dirs exist and are resolved rapidly.
    """

    def namelist(self):
        with contextlib.suppress(AttributeError):
            return self.__names
        self.__names = super(FastLookup, self).namelist()
        return self.__names

    def _name_set(self):
        with contextlib.suppress(AttributeError):
            return self.__lookup
        self.__lookup = super(FastLookup, self)._name_set()
        return self.__lookup


def _pathlib_compat(path):
    """
    For path-like objects, convert to a filename for compatibility
    on Python 3.6.1 and earlier.
    """
    try:
        return path.__fspath__()
    except AttributeError:
        return str(path)


class Path:
    """
    A pathlib-compatible interface for zip files.

    Consider a zip file with this structure::

        .
        ├── a.txt
        └── b
            ├── c.txt
            └── d
                └── e.txt

    >>> data = io.BytesIO()
    >>> zf = zipfile.ZipFile(data, 'w')
    >>> zf.writestr('a.txt', 'content of a')
    >>> zf.writestr('b/c.txt', 'content of c')
    >>> zf.writestr('b/d/e.txt', 'content of e')
    >>> zf.filename = 'mem/abcde.zip'

    Path accepts the zipfile object itself or a filename

    >>> root = Path(zf)

    From there, several path operations are available.

    Directory iteration (including the zip file itself):

    >>> a, b = root.iterdir()
    >>> a
    Path('mem/abcde.zip', 'a.txt')
    >>> b
    Path('mem/abcde.zip', 'b/')

    name property:

    >>> b.name
    'b'

    join with divide operator:

    >>> c = b / 'c.txt'
    >>> c
    Path('mem/abcde.zip', 'b/c.txt')
    >>> c.name
    'c.txt'

    Read text:

    >>> c.read_text()
    'content of c'

    existence:

    >>> c.exists()
    True
    >>> (b / 'missing.txt').exists()
    False

    Coercion to string:

    >>> import os
    >>> str(c).replace(os.sep, posixpath.sep)
    'mem/abcde.zip/b/c.txt'

    At the root, ``name``, ``filename``, and ``parent``
    resolve to the zipfile. Note these attributes are not
    valid and will raise a ``ValueError`` if the zipfile
    has no filename.

    >>> root.name
    'abcde.zip'
    >>> str(root.filename).replace(os.sep, posixpath.sep)
    'mem/abcde.zip'
    >>> str(root.parent)
    'mem'
    """

    __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"

    def __init__(self, root, at=""):
        """
        Construct a Path from a ZipFile or filename.

        Note: When the source is an existing ZipFile object,
        its type (__class__) will be mutated to a
        specialized type. If the caller wishes to retain the
        original type, the caller should either create a
        separate ZipFile object or pass a filename.
        """
        self.root = FastLookup.make(root)
        self.at = at

    def open(self, mode='r', *args, pwd=None, **kwargs):
        """
        Open this entry as text or binary following the semantics
        of ``pathlib.Path.open()`` by passing arguments through
        to io.TextIOWrapper().
        """
        if self.is_dir():
            raise IsADirectoryError(self)
        zip_mode = mode[0]
        if not self.exists() and zip_mode == 'r':
            raise FileNotFoundError(self)
        stream = self.root.open(self.at, zip_mode, pwd=pwd)
        if 'b' in mode:
            if args or kwargs:
                raise ValueError("encoding args invalid for binary operation")
            return stream
        return io.TextIOWrapper(stream, *args, **kwargs)

    @property
    def name(self):
        return pathlib.Path(self.at).name or self.filename.name

    @property
    def suffix(self):
        return pathlib.Path(self.at).suffix or self.filename.suffix

    @property
    def suffixes(self):
        return pathlib.Path(self.at).suffixes or self.filename.suffixes

    @property
    def stem(self):
        return pathlib.Path(self.at).stem or self.filename.stem

    @property
    def filename(self):
        return pathlib.Path(self.root.filename).joinpath(self.at)

    def read_text(self, *args, **kwargs):
        with self.open('r', *args, **kwargs) as strm:
            return strm.read()

    def read_bytes(self):
        with self.open('rb') as strm:
            return strm.read()

    def _is_child(self, path):
        return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")

    def _next(self, at):
        return self.__class__(self.root, at)

    def is_dir(self):
        return not self.at or self.at.endswith("/")

    def is_file(self):
        return self.exists() and not self.is_dir()

    def exists(self):
        return self.at in self.root._name_set()

    def iterdir(self):
        if not self.is_dir():
            raise ValueError("Can't listdir a file")
        subs = map(self._next, self.root.namelist())
        return filter(self._is_child, subs)

    def __str__(self):
        return posixpath.join(self.root.filename, self.at)

    def __repr__(self):
        return self.__repr.format(self=self)

    def joinpath(self, *other):
        next = posixpath.join(self.at, *map(_pathlib_compat, other))
        return self._next(self.root.resolve_dir(next))

    __truediv__ = joinpath

    @property
    def parent(self):
        if not self.at:
            return self.filename.parent
        parent_at = posixpath.dirname(self.at.rstrip('/'))
        if parent_at:
            parent_at += '/'
        return self._next(parent_at)
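A short, runnable sketch of the module above (assuming it is importable as ``zipp``; in this tree it lives at ``pkg_resources._vendor.zipp``), showing CompleteDirs synthesizing a directory entry the archive never stored:

import io
import zipfile

import zipp  # the vendored module shown above

data = io.BytesIO()
with zipfile.ZipFile(data, 'w') as zf:
    zf.writestr('b/c.txt', 'content of c')  # no explicit 'b/' entry

root = zipp.Path(zipfile.ZipFile(data))
print([p.at for p in root.iterdir()])       # ['b/'] -- the implied directory
print((root / 'b' / 'c.txt').read_text())   # 'content of c'
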
@@ -1,401 +0,0 @@
Pluggable Distributions of Python Software
==========================================

Distributions
-------------

A "Distribution" is a collection of files that represent a "Release" of a
"Project" as of a particular point in time, denoted by a
"Version"::

    >>> import sys, pkg_resources
    >>> from pkg_resources import Distribution
    >>> Distribution(project_name="Foo", version="1.2")
    Foo 1.2

Distributions have a location, which can be a filename, URL, or really anything
else you care to use::

    >>> dist = Distribution(
    ...     location="http://example.com/something",
    ...     project_name="Bar", version="0.9"
    ... )

    >>> dist
    Bar 0.9 (http://example.com/something)


Distributions have various introspectable attributes::

    >>> dist.location
    'http://example.com/something'

    >>> dist.project_name
    'Bar'

    >>> dist.version
    '0.9'

    >>> dist.py_version == '{}.{}'.format(*sys.version_info)
    True

    >>> print(dist.platform)
    None

Including various computed attributes::

    >>> from pkg_resources import parse_version
    >>> dist.parsed_version == parse_version(dist.version)
    True

    >>> dist.key # case-insensitive form of the project name
    'bar'

Distributions are compared (and hashed) by version first::

    >>> Distribution(version='1.0') == Distribution(version='1.0')
    True
    >>> Distribution(version='1.0') == Distribution(version='1.1')
    False
    >>> Distribution(version='1.0') < Distribution(version='1.1')
    True

but also by project name (case-insensitive), platform, Python version,
location, etc.::

    >>> Distribution(project_name="Foo",version="1.0") == \
    ... Distribution(project_name="Foo",version="1.0")
    True

    >>> Distribution(project_name="Foo",version="1.0") == \
    ... Distribution(project_name="foo",version="1.0")
    True

    >>> Distribution(project_name="Foo",version="1.0") == \
    ... Distribution(project_name="Foo",version="1.1")
    False

    >>> Distribution(project_name="Foo",py_version="2.3",version="1.0") == \
    ... Distribution(project_name="Foo",py_version="2.4",version="1.0")
    False

    >>> Distribution(location="spam",version="1.0") == \
    ... Distribution(location="spam",version="1.0")
    True

    >>> Distribution(location="spam",version="1.0") == \
    ... Distribution(location="baz",version="1.0")
    False



Hash and compare distribution by prio/plat

Get version from metadata
provider capabilities
egg_name()
as_requirement()
from_location, from_filename (w/path normalization)

Releases may have zero or more "Requirements", which indicate
what releases of another project the release requires in order to
function. A Requirement names the other project, expresses some criteria
as to what releases of that project are acceptable, and lists any "Extras"
that the requiring release may need from that project. (An Extra is an
optional feature of a Release, that can only be used if its additional
Requirements are satisfied.)



The Working Set
---------------

A collection of active distributions is called a Working Set. Note that a
Working Set can contain any importable distribution, not just pluggable ones.
For example, the Python standard library is an importable distribution that
will usually be part of the Working Set, even though it is not pluggable.
Similarly, when you are doing development work on a project, the files you are
editing are also a Distribution. (And, with a little attention to the
directory names used, and including some additional metadata, such a
"development distribution" can be made pluggable as well.)

    >>> from pkg_resources import WorkingSet

A working set's entries are the sys.path entries that correspond to the active
distributions. By default, the working set's entries are the items on
``sys.path``::

    >>> ws = WorkingSet()
    >>> ws.entries == sys.path
    True

But you can also create an empty working set explicitly, and add distributions
to it::

    >>> ws = WorkingSet([])
    >>> ws.add(dist)
    >>> ws.entries
    ['http://example.com/something']
    >>> dist in ws
    True
    >>> Distribution('foo',version="") in ws
    False

And you can iterate over its distributions::

    >>> list(ws)
    [Bar 0.9 (http://example.com/something)]

Adding the same distribution more than once is a no-op::

    >>> ws.add(dist)
    >>> list(ws)
    [Bar 0.9 (http://example.com/something)]

For that matter, adding multiple distributions for the same project also does
nothing, because a working set can only hold one active distribution per
project -- the first one added to it::

    >>> ws.add(
    ...     Distribution(
    ...         'http://example.com/something', project_name="Bar",
    ...         version="7.2"
    ...     )
    ... )
    >>> list(ws)
    [Bar 0.9 (http://example.com/something)]

You can append a path entry to a working set using ``add_entry()``::

    >>> ws.entries
    ['http://example.com/something']
    >>> ws.add_entry(pkg_resources.__file__)
    >>> ws.entries
    ['http://example.com/something', '...pkg_resources...']

Multiple additions result in multiple entries, even if the entry is already in
the working set (because ``sys.path`` can contain the same entry more than
once)::

    >>> ws.add_entry(pkg_resources.__file__)
    >>> ws.entries
    ['...example.com...', '...pkg_resources...', '...pkg_resources...']

And you can specify the path entry a distribution was found under, using the
optional second parameter to ``add()``::

    >>> ws = WorkingSet([])
    >>> ws.add(dist,"foo")
    >>> ws.entries
    ['foo']

But even if a distribution is found under multiple path entries, it still only
shows up once when iterating the working set:

    >>> ws.add_entry(ws.entries[0])
    >>> list(ws)
    [Bar 0.9 (http://example.com/something)]

You can ask a WorkingSet to ``find()`` a distribution matching a requirement::

    >>> from pkg_resources import Requirement
    >>> print(ws.find(Requirement.parse("Foo==1.0"))) # no match, return None
    None

    >>> ws.find(Requirement.parse("Bar==0.9")) # match, return distribution
    Bar 0.9 (http://example.com/something)

Note that asking for a conflicting version of a distribution already in a
working set triggers a ``pkg_resources.VersionConflict`` error:

    >>> try:
    ...     ws.find(Requirement.parse("Bar==1.0"))
    ... except pkg_resources.VersionConflict as exc:
    ...     print(str(exc))
    ... else:
    ...     raise AssertionError("VersionConflict was not raised")
    (Bar 0.9 (http://example.com/something), Requirement.parse('Bar==1.0'))

You can subscribe a callback function to receive notifications whenever a new
distribution is added to a working set. The callback is immediately invoked
once for each existing distribution in the working set, and then is called
again for new distributions added thereafter::

    >>> def added(dist): print("Added %s" % dist)
    >>> ws.subscribe(added)
    Added Bar 0.9
    >>> foo12 = Distribution(project_name="Foo", version="1.2", location="f12")
    >>> ws.add(foo12)
    Added Foo 1.2

Note, however, that only the first distribution added for a given project name
will trigger a callback, even during the initial ``subscribe()`` callback::

    >>> foo14 = Distribution(project_name="Foo", version="1.4", location="f14")
    >>> ws.add(foo14) # no callback, because Foo 1.2 is already active

    >>> ws = WorkingSet([])
    >>> ws.add(foo12)
    >>> ws.add(foo14)
    >>> ws.subscribe(added)
    Added Foo 1.2

And adding a callback more than once has no effect, either::

    >>> ws.subscribe(added) # no callbacks

    # and no double-callbacks on subsequent additions, either
    >>> just_a_test = Distribution(project_name="JustATest", version="0.99")
    >>> ws.add(just_a_test)
    Added JustATest 0.99


Finding Plugins
---------------

``WorkingSet`` objects can be used to figure out what plugins in an
``Environment`` can be loaded without any resolution errors::

    >>> from pkg_resources import Environment

    >>> plugins = Environment([]) # normally, a list of plugin directories
    >>> plugins.add(foo12)
    >>> plugins.add(foo14)
    >>> plugins.add(just_a_test)

In the simplest case, we just get the newest version of each distribution in
the plugin environment::

    >>> ws = WorkingSet([])
    >>> ws.find_plugins(plugins)
    ([JustATest 0.99, Foo 1.4 (f14)], {})

But if there's a problem with a version conflict or missing requirements, the
method falls back to older versions, and the error info dict will contain an
exception instance for each unloadable plugin::

    >>> ws.add(foo12) # this will conflict with Foo 1.4
    >>> ws.find_plugins(plugins)
    ([JustATest 0.99, Foo 1.2 (f12)], {Foo 1.4 (f14): VersionConflict(...)})

But if you disallow fallbacks, the failed plugin will be skipped instead of
trying older versions::

    >>> ws.find_plugins(plugins, fallback=False)
    ([JustATest 0.99], {Foo 1.4 (f14): VersionConflict(...)})



Platform Compatibility Rules
----------------------------

On the Mac, there are potential compatibility issues for modules compiled
on newer versions of macOS than what the user is running. Additionally,
macOS will soon have two platforms to contend with: Intel and PowerPC.

Basic equality works as on other platforms::

    >>> from pkg_resources import compatible_platforms as cp
    >>> reqd = 'macosx-10.4-ppc'
    >>> cp(reqd, reqd)
    True
    >>> cp("win32", reqd)
    False

Distributions made on other machine types are not compatible::

    >>> cp("macosx-10.4-i386", reqd)
    False

Distributions made on earlier versions of the OS are compatible, as
long as they are from the same top-level version. The patchlevel version
number does not matter::

    >>> cp("macosx-10.4-ppc", reqd)
    True
    >>> cp("macosx-10.3-ppc", reqd)
    True
    >>> cp("macosx-10.5-ppc", reqd)
    False
    >>> cp("macosx-9.5-ppc", reqd)
    False

Backwards compatibility for packages made via earlier versions of
setuptools is provided as well::

    >>> cp("darwin-8.2.0-Power_Macintosh", reqd)
    True
    >>> cp("darwin-7.2.0-Power_Macintosh", reqd)
    True
    >>> cp("darwin-8.2.0-Power_Macintosh", "macosx-10.3-ppc")
    False


Environment Markers
-------------------

    >>> from pkg_resources import invalid_marker as im, evaluate_marker as em
    >>> import os

    >>> print(im("sys_platform"))
    Invalid marker: 'sys_platform', parse error at ''

    >>> print(im("sys_platform=="))
    Invalid marker: 'sys_platform==', parse error at ''

    >>> print(im("sys_platform=='win32'"))
    False

    >>> print(im("sys=='x'"))
    Invalid marker: "sys=='x'", parse error at "sys=='x'"

    >>> print(im("(extra)"))
    Invalid marker: '(extra)', parse error at ')'

    >>> print(im("(extra"))
    Invalid marker: '(extra', parse error at ''

    >>> print(im("os.open('foo')=='y'"))
    Invalid marker: "os.open('foo')=='y'", parse error at 'os.open('

    >>> print(im("'x'=='y' and os.open('foo')=='y'")) # no short-circuit!
    Invalid marker: "'x'=='y' and os.open('foo')=='y'", parse error at 'and os.o'

    >>> print(im("'x'=='x' or os.open('foo')=='y'")) # no short-circuit!
    Invalid marker: "'x'=='x' or os.open('foo')=='y'", parse error at 'or os.op'

    >>> print(im("'x' < 'y' < 'z'"))
    Invalid marker: "'x' < 'y' < 'z'", parse error at "< 'z'"

    >>> print(im("r'x'=='x'"))
    Invalid marker: "r'x'=='x'", parse error at "r'x'=='x"

    >>> print(im("'''x'''=='x'"))
    Invalid marker: "'''x'''=='x'", parse error at "'x'''=='"

    >>> print(im('"""x"""=="x"'))
    Invalid marker: '"""x"""=="x"', parse error at '"x"""=="'

    >>> print(im(r"x\n=='x'"))
    Invalid marker: "x\\n=='x'", parse error at "x\\n=='x'"

    >>> print(im("os.open=='y'"))
    Invalid marker: "os.open=='y'", parse error at 'os.open='

    >>> em("sys_platform=='win32'") == (sys.platform=='win32')
    True

    >>> em("python_version >= '2.7'")
    True

    >>> em("python_version > '2.6'")
    True

    >>> im("implementation_name=='cpython'")
    False

    >>> im("platform_python_implementation=='CPython'")
    False

    >>> im("implementation_version=='3.5.1'")
    False
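This doctest file is deleted by the update; with packaging 23.1 vendored instead, equivalent marker checks go through ``packaging.markers``. A sketch against the standalone packaging distribution:

from packaging.markers import InvalidMarker, Marker

print(Marker("python_version >= '2.7'").evaluate())  # True on supported Pythons

try:
    Marker("(extra")  # unbalanced parenthesis, as in the doctests above
except InvalidMarker as exc:
    print(exc)
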
11
lib/pkg_resources/extern/__init__.py
vendored
@@ -58,7 +58,8 @@ class VendorImporter:
         """Return a module spec for vendored names."""
         return (
             importlib.util.spec_from_loader(fullname, self)
-            if self._module_matches_namespace(fullname) else None
+            if self._module_matches_namespace(fullname)
+            else None
         )
 
     def install(self):
@@ -69,5 +70,11 @@ class VendorImporter:
         sys.meta_path.append(self)
 
 
-names = 'packaging', 'pyparsing', 'appdirs'
+names = (
+    'packaging',
+    'platformdirs',
+    'jaraco',
+    'importlib_resources',
+    'more_itertools',
+)
 VendorImporter(__name__, names).install()
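Once ``VendorImporter(__name__, names).install()`` has run, dotted imports under ``pkg_resources.extern`` are served from the vendored tree. A usage sketch (assuming the vendored packaging is present, as pinned above):

from pkg_resources.extern.packaging.version import Version

# resolved from lib/pkg_resources/_vendor/packaging by the meta-path hook
print(Version("23.1") < Version("24.0"))  # True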