mirror of
https://github.com/MAGICGrants/truenas-apps.git
synced 2026-01-09 20:47:58 -05:00
Publish new changes in catalog [skip ci]
This commit is contained in:
3
trains/community/metube/1.0.0/README.md
Normal file
3
trains/community/metube/1.0.0/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# MeTube
|
||||
|
||||
[MeTube](https://github.com/alexta69/metube) is a web GUI for youtube-dl (using the yt-dlp fork) with playlist support.
|
||||
33
trains/community/metube/1.0.0/app.yaml
Normal file
33
trains/community/metube/1.0.0/app.yaml
Normal file
@@ -0,0 +1,33 @@
|
||||
app_version: '2024-07-09'
|
||||
capabilities: []
|
||||
categories:
|
||||
- media
|
||||
description: MeTube is a web GUI for youtube-dl (using the yt-dlp fork) with playlist
|
||||
support.
|
||||
home: https://github.com/alexta69/metube
|
||||
host_mounts: []
|
||||
icon: https://media.sys.truenas.net/apps/metube/icons/icon.svg
|
||||
keywords:
|
||||
- youtube-dl
|
||||
- yt-dlp
|
||||
lib_version: 1.0.0
|
||||
lib_version_hash: 66c98111180da566a3bcc9ee1d1be4f673356f453b5d97ee7c784c9a38ee9999
|
||||
maintainers:
|
||||
- email: dev@ixsystems.com
|
||||
name: truenas
|
||||
url: https://www.truenas.com/
|
||||
name: metube
|
||||
run_as_context:
|
||||
- description: MeTube runs as any non-root user.
|
||||
gid: 568
|
||||
group_name: metube
|
||||
uid: 568
|
||||
user_name: metube
|
||||
screenshots:
|
||||
- https://media.sys.truenas.net/apps/metube/screenshots/screenshot1.png
|
||||
sources:
|
||||
- https://github.com/truenas/charts/tree/master/library/ix-dev/community/metube
|
||||
- https://github.com/alexta69/metube
|
||||
title: MeTube
|
||||
train: community
|
||||
version: 1.0.0
|
||||
9
trains/community/metube/1.0.0/ix_values.yaml
Normal file
9
trains/community/metube/1.0.0/ix_values.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
images:
|
||||
image:
|
||||
repository: alexta69/metube
|
||||
tag: "2024-07-09"
|
||||
|
||||
consts:
|
||||
metube_container_name: metube
|
||||
perms_container_name: permissions
|
||||
downloads_path: /downloads
|
||||
@@ -0,0 +1,27 @@
|
||||
import math
import os
import re

# Number of logical CPUs on the host (None when it cannot be determined).
CPU_COUNT = os.cpu_count()

# Plain integer CPU count, e.g. "1", "4", "16".
# FIX: was r"^[1-9][0-9]$", which only matched two-digit values, so
# single-digit counts like "2" silently fell through to the default.
NUMBER_REGEX = re.compile(r"^[1-9][0-9]*$")
# Fractional CPU count, e.g. "0.5", "1.5".
FLOAT_REGEX = re.compile(r"^[0-9]+\.[0-9]+$")
# Kubernetes-style milli-CPU value, e.g. "500m".
MILI_CPU_REGEX = re.compile(r"^[0-9]+m$")


def transform_cpu(cpu) -> int:
    """Convert a k8s-style CPU limit string to a whole number of CPUs.

    Accepts plain integers ("4"), fractions ("1.5") and milli-CPUs
    ("500m"). Fractions are rounded up. Unrecognized input falls back
    to 2. The result is capped at the host's CPU count when known.
    """
    result = 2  # Default when the input matches no known format
    if NUMBER_REGEX.match(cpu):
        result = int(cpu)
    elif FLOAT_REGEX.match(cpu):
        result = int(math.ceil(float(cpu)))
    elif MILI_CPU_REGEX.match(cpu):
        # "500m" means 0.5 CPUs; round up to at least one full CPU.
        result = int(math.ceil(int(cpu[:-1]) / 1000))

    if CPU_COUNT is not None:
        # Do not exceed the actual CPU count
        result = min(result, CPU_COUNT)

    return result
|
||||
@@ -0,0 +1,9 @@
|
||||
def migrate_dns_config(dns_config):
    """Flatten a legacy DNS config's options into "name:value" strings."""
    if not dns_config:
        return []

    return [
        f"{option['name']}:{option['value']}"
        for option in dns_config.get("options", [])
    ]
|
||||
@@ -0,0 +1,15 @@
|
||||
def get_value_from_secret(secrets=None, secret_name="", key=""):
    """Look up [key] in the first secret whose name ends with [secret_name].

    FIX: the default for [secrets] was a mutable dict literal ({});
    use None to avoid the shared-mutable-default pitfall (behavior is
    unchanged: both defaults are falsy and raise below).

    Raises ValueError when any argument is missing, when the key is
    absent (or falsy) in the matched secret, or when no secret matches.
    """
    if not secrets or not secret_name or not key:
        raise ValueError("Expected [secrets], [secret_name] and [key] to be set")

    for curr_secret_name, curr_data in secrets.items():
        # Secret names are commonly prefixed (e.g. by release name),
        # so match on the suffix rather than requiring exact equality.
        if curr_secret_name.endswith(secret_name):
            if not curr_data.get(key, None):
                raise ValueError(
                    f"Expected [{key}] to be set in secret [{curr_secret_name}]"
                )
            return curr_data[key]

    raise ValueError(f"Secret [{secret_name}] not found")
|
||||
@@ -0,0 +1,49 @@
|
||||
import re
import math
import psutil

# Total physical memory of the host, in bytes.
TOTAL_MEM = psutil.virtual_memory().total

# Decimal (SI) suffix, e.g. "4G" -> 4 * 10**9 bytes.
SINGLE_SUFFIX_REGEX = re.compile(r"^[1-9][0-9]*([EPTGMK])$")
# Binary (IEC) suffix, e.g. "4Gi" -> 4 * 2**30 bytes.
DOUBLE_SUFFIX_REGEX = re.compile(r"^[1-9][0-9]*([EPTGMK])i$")
# Plain integer byte count, e.g. "1073741824".
BYTES_INTEGER_REGEX = re.compile(r"^[1-9][0-9]*$")
# Scientific notation, e.g. "1e9".
EXPONENT_REGEX = re.compile(r"^[1-9][0-9]*e[0-9]+$")

SUFFIX_MULTIPLIERS = {
    "K": 10**3,
    "M": 10**6,
    "G": 10**9,
    "T": 10**12,
    "P": 10**15,
    "E": 10**18,
}

DOUBLE_SUFFIX_MULTIPLIERS = {
    "Ki": 2**10,
    "Mi": 2**20,
    "Gi": 2**30,
    "Ti": 2**40,
    "Pi": 2**50,
    "Ei": 2**60,
}


def transform_memory(memory):
    """Convert a k8s-style memory limit string to whole megabytes.

    Accepts SI suffixes ("4G"), IEC suffixes ("4Gi"), plain byte counts
    ("1073741824") and scientific notation ("1e9"). The result is
    capped at the host's total memory.
    """
    # Default to 4GB, expressed in bytes.
    # FIX: this used to be a bare 4096, which the megabyte conversion
    # below collapsed to 0 MB whenever the input matched no format.
    result = 4 * 1024 * 1024 * 1024

    if re.match(SINGLE_SUFFIX_REGEX, memory):
        suffix = memory[-1]
        result = int(memory[:-1]) * SUFFIX_MULTIPLIERS[suffix]
    elif re.match(DOUBLE_SUFFIX_REGEX, memory):
        suffix = memory[-2:]
        result = int(memory[:-2]) * DOUBLE_SUFFIX_MULTIPLIERS[suffix]
    elif re.match(BYTES_INTEGER_REGEX, memory):
        result = int(memory)
    elif re.match(EXPONENT_REGEX, memory):
        result = int(float(memory))

    result = math.ceil(result)
    # Do not exceed the host's physical memory.
    result = min(result, TOTAL_MEM)
    # Convert to Megabytes
    result = result / 1024 / 1024
    return int(result)
|
||||
@@ -0,0 +1,59 @@
|
||||
from .memory import transform_memory, TOTAL_MEM
from .cpu import transform_cpu, CPU_COUNT


def migrate_resources(resources, gpus=None, system_gpus=None):
    """Migrate legacy (k8s-style) resource limits and GPU requests.

    resources: legacy dict with optional limits.cpu / limits.memory strings.
    gpus: mapping of GPU kind -> requested count, e.g. {"nvidia.com/gpu": 1}.
    system_gpus: list of GPU descriptions as reported by the system.

    Returns a dict with "limits" (cpus as a number, memory in MB) and,
    when GPUs were requested, a "gpus" section.
    """
    gpus = gpus or {}
    system_gpus = system_gpus or []

    # Defaults: half the host CPUs and all host memory (in MB).
    # FIX: "memory" was previously written as {TOTAL_MEM / 1024 / 1024},
    # a one-element set literal instead of a plain number.
    result = {
        "limits": {
            "cpus": (CPU_COUNT or 2) / 2,
            "memory": TOTAL_MEM / 1024 / 1024,
        }
    }

    limits = resources.get("limits", {})
    if limits.get("cpu", ""):
        result["limits"].update({"cpus": transform_cpu(limits.get("cpu", ""))})
    if limits.get("memory", ""):
        result["limits"].update({"memory": transform_memory(limits.get("memory", ""))})

    gpus_result = {}
    for kind, count in gpus.items():
        kind = kind.lower()  # Kind of gpu (amd, nvidia, intel)
        if count == 0:
            # User did not request this kind of GPU.
            continue

        if "amd" in kind or "intel" in kind:
            # AMD/Intel GPUs are not individually selectable here.
            gpus_result.update({"use_all_gpus": True})
        elif "nvidia" in kind:
            sys_gpus = [
                gpu_item
                for gpu_item in system_gpus
                if gpu_item.get("error") is None
                and gpu_item.get("vendor", None) is not None
                and gpu_item.get("vendor", "").upper() == "NVIDIA"
            ]
            for sys_gpu in sys_gpus:
                if count == 0:  # We passed # of gpus that user previously requested
                    break
                guid = sys_gpu.get("vendor_specific_config", {}).get("uuid", "")
                pci_slot = sys_gpu.get("pci_slot", "")
                if not guid or not pci_slot:
                    # Skip GPUs we cannot uniquely identify.
                    continue

                gpus_result.update(
                    {"nvidia_gpu_selection": {pci_slot: {"uuid": guid, "use_gpu": True}}}
                )
                count -= 1

    if gpus_result:
        result.update({"gpus": gpus_result})

    return result
|
||||
@@ -0,0 +1,115 @@
|
||||
def migrate_storage_item(storage_item, include_read_only=False):
    """Dispatch a legacy storage entry to its type-specific migration."""
    if not storage_item:
        raise ValueError("Expected [storage_item] to be set")

    item_type = storage_item["type"]
    if item_type == "ixVolume":
        result = migrate_ix_volume_type(storage_item)
    elif item_type == "hostPath":
        result = migrate_host_path_type(storage_item)
    elif item_type == "emptyDir":
        result = migrate_empty_dir_type(storage_item)
    elif item_type == "smb-pv-pvc":
        result = migrate_smb_pv_pvc_type(storage_item)
    else:
        # Unknown types migrate to an empty config.
        result = {}

    if storage_item.get("mountPath", ""):
        result["mount_path"] = storage_item["mountPath"]

    if include_read_only:
        result["read_only"] = storage_item.get("readOnly", False)
    return result
|
||||
|
||||
|
||||
def migrate_smb_pv_pvc_type(smb_pv_pvc):
    """Translate a legacy SMB PV/PVC entry into a cifs storage config."""
    smb_config = smb_pv_pvc.get("smbConfig", {})
    if not smb_config:
        raise ValueError("Expected [smb_pv_pvc] to have [smbConfig] set")

    cifs_config = {
        "server": smb_config["server"],
        "share": smb_config["share"],
        # Domain is optional in the legacy schema.
        "domain": smb_config.get("domain", ""),
        "username": smb_config["username"],
        "password": smb_config["password"],
    }
    return {"type": "cifs", "cifs_config": cifs_config}
|
||||
|
||||
|
||||
def migrate_empty_dir_type(empty_dir):
    """Translate a legacy emptyDir entry to tmpfs or temporary storage."""
    config = empty_dir.get("emptyDirConfig", {})
    if not config:
        raise ValueError("Expected [empty_dir] to have [emptyDirConfig] set")

    if config.get("medium", "") != "Memory":
        # Disk-backed emptyDir maps to plain temporary storage.
        return {"type": "temporary"}

    # Memory-backed emptyDir becomes tmpfs; legacy size is in Gi,
    # the new schema wants Mi.
    return {
        "type": "tmpfs",
        "tmpfs_config": {"size": config.get("size", 0.5) * 1024},
    }
|
||||
|
||||
|
||||
def migrate_ix_volume_type(ix_volume):
    """Translate a legacy ixVolume entry to the new ix_volume config."""
    vol_config = ix_volume.get("ixVolumeConfig", {})
    if not vol_config:
        raise ValueError("Expected [ix_volume] to have [ixVolumeConfig] set")

    acl_enabled = vol_config.get("aclEnable", False)
    ix_volume_config = {
        "acl_enable": acl_enabled,
        "dataset_name": vol_config.get("datasetName", ""),
    }
    if acl_enabled:
        # ACL entries are only present (and required) when ACLs are enabled.
        ix_volume_config["acl_entries"] = migrate_acl_entries(vol_config["aclEntries"])

    return {"type": "ix_volume", "ix_volume_config": ix_volume_config}
|
||||
|
||||
|
||||
def migrate_host_path_type(host_path):
    """Translate a legacy hostPath entry to the new host_path config."""
    path_config = host_path.get("hostPathConfig", {})
    if not path_config:
        raise ValueError("Expected [host_path] to have [hostPathConfig] set")

    acl_enabled = path_config.get("aclEnable", False)
    host_path_config = {"acl_enable": acl_enabled}
    if acl_enabled:
        host_path_config["acl"] = migrate_acl_entries(path_config.get("acl", {}))
    else:
        # Without ACLs the raw host path is mounted directly.
        host_path_config["path"] = path_config["hostPath"]

    return {"type": "host_path", "host_path_config": host_path_config}
|
||||
|
||||
|
||||
def migrate_acl_entries(acl_entries: dict) -> dict:
    """Translate legacy ACL entries into the new ACL schema."""
    # Keep only the fields the new schema knows about.
    entries = [
        {"access": entry["access"], "id": entry["id"], "id_type": entry["id_type"]}
        for entry in acl_entries.get("entries", [])
    ]

    return {
        "entries": entries,
        "options": {"force": acl_entries.get("force", False)},
        "path": acl_entries["path"],
    }
|
||||
344
trains/community/metube/1.0.0/questions.yaml
Normal file
344
trains/community/metube/1.0.0/questions.yaml
Normal file
@@ -0,0 +1,344 @@
|
||||
groups:
|
||||
- name: MeTube Configuration
|
||||
description: Configure MeTube
|
||||
- name: User and Group Configuration
|
||||
description: Configure User and Group for MeTube
|
||||
- name: Network Configuration
|
||||
description: Configure Network for MeTube
|
||||
- name: Storage Configuration
|
||||
description: Configure Storage for MeTube
|
||||
- name: Resources Configuration
|
||||
description: Configure Resources for MeTube
|
||||
|
||||
questions:
|
||||
- variable: metube
|
||||
label: ""
|
||||
group: MeTube Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: default_theme
|
||||
label: Default Theme
|
||||
description: The default theme for MeTube.
|
||||
schema:
|
||||
type: string
|
||||
default: auto
|
||||
required: true
|
||||
enum:
|
||||
- value: auto
|
||||
description: Auto
|
||||
- value: dark
|
||||
description: Dark
|
||||
- value: light
|
||||
description: Light
|
||||
- variable: additional_envs
|
||||
label: Additional Environment Variables
|
||||
description: Configure additional environment variables for MeTube.
|
||||
schema:
|
||||
type: list
|
||||
default: []
|
||||
items:
|
||||
- variable: env
|
||||
label: Environment Variable
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: name
|
||||
label: Name
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: value
|
||||
label: Value
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: run_as
|
||||
label: ""
|
||||
group: User and Group Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: user
|
||||
label: User ID
|
||||
description: The user id that MeTube files will be owned by.
|
||||
schema:
|
||||
type: int
|
||||
min: 568
|
||||
default: 568
|
||||
required: true
|
||||
- variable: group
|
||||
label: Group ID
|
||||
description: The group id that MeTube files will be owned by.
|
||||
schema:
|
||||
type: int
|
||||
min: 568
|
||||
default: 568
|
||||
required: true
|
||||
|
||||
- variable: network
|
||||
label: ""
|
||||
group: Network Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: web_port
|
||||
label: WebUI Port
|
||||
description: The port for MeTube WebUI
|
||||
schema:
|
||||
type: int
|
||||
default: 30094
|
||||
required: true
|
||||
$ref:
|
||||
- definitions/port
|
||||
- variable: host_network
|
||||
label: Host Network
|
||||
description: |
|
||||
Bind to the host network. It's recommended to keep this disabled.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: storage
|
||||
label: ""
|
||||
group: Storage Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: downloads
|
||||
label: MeTube Downloads Storage
|
||||
description: The path to store MeTube Downloads.
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: type
|
||||
label: Type
|
||||
description: |
|
||||
ixVolume: Is dataset created automatically by the system.</br>
|
||||
Host Path: Is a path that already exists on the system.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
immutable: true
|
||||
default: "ix_volume"
|
||||
enum:
|
||||
- value: "host_path"
|
||||
description: Host Path (Path that already exists on the system)
|
||||
- value: "ix_volume"
|
||||
description: ixVolume (Dataset created automatically by the system)
|
||||
- variable: ix_volume_config
|
||||
label: ixVolume Configuration
|
||||
description: The configuration for the ixVolume dataset.
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "ix_volume"]]
|
||||
$ref:
|
||||
- "normalize/ix_volume"
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: dataset_name
|
||||
label: Dataset Name
|
||||
description: The name of the dataset to use for storage.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
immutable: true
|
||||
hidden: true
|
||||
default: "downloads"
|
||||
- variable: acl_entries
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
- variable: host_path_config
|
||||
label: Host Path Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "host_path"]]
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: acl
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
$ref:
|
||||
- "normalize/acl"
|
||||
- variable: path
|
||||
label: Host Path
|
||||
description: The host path to use for storage.
|
||||
schema:
|
||||
type: hostpath
|
||||
show_if: [["acl_enable", "=", false]]
|
||||
required: true
|
||||
- variable: additional_storage
|
||||
label: Additional Storage
|
||||
description: Additional storage for MeTube.
|
||||
schema:
|
||||
type: list
|
||||
default: []
|
||||
items:
|
||||
- variable: storageEntry
|
||||
label: Storage Entry
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: type
|
||||
label: Type
|
||||
description: |
|
||||
ixVolume: Is dataset created automatically by the system.</br>
|
||||
Host Path: Is a path that already exists on the system.</br>
|
||||
SMB Share: Is a SMB share that is mounted to a persistent volume claim.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
default: "ix_volume"
|
||||
immutable: true
|
||||
enum:
|
||||
- value: "host_path"
|
||||
description: Host Path (Path that already exists on the system)
|
||||
- value: "ix_volume"
|
||||
description: ixVolume (Dataset created automatically by the system)
|
||||
- value: "cifs"
|
||||
description: SMB/CIFS Share (Mounts a persistent volume claim to a SMB share)
|
||||
- variable: read_only
|
||||
label: Read Only
|
||||
description: Mount the volume as read only.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: mount_path
|
||||
label: Mount Path
|
||||
description: The path inside the container to mount the storage.
|
||||
schema:
|
||||
type: path
|
||||
required: true
|
||||
- variable: host_path_config
|
||||
label: Host Path Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "host_path"]]
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: acl
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
$ref:
|
||||
- "normalize/acl"
|
||||
- variable: path
|
||||
label: Host Path
|
||||
description: The host path to use for storage.
|
||||
schema:
|
||||
type: hostpath
|
||||
show_if: [["acl_enable", "=", false]]
|
||||
required: true
|
||||
- variable: ix_volume_config
|
||||
label: ixVolume Configuration
|
||||
description: The configuration for the ixVolume dataset.
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "ix_volume"]]
|
||||
$ref:
|
||||
- "normalize/ix_volume"
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: dataset_name
|
||||
label: Dataset Name
|
||||
description: The name of the dataset to use for storage.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
immutable: true
|
||||
default: "storage_entry"
|
||||
- variable: acl_entries
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
- variable: cifs_config
|
||||
label: SMB Configuration
|
||||
description: The configuration for the SMB dataset.
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "cifs"]]
|
||||
attrs:
|
||||
- variable: server
|
||||
label: Server
|
||||
description: The server to mount the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: path
|
||||
label: Path
|
||||
description: The path to mount the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: username
|
||||
label: Username
|
||||
description: The username to use for the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: password
|
||||
label: Password
|
||||
description: The password to use for the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
private: true
|
||||
- variable: domain
|
||||
label: Domain
|
||||
description: The domain to use for the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
- variable: resources
|
||||
label: ""
|
||||
group: Resources Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: limits
|
||||
label: Limits
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: cpus
|
||||
label: CPUs
|
||||
description: CPUs limit for MeTube.
|
||||
schema:
|
||||
type: int
|
||||
default: 2
|
||||
required: true
|
||||
- variable: memory
|
||||
label: Memory (in MB)
|
||||
description: Memory limit for MeTube.
|
||||
schema:
|
||||
type: int
|
||||
default: 4096
|
||||
required: true
|
||||
81
trains/community/metube/1.0.0/templates/docker-compose.yaml
Normal file
81
trains/community/metube/1.0.0/templates/docker-compose.yaml
Normal file
@@ -0,0 +1,81 @@
|
||||
{% from "macros/global/perms/container.yaml.jinja" import perms_container %}
|
||||
|
||||
{# Stores storage items that contains info for volumes, vol mounts, perms dirs and perms mounts #}
|
||||
{% set storage_items = namespace(items=[]) %}
|
||||
{# Stores the top level volumes #}
|
||||
{% set volumes = namespace(items={}) %}
|
||||
{# Stores the container volume mounts #}
|
||||
{% set volume_mounts = namespace(items=[]) %}
|
||||
{# Stores the perms container volume mounts #}
|
||||
{% set perms_mounts = namespace(items=[]) %}
|
||||
{# Stores the perms container dirs #}
|
||||
{% set perms_dirs = namespace(items=[]) %}
|
||||
|
||||
{% do storage_items.items.append(ix_lib.base.storage.storage_item(data=dict(values.storage.downloads, **{"mount_path": values.consts.downloads_path}),
|
||||
values=values, perm_opts={"mount_path": "/mnt/metube/downloads", "mode": "check", "uid": values.run_as.user, "gid": values.run_as.group}
|
||||
)) %}
|
||||
{% do storage_items.items.append(ix_lib.base.storage.storage_item(data={"type":"anonymous", "mount_path": "/tmp"})) %}
|
||||
|
||||
{% for store in values.storage.additional_storage %}
|
||||
{% do storage_items.items.append(ix_lib.base.storage.storage_item(data=store, values=values,
|
||||
perm_opts={"mount_path": "/mnt/metube/dir_%s"|format(loop.index0), "mode": "check", "uid": values.run_as.user, "gid": values.run_as.group}
|
||||
)) %}
|
||||
{% endfor %}
|
||||
|
||||
{# Add each item to the above lists #}
|
||||
{% for item in storage_items.items %}
|
||||
{% if item.vol and volumes.items.update(item.vol) %}{% endif %}
|
||||
{% if item.vol_mount and volume_mounts.items.append(item.vol_mount) %}{% endif %}
|
||||
{% if item.perms_item and (perms_dirs.items.append(item.perms_item.perm_dir), perms_mounts.items.append(item.perms_item.vol_mount)) %}{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
{# Containers #}
|
||||
services:
|
||||
{{ values.consts.metube_container_name }}:
|
||||
user: {{ "%d:%d" | format(values.run_as.user, values.run_as.group) }}
|
||||
image: {{ ix_lib.base.utils.get_image(images=values.images, name="image") }}
|
||||
restart: unless-stopped
|
||||
deploy:
|
||||
resources: {{ ix_lib.base.resources.resources(values.resources) | tojson }}
|
||||
devices: {{ ix_lib.base.resources.get_devices(values.resources) | tojson }}
|
||||
{% if perms_dirs.items %}
|
||||
depends_on:
|
||||
{{ values.consts.perms_container_name }}:
|
||||
condition: service_completed_successfully
|
||||
{% endif %}
|
||||
{% if values.network.host_network %}
|
||||
network_mode: host
|
||||
{% endif %}
|
||||
cap_drop: {{ ix_lib.base.security.get_caps().drop | tojson }}
|
||||
security_opt: {{ ix_lib.base.security.get_sec_opts() | tojson }}
|
||||
{% if values.network.dns_opts %}
|
||||
dns_opt: {{ ix_lib.base.network.dns_opts(values.network.dns_opts) | tojson }}
|
||||
{% endif %}
|
||||
{% set test = ix_lib.base.healthchecks.curl_test(port=values.network.web_port, path="/") %}
|
||||
healthcheck: {{ ix_lib.base.healthchecks.check_health(test) | tojson }}
|
||||
environment: {{ ix_lib.base.environment.envs(app={
|
||||
"PORT": values.network.web_port,
|
||||
"DOWNLOAD_DIR": values.consts.downloads_path,
|
||||
"STATE_DIR": "%s/.metube" | format(values.consts.downloads_path),
|
||||
"DEFAULT_THEME": values.metube.default_theme,
|
||||
}, user=values.metube.additional_envs, values=values) | tojson }}
|
||||
{% if not values.network.host_network %}
|
||||
ports:
|
||||
- {{ ix_lib.base.ports.get_port(port={"target": values.network.web_port, "published": values.network.web_port}) | tojson }}
|
||||
{% endif %}
|
||||
volumes: {{ volume_mounts.items | tojson }}
|
||||
{% if perms_dirs.items %}
|
||||
{{ values.consts.perms_container_name }}:
|
||||
{{ perms_container(items=perms_dirs.items) | indent(4) }}
|
||||
volumes:
|
||||
{% for item in perms_mounts.items %}
|
||||
- {{ item | tojson }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
{% if volumes.items %}
|
||||
volumes: {{ volumes.items | tojson }}
|
||||
{% endif %}
|
||||
|
||||
x-portals: {{ ix_lib.base.metadata.get_portals([{"port": values.network.web_port}]) | tojson }}
|
||||
x-notes: {{ ix_lib.base.metadata.get_notes("MeTube") | tojson }}
|
||||
@@ -0,0 +1,90 @@
|
||||
from . import utils
|
||||
from .resources import get_nvidia_gpus_reservations
|
||||
|
||||
|
||||
def envs(app: dict | None = None, user: list | None = None, values: dict | None = None):
    """Build the container environment mapping.

    app: env vars set by the app template (name -> value).
    user: user-supplied env vars, a list of {"name": ..., "value": ...} dicts.
    values: the full values document (read for TZ, GPU and run_as info).

    Library-managed variables (TZ, NVIDIA_*, PUID/PGID family) win:
    an app or user variable that collides with one of them is an error.
    Raises via utils.throw_error on empty values, wrong [user] type,
    empty variable names, or collisions.
    """
    app = app or {}
    user = user or []
    values = values or {}
    result = {}

    if not values:
        utils.throw_error("Values cannot be empty in environment.py")

    if not isinstance(user, list):
        utils.throw_error(
            f"Unsupported type for user environment variables [{type(user)}]"
        )

    # Always set TZ
    result.update({"TZ": values.get("TZ", "Etc/UTC")})

    # Update envs with nvidia variables
    if values.get("resources", {}).get("gpus", {}):
        result.update(get_nvidia_env(values.get("resources", {}).get("gpus", {})))

    # Update envs with run_as variables
    if values.get("run_as"):
        result.update(get_run_as_envs(values.get("run_as", {})))

    # Make sure we don't manually set any of the above
    for item in app.items():
        if not item[0]:
            utils.throw_error("Environment variable name cannot be empty.")
        if item[0] in result:
            utils.throw_error(
                f"Environment variable [{item[0]}] is already defined automatically from the library."
            )
        result[item[0]] = item[1]

    # User variables may not override app-defined or library-defined ones.
    for item in user:
        if not item.get("name"):
            utils.throw_error("Environment variable name cannot be empty.")
        if item.get("name") in result:
            utils.throw_error(
                f"Environment variable [{item['name']}] is already defined from the application developer."
            )
        result[item["name"]] = item.get("value")

    return result
|
||||
|
||||
|
||||
# Sets some common variables that most applications use
def get_run_as_envs(run_as: dict) -> dict:
    """Map run-as user/group IDs onto the env var names apps commonly read."""
    result = {}
    user = run_as.get("user")
    group = run_as.get("group")

    if user:
        for var_name in ("PUID", "UID", "USER_ID"):
            result[var_name] = user
    if group:
        for var_name in ("PGID", "GID", "GROUP_ID"):
            result[var_name] = group

    return result
|
||||
|
||||
|
||||
def get_nvidia_env(gpus: dict) -> dict:
    """Build NVIDIA container env vars from the requested GPU reservations."""
    reservations = get_nvidia_gpus_reservations(gpus)
    device_ids = reservations.get("device_ids")
    if not device_ids:
        # "void" explicitly exposes no devices to the container.
        return {"NVIDIA_VISIBLE_DEVICES": "void"}

    # device_ids is known non-empty here, so join unconditionally
    # (the original's fallback to "void" in this branch was unreachable).
    return {
        "NVIDIA_VISIBLE_DEVICES": ",".join(device_ids),
        "NVIDIA_DRIVER_CAPABILITIES": "all",
    }
|
||||
@@ -0,0 +1,100 @@
|
||||
from . import utils
|
||||
|
||||
|
||||
def check_health(test, interval=10, timeout=10, retries=5, start_period=30):
    """Build a compose healthcheck dict around the given test command."""
    if not test:
        utils.throw_error("Expected [test] to be set")

    healthcheck = {"test": test, "retries": retries}
    # Compose expects durations as strings with a unit suffix.
    healthcheck["interval"] = f"{interval}s"
    healthcheck["timeout"] = f"{timeout}s"
    healthcheck["start_period"] = f"{start_period}s"
    return healthcheck
|
||||
|
||||
|
||||
def pg_test(user, db, config=None):
    """Return a pg_isready command probing the given database."""
    if not user or not db:
        utils.throw_error("Postgres container: [user] and [db] must be set")

    opts = config or {}
    return (
        f"pg_isready -h {opts.get('host', '127.0.0.1')} "
        f"-p {opts.get('port', 5432)} -d {db} -U {user}"
    )
|
||||
|
||||
|
||||
def curl_test(port, path, config=None):
    """Return a curl command probing scheme://host:port/path."""
    config = config or {}
    if not port or not path:
        utils.throw_error("Expected [port] and [path] to be set")

    scheme = config.get("scheme", "http")
    host = config.get("host", "127.0.0.1")

    extra = []
    if scheme == "https":
        # Self-signed certificates are the norm for local healthchecks.
        extra.append("--insecure")
    for header in config.get("headers", []):
        if not header[0] or not header[1]:
            utils.throw_error("Expected [header] to be a list of two items")
        extra.append(f'--header "{header[0]}: {header[1]}"')

    joined = " ".join(extra)
    return (
        "curl --silent --output /dev/null --show-error --fail "
        f"{joined} {scheme}://{host}:{port}{path}"
    )
|
||||
|
||||
|
||||
def wget_test(port, path, config=None):
    """Return a wget --spider command probing scheme://host:port/path."""
    config = config or {}
    if not port or not path:
        utils.throw_error("Expected [port] and [path] to be set")

    scheme = config.get("scheme", "http")
    host = config.get("host", "127.0.0.1")

    extra = []
    if scheme == "https":
        # Self-signed certificates are the norm for local healthchecks.
        extra.append("--no-check-certificate")
    for header in config.get("headers", []):
        if not header[0] or not header[1]:
            utils.throw_error("Expected [header] to be a list of two items")
        extra.append(f'--header "{header[0]}: {header[1]}"')

    joined = " ".join(extra)
    return f"wget --spider --quiet {joined} {scheme}://{host}:{port}{path}"
|
||||
|
||||
|
||||
def http_test(port, path, config=None):
    """Return a bash-only HTTP GET probe using /dev/tcp (no curl/wget needed).

    NOTE(review): the doubled "$$" appears to be docker-compose escaping
    for a literal "$" in the rendered command - confirm against the
    template renderer before changing.
    """
    config = config or {}
    if not port or not path:
        utils.throw_error("Expected [port] and [path] to be set")

    host = config.get("host", "127.0.0.1")

    # Open a file descriptor to host:port, write a minimal HTTP/1.1
    # request, and read the response back; the probe succeeds only if
    # the connection and exchange work.
    return (
        f"/bin/bash -c 'exec {{health_check_fd}}<>/dev/tcp/{host}/{port} && echo -e \"GET {path} HTTP/1.1\\r\\nHost: "
        + f"{host}\\r\\nConnection: close\\r\\n\\r\\n\" >&$${{health_check_fd}} && cat <&$${{health_check_fd}}'"
    )
|
||||
|
||||
|
||||
def netcat_test(port, config=None):
    """Return an nc command checking that [port] accepts TCP connections."""
    if not port:
        utils.throw_error("Expected [port] to be set")

    target_host = (config or {}).get("host", "127.0.0.1")
    # -z: scan only, -w 1: one second timeout.
    return f"nc -z -w 1 {target_host} {port}"
|
||||
|
||||
|
||||
def tcp_test(port, config=None):
    """Return a bash /dev/tcp probe for host:port with a 1s timeout."""
    if not port:
        utils.throw_error("Expected [port] to be set")

    target_host = (config or {}).get("host", "127.0.0.1")
    return f"timeout 1 bash -c 'cat < /dev/null > /dev/tcp/{target_host}/{port}'"
|
||||
@@ -0,0 +1,71 @@
|
||||
from . import utils
|
||||
|
||||
|
||||
def get_header(app_name: str):
    """Return the markdown welcome/header section for the app notes."""
    return f"""# Welcome to TrueNAS SCALE

Thank you for installing {app_name}!
"""
|
||||
|
||||
|
||||
def get_footer(app_name: str):
    """Return the markdown footer (documentation, bug report and feature links)."""
    return f"""## Documentation

Documentation for {app_name} can be found at https://www.truenas.com/docs.

## Bug reports

If you find a bug in this app, please file an issue at
https://ixsystems.atlassian.net or https://github.com/truenas/apps

## Feature requests or improvements

If you find a feature request for this app, please file an issue at
https://ixsystems.atlassian.net or https://github.com/truenas/apps
"""
|
||||
|
||||
|
||||
def get_notes(app_name: str, body: str = ""):
    """Assemble the full app notes: header, optional body, footer."""
    if not app_name:
        utils.throw_error("Expected [app_name] to be set")

    sections = (get_header(app_name), body, get_footer(app_name))
    return "\n\n".join(sections)
|
||||
|
||||
|
||||
def get_portals(portals: list):
    """Validate portal definitions and return them normalized with defaults.

    Each portal dict may carry name, scheme, path, host and must carry port.
    Names must be unique; scheme must be http/https; path must be absolute.
    """
    valid_schemes = ["http", "https"]
    result = []
    for portal in portals:
        # Most apps have a single portal, lets default to a standard name
        name = portal.get("name", "Web UI")
        scheme = portal.get("scheme", "http")
        path = portal.get("path", "/")

        if not name:
            utils.throw_error("Expected [portal.name] to be set")
        seen = [p["name"] for p in result]
        if name in seen:
            utils.throw_error(
                f"Expected [portal.name] to be unique, got [{', '.join([p['name'] for p in result]+[name])}]"
            )
        if scheme not in valid_schemes:
            utils.throw_error(
                f"Expected [portal.scheme] to be one of [{', '.join(valid_schemes)}], got [{portal['scheme']}]"
            )
        if not portal.get("port"):
            utils.throw_error("Expected [portal.port] to be set")
        if not path.startswith("/"):
            utils.throw_error(
                f"Expected [portal.path] to start with /, got [{portal['path']}]"
            )

        result.append(
            {
                "name": name,
                "scheme": scheme,
                "host": portal.get("host", "0.0.0.0"),
                "port": portal["port"],
                "path": path,
            }
        )

    return result
|
||||
@@ -0,0 +1,21 @@
|
||||
from . import utils
|
||||
|
||||
|
||||
def dns_opts(dns_options=None):
    """Validate DNS resolver options (resolv.conf style "key:value" strings).

    Rejects duplicate keys; returns the list unchanged when valid.
    """
    dns_options = dns_options or []
    if not dns_options:
        return []

    tracked = {}
    # Reserved for options we may want to forbid later; currently empty.
    disallowed_opts = []
    for entry in dns_options:
        opt_key = entry.split(":")[0]
        if opt_key in tracked:
            utils.throw_error(
                f"Expected [dns_opts] to be unique, got [{', '.join([d.split(':')[0] for d in tracked])}]"
            )
        if opt_key in disallowed_opts:
            utils.throw_error(f"Expected [dns_opts] to not contain [{opt_key}] key.")
        tracked[opt_key] = entry

    return dns_options
|
||||
@@ -0,0 +1,42 @@
|
||||
import ipaddress
|
||||
|
||||
from . import utils
|
||||
|
||||
|
||||
def must_valid_port(num: int):
    """Abort templating unless [num] is within the valid TCP/UDP port range."""
    if not 1 <= num <= 65535:
        utils.throw_error(f"Expected a valid port number, got [{num}]")
|
||||
|
||||
|
||||
def must_valid_ip(ip: str):
    """Abort templating unless [ip] parses as an IPv4 or IPv6 address."""
    try:
        ipaddress.ip_address(ip)
    except ValueError:
        utils.throw_error(f"Expected a valid IP address, got [{ip}]")
|
||||
|
||||
|
||||
def must_valid_protocol(protocol: str):
    """Abort templating unless [protocol] is tcp or udp."""
    allowed = ("tcp", "udp")
    if protocol not in allowed:
        utils.throw_error(f"Expected a valid protocol, got [{protocol}]")
|
||||
|
||||
|
||||
def must_valid_mode(mode: str):
    """Abort templating unless [mode] is a valid port publish mode."""
    allowed = ("ingress", "host")
    if mode not in allowed:
        utils.throw_error(f"Expected a valid mode, got [{mode}]")
|
||||
|
||||
|
||||
def get_port(port=None):
    """Validate a port spec and return a normalized docker-compose port entry.

    Requires "published" and "target"; host_ip, protocol and mode default to
    0.0.0.0 / tcp / ingress respectively.
    """
    port = port or {}
    protocol = port.get("protocol", "tcp")
    mode = port.get("mode", "ingress")
    host_ip = port.get("host_ip", "0.0.0.0")

    must_valid_port(port["published"])
    must_valid_port(port["target"])
    must_valid_ip(host_ip)
    must_valid_protocol(protocol)
    must_valid_mode(mode)

    return {
        "target": port["target"],
        "published": port["published"],
        "protocol": protocol,
        "mode": mode,
        "host_ip": host_ip,
    }
|
||||
@@ -0,0 +1,77 @@
|
||||
from . import utils
|
||||
from .security import get_caps, get_sec_opts
|
||||
from .network import dns_opts
|
||||
from .healthchecks import pg_test, check_health
|
||||
from .resources import resources
|
||||
|
||||
|
||||
def pg_url(variant, host, user, password, dbname, port=5432):
    """Build a postgres connection URL (sslmode disabled).

    variant selects the URL scheme and must be "postgresql" or "postgres".
    """
    required = {"host": host, "user": user, "password": password, "dbname": dbname}
    for field, value in required.items():
        if not value:
            utils.throw_error(f"Expected [{field}] to be set")

    if variant not in ("postgresql", "postgres"):
        utils.throw_error(
            f"Expected [variant] to be one of [postgresql, postgres], got [{variant}]"
        )

    # The scheme is exactly the variant name, so a single format string suffices
    return f"{variant}://{user}:{password}@{host}:{port}/{dbname}?sslmode=disable"
|
||||
|
||||
|
||||
def pg_env(user, password, dbname, port=5432):
    """Environment variables consumed by the official postgres image."""
    for field, value in (("user", user), ("password", password), ("dbname", dbname)):
        if not value:
            utils.throw_error(f"Expected [{field}] to be set for postgres")

    return {
        "POSTGRES_USER": user,
        "POSTGRES_PASSWORD": password,
        "POSTGRES_DB": dbname,
        "POSTGRES_PORT": port,
    }
|
||||
|
||||
|
||||
def pg_container(data=None):
    """Build a docker-compose service definition for a postgres sidecar.

    Required keys in [data]: db_user, db_password, db_name, volumes, resources.
    Optional keys: image (default postgres:15), user/group (default 999),
    port (default 5432), dns_opt, depends_on.
    """
    # Fixed: a mutable default argument ({}) is shared across calls; use None.
    data = data or {}
    req_keys = ["db_user", "db_password", "db_name", "volumes", "resources"]
    for key in req_keys:
        if not data.get(key):
            utils.throw_error(f"Expected [{key}] to be set for postgres")

    pg_user = data["db_user"]
    pg_password = data["db_password"]
    pg_dbname = data["db_name"]
    pg_port = data.get("port", 5432)
    depends = data.get("depends_on", {})
    depends_on = {}
    for key in depends:
        # Default to waiting for the dependency to finish successfully
        depends_on[key] = {
            "condition": depends[key].get("condition", "service_completed_successfully")
        }

    return {
        "image": f"{data.get('image', 'postgres:15')}",
        "user": f"{data.get('user', '999')}:{data.get('group', '999')}",
        "restart": "unless-stopped",
        "cap_drop": get_caps()["drop"],
        "security_opt": get_sec_opts(),
        # dns_opt is only emitted when configured; docker dislikes empty keys
        **({"dns_opt": dns_opts(data["dns_opt"])} if data.get("dns_opt") else {}),
        "healthcheck": check_health(pg_test(user=pg_user, db=pg_dbname)),
        "environment": pg_env(
            user=pg_user,
            password=pg_password,
            dbname=pg_dbname,
            port=pg_port,
        ),
        "volumes": data["volumes"],
        "depends_on": depends_on,
        "deploy": {"resources": resources(data["resources"])},
    }
|
||||
@@ -0,0 +1,70 @@
|
||||
import re
|
||||
|
||||
from . import utils
|
||||
|
||||
|
||||
def resources(resources):
    """Translate app resource config into a compose "deploy.resources" section.

    Limits default to 2.0 cpus / 4096 MiB. GPU selections are appended as
    device reservations.
    """
    gpus = resources.get("gpus", {})
    limits = resources.get("limits", {})
    cpus = str(limits.get("cpus", 2.0))
    memory = str(limits.get("memory", 4096))
    if not re.match(r"^[1-9][0-9]*(\.[0-9]+)?$", cpus):
        utils.throw_error(f"Expected cpus to be a number or a float, got [{cpus}]")
    if not re.match(r"^[1-9][0-9]*$", memory):
        # Fixed: previously raised a bare ValueError; use throw_error for
        # consistency with every other validation in this library.
        utils.throw_error(f"Expected memory to be a number, got [{memory}]")

    result = {
        "limits": {"cpus": cpus, "memory": f"{memory}M"},
        "reservations": {"devices": []},
    }

    if gpus:
        # Fixed: compute the reservation once instead of calling twice.
        gpu_result = get_nvidia_gpus_reservations(gpus)
        if gpu_result:
            # Appending to devices, as we can later extend this to support
            # other types of devices. Eg. TPUs.
            result["reservations"]["devices"].append(gpu_result)

    # Docker does not like empty "things" all around.
    if not result["reservations"]["devices"]:
        del result["reservations"]

    return result
|
||||
|
||||
|
||||
def get_nvidia_gpus_reservations(gpus: dict) -> dict:
    """Build the nvidia device reservation from the GPU selection config.

    Input:
    {
        "nvidia_gpu_selection": {
            "pci_slot_0": {"uuid": "uuid_0", "use_gpu": True},
            "pci_slot_1": {"uuid": "uuid_1", "use_gpu": True},
        },
    }

    Returns {} when nothing is selected.
    """
    if not gpus:
        return {}

    selected_ids = [
        gpu["uuid"]
        for gpu in gpus.get("nvidia_gpu_selection", {}).values()
        if gpu["use_gpu"]
    ]
    if not selected_ids:
        return {}

    return {
        "capabilities": ["gpu"],
        "driver": "nvidia",
        "device_ids": selected_ids,
    }
|
||||
|
||||
|
||||
# Returns the top level devices list
# Accepting other_devices to allow manually adding devices
# directly to the list. (Eg sound devices)
def get_devices(resources: dict, other_devices: list = None) -> list:
    """Build the compose "devices" list from resource config plus extras.

    other_devices lets callers inject devices directly (e.g. sound devices).
    """
    gpus = resources.get("gpus", {})
    # Fixed: copy instead of aliasing, so we never mutate the caller's list
    # (the old mutable default ([]) was also shared across calls).
    devices = list(other_devices) if other_devices else []
    if gpus.get("use_all_gpus", False):
        devices.append("/dev/dri")

    return devices
|
||||
@@ -0,0 +1,27 @@
|
||||
from base64 import b64encode
|
||||
|
||||
|
||||
def get_caps(add=None, drop=None):
    """Container capability config; drops ALL capabilities unless told otherwise."""
    result = {"drop": drop or ["ALL"]}
    if add:
        result["add"] = add
    return result
|
||||
|
||||
|
||||
def get_sec_opts(add=None, remove=None):
    """Container security options, always seeded with no-new-privileges."""
    result = ["no-new-privileges"]
    for opt in add or []:
        if opt not in result:
            result.append(opt)
    for opt in remove or []:
        if opt in result:
            result.remove(opt)
    return result
|
||||
|
||||
|
||||
def htpasswd(username, password):
    """Base64-encode "username:password" (the payload of an HTTP Basic auth header)."""
    credentials = f"{username}:{password}".encode("utf-8")
    return b64encode(credentials).decode("utf-8")
|
||||
@@ -0,0 +1,363 @@
|
||||
import re
|
||||
|
||||
from . import utils
|
||||
|
||||
|
||||
BIND_TYPES = ["host_path", "ix_volume"]
|
||||
VOL_TYPES = ["volume", "nfs", "cifs", "temporary"]
|
||||
ALL_TYPES = BIND_TYPES + VOL_TYPES + ["tmpfs", "anonymous"]
|
||||
PROPAGATION_TYPES = ["shared", "slave", "private", "rshared", "rslave", "rprivate"]
|
||||
|
||||
|
||||
def _get_name_for_temporary(data):
|
||||
if not data.get("mount_path"):
|
||||
utils.throw_error("Expected [mount_path] to be set for temporary volume")
|
||||
|
||||
return (
|
||||
data["mount_path"]
|
||||
.lstrip("/")
|
||||
.lower()
|
||||
.replace("/", "_")
|
||||
.replace(".", "_")
|
||||
.replace(" ", "_")
|
||||
)
|
||||
|
||||
|
||||
# Returns a volume mount object (Used in container's "volumes" level)
def vol_mount(data, values=None):
    """Build the per-container volume mount entry for one storage item.

    Maps the app storage [type] to a docker-compose mount (bind / volume /
    tmpfs) via _get_docker_vol_type and merges in the type-specific config.
    values["ix_volumes"] is needed to resolve ix_volume sources.
    """
    values = values or {}
    ix_volumes = values.get("ix_volumes") or []
    vol_type = _get_docker_vol_type(data)

    volume = {
        "type": vol_type,
        "target": utils.valid_path(data.get("mount_path", "")),
        "read_only": data.get("read_only", False),
    }
    if vol_type == "bind":  # Default create_host_path is true in short-syntax
        volume.update(_get_bind_vol_config(data, ix_volumes))
    elif vol_type == "volume":
        volume.update(_get_volume_vol_config(data))
    elif vol_type == "tmpfs":
        volume.update(_get_tmpfs_vol_config(data))
    elif vol_type == "temporary":
        # NOTE(review): _get_docker_vol_type maps "temporary" (a VOL_TYPE) to
        # "volume", so this branch appears unreachable - confirm before removing.
        volume["type"] = "volume"
        volume.update(_get_volume_vol_config(data))
    elif vol_type == "anonymous":
        # Anonymous volumes are plain volumes without a named source
        volume["type"] = "volume"
        volume.update(_get_anonymous_vol_config(data))

    return volume
|
||||
|
||||
|
||||
def storage_item(data, values=None, perm_opts=None):
    """Bundle the container mount, top-level volume and perms entry for one storage item.

    NOTE: mutates [data] in place for temporary volumes (adds a derived
    volume_name). perms_item is only produced when perm_opts is given.
    """
    values = values or {}
    perm_opts = perm_opts or {}
    if data.get("type") == "temporary":
        data.update({"volume_name": _get_name_for_temporary(data)})
    return {
        "vol_mount": vol_mount(data, values),
        "vol": vol(data),
        "perms_item": perms_item(data, values, perm_opts) if perm_opts else {},
    }
|
||||
|
||||
|
||||
def perms_item(data, values=None, opts=None):
    """Build the permissions-fix entry (mount + chown/chmod spec) for one storage item.

    Returns {} when no automatic permission fixing applies (auto_permissions
    off, or ACL-managed host_path/ix_volume). NOTE: mutates [data] in place
    (sets auto_permissions and overwrites mount_path with opts["mount_path"]).
    """
    opts = opts or {}
    values = values or {}
    ix_context = values.get("ix_context") or {}
    vol_type = data.get("type", "")

    # Temp volumes are always auto permissions
    if vol_type == "temporary":
        data.update({"auto_permissions": True})

    # If its ix_volume and we are installing, we need to set auto permissions
    if vol_type == "ix_volume" and ix_context.get("is_install", False):
        data.update({"auto_permissions": True})

    if not data.get("auto_permissions"):
        return {}

    # ACL-managed paths keep their own permissions; skip auto fixing
    if vol_type == "host_path":
        if data.get("host_path_config", {}).get("acl_enable", False):
            return {}
    if vol_type == "ix_volume":
        if data.get("ix_volume_config", {}).get("acl_enable", False):
            return {}

    req_keys = ["mount_path", "mode", "uid", "gid"]
    for key in req_keys:
        if not opts.get(key):
            utils.throw_error(f"Expected opts passed to [perms_item] to have [{key}] key")

    # Remount the same source at the opts-specified path inside the perms container
    data.update({"mount_path": opts["mount_path"]})
    volume_mount = vol_mount(data, values)

    return {
        "vol_mount": volume_mount,
        "perm_dir": {
            "dir": volume_mount["target"],
            "mode": opts["mode"],
            "uid": opts["uid"],
            "gid": opts["gid"],
            # "chmod" stays a string; "false" means do not chmod
            "chmod": opts.get("chmod", "false"),
            "is_temporary": data["type"] == "temporary",
        },
    }
|
||||
|
||||
|
||||
def _get_bind_vol_config(data, ix_volumes=None):
    """Bind-mount specific config (source path + bind options) for host_path/ix_volume items.

    Fixed: the propagation validation was duplicated here; it is already
    performed (with the identical error message) by _get_valid_propagation,
    which also supplies the rprivate default.
    """
    ix_volumes = ix_volumes or []
    path = host_path(data, ix_volumes)

    # https://docs.docker.com/storage/bind-mounts/#configure-bind-propagation
    return {
        "source": path,
        "bind": {
            # Default create_host_path to true to mirror compose short-syntax
            "create_host_path": data.get("host_path_config", {}).get(
                "create_host_path", True
            ),
            "propagation": _get_valid_propagation(data),
        },
    }
|
||||
|
||||
|
||||
def _get_volume_vol_config(data):
    """Named-volume specific config; requires volume_name to be set."""
    if not data.get("volume_name"):
        utils.throw_error("Expected [volume_name] to be set for [volume] type")

    return {"source": data["volume_name"], "volume": _process_volume_config(data)}
|
||||
|
||||
|
||||
def _get_anonymous_vol_config(data):
    """Anonymous volumes carry only the generic volume options, no source name."""
    return {"volume": _process_volume_config(data)}
|
||||
|
||||
|
||||
# Matches a 4-character octal mode string like "0755"
mode_regex = re.compile(r"^0[0-7]{3}$")


def _get_tmpfs_vol_config(data):
    """tmpfs-specific config: optional size (MiB converted to bytes) and octal mode."""
    tmpfs = {}
    config = data.get("tmpfs_config", {})

    if config.get("size"):
        if not isinstance(config["size"], int):
            utils.throw_error("Expected [size] to be an integer for [tmpfs] type")
        if not config["size"] > 0:
            utils.throw_error("Expected [size] to be greater than 0 for [tmpfs] type")
        # Convert Mebibytes to Bytes
        tmpfs.update({"size": config["size"] * 1024 * 1024})

    if config.get("mode"):
        if not mode_regex.match(str(config["mode"])):
            utils.throw_error(
                f"Expected [mode] to be a octal string for [tmpfs] type, got [{config['mode']}]"
            )
        # NOTE(review): int(..., 8) requires [mode] to already be a string;
        # an integer that passes the regex via str() would raise TypeError here.
        tmpfs.update({"mode": int(config["mode"], 8)})

    return {"tmpfs": tmpfs}
|
||||
|
||||
|
||||
# Returns a volume object (Used in top "volumes" level)
def vol(data):
    """Top-level named-volume definition for this item; {} for non-volume types."""
    if not data or _get_docker_vol_type(data) != "volume":
        return {}

    name = data.get("volume_name")
    if not name:
        utils.throw_error("Expected [volume_name] to be set for [volume] type")

    if data["type"] == "nfs":
        return {name: _process_nfs(data)}
    if data["type"] == "cifs":
        return {name: _process_cifs(data)}
    return {name: {}}
|
||||
|
||||
|
||||
def _is_host_path(data):
    """True when the storage item is a host_path mount."""
    return data.get("type") == "host_path"
|
||||
|
||||
|
||||
def _get_valid_propagation(data):
    """Return the validated bind propagation mode, defaulting to rprivate."""
    if not data.get("propagation"):
        return "rprivate"
    # Idiom fix: use "not in" instead of "not x in y"
    if data["propagation"] not in PROPAGATION_TYPES:
        utils.throw_error(
            f"Expected [propagation] to be one of [{', '.join(PROPAGATION_TYPES)}], got [{data['propagation']}]"
        )
    return data["propagation"]
|
||||
|
||||
|
||||
def _is_ix_volume(data):
    """True when the storage item is an ix_volume (dataset-backed) mount."""
    return data.get("type") == "ix_volume"
|
||||
|
||||
|
||||
# Returns the host path for either a host_path or ix_volume
def host_path(data, ix_volumes=None):
    """Resolve the on-host directory backing a host_path or ix_volume item."""
    ix_volumes = ix_volumes or []
    if _is_host_path(data):
        resolved = _process_host_path_config(data)
    elif _is_ix_volume(data):
        resolved = _process_ix_volume_config(data, ix_volumes)
    else:
        utils.throw_error(
            f"Expected [host_path()] to be called only for types [host_path, ix_volume], got [{data['type']}]"
        )

    return utils.valid_path(resolved)
|
||||
|
||||
|
||||
# Returns the type of storage as used in docker-compose
def _get_docker_vol_type(data):
    """Map an app storage [type] onto the docker-compose mount type."""
    storage_type = data.get("type")
    if not storage_type:
        utils.throw_error("Expected [type] to be set for storage")

    if storage_type not in ALL_TYPES:
        utils.throw_error(
            f"Expected storage [type] to be one of {ALL_TYPES}, got [{data['type']}]"
        )

    if storage_type in BIND_TYPES:
        return "bind"
    if storage_type in VOL_TYPES:
        return "volume"
    return storage_type
|
||||
|
||||
|
||||
def _process_host_path_config(data):
    """Extract the host directory for a host_path item.

    When ACL is enabled, the path comes from host_path_config.acl.path;
    otherwise from host_path_config.path.
    """
    if data.get("host_path_config", {}).get("acl_enable", False):
        if not data["host_path_config"].get("acl", {}).get("path"):
            utils.throw_error(
                "Expected [host_path_config.acl.path] to be set for [host_path] type with ACL enabled"
            )
        return data["host_path_config"]["acl"]["path"]

    if not data.get("host_path_config", {}).get("path"):
        utils.throw_error(
            "Expected [host_path_config.path] to be set for [host_path] type"
        )

    return data["host_path_config"]["path"]
|
||||
|
||||
|
||||
def _process_volume_config(data):
|
||||
return {"nocopy": data.get("volume_config", {}).get("nocopy", False)}
|
||||
|
||||
|
||||
def _process_ix_volume_config(data, ix_volumes):
    """Resolve the dataset-backed host path for an ix_volume item.

    NOTE(review): ix_volumes is accessed with .get(), i.e. as a mapping of
    dataset name -> path, although callers default it to a list - confirm the
    expected shape with callers.
    """
    path = ""
    if not data.get("ix_volume_config", {}).get("dataset_name"):
        utils.throw_error(
            "Expected [ix_volume_config.dataset_name] to be set for [ix_volume] type"
        )

    if not ix_volumes:
        utils.throw_error("Expected [ix_volumes] to be set for [ix_volume] type")

    ds = data["ix_volume_config"]["dataset_name"]
    path = ix_volumes.get(ds, None)
    if not path:
        utils.throw_error(f"Expected the key [{ds}] to be set in [ix_volumes]")

    return path
|
||||
|
||||
|
||||
# Constructs a volume object for a cifs type
def _process_cifs(data):
    """Build the docker volume driver_opts for a CIFS/SMB share.

    Requires cifs_config with server, path, username and password. Extra
    mount options may be passed via cifs_config.options, but may not
    override user/password/domain.
    """
    if not data.get("cifs_config"):
        utils.throw_error("Expected [cifs_config] to be set for [cifs] type")

    required_keys = ["server", "path", "username", "password"]
    for key in required_keys:
        if not data["cifs_config"].get(key):
            utils.throw_error(f"Expected [{key}] to be set for [cifs] type")

    opts = [
        f"user={data['cifs_config']['username']}",
        f"password={data['cifs_config']['password']}",
    ]
    if data["cifs_config"].get("domain"):
        opts.append(f'domain={data["cifs_config"]["domain"]}')

    if data["cifs_config"].get("options"):
        if not isinstance(data["cifs_config"]["options"], list):
            utils.throw_error(
                "Expected [cifs_config.options] to be a list for [cifs] type"
            )

        # Credentials are managed above; reject attempts to override them
        disallowed_opts = ["user", "password", "domain"]
        for opt in data["cifs_config"]["options"]:
            if not isinstance(opt, str):
                utils.throw_error(
                    "Expected [cifs_config.options] to be a list of strings for [cifs] type"
                )

            key = opt.split("=")[0]
            for disallowed in disallowed_opts:
                if key == disallowed:
                    utils.throw_error(
                        f"Expected [cifs_config.options] to not start with [{disallowed}] for [cifs] type"
                    )

            opts.append(opt)

    # Normalize to a //server/path device string regardless of leading slashes
    server = data["cifs_config"]["server"].lstrip("/")
    path = data["cifs_config"]["path"]
    volume = {
        "driver_opts": {
            "type": "cifs",
            "device": f"//{server}/{path}",
            "o": f"{','.join(opts)}",
        },
    }

    return volume
|
||||
|
||||
|
||||
# Constructs a volume object for a nfs type
def _process_nfs(data):
    """Build the docker volume driver_opts for an NFS export.

    Requires nfs_config with server and path. Extra mount options may be
    passed via nfs_config.options, but may not override addr.
    """
    if not data.get("nfs_config"):
        utils.throw_error("Expected [nfs_config] to be set for [nfs] type")

    required_keys = ["server", "path"]
    for key in required_keys:
        if not data["nfs_config"].get(key):
            utils.throw_error(f"Expected [{key}] to be set for [nfs] type")

    opts = [f"addr={data['nfs_config']['server']}"]
    if data["nfs_config"].get("options"):
        if not isinstance(data["nfs_config"]["options"], list):
            utils.throw_error("Expected [nfs_config.options] to be a list for [nfs] type")

        # The server address is managed above; reject attempts to override it
        disallowed_opts = ["addr"]
        for opt in data["nfs_config"]["options"]:
            if not isinstance(opt, str):
                utils.throw_error(
                    "Expected [nfs_config.options] to be a list of strings for [nfs] type"
                )

            key = opt.split("=")[0]
            for disallowed in disallowed_opts:
                if key == disallowed:
                    utils.throw_error(
                        f"Expected [nfs_config.options] to not start with [{disallowed}] for [nfs] type"
                    )

            opts.append(opt)

    volume = {
        "driver_opts": {
            "type": "nfs",
            "device": f":{data['nfs_config']['path']}",
            "o": f"{','.join(opts)}",
        },
    }

    return volume
|
||||
@@ -0,0 +1,70 @@
|
||||
import secrets
|
||||
import sys
|
||||
|
||||
from . import security
|
||||
|
||||
|
||||
class TemplateException(Exception):
    """Known, user-facing templating error; raised without a traceback via throw_error."""

    pass
|
||||
|
||||
|
||||
def throw_error(message):
    """Raise a TemplateException carrying [message], suppressing the traceback.

    NOTE: sys.tracebacklimit is set process-wide, so any later traceback in
    this interpreter run is also truncated.
    """
    # When throwing a known error, hide the traceback
    # This is because the error is also shown in the UI
    # and having a traceback makes it hard for user to read
    sys.tracebacklimit = 0
    raise TemplateException(message)
|
||||
|
||||
|
||||
def secure_string(length):
    """Return a URL-safe random token derived from [length] random bytes.

    NOTE: token_urlsafe(length) base64-encodes [length] bytes, so the
    returned string is roughly 1.3 * length characters long, not [length].
    """
    return secrets.token_urlsafe(length)
|
||||
|
||||
|
||||
def basic_auth_header(username, password):
    """Return a ready-to-use HTTP Basic Authorization header value."""
    encoded = security.htpasswd(username, password)
    return f"Basic {encoded}"
|
||||
|
||||
|
||||
def merge_dicts(*dicts):
    """Merge dicts left-to-right; keys in later dicts override earlier ones."""
    merged = {}
    for current in dicts:
        merged = {**merged, **current}
    return merged
|
||||
|
||||
|
||||
# Basic validation for a path (Expand later)
def valid_path(path=""):
    """Validate a mount path: must be absolute and must not be the bare root."""
    is_absolute = path.startswith("/")
    if not is_absolute:
        throw_error(f"Expected path [{path}] to start with /")

    # Mounting / itself is never sensible, on the host or in a container
    if path == "/":
        throw_error(f"Expected path [{path}] to not be /")

    return path
|
||||
|
||||
|
||||
def camel_case(string):
    """Title-case the string via str.title, e.g. "my app" -> "My App".

    NOTE(review): despite the name this produces title case, not camelCase,
    and str.title also capitalizes after every non-letter ("a_b" -> "A_B").
    """
    return string.title()
|
||||
|
||||
|
||||
def is_boolean(string):
    """True when the string is "true" or "false" in any letter case."""
    return string.lower() in ("true", "false")
|
||||
|
||||
|
||||
def is_number(string):
    """True when the string parses as a float (plain integers included)."""
    try:
        float(string)
    except ValueError:
        return False
    return True
|
||||
|
||||
|
||||
def get_image(images=None, name=""):
    """Resolve "repository:tag" for the named image from the values images map.

    images maps image keys to {"repository": ..., "tag": ...}.
    """
    # Fixed: a mutable default argument ({}) is shared across calls; use None.
    images = images or {}
    if not images:
        throw_error("Expected [images] to be set")
    if name not in images:
        throw_error(f"Expected [images.{name}] to be set")
    image = images[name]
    if not image.get("repository") or not image.get("tag"):
        throw_error(f"Expected [images.{name}.repository] and [images.{name}.tag] to be set")

    return f"{image['repository']}:{image['tag']}"
|
||||
@@ -0,0 +1,48 @@
|
||||
{% from "macros/global/perms/script.sh.jinja" import process_dir_func %}

{# Takes a list of items to process #}
{# Each item is a dictionary with the following keys: #}
{# - dir: directory to process #}
{# - mode: always, check. (
    always: Always changes ownership and permissions,
    check: Checks the top level dir, and only applies if there is a mismatch.
) #}
{# - uid: uid to change to #}
{# - gid: gid to change to #}
{# - chmod: chmod to change to (Optional, default is no change) #}
{# Renders a short-lived bash service that sources process_dir_func and runs
   process_dir once per item; resource limits keep the helper lightweight. #}
{% macro perms_container(items=[]) %}
image: bash
user: root
deploy:
  resources:
    limits:
      cpus: "1.0"
      memory: 512m
entrypoint:
  - bash
  - -c
command:
  - |
    {{- process_dir_func() | indent(4) }}
    {%- for item in items %}
    process_dir {{ item.dir }} {{ item.mode }} {{ item.uid }} {{ item.gid }} {{ item.chmod }} {{ item.is_temporary|lower }}
    {%- endfor %}
{% endmacro %}
|
||||
|
||||
{# Examples #}
|
||||
{# perms_container([
|
||||
{
|
||||
"dir": "/mnt/directories/dir1",
|
||||
"mode": "always",
|
||||
"uid": 500,
|
||||
"gid": 500,
|
||||
"chmod": "755",
|
||||
},
|
||||
{
|
||||
"dir": "/mnt/directories/dir2",
|
||||
"mode": "check",
|
||||
"uid": 500,
|
||||
"gid": 500,
|
||||
"chmod": "755",
|
||||
},
|
||||
]) #}
|
||||
@@ -0,0 +1,75 @@
|
||||
{#
Don't forget to use double $ for shell variables,
otherwise docker-compose will try to expand them
#}

{# Emits the bash function process_dir(dir mode uid gid chmod is_temporary):
   fixes ownership/permissions on one directory. mode=always forces the
   change; mode=check only applies it when the current owner/perms differ.
   chmod="false" skips the permission change entirely. #}
{# NOTE(review): brace expansion does not occur inside double quotes, so
   rm -rf "$dir/{*,.*}" below likely matches nothing - confirm the cleanup of
   temporary directories actually empties them. #}
{% macro process_dir_func() %}
function process_dir() {
    local dir=$$1
    local mode=$$2
    local uid=$$3
    local gid=$$4
    local chmod=$$5
    local is_temporary=$$6

    local fix_owner="false"
    local fix_perms="false"

    if [ ! -d "$$dir" ]; then
        echo "Path [$$dir] does is not a directory, skipping..."
        exit 0
    fi

    if [ "$$is_temporary" = "true" ]; then
        echo "Path [$$dir] is a temporary directory, ensuring it is empty..."
        rm -rf "$$dir/{*,.*}"
    fi

    echo "Current Ownership and Permissions on [$$dir]:"
    echo "chown: $$(stat -c "%u %g" "$$dir")"
    echo "chmod: $$(stat -c "%a" "$$dir")"

    if [ "$$mode" = "always" ]; then
        fix_owner="true"
        fix_perms="true"
    fi

    if [ "$$mode" = "check" ]; then
        if [ $$(stat -c %u "$$dir") -eq $$uid ] && [ $$(stat -c %g "$$dir") -eq $$gid ]; then
            echo "Ownership is correct. Skipping..."
            fix_owner="false"
        else
            echo "Ownership is incorrect. Fixing..."
            fix_owner="true"
        fi

        if [ "$$chmod" = "false" ]; then
            echo "Skipping permissions check, chmod is false"
        elif [ -n "$$chmod" ]; then
            if [ $$(stat -c %a "$$dir") -eq $$chmod ]; then
                echo "Permissions are correct. Skipping..."
                fix_perms="false"
            else
                echo "Permissions are incorrect. Fixing..."
                fix_perms="true"
            fi
        fi
    fi

    if [ "$$fix_owner" = "true" ]; then
        echo "Changing ownership to $$uid:$$gid on: [$$dir]"
        chown -R "$$uid:$$gid" "$$dir"
        echo "Finished changing ownership"
        echo "Ownership after changes:"
        stat -c "%u %g" "$$dir"
    fi

    if [ -n "$$chmod" ] && [ "$$fix_perms" = "true" ]; then
        echo "Changing permissions to $$chmod on: [$$dir]"
        chmod -R "$$chmod" "$$dir"
        echo "Finished changing permissions"
        echo "Permissions after changes:"
        stat -c "%a" "$$dir"
    fi
}
{% endmacro %}
|
||||
@@ -0,0 +1,26 @@
|
||||
resources:
|
||||
limits:
|
||||
cpus: 2.0
|
||||
memory: 4096
|
||||
|
||||
metube:
|
||||
default_theme: auto
|
||||
additional_envs: []
|
||||
network:
|
||||
host_network: false
|
||||
web_port: 8080
|
||||
|
||||
run_as:
|
||||
user: 568
|
||||
group: 568
|
||||
|
||||
storage:
|
||||
downloads:
|
||||
type: volume
|
||||
auto_permissions: true
|
||||
volume_name: metube_downloads
|
||||
additional_storage:
|
||||
- type: anonymous
|
||||
mount_path: /scratchpad
|
||||
volume_config:
|
||||
nocopy: true
|
||||
8
trains/community/metube/item.yaml
Normal file
8
trains/community/metube/item.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
categories:
|
||||
- media
|
||||
icon_url: https://media.sys.truenas.net/apps/metube/icons/icon.svg
|
||||
screenshots:
|
||||
- https://media.sys.truenas.net/apps/metube/screenshots/screenshot1.png
|
||||
tags:
|
||||
- youtube-dl
|
||||
- yt-dlp
|
||||
Reference in New Issue
Block a user