Migrate apps to support host-ip per port (#1550)
* collabora
* diskoverdata
* elastic-search
* emby
* home-assistant
* ix-app
* minio
* netdata
* nextcloud
* photoprism
* pihole
* plex
* prometheus
* storj
* syncthing
* wg-easy
* minio
* syncthing
* asigra-ds-system
* actual-budget
* adguard-home
* audiobookshelf
* autobrr
* bazarr
* briefkasten
* calibre
* castopod
* chia
* clamav
* dashy
* ddns-updater
* deluge
* distribution
* dockge
* drawio
* eclipse-mosquitto
* filebrowser
* firefly-iii
* flame
* flaresolverr
* freshrss
* frigate
* fscrawler
* gaseous-server
* gitea
* grafana
* handbrake
* homepage
* homer
* immich
* invidious
* ipfs
* jellyfin
* jellyseerr
* jenkins
* joplin
* Migrate apps to support host-ip per port (part 2) (#1606)
* kapowarr
* kavita
* komga
* lidarr
* linkding
* listmonk
* logseq
* mealie
* metube
* minecraft
* mineos
* mumble
* n8n
* navidrome
* netbootxyz
* nginx-proxy-manager
* node-red
* odoo
* ollama
* open-webui
* organizr
* overseerr
* omada
* palworld
* paperless-ngx
* passbolt
* penpot
* pgadmin
* pigallery2
* piwigo
* planka
* portainer
* postgres
* prowlarr
* qbittorrent
* radarr
* readarr
* redis
* roundcube
* rsyncd
* rust-desk
* sabnzbd
* scrutiny
* Migrate apps to support host-ip per port (part 3) (#1653)
* searxng
* sftpgo
* sonarr
* tautulli
* tdarr
* terraria
* tftpd-hpa
* tiny-media-manager
* transmission
* twofactor-auth
* unifi-controller
* uptime-kuma
* vaultwarden
* vikunja
* webdav
* whoogle
* wordpress
* fix
* fmt
* one more
* add library
* fix port validation on container_port
* fix visibility of host-ips
* fix more visibility issues
* fix postgres
* revert
* remove unneeded config in scrutiny
* switch default pg image to 17
* remove k8s migrations
* bump
* clean
* filestash
* immich: allow setting mlcache storage
* fix name
* searxng: update test values
* fmt
* fix port
* remove annotations
* sftpgo host_ips on ranges
* fix
* postgres: update test values
* remove field
* dawarich: remove field
* home assistant: fix port
* flood: allow running as any user, fixes #2377
* cleaner
* remove duplicate port
* no migs for wg-easy
* restore files
* untouch
* untouch
* update metadata
* sort as well
* mapping
* bump
* update migration versions
* add annotations
* fix
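Reviewer note: each migrated app gains an ip_port_migration script (the ones for actual-budget, adguard-home, and audiobookshelf appear below) that rewrites the scalar port value into a structured object. A condensed sketch of that transformation, with an illustrative input value:

# before: values == {"network": {"web_port": 31012}}
values["network"]["web_port"] = {
    "port_number": values["network"]["web_port"],  # keep the existing number
    "bind_mode": "published",  # matches pre-migration behavior
    "host_ips": [],  # empty list: no per-IP restriction, same binding as before
}
# after: values == {"network": {"web_port": {
#     "port_number": 31012, "bind_mode": "published", "host_ips": []}}}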
.github/scripts/generate_metadata.py (vendored, 75 lines changed)
@@ -118,23 +118,53 @@ class DockerCapabilityRegistry:
        "WAKE_ALARM": "able to trigger system wake alarms",
    }

    _RENAME_MAPPINGS = {
        "dssystem": "DS System",
        "npm": "Nginx Proxy Manager",
        "omada": "Omada Controller",
        "zigbee2mqtt": "Zigbee2MQTT",
    }

    @staticmethod
    def service_name_to_title(service_name: str) -> str:
        """Convert a service name to a human-readable title."""
        return service_name.replace("-", " ").replace("_", " ").title()

    @staticmethod
    def hash_service_name(service_name: str) -> str:
        """Hash a service name to a short, unique identifier."""
        return service_name.lower().replace("_", "").replace("-", "").replace(" ", "")

    @classmethod
    def create_capability_description(cls, capability_name: str, service_names: List[str]) -> str:
    def create_capability_description(cls, capability_name: str, service_names: List[str], title: str) -> str:
        """Create a human-readable description for a capability and its services."""
        if capability_name not in cls._CAPABILITY_DESCRIPTIONS:
            raise ValueError(f"Unknown capability: {capability_name}")

        if not service_names:
            raise ValueError(f"No services provided for capability: {capability_name}")
        clean_service_names = set()
        for name in service_names:
            parts = name.split("-")
            if parts[-1].isnumeric():
                name = "-".join(parts[:-1])
            clean_service_names.add(name)

        formatted_services = []
        for name in clean_service_names:
            if cls.hash_service_name(name) == cls.hash_service_name(title):
                formatted_services.append(title)
            elif name.lower() in cls._RENAME_MAPPINGS:
                formatted_services.append(cls._RENAME_MAPPINGS[name.lower()])
            else:
                formatted_services.append(cls.service_name_to_title(name))

        # Format service names (replace hyphens with spaces and title case)
        formatted_services = [name.replace("-", " ").title() for name in service_names]
        base_description = cls._CAPABILITY_DESCRIPTIONS[capability_name]

        if len(formatted_services) == 1:
            return f"{formatted_services[0]} is {base_description}"
        else:
            return f"{', '.join(formatted_services)} are {base_description}"
        return f"{', '.join(sorted(formatted_services))} are {base_description}"
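Reviewer note: a worked example of the new title matching, with service names inferred from the regenerated app.yaml files later in this diff. For the AdGuard Home app, whose service is evidently named adguard, hash_service_name("adguard") yields "adguard" while hash_service_name("AdGuard Home") yields "adguardhome"; they differ and "adguard" is not in _RENAME_MAPPINGS, so the name falls through to service_name_to_title:

# Sketch only; the NET_RAW base description matches the app.yaml shown below.
DockerCapabilityRegistry.create_capability_description(
    "NET_RAW", ["adguard"], "AdGuard Home"
)
# hash("adguard") != hash("AdGuard Home") -> title-cased service name:
# -> "Adguard is able to use raw and packet sockets"

A service named like its title (e.g. actual-budget under the title "Actual Budget") hashes equal on both sides ("actualbudget"), so the human title is used verbatim; a trailing numeric suffix such as adguard-home-1 is stripped before comparison.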


class FileSystemCache:
@@ -164,6 +194,10 @@ class FileSystemCache:
        with open(file_path, "r") as f:
            data = yaml.safe_load(f)

        # Ensure we have a dict
        if not isinstance(data, dict):
            raise ValueError(f"YAML file {file_path} must contain a dictionary at root level, got {type(data)}")

        # Cache with modification time
        mtime = file_path.stat().st_mtime
        self._yaml_cache[file_key] = (data, mtime)
@@ -304,16 +338,22 @@ class DockerComposeRenderer:
            raise RuntimeError(f"Rendering failed for {app_manifest.name}") from e

        # Read rendered compose file
        rendered_compose_path = app_manifest.path / Config.RENDERED_COMPOSE_PATH
        if not rendered_compose_path.exists():
            raise FileNotFoundError(f"Rendered compose file not found: {rendered_compose_path}")
        compose_path = app_manifest.path / Config.RENDERED_COMPOSE_PATH
        if not compose_path.exists():
            raise FileNotFoundError(f"Rendered compose file not found: {compose_path}")

        try:
            self._fix_file_permissions(rendered_compose_path)
            with open(rendered_compose_path, "r") as f:
                return yaml.safe_load(f)
            self._fix_file_permissions(compose_path)
            with open(compose_path, "r") as f:
                data = yaml.safe_load(f)

            # Ensure we have a dict
            if not isinstance(data, dict):
                raise ValueError(f"YAML file {compose_path} must contain a dictionary at root level, got {type(data)}")
            return data

        except yaml.YAMLError as e:
            raise RuntimeError(f"Failed to parse rendered compose: {rendered_compose_path}") from e
            raise RuntimeError(f"Failed to parse rendered compose: {compose_path}") from e

    def _fix_file_permissions(self, file_path: Path) -> None:
        """Fix file permissions using Docker container."""
@@ -576,6 +616,13 @@ class TrueNASAppCapabilityManager:
            logger.warning(f"No test configurations for {app_manifest.name}")
            return AppAnalysisResult([], [], "")

        # Extract app title from app.yaml
        app_metadata_path = app_manifest.path / Config.APP_METADATA_FILE
        app_config = self.file_cache.read_yaml_file(app_metadata_path)
        if not isinstance(app_config, dict):
            raise ValueError(f"Invalid app config in {app_metadata_path}")
        app_title = app_config.get("title", app_manifest.name)

        # Track capabilities across all test configurations
        capability_to_services: Dict[str, Set[str]] = {}
        all_service_names = set()
@@ -609,7 +656,9 @@ class TrueNASAppCapabilityManager:
        capabilities = []
        for capability_name, services in capability_to_services.items():
            try:
                description = self.capability_registry.create_capability_description(capability_name, sorted(services))
                description = self.capability_registry.create_capability_description(
                    capability_name, sorted(services), app_title
                )
                capabilities.append(DockerCapability(capability_name, description))
            except ValueError as e:
                logger.error(f"Failed to create capability description: {e}")
@@ -621,7 +670,7 @@ class TrueNASAppCapabilityManager:
        return AppAnalysisResult(
            capabilities=sorted(capabilities, key=lambda c: c.name),
            service_names=sorted(all_service_names),
            app_version=current_version,
            app_version=str(current_version),
        )

    def update_single_app(self, app_manifest: AppManifest) -> None:

@@ -131,6 +131,7 @@ words:
  - homarr
  - homebox
  - hostable
  - htauth
  - htpasswd
  - htsp
  - icanhazip

@@ -1,3 +1,5 @@
annotations:
  min_scale_version: 24.10.2.2
app_version: 25.5.0
capabilities: []
categories:
@@ -34,4 +36,4 @@ sources:
  - https://hub.docker.com/r/actualbudget/actual-server
title: Actual Budget
train: community
version: 1.2.20
version: 1.3.0

ix-dev/community/actual-budget/app_migrations.yaml (Normal file, 6 lines)
@@ -0,0 +1,6 @@
migrations:
  - file: ip_port_migration
    from:
      max_version: 1.2.20
    target:
      min_version: 1.3.0
ix-dev/community/actual-budget/migrations/ip_port_migration (Executable file, 23 lines)
@@ -0,0 +1,23 @@
#!/usr/bin/python3

import os
import sys
import yaml


def migrate(values):
    values["network"]["web_port"] = {
        "port_number": values["network"]["web_port"],
        "bind_mode": "published",
        "host_ips": [],
    }
    return values


if __name__ == "__main__":
    if len(sys.argv) != 2:
        exit(1)

    if os.path.exists(sys.argv[1]):
        with open(sys.argv[1], "r") as f:
            print(yaml.dump(migrate(yaml.safe_load(f.read()))))
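Reviewer note: the middleware is expected to run each migration file with the app's current values as its single argument and read the migrated YAML from stdout; a hypothetical manual run (path and numbers illustrative):

# ./ix-dev/community/actual-budget/migrations/ip_port_migration /tmp/values.yaml
# prints (yaml.dump sorts keys alphabetically):
#   network:
#     web_port:
#       bind_mode: published
#       host_ips: []
#       port_number: 31012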
@@ -1,48 +0,0 @@
#!/usr/bin/python3

import os
import sys
import yaml

from migration_helpers.resources import migrate_resources
from migration_helpers.storage import migrate_storage_item


def migrate(values):
    config = values.get("helm_secret", {}).get("config", {})
    if not config:
        raise ValueError("No config found in values")

    new_values = {
        "actual_budget": {
            "additional_envs": config["actualConfig"].get("additionalEnvs", []),
        },
        "run_as": {
            "user": config["actualRunAs"].get("user", 568),
            "group": config["actualRunAs"].get("group", 568),
        },
        "network": {
            "host_network": config["actualNetwork"].get("hostNetwork", False),
            "web_port": config["actualNetwork"].get("webPort", 32400),
            "certificate_id": config["actualNetwork"].get("certificateID"),
        },
        "storage": {
            "data": migrate_storage_item(config["actualStorage"]["data"]),
            "additional_storage": [
                migrate_storage_item(item, include_read_only=True)
                for item in config["actualStorage"]["additionalStorages"]
            ],
        },
        "resources": migrate_resources(config["resources"]),
    }

    return new_values


if __name__ == "__main__":
    if len(sys.argv) != 2:
        exit(1)

    if os.path.exists(sys.argv[1]):
        with open(sys.argv[1], "r") as f:
            print(yaml.dump(migrate(yaml.safe_load(f.read()))))
@@ -1,30 +0,0 @@
import math
import re
import os

CPU_COUNT = os.cpu_count()

NUMBER_REGEX = re.compile(r"^[1-9][0-9]$")
FLOAT_REGEX = re.compile(r"^[0-9]+\.[0-9]+$")
MILI_CPU_REGEX = re.compile(r"^[0-9]+m$")


def transform_cpu(cpu) -> int:
    result = 2
    if NUMBER_REGEX.match(cpu):
        result = int(cpu)
    elif FLOAT_REGEX.match(cpu):
        result = int(math.ceil(float(cpu)))
    elif MILI_CPU_REGEX.match(cpu):
        num = int(cpu[:-1])
        num = num / 1000
        result = int(math.ceil(num))

    if CPU_COUNT is not None:
        # Do not exceed the actual CPU count
        result = min(result, CPU_COUNT)

    if int(result) == 0:
        result = CPU_COUNT if CPU_COUNT else 2

    return int(result)
@@ -1,9 +0,0 @@
def migrate_dns_config(dns_config):
    if not dns_config:
        return []

    dns_opts = []
    for opt in dns_config.get("options", []):
        dns_opts.append(f"{opt['name']}:{opt['value']}")

    return dns_opts
@@ -1,16 +0,0 @@
def get_value_from_secret(secrets=None, secret_name=None, key=None):
    secrets = secrets if secrets else dict()
    secret_name = secret_name if secret_name else ""
    key = key if key else ""

    if not secrets or not secret_name or not key:
        raise ValueError("Expected [secrets], [secret_name] and [key] to be set")
    for curr_secret_name, curr_data in secrets.items():
        if curr_secret_name.endswith(secret_name):
            if not curr_data.get(key, None):
                raise ValueError(
                    f"Expected [{key}] to be set in secret [{curr_secret_name}]"
                )
            return curr_data[key]

    raise ValueError(f"Secret [{secret_name}] not found")
@@ -1,61 +0,0 @@
import re
import math


def get_total_memory():
    with open("/proc/meminfo") as f:
        for line in filter(lambda x: "MemTotal" in x, f):
            return int(line.split()[1]) * 1024

    return 0


TOTAL_MEM = get_total_memory()

SINGLE_SUFFIX_REGEX = re.compile(r"^[1-9][0-9]*([EPTGMK])$")
DOUBLE_SUFFIX_REGEX = re.compile(r"^[1-9][0-9]*([EPTGMK])i$")
BYTES_INTEGER_REGEX = re.compile(r"^[1-9][0-9]*$")
EXPONENT_REGEX = re.compile(r"^[1-9][0-9]*e[0-9]+$")

SUFFIX_MULTIPLIERS = {
    "K": 10**3,
    "M": 10**6,
    "G": 10**9,
    "T": 10**12,
    "P": 10**15,
    "E": 10**18,
}

DOUBLE_SUFFIX_MULTIPLIERS = {
    "Ki": 2**10,
    "Mi": 2**20,
    "Gi": 2**30,
    "Ti": 2**40,
    "Pi": 2**50,
    "Ei": 2**60,
}


def transform_memory(memory):
    result = 4096  # Default to 4GB

    if re.match(SINGLE_SUFFIX_REGEX, memory):
        suffix = memory[-1]
        result = int(memory[:-1]) * SUFFIX_MULTIPLIERS[suffix]
    elif re.match(DOUBLE_SUFFIX_REGEX, memory):
        suffix = memory[-2:]
        result = int(memory[:-2]) * DOUBLE_SUFFIX_MULTIPLIERS[suffix]
    elif re.match(BYTES_INTEGER_REGEX, memory):
        result = int(memory)
    elif re.match(EXPONENT_REGEX, memory):
        result = int(float(memory))

    result = math.ceil(result)
    result = min(result, TOTAL_MEM)
    # Convert to Megabytes
    result = result / 1024 / 1024

    if int(result) == 0:
        result = TOTAL_MEM if TOTAL_MEM else 4096

    return int(result)
@@ -1,59 +0,0 @@
from .memory import transform_memory, TOTAL_MEM
from .cpu import transform_cpu, CPU_COUNT


def migrate_resources(resources, gpus=None, system_gpus=None):
    gpus = gpus or {}
    system_gpus = system_gpus or []

    result = {
        "limits": {
            "cpus": int((CPU_COUNT or 2) / 2),
            "memory": int(TOTAL_MEM / 1024 / 1024),
        }
    }

    if resources.get("limits", {}).get("cpu", ""):
        result["limits"].update(
            {"cpus": transform_cpu(resources.get("limits", {}).get("cpu", ""))}
        )
    if resources.get("limits", {}).get("memory", ""):
        result["limits"].update(
            {"memory": transform_memory(resources.get("limits", {}).get("memory", ""))}
        )

    gpus_result = {}
    for gpu in gpus.items() if gpus else []:
        kind = gpu[0].lower()  # Kind of gpu (amd, nvidia, intel)
        count = gpu[1]  # Number of gpus user requested

        if count == 0:
            continue

        if "amd" in kind or "intel" in kind:
            gpus_result.update({"use_all_gpus": True})
        elif "nvidia" in kind:
            sys_gpus = [
                gpu_item
                for gpu_item in system_gpus
                if gpu_item.get("error") is None
                and gpu_item.get("vendor", None) is not None
                and gpu_item.get("vendor", "").upper() == "NVIDIA"
            ]
            for sys_gpu in sys_gpus:
                if count == 0:  # We passed # of gpus that user previously requested
                    break
                guid = sys_gpu.get("vendor_specific_config", {}).get("uuid", "")
                pci_slot = sys_gpu.get("pci_slot", "")
                if not guid or not pci_slot:
                    continue

                gpus_result.update(
                    {"nvidia_gpu_selection": {pci_slot: {"uuid": guid, "use_gpu": True}}}
                )
                count -= 1

    if gpus_result:
        result.update({"gpus": gpus_result})

    return result
@@ -1,155 +0,0 @@
def migrate_storage_item(storage_item, include_read_only=False):
    if not storage_item:
        raise ValueError("Expected [storage_item] to be set")

    result = {}
    if storage_item["type"] == "ixVolume":
        if storage_item.get("ixVolumeConfig"):
            result = migrate_ix_volume_type(storage_item)
        elif storage_item.get("datasetName"):
            result = migrate_old_ix_volume_type(storage_item)
        else:
            raise ValueError(
                "Expected [ix_volume] to have [ixVolumeConfig] or [datasetName] set"
            )
    elif storage_item["type"] == "hostPath":
        if storage_item.get("hostPathConfig"):
            result = migrate_host_path_type(storage_item)
        elif storage_item.get("hostPath"):
            result = migrate_old_host_path_type(storage_item)
        else:
            raise ValueError(
                "Expected [host_path] to have [hostPathConfig] or [hostPath] set"
            )
    elif storage_item["type"] == "emptyDir":
        result = migrate_empty_dir_type(storage_item)
    elif storage_item["type"] == "smb-pv-pvc":
        result = migrate_smb_pv_pvc_type(storage_item)

    mount_path = storage_item.get("mountPath", "")
    if mount_path:
        result.update({"mount_path": mount_path})

    if include_read_only:
        result.update({"read_only": storage_item.get("readOnly", False)})
    return result


def migrate_smb_pv_pvc_type(smb_pv_pvc):
    smb_config = smb_pv_pvc.get("smbConfig", {})
    if not smb_config:
        raise ValueError("Expected [smb_pv_pvc] to have [smbConfig] set")

    return {
        "type": "cifs",
        "cifs_config": {
            "server": smb_config["server"],
            "path": smb_config["share"],
            "domain": smb_config.get("domain", ""),
            "username": smb_config["username"],
            "password": smb_config["password"],
        },
    }


def migrate_empty_dir_type(empty_dir):
    empty_dir_config = empty_dir.get("emptyDirConfig", {})
    if not empty_dir_config:
        raise ValueError("Expected [empty_dir] to have [emptyDirConfig] set")

    if empty_dir_config.get("medium", "") == "Memory":
        # Convert Gi to Mi
        size = empty_dir_config.get("size", 0.5) * 1024
        return {
            "type": "tmpfs",
            "tmpfs_config": {"size": size},
        }

    return {"type": "temporary"}


def migrate_old_ix_volume_type(ix_volume):
    if not ix_volume.get("datasetName"):
        raise ValueError("Expected [ix_volume] to have [datasetName] set")

    return {
        "type": "ix_volume",
        "ix_volume_config": {
            "acl_enable": False,
            "dataset_name": ix_volume["datasetName"],
        },
    }


def migrate_ix_volume_type(ix_volume):
    vol_config = ix_volume.get("ixVolumeConfig", {})
    if not vol_config:
        raise ValueError("Expected [ix_volume] to have [ixVolumeConfig] set")

    result = {
        "type": "ix_volume",
        "ix_volume_config": {
            "acl_enable": vol_config.get("aclEnable", False),
            "dataset_name": vol_config.get("datasetName", ""),
        },
    }

    if vol_config.get("aclEnable", False):
        result["ix_volume_config"].update(
            {"acl_entries": migrate_acl_entries(vol_config["aclEntries"])}
        )

    return result


def migrate_old_host_path_type(host_path):
    if not host_path.get("hostPath"):
        raise ValueError("Expected [host_path] to have [hostPath] set")

    return {
        "type": "host_path",
        "host_path_config": {
            "acl_enable": False,
            "path": host_path["hostPath"],
        },
    }


def migrate_host_path_type(host_path):
    path_config = host_path.get("hostPathConfig", {})
    if not path_config:
        raise ValueError("Expected [host_path] to have [hostPathConfig] set")

    result = {
        "type": "host_path",
        "host_path_config": {
            "acl_enable": path_config.get("aclEnable", False),
        },
    }

    if path_config.get("aclEnable", False):
        result["host_path_config"].update(
            {"acl": migrate_acl_entries(path_config.get("acl", {}))}
        )
    else:
        result["host_path_config"].update({"path": path_config["hostPath"]})

    return result


def migrate_acl_entries(acl_entries: dict) -> dict:
    entries = []
    for entry in acl_entries.get("entries", []):
        entries.append(
            {
                "access": entry["access"],
                "id": entry["id"],
                "id_type": entry["id_type"],
            }
        )

    return {
        "entries": entries,
        "options": {"force": acl_entries.get("options", {}).get("force", False)},
        "path": acl_entries["path"],
    }
@@ -72,13 +72,51 @@ questions:
    attrs:
      - variable: web_port
        label: WebUI Port
        description: The port for Actual Budget WebUI
        schema:
          type: int
          default: 31012
          required: true
          $ref:
            - definitions/port
          type: dict
          attrs:
            - variable: bind_mode
              label: Port Bind Mode
              description: |
                The port bind mode.</br>
                - Publish: The port will be published on the host for external access.</br>
                - Expose: The port will be exposed for inter-container communication.</br>
                - None: The port will not be exposed or published.</br>
                Note: If the Dockerfile defines an EXPOSE directive,
                the port will still be exposed for inter-container communication regardless of this setting.
              schema:
                type: string
                default: "published"
                enum:
                  - value: "published"
                    description: Publish port on the host for external access
                  - value: "exposed"
                    description: Expose port for inter-container communication
                  - value: ""
                    description: None
            - variable: port_number
              label: Port Number
              schema:
                type: int
                default: 31012
                required: true
                $ref:
                  - definitions/port
            - variable: host_ips
              label: Host IPs
              description: IPs on the host to bind this port
              schema:
                type: list
                show_if: [["bind_mode", "=", "published"]]
                default: []
                items:
                  - variable: host_ip
                    label: Host IP
                    schema:
                      type: string
                      required: true
                      $ref:
                        - definitions/node_bind_ip
      - variable: host_network
        label: Host Network
        description: |
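Reviewer note: how these three fields land in the rendered compose file is decided by the shared library, not by this diff; the following mapping is therefore only an assumption sketched for orientation:

# bind_mode "published", host_ips ["192.168.1.10"] -> one host binding per IP,
#   e.g. ports: ["192.168.1.10:31012:31012"]
# bind_mode "published", host_ips [] -> bind on all host IPs, e.g. ports: ["31012:31012"]
# bind_mode "exposed" -> inter-container only, e.g. expose: ["31012"]
# bind_mode "" -> no ports/expose entry emitted (unless the image itself EXPOSEs it)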
@@ -7,7 +7,7 @@
{% do c1.set_user(values.run_as.user, values.run_as.group) %}
{% do c1.healthcheck.set_custom_test("NPM_CONFIG_UPDATE_NOTIFIER=false NODE_TLS_REJECT_UNAUTHORIZED=0 npm run health-check") %}

{% do c1.environment.add_env("ACTUAL_PORT", values.network.web_port) %}
{% do c1.environment.add_env("ACTUAL_PORT", values.network.web_port.port_number) %}
{% do c1.environment.add_env("ACTUAL_HOSTNAME", "0.0.0.0") %}
{% do c1.environment.add_env("ACTUAL_SERVER_FILES", "%s/server-files" | format(values.consts.base_data_path)) %}
{% do c1.environment.add_env("ACTUAL_USER_FILES", "%s/user-files" | format(values.consts.base_data_path)) %}
@@ -22,7 +22,7 @@
{% endif %}

{% do c1.environment.add_user_envs(values.actual_budget.additional_envs) %}
{% do c1.ports.add_port(values.network.web_port, values.network.web_port) %}
{% do c1.add_port(values.network.web_port) %}

{% do c1.add_storage("/data", values.storage.data) %}
{% do perm_container.add_or_skip_action("data", values.storage.data, perms_config) %}
@@ -37,6 +37,6 @@
{% do c1.depends.add_dependency(values.consts.perms_container_name, "service_completed_successfully") %}
{% endif %}

{% do tpl.portals.add_portal({"port": values.network.web_port, "scheme": "https" if values.network.certificate_id else "http"}) %}
{% do tpl.portals.add_portal({"port": values.network.web_port.port_number, "scheme": "https" if values.network.certificate_id else "http"}) %}

{{ tpl.render() | tojson }}

@@ -8,7 +8,9 @@ actual_budget:
network:
  host_network: false
  certificate_id: null
  web_port: 8080
  web_port:
    bind_mode: published
    port_number: 8080

run_as:
  user: 568

@@ -7,7 +7,9 @@ actual_budget:
  additional_envs: []
network:
  host_network: true
  web_port: 8080
  web_port:
    bind_mode: published
    port_number: 8080

run_as:
  user: 568

@@ -8,7 +8,9 @@ actual_budget:
network:
  host_network: false
  certificate_id: "1"
  web_port: 8080
  web_port:
    bind_mode: published
    port_number: 8080

run_as:
  user: 568

@@ -1,14 +1,16 @@
annotations:
  min_scale_version: 24.10.2.2
app_version: v0.107.62
capabilities:
  - description: AdGuard Home is able to bind to a privileged port.
    name: NET_BIND_SERVICE
  - description: AdGuard Home is able to chown files.
  - description: Adguard is able to change file ownership arbitrarily
    name: CHOWN
  - description: AdGuard Home is able to bypass permission checks for it's sub-processes.
    name: FOWNER
  - description: AdGuard Home is able to bypass permission checks.
  - description: Adguard is able to bypass file permission checks
    name: DAC_OVERRIDE
  - description: AdGuard Home is able to bind to a privileged port.
  - description: Adguard is able to bypass permission checks for file operations
    name: FOWNER
  - description: Adguard is able to bind to privileged ports (< 1024)
    name: NET_BIND_SERVICE
  - description: Adguard is able to use raw and packet sockets
    name: NET_RAW
categories:
  - networking
@@ -43,4 +45,4 @@ sources:
  - https://hub.docker.com/r/adguard/adguardhome
title: AdGuard Home
train: community
version: 1.1.27
version: 1.2.0

ix-dev/community/adguard-home/app_migrations.yaml (Normal file, 6 lines)
@@ -0,0 +1,6 @@
migrations:
  - file: ip_port_migration
    from:
      max_version: 1.1.27
    target:
      min_version: 1.2.0
ix-dev/community/adguard-home/migrations/ip_port_migration (Executable file, 28 lines)
@@ -0,0 +1,28 @@
#!/usr/bin/python3

import os
import sys
import yaml


def migrate(values):
    values["network"]["web_port"] = {
        "port_number": values["network"]["web_port"],
        "bind_mode": "published",
        "host_ips": [],
    }
    values["network"]["dns_port"] = {
        "port_number": values["network"].get("dns_port", 1053),
        "bind_mode": "published",
        "host_ips": [],
    }
    return values


if __name__ == "__main__":
    if len(sys.argv) != 2:
        exit(1)

    if os.path.exists(sys.argv[1]):
        with open(sys.argv[1], "r") as f:
            print(yaml.dump(migrate(yaml.safe_load(f.read()))))
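Reviewer note: unlike the single-port apps, the AdGuard Home migration reads dns_port with a fallback, so values saved before dns_port existed still migrate cleanly. A quick sketch with a hypothetical minimal input:

migrate({"network": {"web_port": 3000}})
# -> {"network": {
#        "web_port": {"port_number": 3000, "bind_mode": "published", "host_ips": []},
#        "dns_port": {"port_number": 1053, "bind_mode": "published", "host_ips": []}}}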
@@ -1,39 +0,0 @@
#!/usr/bin/python3

import os
import sys
import yaml

from migration_helpers.resources import migrate_resources
from migration_helpers.storage import migrate_storage_item


def migrate(values):
    config = values.get("helm_secret", {}).get("config", {})
    if not config:
        raise ValueError("No config found in values")

    new_values = {
        "network": {
            "web_port": config["adguardNetwork"].get("webPort", 32400),
            "dns_port": config["adguardNetwork"].get("dnsPort", 30153),
            "host_network": config["adguardNetwork"].get("hostNetwork", False),
            "dhcp_enabled": config["adguardNetwork"].get("enableDHCP", False),
        },
        "storage": {
            "config": migrate_storage_item(config["adguardStorage"]["conf"]),
            "work": migrate_storage_item(config["adguardStorage"]["work"]),
        },
        "resources": migrate_resources(config["resources"]),
    }

    return new_values


if __name__ == "__main__":
    if len(sys.argv) != 2:
        exit(1)

    if os.path.exists(sys.argv[1]):
        with open(sys.argv[1], "r") as f:
            print(yaml.dump(migrate(yaml.safe_load(f.read()))))
@@ -1,30 +0,0 @@
import math
import re
import os

CPU_COUNT = os.cpu_count()

NUMBER_REGEX = re.compile(r"^[1-9][0-9]$")
FLOAT_REGEX = re.compile(r"^[0-9]+\.[0-9]+$")
MILI_CPU_REGEX = re.compile(r"^[0-9]+m$")


def transform_cpu(cpu) -> int:
    result = 2
    if NUMBER_REGEX.match(cpu):
        result = int(cpu)
    elif FLOAT_REGEX.match(cpu):
        result = int(math.ceil(float(cpu)))
    elif MILI_CPU_REGEX.match(cpu):
        num = int(cpu[:-1])
        num = num / 1000
        result = int(math.ceil(num))

    if CPU_COUNT is not None:
        # Do not exceed the actual CPU count
        result = min(result, CPU_COUNT)

    if int(result) == 0:
        result = CPU_COUNT if CPU_COUNT else 2

    return int(result)
@@ -1,9 +0,0 @@
def migrate_dns_config(dns_config):
    if not dns_config:
        return []

    dns_opts = []
    for opt in dns_config.get("options", []):
        dns_opts.append(f"{opt['name']}:{opt['value']}")

    return dns_opts
@@ -1,16 +0,0 @@
def get_value_from_secret(secrets=None, secret_name=None, key=None):
    secrets = secrets if secrets else dict()
    secret_name = secret_name if secret_name else ""
    key = key if key else ""

    if not secrets or not secret_name or not key:
        raise ValueError("Expected [secrets], [secret_name] and [key] to be set")
    for curr_secret_name, curr_data in secrets.items():
        if curr_secret_name.endswith(secret_name):
            if not curr_data.get(key, None):
                raise ValueError(
                    f"Expected [{key}] to be set in secret [{curr_secret_name}]"
                )
            return curr_data[key]

    raise ValueError(f"Secret [{secret_name}] not found")
@@ -1,61 +0,0 @@
import re
import math


def get_total_memory():
    with open("/proc/meminfo") as f:
        for line in filter(lambda x: "MemTotal" in x, f):
            return int(line.split()[1]) * 1024

    return 0


TOTAL_MEM = get_total_memory()

SINGLE_SUFFIX_REGEX = re.compile(r"^[1-9][0-9]*([EPTGMK])$")
DOUBLE_SUFFIX_REGEX = re.compile(r"^[1-9][0-9]*([EPTGMK])i$")
BYTES_INTEGER_REGEX = re.compile(r"^[1-9][0-9]*$")
EXPONENT_REGEX = re.compile(r"^[1-9][0-9]*e[0-9]+$")

SUFFIX_MULTIPLIERS = {
    "K": 10**3,
    "M": 10**6,
    "G": 10**9,
    "T": 10**12,
    "P": 10**15,
    "E": 10**18,
}

DOUBLE_SUFFIX_MULTIPLIERS = {
    "Ki": 2**10,
    "Mi": 2**20,
    "Gi": 2**30,
    "Ti": 2**40,
    "Pi": 2**50,
    "Ei": 2**60,
}


def transform_memory(memory):
    result = 4096  # Default to 4GB

    if re.match(SINGLE_SUFFIX_REGEX, memory):
        suffix = memory[-1]
        result = int(memory[:-1]) * SUFFIX_MULTIPLIERS[suffix]
    elif re.match(DOUBLE_SUFFIX_REGEX, memory):
        suffix = memory[-2:]
        result = int(memory[:-2]) * DOUBLE_SUFFIX_MULTIPLIERS[suffix]
    elif re.match(BYTES_INTEGER_REGEX, memory):
        result = int(memory)
    elif re.match(EXPONENT_REGEX, memory):
        result = int(float(memory))

    result = math.ceil(result)
    result = min(result, TOTAL_MEM)
    # Convert to Megabytes
    result = result / 1024 / 1024

    if int(result) == 0:
        result = TOTAL_MEM if TOTAL_MEM else 4096

    return int(result)
@@ -1,59 +0,0 @@
from .memory import transform_memory, TOTAL_MEM
from .cpu import transform_cpu, CPU_COUNT


def migrate_resources(resources, gpus=None, system_gpus=None):
    gpus = gpus or {}
    system_gpus = system_gpus or []

    result = {
        "limits": {
            "cpus": int((CPU_COUNT or 2) / 2),
            "memory": int(TOTAL_MEM / 1024 / 1024),
        }
    }

    if resources.get("limits", {}).get("cpu", ""):
        result["limits"].update(
            {"cpus": transform_cpu(resources.get("limits", {}).get("cpu", ""))}
        )
    if resources.get("limits", {}).get("memory", ""):
        result["limits"].update(
            {"memory": transform_memory(resources.get("limits", {}).get("memory", ""))}
        )

    gpus_result = {}
    for gpu in gpus.items() if gpus else []:
        kind = gpu[0].lower()  # Kind of gpu (amd, nvidia, intel)
        count = gpu[1]  # Number of gpus user requested

        if count == 0:
            continue

        if "amd" in kind or "intel" in kind:
            gpus_result.update({"use_all_gpus": True})
        elif "nvidia" in kind:
            sys_gpus = [
                gpu_item
                for gpu_item in system_gpus
                if gpu_item.get("error") is None
                and gpu_item.get("vendor", None) is not None
                and gpu_item.get("vendor", "").upper() == "NVIDIA"
            ]
            for sys_gpu in sys_gpus:
                if count == 0:  # We passed # of gpus that user previously requested
                    break
                guid = sys_gpu.get("vendor_specific_config", {}).get("uuid", "")
                pci_slot = sys_gpu.get("pci_slot", "")
                if not guid or not pci_slot:
                    continue

                gpus_result.update(
                    {"nvidia_gpu_selection": {pci_slot: {"uuid": guid, "use_gpu": True}}}
                )
                count -= 1

    if gpus_result:
        result.update({"gpus": gpus_result})

    return result
@@ -1,155 +0,0 @@
def migrate_storage_item(storage_item, include_read_only=False):
    if not storage_item:
        raise ValueError("Expected [storage_item] to be set")

    result = {}
    if storage_item["type"] == "ixVolume":
        if storage_item.get("ixVolumeConfig"):
            result = migrate_ix_volume_type(storage_item)
        elif storage_item.get("datasetName"):
            result = migrate_old_ix_volume_type(storage_item)
        else:
            raise ValueError(
                "Expected [ix_volume] to have [ixVolumeConfig] or [datasetName] set"
            )
    elif storage_item["type"] == "hostPath":
        if storage_item.get("hostPathConfig"):
            result = migrate_host_path_type(storage_item)
        elif storage_item.get("hostPath"):
            result = migrate_old_host_path_type(storage_item)
        else:
            raise ValueError(
                "Expected [host_path] to have [hostPathConfig] or [hostPath] set"
            )
    elif storage_item["type"] == "emptyDir":
        result = migrate_empty_dir_type(storage_item)
    elif storage_item["type"] == "smb-pv-pvc":
        result = migrate_smb_pv_pvc_type(storage_item)

    mount_path = storage_item.get("mountPath", "")
    if mount_path:
        result.update({"mount_path": mount_path})

    if include_read_only:
        result.update({"read_only": storage_item.get("readOnly", False)})
    return result


def migrate_smb_pv_pvc_type(smb_pv_pvc):
    smb_config = smb_pv_pvc.get("smbConfig", {})
    if not smb_config:
        raise ValueError("Expected [smb_pv_pvc] to have [smbConfig] set")

    return {
        "type": "cifs",
        "cifs_config": {
            "server": smb_config["server"],
            "path": smb_config["share"],
            "domain": smb_config.get("domain", ""),
            "username": smb_config["username"],
            "password": smb_config["password"],
        },
    }


def migrate_empty_dir_type(empty_dir):
    empty_dir_config = empty_dir.get("emptyDirConfig", {})
    if not empty_dir_config:
        raise ValueError("Expected [empty_dir] to have [emptyDirConfig] set")

    if empty_dir_config.get("medium", "") == "Memory":
        # Convert Gi to Mi
        size = empty_dir_config.get("size", 0.5) * 1024
        return {
            "type": "tmpfs",
            "tmpfs_config": {"size": size},
        }

    return {"type": "temporary"}


def migrate_old_ix_volume_type(ix_volume):
    if not ix_volume.get("datasetName"):
        raise ValueError("Expected [ix_volume] to have [datasetName] set")

    return {
        "type": "ix_volume",
        "ix_volume_config": {
            "acl_enable": False,
            "dataset_name": ix_volume["datasetName"],
        },
    }


def migrate_ix_volume_type(ix_volume):
    vol_config = ix_volume.get("ixVolumeConfig", {})
    if not vol_config:
        raise ValueError("Expected [ix_volume] to have [ixVolumeConfig] set")

    result = {
        "type": "ix_volume",
        "ix_volume_config": {
            "acl_enable": vol_config.get("aclEnable", False),
            "dataset_name": vol_config.get("datasetName", ""),
        },
    }

    if vol_config.get("aclEnable", False):
        result["ix_volume_config"].update(
            {"acl_entries": migrate_acl_entries(vol_config["aclEntries"])}
        )

    return result


def migrate_old_host_path_type(host_path):
    if not host_path.get("hostPath"):
        raise ValueError("Expected [host_path] to have [hostPath] set")

    return {
        "type": "host_path",
        "host_path_config": {
            "acl_enable": False,
            "path": host_path["hostPath"],
        },
    }


def migrate_host_path_type(host_path):
    path_config = host_path.get("hostPathConfig", {})
    if not path_config:
        raise ValueError("Expected [host_path] to have [hostPathConfig] set")

    result = {
        "type": "host_path",
        "host_path_config": {
            "acl_enable": path_config.get("aclEnable", False),
        },
    }

    if path_config.get("aclEnable", False):
        result["host_path_config"].update(
            {"acl": migrate_acl_entries(path_config.get("acl", {}))}
        )
    else:
        result["host_path_config"].update({"path": path_config["hostPath"]})

    return result


def migrate_acl_entries(acl_entries: dict) -> dict:
    entries = []
    for entry in acl_entries.get("entries", []):
        entries.append(
            {
                "access": entry["access"],
                "id": entry["id"],
                "id_type": entry["id_type"],
            }
        )

    return {
        "entries": entries,
        "options": {"force": acl_entries.get("options", {}).get("force", False)},
        "path": acl_entries["path"],
    }
@@ -69,22 +69,99 @@ questions:
    attrs:
      - variable: web_port
        label: WebUI Port
        description: The port for AdGuard Home WebUI
        schema:
          type: int
          default: 30004
          required: true
          $ref:
            - definitions/port
          type: dict
          attrs:
            - variable: bind_mode
              label: Port Bind Mode
              description: |
                The port bind mode.</br>
                - Publish: The port will be published on the host for external access.</br>
                - Expose: The port will be exposed for inter-container communication.</br>
                - None: The port will not be exposed or published.</br>
                Note: If the Dockerfile defines an EXPOSE directive,
                the port will still be exposed for inter-container communication regardless of this setting.
              schema:
                type: string
                default: "published"
                enum:
                  - value: "published"
                    description: Publish port on the host for external access
                  - value: "exposed"
                    description: Expose port for inter-container communication
                  - value: ""
                    description: None
            - variable: port_number
              label: Port Number
              schema:
                type: int
                default: 30004
                required: true
                $ref:
                  - definitions/port
            - variable: host_ips
              label: Host IPs
              description: IPs on the host to bind this port
              schema:
                type: list
                show_if: [["bind_mode", "=", "published"]]
                default: []
                items:
                  - variable: host_ip
                    label: Host IP
                    schema:
                      type: string
                      required: true
                      $ref:
                        - definitions/node_bind_ip
      - variable: dns_port
        label: DNS Port
        description: The port for AdGuard Home DNS
        schema:
          type: int
          default: 53
          required: true
          $ref:
            - definitions/port
          type: dict
          attrs:
            - variable: bind_mode
              label: Port Bind Mode
              description: |
                The port bind mode.</br>
                - Publish: The port will be published on the host for external access.</br>
                - Expose: The port will be exposed for inter-container communication.</br>
                - None: The port will not be exposed or published.</br>
                Note: If the Dockerfile defines an EXPOSE directive,
                the port will still be exposed for inter-container communication regardless of this setting.
              schema:
                type: string
                default: "published"
                enum:
                  - value: "published"
                    description: Publish port on the host for external access
                  - value: "exposed"
                    description: Expose port for inter-container communication
                  - value: ""
                    description: None
            - variable: port_number
              label: Port Number
              schema:
                type: int
                show_if: [["bind_mode", "=", "published"]]
                default: 53
                required: true
                $ref:
                  - definitions/port
            - variable: host_ips
              label: Host IPs
              description: IPs on the host to bind this port
              schema:
                type: list
                show_if: [["bind_mode", "=", "published"]]
                default: []
                items:
                  - variable: host_ip
                    label: Host IP
                    schema:
                      type: string
                      required: true
                      $ref:
                        - definitions/node_bind_ip
      - variable: host_network
        label: Host Network
        description: |

@@ -2,7 +2,7 @@
{% set c1 = tpl.add_container(values.consts.adguard_container_name, "image") %}
{% do c1.healthcheck.set_test("wget", {
  "port": values.adguard.https_port_to_probe if values.adguard.use_https_probe else values.network.web_port,
  "port": values.adguard.https_port_to_probe if values.adguard.use_https_probe else values.network.web_port.port_number,
  "scheme": "https" if values.adguard.use_https_probe else "http",
  "path": "/",
}) %}
@@ -17,16 +17,16 @@
{% do c1.set_command([
  "--no-check-update",
  "--web-addr",
  "0.0.0.0:%d"|format(values.network.web_port),
  "0.0.0.0:%d"|format(values.network.web_port.port_number),
  "--config",
  "%s/AdGuardHome.yaml"|format(values.consts.config_path),
  "--work-dir",
  values.consts.work_path,
]) %}

{% do c1.ports.add_port(values.network.web_port, values.network.web_port) %}
{% do c1.ports.add_port(values.network.dns_port, 53) %}
{% do c1.ports.add_port(values.network.dns_port, 53, {"protocol": "udp"}) %}
{% do c1.add_port(values.network.web_port) %}
{% do c1.add_port(values.network.dns_port, {"container_port": 53}) %}
{% do c1.add_port(values.network.dns_port, {"container_port": 53, "protocol": "udp"}) %}

{% for port in values.network.additional_ports %}
{% do c1.add_port(port) %}
@@ -39,6 +39,6 @@
{% endfor %}

{% do tpl.notes.set_body(values.consts.notes_body) %}
{% do tpl.portals.add_portal({"port": values.network.web_port}) %}
{% do tpl.portals.add_portal({"port": values.network.web_port.port_number}) %}

{{ tpl.render() | tojson }}

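Reviewer note: the template API shift is easiest to see in this file. The old calls passed raw numbers twice (host port, then container port), while the new c1.add_port takes the whole port dict plus optional overrides: {"container_port": 53, "protocol": "udp"} keeps DNS listening on container port 53 over both TCP and UDP regardless of the host-side port number the user picked. (add_port itself lives in the shared library; option names beyond those used here are not shown in this diff.)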
@@ -6,8 +6,12 @@ resources:
|
||||
adguard:
|
||||
additional_envs: []
|
||||
network:
|
||||
web_port: 8080
|
||||
dns_port: 1053
|
||||
web_port:
|
||||
bind_mode: published
|
||||
port_number: 8080
|
||||
dns_port:
|
||||
bind_mode: published
|
||||
port_number: 1053
|
||||
host_network: false
|
||||
dhcp_enabled: false
|
||||
additional_ports: []
|
||||
|
||||
@@ -6,8 +6,12 @@ resources:
|
||||
adguard:
|
||||
additional_envs: []
|
||||
network:
|
||||
web_port: 8080
|
||||
dns_port: 1053
|
||||
web_port:
|
||||
bind_mode: published
|
||||
port_number: 8080
|
||||
dns_port:
|
||||
bind_mode: published
|
||||
port_number: 1053
|
||||
host_network: false
|
||||
dhcp_enabled: true
|
||||
additional_ports: []
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
annotations:
|
||||
min_scale_version: 24.10.2.2
|
||||
app_version: 2.24.0
|
||||
capabilities: []
|
||||
categories:
|
||||
@@ -34,4 +36,4 @@ sources:
|
||||
- https://github.com/advplyr/audiobookshelf
|
||||
title: Audiobookshelf
|
||||
train: community
|
||||
version: 1.3.28
|
||||
version: 1.4.0
|
||||
|
||||
6
ix-dev/community/audiobookshelf/app_migrations.yaml
Normal file
6
ix-dev/community/audiobookshelf/app_migrations.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
migrations:
|
||||
- file: ip_port_migration
|
||||
from:
|
||||
max_version: 1.3.28
|
||||
target:
|
||||
min_version: 1.4.0
|
||||
23
ix-dev/community/audiobookshelf/migrations/ip_port_migration
Executable file
23
ix-dev/community/audiobookshelf/migrations/ip_port_migration
Executable file
@@ -0,0 +1,23 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
import os
|
||||
import sys
|
||||
import yaml
|
||||
|
||||
|
||||
def migrate(values):
|
||||
values["network"]["web_port"] = {
|
||||
"port_number": values["network"]["web_port"],
|
||||
"bind_mode": "published",
|
||||
"host_ips": [],
|
||||
}
|
||||
return values
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) != 2:
|
||||
exit(1)
|
||||
|
||||
if os.path.exists(sys.argv[1]):
|
||||
with open(sys.argv[1], "r") as f:
|
||||
print(yaml.dump(migrate(yaml.safe_load(f.read()))))
|
||||
@@ -1,49 +0,0 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
import os
|
||||
import sys
|
||||
import yaml
|
||||
|
||||
from migration_helpers.resources import migrate_resources
|
||||
from migration_helpers.storage import migrate_storage_item
|
||||
|
||||
|
||||
def migrate(values):
|
||||
config = values.get("helm_secret", {}).get("config", {})
|
||||
if not config:
|
||||
raise ValueError("No config found in values")
|
||||
|
||||
new_values = {
|
||||
"TZ": config["TZ"],
|
||||
"audiobookshelf": {
|
||||
"additional_envs": config["audiobookshelfConfig"].get("additionalEnvs", []),
|
||||
},
|
||||
"run_as": {
|
||||
"user": config["audiobookshelfRunAs"].get("user", 568),
|
||||
"group": config["audiobookshelfRunAs"].get("group", 568),
|
||||
},
|
||||
"network": {
|
||||
"host_network": config["audiobookshelfNetwork"].get("hostNetwork", False),
|
||||
"web_port": config["audiobookshelfNetwork"].get("webPort", 32400),
|
||||
},
|
||||
"storage": {
|
||||
"config": migrate_storage_item(config["audiobookshelfStorage"]["config"]),
|
||||
"metadata": migrate_storage_item(config["audiobookshelfStorage"]["metadata"]),
|
||||
"additional_storage": [
|
||||
migrate_storage_item(item, include_read_only=True)
|
||||
for item in config["audiobookshelfStorage"]["additionalStorages"]
|
||||
],
|
||||
},
|
||||
"resources": migrate_resources(config["resources"]),
|
||||
}
|
||||
|
||||
return new_values
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) != 2:
|
||||
exit(1)
|
||||
|
||||
if os.path.exists(sys.argv[1]):
|
||||
with open(sys.argv[1], "r") as f:
|
||||
print(yaml.dump(migrate(yaml.safe_load(f.read()))))
|
||||
@@ -1,30 +0,0 @@
|
||||
import math
|
||||
import re
|
||||
import os
|
||||
|
||||
CPU_COUNT = os.cpu_count()
|
||||
|
||||
NUMBER_REGEX = re.compile(r"^[1-9][0-9]$")
|
||||
FLOAT_REGEX = re.compile(r"^[0-9]+\.[0-9]+$")
|
||||
MILI_CPU_REGEX = re.compile(r"^[0-9]+m$")
|
||||
|
||||
|
||||
def transform_cpu(cpu) -> int:
|
||||
result = 2
|
||||
if NUMBER_REGEX.match(cpu):
|
||||
result = int(cpu)
|
||||
elif FLOAT_REGEX.match(cpu):
|
||||
result = int(math.ceil(float(cpu)))
|
||||
elif MILI_CPU_REGEX.match(cpu):
|
||||
num = int(cpu[:-1])
|
||||
num = num / 1000
|
||||
result = int(math.ceil(num))
|
||||
|
||||
if CPU_COUNT is not None:
|
||||
# Do not exceed the actual CPU count
|
||||
result = min(result, CPU_COUNT)
|
||||
|
||||
if int(result) == 0:
|
||||
result = CPU_COUNT if CPU_COUNT else 2
|
||||
|
||||
return int(result)
|
||||
@@ -1,9 +0,0 @@
|
||||
def migrate_dns_config(dns_config):
|
||||
if not dns_config:
|
||||
return []
|
||||
|
||||
dns_opts = []
|
||||
for opt in dns_config.get("options", []):
|
||||
dns_opts.append(f"{opt['name']}:{opt['value']}")
|
||||
|
||||
return dns_opts
|
||||
@@ -1,16 +0,0 @@
|
||||
def get_value_from_secret(secrets=None, secret_name=None, key=None):
|
||||
secrets = secrets if secrets else dict()
|
||||
secret_name = secret_name if secret_name else ""
|
||||
key = key if key else ""
|
||||
|
||||
if not secrets or not secret_name or not key:
|
||||
raise ValueError("Expected [secrets], [secret_name] and [key] to be set")
|
||||
for curr_secret_name, curr_data in secrets.items():
|
||||
if curr_secret_name.endswith(secret_name):
|
||||
if not curr_data.get(key, None):
|
||||
raise ValueError(
|
||||
f"Expected [{key}] to be set in secret [{curr_secret_name}]"
|
||||
)
|
||||
return curr_data[key]
|
||||
|
||||
raise ValueError(f"Secret [{secret_name}] not found")
|
||||
@@ -1,61 +0,0 @@
|
||||
import re
|
||||
import math
|
||||
|
||||
|
||||
def get_total_memory():
|
||||
with open("/proc/meminfo") as f:
|
||||
for line in filter(lambda x: "MemTotal" in x, f):
|
||||
return int(line.split()[1]) * 1024
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
TOTAL_MEM = get_total_memory()
|
||||
|
||||
SINGLE_SUFFIX_REGEX = re.compile(r"^[1-9][0-9]*([EPTGMK])$")
|
||||
DOUBLE_SUFFIX_REGEX = re.compile(r"^[1-9][0-9]*([EPTGMK])i$")
|
||||
BYTES_INTEGER_REGEX = re.compile(r"^[1-9][0-9]*$")
|
||||
EXPONENT_REGEX = re.compile(r"^[1-9][0-9]*e[0-9]+$")
|
||||
|
||||
SUFFIX_MULTIPLIERS = {
|
||||
"K": 10**3,
|
||||
"M": 10**6,
|
||||
"G": 10**9,
|
||||
"T": 10**12,
|
||||
"P": 10**15,
|
||||
"E": 10**18,
|
||||
}
|
||||
|
||||
DOUBLE_SUFFIX_MULTIPLIERS = {
|
||||
"Ki": 2**10,
|
||||
"Mi": 2**20,
|
||||
"Gi": 2**30,
|
||||
"Ti": 2**40,
|
||||
"Pi": 2**50,
|
||||
"Ei": 2**60,
|
||||
}
|
||||
|
||||
|
||||
def transform_memory(memory):
|
||||
result = 4096 # Default to 4GB
|
||||
|
||||
if re.match(SINGLE_SUFFIX_REGEX, memory):
|
||||
suffix = memory[-1]
|
||||
result = int(memory[:-1]) * SUFFIX_MULTIPLIERS[suffix]
|
||||
elif re.match(DOUBLE_SUFFIX_REGEX, memory):
|
||||
suffix = memory[-2:]
|
||||
result = int(memory[:-2]) * DOUBLE_SUFFIX_MULTIPLIERS[suffix]
|
||||
elif re.match(BYTES_INTEGER_REGEX, memory):
|
||||
result = int(memory)
|
||||
elif re.match(EXPONENT_REGEX, memory):
|
||||
result = int(float(memory))
|
||||
|
||||
result = math.ceil(result)
|
||||
result = min(result, TOTAL_MEM)
|
||||
# Convert to Megabytes
|
||||
result = result / 1024 / 1024
|
||||
|
||||
if int(result) == 0:
|
||||
result = TOTAL_MEM if TOTAL_MEM else 4096
|
||||
|
||||
return int(result)
|
||||
@@ -1,59 +0,0 @@
|
||||
from .memory import transform_memory, TOTAL_MEM
|
||||
from .cpu import transform_cpu, CPU_COUNT
|
||||
|
||||
|
||||
def migrate_resources(resources, gpus=None, system_gpus=None):
|
||||
gpus = gpus or {}
|
||||
system_gpus = system_gpus or []
|
||||
|
||||
result = {
|
||||
"limits": {
|
||||
"cpus": int((CPU_COUNT or 2) / 2),
|
||||
"memory": int(TOTAL_MEM / 1024 / 1024),
|
||||
}
|
||||
}
|
||||
|
||||
if resources.get("limits", {}).get("cpu", ""):
|
||||
result["limits"].update(
|
||||
{"cpus": transform_cpu(resources.get("limits", {}).get("cpu", ""))}
|
||||
)
|
||||
if resources.get("limits", {}).get("memory", ""):
|
||||
result["limits"].update(
|
||||
{"memory": transform_memory(resources.get("limits", {}).get("memory", ""))}
|
||||
)
|
||||
|
||||
gpus_result = {}
|
||||
for gpu in gpus.items() if gpus else []:
|
||||
kind = gpu[0].lower() # Kind of gpu (amd, nvidia, intel)
|
||||
count = gpu[1] # Number of gpus user requested
|
||||
|
||||
if count == 0:
|
||||
continue
|
||||
|
||||
if "amd" in kind or "intel" in kind:
|
||||
gpus_result.update({"use_all_gpus": True})
|
||||
elif "nvidia" in kind:
|
||||
sys_gpus = [
|
||||
gpu_item
|
||||
for gpu_item in system_gpus
|
||||
if gpu_item.get("error") is None
|
||||
and gpu_item.get("vendor", None) is not None
|
||||
and gpu_item.get("vendor", "").upper() == "NVIDIA"
|
||||
]
|
||||
for sys_gpu in sys_gpus:
|
||||
if count == 0: # We passed # of gpus that user previously requested
|
||||
break
|
||||
guid = sys_gpu.get("vendor_specific_config", {}).get("uuid", "")
|
||||
pci_slot = sys_gpu.get("pci_slot", "")
|
||||
if not guid or not pci_slot:
|
||||
continue
|
||||
|
||||
gpus_result.update(
|
||||
{"nvidia_gpu_selection": {pci_slot: {"uuid": guid, "use_gpu": True}}}
|
||||
)
|
||||
count -= 1
|
||||
|
||||
if gpus_result:
|
||||
result.update({"gpus": gpus_result})
|
||||
|
||||
return result
|
||||
@@ -1,155 +0,0 @@
def migrate_storage_item(storage_item, include_read_only=False):
    if not storage_item:
        raise ValueError("Expected [storage_item] to be set")

    result = {}
    if storage_item["type"] == "ixVolume":
        if storage_item.get("ixVolumeConfig"):
            result = migrate_ix_volume_type(storage_item)
        elif storage_item.get("datasetName"):
            result = migrate_old_ix_volume_type(storage_item)
        else:
            raise ValueError(
                "Expected [ix_volume] to have [ixVolumeConfig] or [datasetName] set"
            )
    elif storage_item["type"] == "hostPath":
        if storage_item.get("hostPathConfig"):
            result = migrate_host_path_type(storage_item)
        elif storage_item.get("hostPath"):
            result = migrate_old_host_path_type(storage_item)
        else:
            raise ValueError(
                "Expected [host_path] to have [hostPathConfig] or [hostPath] set"
            )
    elif storage_item["type"] == "emptyDir":
        result = migrate_empty_dir_type(storage_item)
    elif storage_item["type"] == "smb-pv-pvc":
        result = migrate_smb_pv_pvc_type(storage_item)

    mount_path = storage_item.get("mountPath", "")
    if mount_path:
        result.update({"mount_path": mount_path})

    if include_read_only:
        result.update({"read_only": storage_item.get("readOnly", False)})
    return result


def migrate_smb_pv_pvc_type(smb_pv_pvc):
    smb_config = smb_pv_pvc.get("smbConfig", {})
    if not smb_config:
        raise ValueError("Expected [smb_pv_pvc] to have [smbConfig] set")

    return {
        "type": "cifs",
        "cifs_config": {
            "server": smb_config["server"],
            "path": smb_config["share"],
            "domain": smb_config.get("domain", ""),
            "username": smb_config["username"],
            "password": smb_config["password"],
        },
    }


def migrate_empty_dir_type(empty_dir):
    empty_dir_config = empty_dir.get("emptyDirConfig", {})
    if not empty_dir_config:
        raise ValueError("Expected [empty_dir] to have [emptyDirConfig] set")

    if empty_dir_config.get("medium", "") == "Memory":
        # Convert Gi to Mi
        size = empty_dir_config.get("size", 0.5) * 1024
        return {
            "type": "tmpfs",
            "tmpfs_config": {"size": size},
        }

    return {"type": "temporary"}


def migrate_old_ix_volume_type(ix_volume):
    if not ix_volume.get("datasetName"):
        raise ValueError("Expected [ix_volume] to have [datasetName] set")

    return {
        "type": "ix_volume",
        "ix_volume_config": {
            "acl_enable": False,
            "dataset_name": ix_volume["datasetName"],
        },
    }


def migrate_ix_volume_type(ix_volume):
    vol_config = ix_volume.get("ixVolumeConfig", {})
    if not vol_config:
        raise ValueError("Expected [ix_volume] to have [ixVolumeConfig] set")

    result = {
        "type": "ix_volume",
        "ix_volume_config": {
            "acl_enable": vol_config.get("aclEnable", False),
            "dataset_name": vol_config.get("datasetName", ""),
        },
    }

    if vol_config.get("aclEnable", False):
        result["ix_volume_config"].update(
            {"acl_entries": migrate_acl_entries(vol_config["aclEntries"])}
        )

    return result


def migrate_old_host_path_type(host_path):
    if not host_path.get("hostPath"):
        raise ValueError("Expected [host_path] to have [hostPath] set")

    return {
        "type": "host_path",
        "host_path_config": {
            "acl_enable": False,
            "path": host_path["hostPath"],
        },
    }


def migrate_host_path_type(host_path):
    path_config = host_path.get("hostPathConfig", {})
    if not path_config:
        raise ValueError("Expected [host_path] to have [hostPathConfig] set")

    result = {
        "type": "host_path",
        "host_path_config": {
            "acl_enable": path_config.get("aclEnable", False),
        },
    }

    if path_config.get("aclEnable", False):
        result["host_path_config"].update(
            {"acl": migrate_acl_entries(path_config.get("acl", {}))}
        )
    else:
        result["host_path_config"].update({"path": path_config["hostPath"]})

    return result


def migrate_acl_entries(acl_entries: dict) -> dict:
    entries = []
    for entry in acl_entries.get("entries", []):
        entries.append(
            {
                "access": entry["access"],
                "id": entry["id"],
                "id_type": entry["id_type"],
            }
        )

    return {
        "entries": entries,
        "options": {"force": acl_entries.get("options", {}).get("force", False)},
        "path": acl_entries["path"],
    }
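The storage helper above maps each k8s-era storage type onto the new compose-style schema. A minimal sketch of the transformation for a host-path item (paths are hypothetical, not from this repo):

# Hypothetical input in the old (k8s-era) schema:
old_item = {
    "type": "hostPath",
    "hostPathConfig": {"hostPath": "/mnt/tank/apps/config", "aclEnable": False},
    "mountPath": "/config",
}

# migrate_storage_item(old_item) would return:
# {
#     "type": "host_path",
#     "host_path_config": {"acl_enable": False, "path": "/mnt/tank/apps/config"},
#     "mount_path": "/config",
# }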
@@ -82,13 +82,51 @@ questions:
      attrs:
        - variable: web_port
          label: WebUI Port
          description: The port for Audiobookshelf WebUI
          schema:
            type: int
            default: 30067
            required: true
            $ref:
              - definitions/port
            type: dict
            attrs:
              - variable: bind_mode
                label: Port Bind Mode
                description: |
                  The port bind mode.</br>
                  - Publish: The port will be published on the host for external access.</br>
                  - Expose: The port will be exposed for inter-container communication.</br>
                  - None: The port will not be exposed or published.</br>
                  Note: If the Dockerfile defines an EXPOSE directive,
                  the port will still be exposed for inter-container communication regardless of this setting.
                schema:
                  type: string
                  default: "published"
                  enum:
                    - value: "published"
                      description: Publish port on the host for external access
                    - value: "exposed"
                      description: Expose port for inter-container communication
                    - value: ""
                      description: None
              - variable: port_number
                label: Port Number
                schema:
                  type: int
                  default: 30067
                  required: true
                  $ref:
                    - definitions/port
              - variable: host_ips
                label: Host IPs
                description: IPs on the host to bind this port
                schema:
                  type: list
                  show_if: [["bind_mode", "=", "published"]]
                  default: []
                  items:
                    - variable: host_ip
                      label: Host IP
                      schema:
                        type: string
                        required: true
                        $ref:
                          - definitions/node_bind_ip
        - variable: host_network
          label: Host Network
          description: |
@@ -5,14 +5,14 @@
{% set perms_config = {"uid": values.run_as.user, "gid": values.run_as.group, "mode": "check"} %}

{% do c1.set_user(values.run_as.user, values.run_as.group) %}
{% do c1.healthcheck.set_test("wget", {"port": values.network.web_port, "path": "/healthcheck"}) %}
{% do c1.healthcheck.set_test("wget", {"port": values.network.web_port.port_number, "path": "/healthcheck"}) %}

{% do c1.environment.add_env("PORT", values.network.web_port) %}
{% do c1.environment.add_env("PORT", values.network.web_port.port_number) %}
{% do c1.environment.add_env("CONFIG_PATH", values.consts.config_path) %}
{% do c1.environment.add_env("METADATA_PATH", values.consts.metadata_path) %}

{% do c1.environment.add_user_envs(values.audiobookshelf.additional_envs) %}
{% do c1.ports.add_port(values.network.web_port, values.network.web_port) %}
{% do c1.add_port(values.network.web_port) %}

{% do c1.add_storage(values.consts.config_path, values.storage.config) %}
{% do perm_container.add_or_skip_action("config", values.storage.config, perms_config) %}
@@ -30,6 +30,6 @@
{% do c1.depends.add_dependency(values.consts.perms_container_name, "service_completed_successfully") %}
{% endif %}

{% do tpl.portals.add_portal({"port": values.network.web_port}) %}
{% do tpl.portals.add_portal({"port": values.network.web_port.port_number}) %}

{{ tpl.render() | tojson }}
@@ -9,7 +9,9 @@ audiobookshelf:
  additional_envs: []
network:
  host_network: false
  web_port: 8080
  web_port:
    bind_mode: published
    port_number: 8080

run_as:
  user: 568
@@ -1,3 +1,5 @@
annotations:
  min_scale_version: 24.10.2.2
app_version: v1.62.0
capabilities: []
categories:
@@ -32,4 +34,4 @@ sources:
  - https://github.com/autobrr/autobrr
title: Autobrr
train: community
version: 1.2.25
version: 1.3.0
6  ix-dev/community/autobrr/app_migrations.yaml  Normal file
@@ -0,0 +1,6 @@
migrations:
  - file: ip_port_migration
    from:
      max_version: 1.2.25
    target:
      min_version: 1.3.0
23  ix-dev/community/autobrr/migrations/ip_port_migration  Executable file
@@ -0,0 +1,23 @@
#!/usr/bin/python3

import os
import sys
import yaml


def migrate(values):
    values["network"]["web_port"] = {
        "port_number": values["network"]["web_port"],
        "bind_mode": "published",
        "host_ips": [],
    }
    return values


if __name__ == "__main__":
    if len(sys.argv) != 2:
        exit(1)

    if os.path.exists(sys.argv[1]):
        with open(sys.argv[1], "r") as f:
            print(yaml.dump(migrate(yaml.safe_load(f.read()))))
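The per-app ip_port_migration scripts all share this shape: the scalar web_port becomes a dict carrying the bind mode and host IPs. A sketch of the transformation on example values (the port number is hypothetical):

before = {"network": {"host_network": False, "web_port": 30016}}

after = migrate(before)
# after == {
#     "network": {
#         "host_network": False,
#         "web_port": {
#             "port_number": 30016,
#             "bind_mode": "published",
#             "host_ips": [],
#         },
#     },
# }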
@@ -1,48 +0,0 @@
#!/usr/bin/python3

import os
import sys
import yaml

from migration_helpers.resources import migrate_resources
from migration_helpers.storage import migrate_storage_item


def migrate(values):
    config = values.get("helm_secret", {}).get("config", {})
    if not config:
        raise ValueError("No config found in values")

    new_values = {
        "TZ": config["TZ"],
        "autobrr": {
            "additional_envs": config["autobrrConfig"].get("additionalEnvs", []),
        },
        "run_as": {
            "user": config["autobrrRunAs"].get("user", 568),
            "group": config["autobrrRunAs"].get("group", 568),
        },
        "network": {
            "host_network": config["autobrrNetwork"].get("hostNetwork", False),
            "web_port": config["autobrrNetwork"].get("webPort", 32400),
        },
        "storage": {
            "config": migrate_storage_item(config["autobrrStorage"]["config"]),
            "additional_storage": [
                migrate_storage_item(item, include_read_only=True)
                for item in config["autobrrStorage"]["additionalStorages"]
            ],
        },
        "resources": migrate_resources(config["resources"]),
    }

    return new_values


if __name__ == "__main__":
    if len(sys.argv) != 2:
        exit(1)

    if os.path.exists(sys.argv[1]):
        with open(sys.argv[1], "r") as f:
            print(yaml.dump(migrate(yaml.safe_load(f.read()))))
@@ -1,30 +0,0 @@
import math
import re
import os

CPU_COUNT = os.cpu_count()

NUMBER_REGEX = re.compile(r"^[1-9][0-9]$")
FLOAT_REGEX = re.compile(r"^[0-9]+\.[0-9]+$")
MILI_CPU_REGEX = re.compile(r"^[0-9]+m$")


def transform_cpu(cpu) -> int:
    result = 2
    if NUMBER_REGEX.match(cpu):
        result = int(cpu)
    elif FLOAT_REGEX.match(cpu):
        result = int(math.ceil(float(cpu)))
    elif MILI_CPU_REGEX.match(cpu):
        num = int(cpu[:-1])
        num = num / 1000
        result = int(math.ceil(num))

    if CPU_COUNT is not None:
        # Do not exceed the actual CPU count
        result = min(result, CPU_COUNT)

    if int(result) == 0:
        result = CPU_COUNT if CPU_COUNT else 2

    return int(result)
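Worth noting when reading this deleted helper: NUMBER_REGEX (^[1-9][0-9]$) only matches two-digit strings, so single-digit integer limits fall through to the default of 2. A behavior sketch, assuming a host with at least 12 cores so the min(result, CPU_COUNT) clamp does not kick in:

transform_cpu("12")    # -> 12  (two-digit integer, NUMBER_REGEX)
transform_cpu("1.5")   # -> 2   (float, rounded up)
transform_cpu("1500m") # -> 2   (milli-CPUs: 1500/1000 = 1.5, rounded up)
transform_cpu("2")     # -> 2   (misses every regex; lands on the default of 2)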
@@ -1,9 +0,0 @@
def migrate_dns_config(dns_config):
    if not dns_config:
        return []

    dns_opts = []
    for opt in dns_config.get("options", []):
        dns_opts.append(f"{opt['name']}:{opt['value']}")

    return dns_opts
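A behavior sketch (option name and value are illustrative): k8s-style DNS options become "name:value" strings.

migrate_dns_config({"options": [{"name": "ndots", "value": "2"}]})
# -> ["ndots:2"]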
@@ -1,16 +0,0 @@
def get_value_from_secret(secrets=None, secret_name=None, key=None):
    secrets = secrets if secrets else dict()
    secret_name = secret_name if secret_name else ""
    key = key if key else ""

    if not secrets or not secret_name or not key:
        raise ValueError("Expected [secrets], [secret_name] and [key] to be set")
    for curr_secret_name, curr_data in secrets.items():
        if curr_secret_name.endswith(secret_name):
            if not curr_data.get(key, None):
                raise ValueError(
                    f"Expected [{key}] to be set in secret [{curr_secret_name}]"
                )
            return curr_data[key]

    raise ValueError(f"Secret [{secret_name}] not found")
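The lookup matches on the suffix of the stored secret name, since release secrets carry a release-specific prefix. A sketch with invented names:

# Hypothetical release secrets (names invented for illustration):
secrets = {
    "ix-myapp-postgres-creds": {"POSTGRES_PASSWORD": "hunter2"},
}

get_value_from_secret(secrets, "postgres-creds", "POSTGRES_PASSWORD")
# -> "hunter2"  (matched because the stored name ends with "postgres-creds")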
@@ -1,61 +0,0 @@
import re
import math


def get_total_memory():
    with open("/proc/meminfo") as f:
        for line in filter(lambda x: "MemTotal" in x, f):
            return int(line.split()[1]) * 1024

    return 0


TOTAL_MEM = get_total_memory()

SINGLE_SUFFIX_REGEX = re.compile(r"^[1-9][0-9]*([EPTGMK])$")
DOUBLE_SUFFIX_REGEX = re.compile(r"^[1-9][0-9]*([EPTGMK])i$")
BYTES_INTEGER_REGEX = re.compile(r"^[1-9][0-9]*$")
EXPONENT_REGEX = re.compile(r"^[1-9][0-9]*e[0-9]+$")

SUFFIX_MULTIPLIERS = {
    "K": 10**3,
    "M": 10**6,
    "G": 10**9,
    "T": 10**12,
    "P": 10**15,
    "E": 10**18,
}

DOUBLE_SUFFIX_MULTIPLIERS = {
    "Ki": 2**10,
    "Mi": 2**20,
    "Gi": 2**30,
    "Ti": 2**40,
    "Pi": 2**50,
    "Ei": 2**60,
}


def transform_memory(memory):
    result = 4096  # Default to 4GB

    if re.match(SINGLE_SUFFIX_REGEX, memory):
        suffix = memory[-1]
        result = int(memory[:-1]) * SUFFIX_MULTIPLIERS[suffix]
    elif re.match(DOUBLE_SUFFIX_REGEX, memory):
        suffix = memory[-2:]
        result = int(memory[:-2]) * DOUBLE_SUFFIX_MULTIPLIERS[suffix]
    elif re.match(BYTES_INTEGER_REGEX, memory):
        result = int(memory)
    elif re.match(EXPONENT_REGEX, memory):
        result = int(float(memory))

    result = math.ceil(result)
    result = min(result, TOTAL_MEM)
    # Convert to Megabytes
    result = result / 1024 / 1024

    if int(result) == 0:
        result = TOTAL_MEM if TOTAL_MEM else 4096

    return int(result)
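A behavior sketch for the deleted memory transform, assuming a host with more than 8 GiB of RAM so the min(result, TOTAL_MEM) clamp does not kick in; results are MiB:

transform_memory("4Gi")         # -> 4096  (4 * 2**30 bytes / 1024 / 1024)
transform_memory("512Mi")       # -> 512
transform_memory("2G")          # -> 1907  (decimal suffix: 2 * 10**9 bytes)
transform_memory("1073741824")  # -> 1024  (plain byte count)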
@@ -1,59 +0,0 @@
from .memory import transform_memory, TOTAL_MEM
from .cpu import transform_cpu, CPU_COUNT


def migrate_resources(resources, gpus=None, system_gpus=None):
    gpus = gpus or {}
    system_gpus = system_gpus or []

    result = {
        "limits": {
            "cpus": int((CPU_COUNT or 2) / 2),
            "memory": int(TOTAL_MEM / 1024 / 1024),
        }
    }

    if resources.get("limits", {}).get("cpu", ""):
        result["limits"].update(
            {"cpus": transform_cpu(resources.get("limits", {}).get("cpu", ""))}
        )
    if resources.get("limits", {}).get("memory", ""):
        result["limits"].update(
            {"memory": transform_memory(resources.get("limits", {}).get("memory", ""))}
        )

    gpus_result = {}
    for gpu in gpus.items() if gpus else []:
        kind = gpu[0].lower()  # Kind of gpu (amd, nvidia, intel)
        count = gpu[1]  # Number of gpus user requested

        if count == 0:
            continue

        if "amd" in kind or "intel" in kind:
            gpus_result.update({"use_all_gpus": True})
        elif "nvidia" in kind:
            sys_gpus = [
                gpu_item
                for gpu_item in system_gpus
                if gpu_item.get("error") is None
                and gpu_item.get("vendor", None) is not None
                and gpu_item.get("vendor", "").upper() == "NVIDIA"
            ]
            for sys_gpu in sys_gpus:
                if count == 0:  # We passed # of gpus that user previously requested
                    break
                guid = sys_gpu.get("vendor_specific_config", {}).get("uuid", "")
                pci_slot = sys_gpu.get("pci_slot", "")
                if not guid or not pci_slot:
                    continue

                gpus_result.update(
                    {"nvidia_gpu_selection": {pci_slot: {"uuid": guid, "use_gpu": True}}}
                )
                count -= 1

    if gpus_result:
        result.update({"gpus": gpus_result})

    return result
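A sketch of the whole resources transform, with invented inputs (field names follow the helper above; assumes a host with at least 4 cores and 8 GiB of RAM so the clamps do not kick in):

resources = {"limits": {"cpu": "4000m", "memory": "8Gi"}}
gpus = {"nvidia.com/gpu": 1}
system_gpus = [
    {
        "error": None,
        "vendor": "NVIDIA",
        "pci_slot": "0000:01:00.0",
        "vendor_specific_config": {"uuid": "GPU-1234"},
    }
]

migrate_resources(resources, gpus, system_gpus)
# -> {
#     "limits": {"cpus": 4, "memory": 8192},
#     "gpus": {
#         "nvidia_gpu_selection": {
#             "0000:01:00.0": {"uuid": "GPU-1234", "use_gpu": True}
#         }
#     },
# }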
@@ -82,13 +82,51 @@ questions:
      attrs:
        - variable: web_port
          label: WebUI Port
          description: The port for Autobrr WebUI
          schema:
            type: int
            default: 30016
            required: true
            $ref:
              - definitions/port
            type: dict
            attrs:
              - variable: bind_mode
                label: Port Bind Mode
                description: |
                  The port bind mode.</br>
                  - Publish: The port will be published on the host for external access.</br>
                  - Expose: The port will be exposed for inter-container communication.</br>
                  - None: The port will not be exposed or published.</br>
                  Note: If the Dockerfile defines an EXPOSE directive,
                  the port will still be exposed for inter-container communication regardless of this setting.
                schema:
                  type: string
                  default: "published"
                  enum:
                    - value: "published"
                      description: Publish port on the host for external access
                    - value: "exposed"
                      description: Expose port for inter-container communication
                    - value: ""
                      description: None
              - variable: port_number
                label: Port Number
                schema:
                  type: int
                  default: 30016
                  required: true
                  $ref:
                    - definitions/port
              - variable: host_ips
                label: Host IPs
                description: IPs on the host to bind this port
                schema:
                  type: list
                  show_if: [["bind_mode", "=", "published"]]
                  default: []
                  items:
                    - variable: host_ip
                      label: Host IP
                      schema:
                        type: string
                        required: true
                        $ref:
                          - definitions/node_bind_ip
        - variable: host_network
          label: Host Network
          description: |
@@ -5,13 +5,13 @@
{% set perms_config = {"uid": values.run_as.user, "gid": values.run_as.group, "mode": "check"} %}

{% do c1.set_user(values.run_as.user, values.run_as.group) %}
{% do c1.healthcheck.set_test("curl", {"port": values.network.web_port, "path": "/api/healthz/liveness"}) %}
{% do c1.healthcheck.set_test("curl", {"port": values.network.web_port.port_number, "path": "/api/healthz/liveness"}) %}

{% do c1.environment.add_env("AUTOBRR__PORT", values.network.web_port) %}
{% do c1.environment.add_env("AUTOBRR__PORT", values.network.web_port.port_number) %}
{% do c1.environment.add_env("AUTOBRR__HOST", "0.0.0.0") %}

{% do c1.environment.add_user_envs(values.autobrr.additional_envs) %}
{% do c1.ports.add_port(values.network.web_port, values.network.web_port) %}
{% do c1.add_port(values.network.web_port) %}

{% do c1.add_storage("/config", values.storage.config) %}
{% do perm_container.add_or_skip_action("config", values.storage.config, perms_config) %}
@@ -26,6 +26,6 @@
{% do c1.depends.add_dependency(values.consts.perms_container_name, "service_completed_successfully") %}
{% endif %}

{% do tpl.portals.add_portal({"port": values.network.web_port}) %}
{% do tpl.portals.add_portal({"port": values.network.web_port.port_number}) %}

{{ tpl.render() | tojson }}
@@ -10,7 +10,9 @@ autobrr:

network:
  host_network: false
  web_port: 8080
  web_port:
    bind_mode: published
    port_number: 8080

run_as:
  user: 568
@@ -1,3 +1,5 @@
annotations:
  min_scale_version: 24.10.2.2
app_version: 1.5.2
capabilities: []
categories:
@@ -33,4 +35,4 @@ sources:
  - https://github.com/morpheus65535/bazarr
title: Bazarr
train: community
version: 1.1.14
version: 1.2.0
6  ix-dev/community/bazarr/app_migrations.yaml  Normal file
@@ -0,0 +1,6 @@
migrations:
  - file: ip_port_migration
    from:
      max_version: 1.1.14
    target:
      min_version: 1.2.0
23  ix-dev/community/bazarr/migrations/ip_port_migration  Executable file
@@ -0,0 +1,23 @@
#!/usr/bin/python3

import os
import sys
import yaml


def migrate(values):
    values["network"]["web_port"] = {
        "port_number": values["network"]["web_port"],
        "bind_mode": "published",
        "host_ips": [],
    }
    return values


if __name__ == "__main__":
    if len(sys.argv) != 2:
        exit(1)

    if os.path.exists(sys.argv[1]):
        with open(sys.argv[1], "r") as f:
            print(yaml.dump(migrate(yaml.safe_load(f.read()))))
@@ -1,47 +0,0 @@
#!/usr/bin/python3

import os
import sys
import yaml

from migration_helpers.resources import migrate_resources
from migration_helpers.storage import migrate_storage_item


def migrate(values):
    config = values.get("helm_secret", {}).get("config", {})
    if not config:
        raise ValueError("No config found in values")

    new_values = {
        "bazarr": {
            "additional_envs": config["bazarrConfig"].get("additionalEnvs", []),
        },
        "run_as": {
            "user": config["bazarrRunAs"].get("user", 568),
            "group": config["bazarrRunAs"].get("group", 568),
        },
        "network": {
            "host_network": config["bazarrNetwork"].get("hostNetwork", False),
            "web_port": config["bazarrNetwork"].get("webPort", 20910),
        },
        "storage": {
            "config": migrate_storage_item(config["bazarrStorage"]["config"]),
            "additional_storage": [
                migrate_storage_item(item, include_read_only=True)
                for item in config["bazarrStorage"]["additionalStorages"]
            ],
        },
        "resources": migrate_resources(config["resources"]),
    }

    return new_values


if __name__ == "__main__":
    if len(sys.argv) != 2:
        exit(1)

    if os.path.exists(sys.argv[1]):
        with open(sys.argv[1], "r") as f:
            print(yaml.dump(migrate(yaml.safe_load(f.read()))))
@@ -82,13 +82,51 @@ questions:
      attrs:
        - variable: web_port
          label: WebUI Port
          description: The port for Bazarr WebUI
          schema:
            type: int
            default: 30046
            required: true
            $ref:
              - definitions/port
            type: dict
            attrs:
              - variable: bind_mode
                label: Port Bind Mode
                description: |
                  The port bind mode.</br>
                  - Publish: The port will be published on the host for external access.</br>
                  - Expose: The port will be exposed for inter-container communication.</br>
                  - None: The port will not be exposed or published.</br>
                  Note: If the Dockerfile defines an EXPOSE directive,
                  the port will still be exposed for inter-container communication regardless of this setting.
                schema:
                  type: string
                  default: "published"
                  enum:
                    - value: "published"
                      description: Publish port on the host for external access
                    - value: "exposed"
                      description: Expose port for inter-container communication
                    - value: ""
                      description: None
              - variable: port_number
                label: Port Number
                schema:
                  type: int
                  default: 30046
                  required: true
                  $ref:
                    - definitions/port
              - variable: host_ips
                label: Host IPs
                description: IPs on the host to bind this port
                schema:
                  type: list
                  show_if: [["bind_mode", "=", "published"]]
                  default: []
                  items:
                    - variable: host_ip
                      label: Host IP
                      schema:
                        type: string
                        required: true
                        $ref:
                          - definitions/node_bind_ip
        - variable: host_network
          label: Host Network
          description: |
@@ -5,13 +5,13 @@
{% set perms_config = {"uid": values.run_as.user, "gid": values.run_as.group, "mode": "check"} %}

{% do c1.set_user(values.run_as.user, values.run_as.group) %}
{% do c1.healthcheck.set_test("curl", {"port": values.network.web_port, "path": "/api/swagger.json"}) %}
{% do c1.healthcheck.set_test("curl", {"port": values.network.web_port.port_number, "path": "/api/swagger.json"}) %}

{% do c1.set_entrypoint(["/entrypoint.sh"]) %}
{% do c1.set_command(["--port", values.network.web_port]) %}
{% do c1.set_command(["--port", values.network.web_port.port_number]) %}

{% do c1.environment.add_user_envs(values.bazarr.additional_envs) %}
{% do c1.ports.add_port(values.network.web_port, values.network.web_port) %}
{% do c1.add_port(values.network.web_port) %}

{% do c1.add_storage("/config", values.storage.config) %}
{% do perm_container.add_or_skip_action("config", values.storage.config, perms_config) %}
@@ -26,6 +26,6 @@
{% do c1.depends.add_dependency(values.consts.perms_container_name, "service_completed_successfully") %}
{% endif %}

{% do tpl.portals.add_portal({"port": values.network.web_port}) %}
{% do tpl.portals.add_portal({"port": values.network.web_port.port_number}) %}

{{ tpl.render() | tojson }}
@@ -7,7 +7,9 @@ bazarr:
  additional_envs: []
network:
  host_network: false
  web_port: 8080
  web_port:
    bind_mode: published
    port_number: 8080

run_as:
  user: 568
@@ -1,3 +1,5 @@
annotations:
  min_scale_version: 24.10.2.2
app_version: latest
capabilities: []
categories:
@@ -37,4 +39,4 @@ sources:
  - https://docs.briefkastenhq.com/
title: Briefkasten
train: community
version: 1.2.13
version: 1.3.0
6  ix-dev/community/briefkasten/app_migrations.yaml  Normal file
@@ -0,0 +1,6 @@
migrations:
  - file: ip_port_migration
    from:
      max_version: 1.2.13
    target:
      min_version: 1.3.0
23  ix-dev/community/briefkasten/migrations/ip_port_migration  Executable file
@@ -0,0 +1,23 @@
#!/usr/bin/python3

import os
import sys
import yaml


def migrate(values):
    values["network"]["web_port"] = {
        "port_number": values["network"]["web_port"],
        "bind_mode": "published",
        "host_ips": [],
    }
    return values


if __name__ == "__main__":
    if len(sys.argv) != 2:
        exit(1)

    if os.path.exists(sys.argv[1]):
        with open(sys.argv[1], "r") as f:
            print(yaml.dump(migrate(yaml.safe_load(f.read()))))
@@ -1,80 +0,0 @@
#!/usr/bin/python3

import os
import sys
import yaml

from migration_helpers.resources import migrate_resources
from migration_helpers.storage import migrate_storage_item
from migration_helpers.kubernetes_secrets import get_value_from_secret


def migrate(values):
    config = values.get("helm_secret", {}).get("config", {})
    k8s_secrets = values.get("release_secrets", {})
    if not config:
        raise ValueError("No config found in values")

    new_values = {
        "briefkasten": {
            "additional_envs": config["briefkastenConfig"].get("additionalEnvs", []),
            "nextauth_url": config["briefkastenConfig"].get("url"),
            "nextauth_secret": get_value_from_secret(
                k8s_secrets, "briefkasten", "NEXTAUTH_SECRET"
            ),
            "postgres_password": get_value_from_secret(
                k8s_secrets, "postgres-creds", "POSTGRES_PASSWORD"
            ),
            "smtp": {
                "enabled": config["briefkastenConfig"]["smtp"]["enabled"],
                "server": config["briefkastenConfig"]["smtp"].get("server", ""),
                "from": config["briefkastenConfig"]["smtp"].get("from", ""),
            },
            "github": {
                "enabled": config["briefkastenConfig"]["github"]["enabled"],
                "id": config["briefkastenConfig"]["github"].get("id", ""),
                "secret": config["briefkastenConfig"]["github"].get("secret", ""),
            },
            "google": {
                "enabled": config["briefkastenConfig"]["google"]["enabled"],
                "id": config["briefkastenConfig"]["google"].get("id", ""),
                "secret": config["briefkastenConfig"]["google"].get("secret", ""),
            },
            "keycloak": {
                "enabled": config["briefkastenConfig"]["keycloak"]["enabled"],
                "name": config["briefkastenConfig"]["keycloak"].get("name", ""),
                "id": config["briefkastenConfig"]["keycloak"].get("id", ""),
                "secret": config["briefkastenConfig"]["keycloak"].get("secret", ""),
                "issuer": config["briefkastenConfig"]["keycloak"].get("issuer", ""),
            },
            "authentik": {
                "enabled": config["briefkastenConfig"]["authentik"]["enabled"],
                "name": config["briefkastenConfig"]["authentik"].get("name", ""),
                "id": config["briefkastenConfig"]["authentik"].get("id", ""),
                "secret": config["briefkastenConfig"]["authentik"].get("secret", ""),
                "issuer": config["briefkastenConfig"]["authentik"].get("issuer", ""),
            },
        },
        "network": {
            "web_port": config["briefkastenNetwork"].get("webPort", 32400),
        },
        "storage": {
            "postgres_data": migrate_storage_item(config["briefkastenStorage"]["pgData"]),
            "additional_storage": [
                migrate_storage_item(item, include_read_only=True)
                for item in config["briefkastenStorage"]["additionalStorages"]
            ],
        },
        "resources": migrate_resources(config["resources"]),
    }

    return new_values


if __name__ == "__main__":
    if len(sys.argv) != 2:
        exit(1)

    if os.path.exists(sys.argv[1]):
        with open(sys.argv[1], "r") as f:
            print(yaml.dump(migrate(yaml.safe_load(f.read()))))
@@ -40,7 +40,7 @@ questions:
        If something goes wrong, you will have to restore from backup.
      schema:
        type: string
        default: postgres_15_image
        default: postgres_17_image
        required: true
        enum:
          - value: postgres_15_image
@@ -277,14 +277,51 @@ questions:
      attrs:
        - variable: web_port
          label: WebUI Port
          description: The port for Briefkasten WebUI
          schema:
            type: int
            default: 30080
            required: true
            $ref:
              - definitions/port

            type: dict
            attrs:
              - variable: bind_mode
                label: Port Bind Mode
                description: |
                  The port bind mode.</br>
                  - Publish: The port will be published on the host for external access.</br>
                  - Expose: The port will be exposed for inter-container communication.</br>
                  - None: The port will not be exposed or published.</br>
                  Note: If the Dockerfile defines an EXPOSE directive,
                  the port will still be exposed for inter-container communication regardless of this setting.
                schema:
                  type: string
                  default: "published"
                  enum:
                    - value: "published"
                      description: Publish port on the host for external access
                    - value: "exposed"
                      description: Expose port for inter-container communication
                    - value: ""
                      description: None
              - variable: port_number
                label: Port Number
                schema:
                  type: int
                  default: 30080
                  required: true
                  $ref:
                    - definitions/port
              - variable: host_ips
                label: Host IPs
                description: IPs on the host to bind this port
                schema:
                  type: list
                  show_if: [["bind_mode", "=", "published"]]
                  default: []
                  items:
                    - variable: host_ip
                      label: Host IP
                      schema:
                        type: string
                        required: true
                        $ref:
                          - definitions/node_bind_ip
        - variable: storage
          label: ""
          group: Storage Configuration
@@ -17,12 +17,12 @@
) %}

{% set envs = namespace(x={
  "PORT": values.network.web_port,
  "PORT": values.network.web_port.port_number,
  "NODE_ENV": "production",
  "DATABASE_URL": postgres.get_url("postgres"),
  "NEXTAUTH_SECRET": values.briefkasten.nextauth_secret,
  "NEXTAUTH_URL": values.briefkasten.nextauth_url,
  "NEXTAUTH_URL_INTERNAL": "http://127.0.0.1:%d"|format(values.network.web_port),
  "NEXTAUTH_URL_INTERNAL": "http://127.0.0.1:%d"|format(values.network.web_port.port_number),
}) %}

{% if values.briefkasten.smtp.enabled and envs.x.update({
@@ -69,7 +69,7 @@
{% do c1.set_user(values.consts.run_user, values.consts.run_group) %}
{% do c1.depends.add_dependency(values.consts.pg_container_name, "service_healthy") %}
{% do c1.depends.add_dependency(values.consts.init_container_name, "service_completed_successfully") %}
{% do c1.healthcheck.set_test("tcp", {"port": values.network.web_port}) %}
{% do c1.healthcheck.set_test("tcp", {"port": values.network.web_port.port_number}) %}

{% for key, value in envs.x.items() %}
{% do init.environment.add_env(key, value) %}
@@ -85,7 +85,7 @@
{% do perm_container.add_or_skip_action(store.mount_path, store, perms_config) %}
{% endfor %}

{% do c1.ports.add_port(values.network.web_port, values.network.web_port) %}
{% do c1.add_port(values.network.web_port) %}

{% if perm_container.has_actions() %}
{% do perm_container.activate() %}
@@ -94,6 +94,6 @@
{% do postgres.add_dependency(values.consts.perms_container_name, "service_completed_successfully") %}
{% endif %}

{% do tpl.portals.add_portal({"port": values.network.web_port}) %}
{% do tpl.portals.add_portal({"port": values.network.web_port.port_number}) %}

{{ tpl.render() | tojson }}
||||
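All four template hunks follow the same mechanical rewrite: every consumer that used the port value directly now dereferences port_number. A minimal sketch, with hypothetical values:

values = {"network": {"web_port": {"bind_mode": "published", "port_number": 8080, "host_ips": []}}}

# Old access pattern (pre-migration, web_port was an int):
#   port = values["network"]["web_port"]
# New access pattern:
port = values["network"]["web_port"]["port_number"]
assert port == 8080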
@@ -35,7 +35,9 @@ briefkasten:
    issuer: ''
  additional_envs: []
network:
  web_port: 8080
  web_port:
    bind_mode: published
    port_number: 8080

ix_volumes:
  postgres_data: /opt/tests/mnt/postgres_data

@@ -36,7 +36,9 @@ briefkasten:
    issuer: ''
  additional_envs: []
network:
  web_port: 8080
  web_port:
    bind_mode: published
    port_number: 8080

ix_volumes:
  postgres_data: /opt/tests/mnt/postgres_data
@@ -1,14 +1,16 @@
annotations:
  min_scale_version: 24.10.2.2
app_version: 8.4.0
capabilities:
  - description: Calibre is able to chown files.
  - description: Calibre is able to change file ownership arbitrarily
    name: CHOWN
  - description: Calibre is able to bypass permission checks.
  - description: Calibre is able to bypass file permission checks
    name: DAC_OVERRIDE
  - description: Calibre is able bypass permission checks for it's sub-processes.
  - description: Calibre is able to bypass permission checks for file operations
    name: FOWNER
  - description: Calibre is able to set group ID for it's sub-processes.
  - description: Calibre is able to change group ID of processes
    name: SETGID
  - description: Calibre is able to set user ID for it's sub-processes.
  - description: Calibre is able to change user ID of processes
    name: SETUID
categories:
  - media
@@ -47,4 +49,4 @@ sources:
  - https://calibre-ebook.com/
title: Calibre
train: community
version: 1.0.27
version: 1.1.0
6
ix-dev/community/calibre/app_migrations.yaml
Normal file
@@ -0,0 +1,6 @@
migrations:
  - file: ip_port_migration
    from:
      max_version: 1.0.27
    target:
      min_version: 1.1.0
34
ix-dev/community/calibre/migrations/ip_port_migration
Executable file
@@ -0,0 +1,34 @@
#!/usr/bin/python3

import os
import sys
import yaml


def migrate(values):
    values["network"]["web_port"] = {
        "port_number": values["network"].get("web_port", 32014),
        "bind_mode": "published",
        "host_ips": [],
    }
    values["network"]["http_port"] = {
        "port_number": values["network"].get("http_port", 32015),
        "bind_mode": "published",
        "host_ips": [],
    }
    values["network"]["https_port"] = {
        "port_number": values["network"].get("https_port", 32016),
        "bind_mode": "published",
        "host_ips": [],
    }

    return values


if __name__ == "__main__":
    if len(sys.argv) != 2:
        exit(1)

    if os.path.exists(sys.argv[1]):
        with open(sys.argv[1], "r") as f:
            print(yaml.dump(migrate(yaml.safe_load(f.read()))))
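The script follows the repo-wide migration convention: it takes the path to the old values file as its only argument and prints the migrated YAML to stdout. A hedged usage sketch (paths and values hypothetical):

import yaml

# Equivalent to: ./migrations/ip_port_migration old-values.yaml > new-values.yaml
old = {"network": {"web_port": 32014, "http_port": 32015, "https_port": 32016}}
new = migrate(old)  # assuming migrate() from the script above is importable
assert new["network"]["web_port"] == {
    "port_number": 32014,
    "bind_mode": "published",
    "host_ips": [],
}
print(yaml.dump(new))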
@@ -1,48 +0,0 @@
#!/usr/bin/python3

import os
import sys
import yaml

from migration_helpers.resources import migrate_resources
from migration_helpers.storage import migrate_storage_item


def migrate(values):
    config = values.get("helm_secret", {}).get("config", {})
    if not config:
        raise ValueError("No config found in values")

    new_values = {
        "lidarr": {
            "instance_name": config["lidarrConfig"].get("instanceName", "Lidarr"),
            "additional_envs": config["lidarrConfig"].get("additionalEnvs", []),
        },
        "run_as": {
            "user": config["lidarrRunAs"].get("user", 568),
            "group": config["lidarrRunAs"].get("group", 568),
        },
        "network": {
            "host_network": config["lidarrNetwork"].get("hostNetwork", False),
            "web_port": config["lidarrNetwork"].get("webPort", 20910),
        },
        "storage": {
            "config": migrate_storage_item(config["lidarrStorage"]["config"]),
            "additional_storage": [
                migrate_storage_item(item, include_read_only=True)
                for item in config["lidarrStorage"]["additionalStorages"]
            ],
        },
        "resources": migrate_resources(config["resources"]),
    }

    return new_values


if __name__ == "__main__":
    if len(sys.argv) != 2:
        exit(1)

    if os.path.exists(sys.argv[1]):
        with open(sys.argv[1], "r") as f:
            print(yaml.dump(migrate(yaml.safe_load(f.read()))))
@@ -1,30 +0,0 @@
import math
import re
import os

CPU_COUNT = os.cpu_count()

NUMBER_REGEX = re.compile(r"^[1-9][0-9]*$")
FLOAT_REGEX = re.compile(r"^[0-9]+\.[0-9]+$")
MILI_CPU_REGEX = re.compile(r"^[0-9]+m$")


def transform_cpu(cpu) -> int:
    result = 2
    if NUMBER_REGEX.match(cpu):
        result = int(cpu)
    elif FLOAT_REGEX.match(cpu):
        result = int(math.ceil(float(cpu)))
    elif MILI_CPU_REGEX.match(cpu):
        num = int(cpu[:-1])
        num = num / 1000
        result = int(math.ceil(num))

    if CPU_COUNT is not None:
        # Do not exceed the actual CPU count
        result = min(result, CPU_COUNT)

    if int(result) == 0:
        result = CPU_COUNT if CPU_COUNT else 2

    return int(result)
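To make the removed conversion concrete, the three accepted input forms reproduced inline as plain arithmetic:

import math

assert int("4") == 4                            # plain core count
assert math.ceil(float("1.5")) == 2             # fractional cores round up
assert math.ceil(int("500m"[:-1]) / 1000) == 1  # k8s millicores: 500m -> 0.5 -> 1
# transform_cpu() then clamped the result to os.cpu_count().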
@@ -1,9 +0,0 @@
def migrate_dns_config(dns_config):
    if not dns_config:
        return []

    dns_opts = []
    for opt in dns_config.get("options", []):
        dns_opts.append(f"{opt['name']}:{opt['value']}")

    return dns_opts
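A worked example of the removed DNS helper, with a hypothetical input:

dns_config = {"options": [{"name": "ndots", "value": "2"}]}  # old k8s-style shape
# migrate_dns_config(dns_config) -> ["ndots:2"]
# migrate_dns_config({})         -> []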
@@ -1,16 +0,0 @@
def get_value_from_secret(secrets=None, secret_name=None, key=None):
    secrets = secrets if secrets else dict()
    secret_name = secret_name if secret_name else ""
    key = key if key else ""

    if not secrets or not secret_name or not key:
        raise ValueError("Expected [secrets], [secret_name] and [key] to be set")
    for curr_secret_name, curr_data in secrets.items():
        if curr_secret_name.endswith(secret_name):
            if not curr_data.get(key, None):
                raise ValueError(
                    f"Expected [{key}] to be set in secret [{curr_secret_name}]"
                )
            return curr_data[key]

    raise ValueError(f"Secret [{secret_name}] not found")
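The lookup matched on the secret-name suffix, presumably because deployed secrets carried a release-name prefix. A sketch with hypothetical data:

secrets = {"ix-lidarr-db-secret": {"password": "hunter2"}}  # hypothetical names/values
# get_value_from_secret(secrets, "db-secret", "password") -> "hunter2"
# A missing secret or key raised ValueError instead of returning a default.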
@@ -1,61 +0,0 @@
import re
import math


def get_total_memory():
    with open("/proc/meminfo") as f:
        for line in filter(lambda x: "MemTotal" in x, f):
            return int(line.split()[1]) * 1024

    return 0


TOTAL_MEM = get_total_memory()

SINGLE_SUFFIX_REGEX = re.compile(r"^[1-9][0-9]*([EPTGMK])$")
DOUBLE_SUFFIX_REGEX = re.compile(r"^[1-9][0-9]*([EPTGMK])i$")
BYTES_INTEGER_REGEX = re.compile(r"^[1-9][0-9]*$")
EXPONENT_REGEX = re.compile(r"^[1-9][0-9]*e[0-9]+$")

SUFFIX_MULTIPLIERS = {
    "K": 10**3,
    "M": 10**6,
    "G": 10**9,
    "T": 10**12,
    "P": 10**15,
    "E": 10**18,
}

DOUBLE_SUFFIX_MULTIPLIERS = {
    "Ki": 2**10,
    "Mi": 2**20,
    "Gi": 2**30,
    "Ti": 2**40,
    "Pi": 2**50,
    "Ei": 2**60,
}


def transform_memory(memory):
    result = 4096  # Default to 4GB

    if re.match(SINGLE_SUFFIX_REGEX, memory):
        suffix = memory[-1]
        result = int(memory[:-1]) * SUFFIX_MULTIPLIERS[suffix]
    elif re.match(DOUBLE_SUFFIX_REGEX, memory):
        suffix = memory[-2:]
        result = int(memory[:-2]) * DOUBLE_SUFFIX_MULTIPLIERS[suffix]
    elif re.match(BYTES_INTEGER_REGEX, memory):
        result = int(memory)
    elif re.match(EXPONENT_REGEX, memory):
        result = int(float(memory))

    result = math.ceil(result)
    result = min(result, TOTAL_MEM)
    # Convert to Megabytes
    result = result / 1024 / 1024

    if int(result) == 0:
        result = TOTAL_MEM if TOTAL_MEM else 4096

    return int(result)
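The two suffix tables differ by base; a pair of worked conversions (input strings hypothetical):

# "1G"  (decimal): 1 * 10**9 bytes -> /1024/1024 -> ~953 MB reported
assert int("1G"[:-1]) * 10**9 == 1_000_000_000
# "1Gi" (binary):  1 * 2**30 bytes -> /1024/1024 -> exactly 1024 MB reported
assert int("1Gi"[:-2]) * 2**30 == 1_073_741_824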
@@ -1,59 +0,0 @@
from .memory import transform_memory, TOTAL_MEM
from .cpu import transform_cpu, CPU_COUNT


def migrate_resources(resources, gpus=None, system_gpus=None):
    gpus = gpus or {}
    system_gpus = system_gpus or []

    result = {
        "limits": {
            "cpus": int((CPU_COUNT or 2) / 2),
            "memory": int(TOTAL_MEM / 1024 / 1024),
        }
    }

    if resources.get("limits", {}).get("cpu", ""):
        result["limits"].update(
            {"cpus": transform_cpu(resources.get("limits", {}).get("cpu", ""))}
        )
    if resources.get("limits", {}).get("memory", ""):
        result["limits"].update(
            {"memory": transform_memory(resources.get("limits", {}).get("memory", ""))}
        )

    gpus_result = {}
    for gpu in gpus.items() if gpus else []:
        kind = gpu[0].lower()  # Kind of gpu (amd, nvidia, intel)
        count = gpu[1]  # Number of gpus user requested

        if count == 0:
            continue

        if "amd" in kind or "intel" in kind:
            gpus_result.update({"use_all_gpus": True})
        elif "nvidia" in kind:
            sys_gpus = [
                gpu_item
                for gpu_item in system_gpus
                if gpu_item.get("error") is None
                and gpu_item.get("vendor", None) is not None
                and gpu_item.get("vendor", "").upper() == "NVIDIA"
            ]
            for sys_gpu in sys_gpus:
                if count == 0:  # We passed # of gpus that user previously requested
                    break
                guid = sys_gpu.get("vendor_specific_config", {}).get("uuid", "")
                pci_slot = sys_gpu.get("pci_slot", "")
                if not guid or not pci_slot:
                    continue

                gpus_result.update(
                    {"nvidia_gpu_selection": {pci_slot: {"uuid": guid, "use_gpu": True}}}
                )
                count -= 1

    if gpus_result:
        result.update({"gpus": gpus_result})

    return result
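Putting the removed helpers together, a hedged example of the overall mapping (input hypothetical):

resources = {"limits": {"cpu": "2", "memory": "4Gi"}}  # old k8s resources block
# migrate_resources(resources) -> {"limits": {"cpus": 2, "memory": 4096}}
# A "gpus" key was added only when a nonzero GPU count had been requested.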
@@ -1,155 +0,0 @@
def migrate_storage_item(storage_item, include_read_only=False):
    if not storage_item:
        raise ValueError("Expected [storage_item] to be set")

    result = {}
    if storage_item["type"] == "ixVolume":
        if storage_item.get("ixVolumeConfig"):
            result = migrate_ix_volume_type(storage_item)
        elif storage_item.get("datasetName"):
            result = migrate_old_ix_volume_type(storage_item)
        else:
            raise ValueError(
                "Expected [ix_volume] to have [ixVolumeConfig] or [datasetName] set"
            )
    elif storage_item["type"] == "hostPath":
        if storage_item.get("hostPathConfig"):
            result = migrate_host_path_type(storage_item)
        elif storage_item.get("hostPath"):
            result = migrate_old_host_path_type(storage_item)
        else:
            raise ValueError(
                "Expected [host_path] to have [hostPathConfig] or [hostPath] set"
            )
    elif storage_item["type"] == "emptyDir":
        result = migrate_empty_dir_type(storage_item)
    elif storage_item["type"] == "smb-pv-pvc":
        result = migrate_smb_pv_pvc_type(storage_item)

    mount_path = storage_item.get("mountPath", "")
    if mount_path:
        result.update({"mount_path": mount_path})

    if include_read_only:
        result.update({"read_only": storage_item.get("readOnly", False)})
    return result


def migrate_smb_pv_pvc_type(smb_pv_pvc):
    smb_config = smb_pv_pvc.get("smbConfig", {})
    if not smb_config:
        raise ValueError("Expected [smb_pv_pvc] to have [smbConfig] set")

    return {
        "type": "cifs",
        "cifs_config": {
            "server": smb_config["server"],
            "path": smb_config["share"],
            "domain": smb_config.get("domain", ""),
            "username": smb_config["username"],
            "password": smb_config["password"],
        },
    }


def migrate_empty_dir_type(empty_dir):
    empty_dir_config = empty_dir.get("emptyDirConfig", {})
    if not empty_dir_config:
        raise ValueError("Expected [empty_dir] to have [emptyDirConfig] set")

    if empty_dir_config.get("medium", "") == "Memory":
        # Convert Gi to Mi
        size = empty_dir_config.get("size", 0.5) * 1024
        return {
            "type": "tmpfs",
            "tmpfs_config": {"size": size},
        }

    return {"type": "temporary"}


def migrate_old_ix_volume_type(ix_volume):
    if not ix_volume.get("datasetName"):
        raise ValueError("Expected [ix_volume] to have [datasetName] set")

    return {
        "type": "ix_volume",
        "ix_volume_config": {
            "acl_enable": False,
            "dataset_name": ix_volume["datasetName"],
        },
    }


def migrate_ix_volume_type(ix_volume):
    vol_config = ix_volume.get("ixVolumeConfig", {})
    if not vol_config:
        raise ValueError("Expected [ix_volume] to have [ixVolumeConfig] set")

    result = {
        "type": "ix_volume",
        "ix_volume_config": {
            "acl_enable": vol_config.get("aclEnable", False),
            "dataset_name": vol_config.get("datasetName", ""),
        },
    }

    if vol_config.get("aclEnable", False):
        result["ix_volume_config"].update(
            {"acl_entries": migrate_acl_entries(vol_config["aclEntries"])}
        )

    return result


def migrate_old_host_path_type(host_path):
    if not host_path.get("hostPath"):
        raise ValueError("Expected [host_path] to have [hostPath] set")

    return {
        "type": "host_path",
        "host_path_config": {
            "acl_enable": False,
            "path": host_path["hostPath"],
        },
    }


def migrate_host_path_type(host_path):
    path_config = host_path.get("hostPathConfig", {})
    if not path_config:
        raise ValueError("Expected [host_path] to have [hostPathConfig] set")

    result = {
        "type": "host_path",
        "host_path_config": {
            "acl_enable": path_config.get("aclEnable", False),
        },
    }

    if path_config.get("aclEnable", False):
        result["host_path_config"].update(
            {"acl": migrate_acl_entries(path_config.get("acl", {}))}
        )
    else:
        result["host_path_config"].update({"path": path_config["hostPath"]})

    return result


def migrate_acl_entries(acl_entries: dict) -> dict:
    entries = []
    for entry in acl_entries.get("entries", []):
        entries.append(
            {
                "access": entry["access"],
                "id": entry["id"],
                "id_type": entry["id_type"],
            }
        )

    return {
        "entries": entries,
        "options": {"force": acl_entries.get("options", {}).get("force", False)},
        "path": acl_entries["path"],
    }
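Finally, a sketch of the removed top-level dispatcher applied to a hypothetical old-style host path item:

item = {"type": "hostPath", "hostPath": "/mnt/tank/media", "mountPath": "/media"}
# migrate_storage_item(item, include_read_only=True) ->
# {
#     "type": "host_path",
#     "host_path_config": {"acl_enable": False, "path": "/mnt/tank/media"},
#     "mount_path": "/media",
#     "read_only": False,
# }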