add syncthing enterprise (#125)

* test ci

* make sure if docker daemon does not start any containers, we fail

* fix image

* add minio enterprise

* test perms

* update lib funcs

* named args

* fix indents

* imports

* fix indentation

* check if db is needed

* add todo

* one more

* add dep

* fix

* add macros

* auto perms

* remove dummy file

* fix values

* updates

* minio perms

* abstract away

* cleaner

* safer, cleaner

* new storage funcs

* storage options

* use built ins

* rename

* spelling

* rename

* add vol support

* test vol

* manually add container name

* volumes

* cleaner vols

* do some renames

* renames

* squash

* add todo

* move a few things to the lib, currently broken

* more dynamic container names

* fix message

* cleanup

* update app

* spelling

* remove port

* more fixes

* fix pg test

* hm

* that's better

* fmt

* back to jinja

* duh

* wait for it

* typo

* typo

* ui

* fix checks

* todo

* items is a builtin

* fixes

* resources

* resources

* -.-

* ...

* use lib

* add rough migration paths

* Update docker-compose.yaml

* Update docker-compose.yaml

* move usages under value

* update usages

* cleanup

* fix url

* order

* another url

* fix

* fix hc too

* update app.yaml

* update lib

* update lib

* update lib

* update lib

* update lib

* update lib

* update lib

* update lib

* lint

* update lib

* fix ixvol

* update lib

* update lib

* update lib

* remove logsearch/postgres

* fmt

* replace set _

* update lib

* adapt to lib changes

* update lib

* update lib

* update lib

* update lib

* update lib

* update lib

* sync library

* updates

* update lib

* update compose

* typo

* update ci checks

* now actually fix app

* add migration

* fix migration and update error message

* order

* remove test data

* update readme

* syncthing init commit

* basic working setup

* small adjustment in cifs path

* add config container

* fix telemetry toggle

* add QoL in ci script for local testing

* ui and ports

* add migration mode

* add tz

* update migration helper

* initial migration

* remove ixvolume for additional storage

* fix cifs name and migration

* split
This commit is contained in:
Stavros Kois
2024-08-16 19:37:04 +03:00
committed by GitHub
parent fd71fdcb9a
commit 54c990c693
36 changed files with 2408 additions and 5 deletions

.github/scripts/ci.py

@@ -54,6 +54,13 @@ def parse_args():
type=bool,
help="Prints the rendered docker-compose file even if it's not a valid yaml",
)
parser.add_argument(
"--wait",
required=False,
default=False,
type=bool,
help="Wait for user input before stopping the app",
)
parsed = parser.parse_args()
return {
@@ -63,6 +70,7 @@ def parse_args():
"render_only": parsed.render_only,
"render_only_debug": parsed.render_only_debug,
"project": secrets.token_hex(16),
"wait": parsed.wait,
}
@@ -74,6 +82,7 @@ def print_info():
print_stderr(f" - test-file: [{args['test_file']}]")
print_stderr(f" - render-only: [{args['render_only']}]")
print_stderr(f" - render-only-debug: [{args['render_only_debug']}]")
print_stderr(f" - wait: [{args['wait']}]")
def command_exists(command):
@@ -146,6 +155,16 @@ def render_compose():
print_stderr("Done rendering docker-compose file")
def update_x_portals(parsed_compose):
portals = parsed_compose.get("x-portals", [])
for portal in portals:
scheme = portal.get("scheme", "http")
host = portal.get("host", "localhost").replace("0.0.0.0", "localhost")
port = str(portal.get("port", "80" if scheme == "http" else "443"))
url = scheme + "://" + host + ":" + port + portal.get("path", "")
x_portals.append(f"[{portal['name']}] - {url}")
def print_docker_compose_config():
print_stderr("Printing docker compose config (parsed compose)")
cmd = f"{get_base_cmd()} config"
@@ -165,7 +184,10 @@ def print_docker_compose_config():
print_stdout(res.stdout.decode("utf-8"))
sys.exit(0)
print_stderr(res.stdout.decode("utf-8"))
data = yaml.safe_load(res.stdout.decode("utf-8"))
update_x_portals(data)
print_stderr(data)
def separator_start():
@@ -439,6 +461,14 @@ def generate_item_file():
yaml.dump(item_data, f)
def wait_for_user_input():
print_stderr("Press enter to stop the app")
try:
input()
except KeyboardInterrupt:
pass
def main():
print_info()
check_app_dir_exists()
@@ -451,6 +481,10 @@ def main():
render_compose()
print_docker_compose_config()
res = run_app()
if args["wait"]:
print_stderr("\nPortals:")
print_stderr("\n".join(x_portals) + "\n")
wait_for_user_input()
docker_cleanup()
if res == 0:
@@ -462,6 +496,7 @@ def main():
args = parse_args()
x_portals = []
if __name__ == "__main__":
main()
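
The new update_x_portals helper turns the x-portals entries of the rendered compose into clickable URLs for the --wait summary. A minimal standalone sketch of that URL assembly (the sample compose dict is hypothetical):

x_portals = []
parsed_compose = {"x-portals": [{"name": "Web UI", "host": "0.0.0.0", "port": 8384}]}
for portal in parsed_compose.get("x-portals", []):
    scheme = portal.get("scheme", "http")
    # 0.0.0.0 is not a routable address, so it is rewritten to localhost
    host = portal.get("host", "localhost").replace("0.0.0.0", "localhost")
    port = str(portal.get("port", "80" if scheme == "http" else "443"))
    url = scheme + "://" + host + ":" + port + portal.get("path", "")
    x_portals.append(f"[{portal['name']}] - {url}")
print(x_portals)  # ['[Web UI] - http://localhost:8384']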


@@ -121,7 +121,7 @@ Thank you for your understanding.
| wordpress | community | - | - |
| zerotier | community | ✅ | - |
| minio | enterprise | ✅ | ✅ |
| syncthing | enterprise | - | - |
| syncthing | enterprise | | |
## New Apps


@@ -120,6 +120,7 @@ words:
- publicip
- publicipv
- qbittorrent
- quic
- radarr
- rcat
- rclone
@@ -157,5 +158,6 @@ words:
- whiteboarding
- whoogle
- wtfismyip
- xattr
- zerotier
- zoneedit


@@ -0,0 +1,20 @@
# Syncthing
[Syncthing](https://syncthing.net/) is a file synchronization program.
At each startup of the application, the following settings are applied:
- Disable automatic upgrades
- Disable anonymous usage reporting
- Disable NAT traversal
- Disable global discovery
- Disable local discovery
- Disable relaying
- Disable announcing LAN addresses
Additionally, the following defaults are set for new syncthing "folders":
- Max total size of `xattr`: 10 MiB
- Max size per `xattr`: 2 MiB
- Enable `send` and `sync` of `xattr`
- Enable `send` and `sync` of `ownership`
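
For reference, the two xattr size limits above correspond to the byte values used in the app's default configuration; a quick sanity check:

# 10 MiB and 2 MiB expressed in bytes (matches the defaults applied at startup)
print(10 * 1024 * 1024)  # 10485760 -> max total size of xattrs
print(2 * 1024 * 1024)   # 2097152  -> max size per xattr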


@@ -0,0 +1,52 @@
app_version: 1.27.6
capabilities:
- description: Syncthing is able to chown files.
name: CHOWN
- description: Syncthing is able to bypass permission checks.
name: DAC_OVERRIDE
- description: Syncthing is able to bypass permission checks for its sub-processes.
name: FOWNER
- description: Syncthing is able to set user ID for its sub-processes.
name: SETUID
- description: Syncthing is able to set group ID for its sub-processes.
name: SETGID
- description: Syncthing is able to set process capabilities.
name: SETPCAP
- description: Syncthing is able to set file capabilities.
name: SETFCAP
- description: Syncthing is able to perform various system administration operations.
name: SYS_ADMIN
categories:
- storage
description: Syncthing is a continuous file synchronization program.
home: https://syncthing.net
host_mounts: []
icon: https://media.sys.truenas.net/apps/syncthing/icons/icon.svg
keywords:
- sync
- file-sharing
lib_version: 1.0.0
lib_version_hash: c99e9b78fa296d9796875a1373d75fb779ae5b78ba7c4d01b1a23d564e0075c0
maintainers:
- email: dev@ixsystems.com
name: truenas
url: https://www.truenas.com/
name: syncthing
run_as_context:
- description: Syncthing runs as root user.
gid: 0
group_name: root
uid: 0
user_name: root
screenshots:
- https://media.sys.truenas.net/apps/syncthing/screenshots/screenshot1.png
- https://media.sys.truenas.net/apps/syncthing/screenshots/screenshot2.png
- https://media.sys.truenas.net/apps/syncthing/screenshots/screenshot3.png
- https://media.sys.truenas.net/apps/syncthing/screenshots/screenshot4.png
sources:
- https://syncthing.net/
- https://github.com/syncthing/syncthing
- https://hub.docker.com/r/syncthing/syncthing
title: Syncthing
train: enterprise
version: 1.0.0


@@ -0,0 +1,11 @@
categories:
- storage
icon_url: https://media.sys.truenas.net/apps/syncthing/icons/icon.svg
screenshots:
- https://media.sys.truenas.net/apps/syncthing/screenshots/screenshot1.png
- https://media.sys.truenas.net/apps/syncthing/screenshots/screenshot2.png
- https://media.sys.truenas.net/apps/syncthing/screenshots/screenshot3.png
- https://media.sys.truenas.net/apps/syncthing/screenshots/screenshot4.png
tags:
- sync
- file-sharing


@@ -0,0 +1,65 @@
images:
image:
repository: syncthing/syncthing
tag: 1.27.6
consts:
syncthing_container_name: syncthing
config_container_name: config
home_path: /var/syncthing
settings:
nat_traversal: false
local_discovery: false
global_discovery: false
telemetry: false
relaying: false
announce_lan_addresses: false
auto_upgrade_intervalh: 0
xattr_filter_max_total_size: 10485760
xattr_filter_max_single_entry_size: 2097152
send_ownership: true
sync_ownership: true
send_xattrs: true
sync_xattrs: true
ignore_perms: true
path: ""
logo_horizontal_svg_path: /var/truenas/assets/gui/default/assets/img/logo-horizontal.svg
logo_horizontal_svg: |
<svg id="TN_Scale_RGB" data-name="TN Scale RGB" xmlns="http://www.w3.org/2000/svg" width="129.29" height="25.448" viewBox="0 0 129.29 25.448">
<g id="Logomark">
<g id="logoMark-2" data-name="logoMark">
<path id="logoMark_PathItem_" data-name="logoMark &lt;PathItem&gt;" d="M48.576,16.032l-3.163,1.827-3.174-1.832L45.406,14.2Z" transform="translate(-27.484 -9.24)" fill="#aeadae"/>
<path id="logoMark_PathItem_2" data-name="logoMark &lt;PathItem&gt;" d="M60.539,4.1,57.368,5.929l-3.92-2.264V0Z" transform="translate(-34.778 -0.001)" fill="#0095d5"/>
<path id="logoMark_PathItem_3" data-name="logoMark &lt;PathItem&gt;" d="M35.956,0V3.663L32.04,5.922,28.868,4.091Z" transform="translate(-18.784)" fill="#31beec"/>
<path id="logoMark_PathItem_4" data-name="logoMark &lt;PathItem&gt;" d="M7.839,39.056,0,34.53l0-3.661L3.534,32.91l.029.016,4.274,2.468Z" transform="translate(0 -20.086)" fill="#0095d5"/>
<path id="logoMark_PathItem_5" data-name="logoMark &lt;PathItem&gt;" d="M21.853,31.471,18.686,33.3l-3.173-1.832,3.169-1.828Z" transform="translate(-10.094 -19.286)" fill="#aeadae"/>
<path id="logoMark_PathItem_6" data-name="logoMark &lt;PathItem&gt;" d="M9.226,19.115,5.314,21.372,2.142,19.541l7.083-4.088Z" transform="translate(-1.394 -10.055)" fill="#31beec"/>
<path id="logoMark_PathItem_7" data-name="logoMark &lt;PathItem&gt;" d="M33.826,19.547l-3.165,1.828-3.919-2.264V15.457l3.522,2.033.028.016Z" transform="translate(-17.4 -10.058)" fill="#0095d5"/>
<path id="logoMark_PathItem_8" data-name="logoMark &lt;PathItem&gt;" d="M61.308,46.429v3.662L60.6,50.5l-7.132,4.118V50.957l3.96-2.287,1.5-.865Z" transform="translate(-34.791 -30.211)" fill="#31beec"/>
<path id="logoMark_PathItem_9" data-name="logoMark &lt;PathItem&gt;" d="M48.583,47.023l-3.17,1.831-3.173-1.832,3.173-1.831Z" transform="translate(-27.484 -29.405)" fill="#aeadae"/>
<path id="logoMark_PathItem_10" data-name="logoMark &lt;PathItem&gt;" d="M35.963,30.993v3.663l-.715.413L32.04,36.919,32,36.9l-3.137-1.812,3.207-1.851,1.5-.865L35.956,31Z" transform="translate(-18.784 -20.167)" fill="#31beec"/>
<path id="logoMark_PathItem_11" data-name="logoMark &lt;PathItem&gt;" d="M34.568,50.957v3.66L27.44,50.5l-.715-.413,0-3.661.006,0,2.382,1.375,1.146.661.029.017.323.186Z" transform="translate(-17.389 -30.211)" fill="#0095d5"/>
<path id="logoMark_PathItem_12" data-name="logoMark &lt;PathItem&gt;" d="M88.058,30.871v3.663l-7.84,4.526V35.4Z" transform="translate(-52.197 -20.087)" fill="#31beec"/>
<path id="logoMark_PathItem_13" data-name="logoMark &lt;PathItem&gt;" d="M75.333,31.468,72.162,33.3l-3.173-1.833,3.173-1.83Z" transform="translate(-44.89 -19.284)" fill="#aeadae"/>
<path id="logoMark_PathItem_14" data-name="logoMark &lt;PathItem&gt;" d="M87.309,19.532l-3.172,1.833L80.218,19.1V15.438Z" transform="translate(-52.197 -10.045)" fill="#0095d5"/>
<path id="logoMark_PathItem_15" data-name="logoMark &lt;PathItem&gt;" d="M62.713,15.435V19.1L58.79,21.362,55.618,19.53Z" transform="translate(-36.19 -10.043)" fill="#31beec"/>
<path id="logoMark_PathItem_16" data-name="logoMark &lt;PathItem&gt;" d="M60.563,35.09,57.432,36.9h0l-3.956-2.284V31l2.38,1.374,1.5.865Z" transform="translate(-34.795 -20.169)" fill="#0095d5"/>
</g>
</g>
<g id="full-rgb" transform="translate(39.123)">
<g id="type" transform="translate(0 0)">
<path id="type_CompoundPathItem_" data-name="type &lt;CompoundPathItem&gt;" d="M12.7.646V2.6H7.426V17.958H5.269V2.6H0V.646Z" transform="translate(0 -0.421)" fill="#0095d5"/>
<path id="type_CompoundPathItem_2" data-name="type &lt;CompoundPathItem&gt;" d="M43.14,16.629a2.383,2.383,0,0,0-2.107-1.054,2.728,2.728,0,0,0-2.684,3.036v7.853H36.341V13.919h2.008v1.23a3.043,3.043,0,0,1,2.91-1.43,3.989,3.989,0,0,1,3.588,1.706Z" transform="translate(-23.647 -8.926)" fill="#0095d5"/>
<path id="type_CompoundPathItem_3" data-name="type &lt;CompoundPathItem&gt;" d="M74.576,26.838H72.568V25.609c-.627.953-1.5,1.43-3.361,1.43-2.684,0-4.566-1.405-4.566-4.918V14.293h2.007v7.8c0,2.534,1.38,3.086,2.86,3.086a2.923,2.923,0,0,0,3.061-3.061V14.293h2.008Z" transform="translate(-42.061 -9.3)" fill="#0095d5"/>
<path id="type_CompoundPathItem_4" data-name="type &lt;CompoundPathItem&gt;" d="M108.944,24.557c-.878,1.531-2.208,2.108-4.39,2.108-3.362,0-5.37-2.183-5.37-5.67V19.338c0-3.562,1.681-5.62,4.968-5.62,3.312,0,4.968,2.032,4.968,5.62v1.3h-7.928v.351c0,2.158,1.029,3.863,3.211,3.863,1.631,0,2.459-.476,2.985-1.4Zm-7.753-5.67h5.9c-.1-2.107-1.028-3.362-2.936-3.362C102.22,15.525,101.316,16.8,101.191,18.887Z" transform="translate(-64.538 -8.926)" fill="#0095d5"/>
<path id="type_CompoundPathItem_5" data-name="type &lt;CompoundPathItem&gt;" d="M149.265.646V17.958H146.68L139.63,6.191V17.958h-2.81V.646h2.509l7.126,11.917V.646Z" transform="translate(-89.027 -0.421)" fill="#0095d5"/>
<path id="type_CompoundPathItem_6" data-name="type &lt;CompoundPathItem&gt;" d="M178.044,17.958,184.066.646h2.76l6.021,17.312h-3.086l-1.18-3.588h-6.247l-1.179,3.588Zm5.093-6.1h4.617l-2.308-7Z" transform="translate(-115.851 -0.421)" fill="#0095d5"/>
<path id="type_CompoundPathItem_7" data-name="type &lt;CompoundPathItem&gt;" d="M232.654,4.416a4.038,4.038,0,0,0-3.738-1.882c-1.781,0-2.835.8-2.835,2.258,0,1.656,1.3,2.308,3.714,2.835,3.487.753,5.294,2.057,5.294,5.168,0,2.584-1.732,4.968-5.9,4.968-2.96,0-5.043-.9-6.473-2.835L225,13.347a4.634,4.634,0,0,0,4.039,1.882c2.384,0,3.136-1.054,3.136-2.308,0-1.38-.777-2.233-3.788-2.885-3.337-.7-5.219-2.308-5.219-5.244,0-2.609,1.706-4.792,5.771-4.792,2.76,0,4.692.928,5.921,2.835Z" transform="translate(-144.92 0)" fill="#0095d5"/>
<path id="type_CompoundPathItem_8" data-name="type &lt;CompoundPathItem&gt;" d="M16.888,61.246a1.006,1.006,0,0,0-.932-.469c-.444,0-.707.2-.707.563,0,.413.325.576.926.707.869.188,1.32.513,1.32,1.289,0,.644-.432,1.238-1.47,1.238a1.846,1.846,0,0,1-1.614-.707l.569-.394a1.156,1.156,0,0,0,1.007.469c.594,0,.782-.263.782-.575,0-.344-.194-.557-.944-.72-.832-.175-1.3-.575-1.3-1.307,0-.651.425-1.195,1.439-1.195a1.61,1.61,0,0,1,1.476.707Z" transform="translate(-9.377 -39.132)" fill="#aeadae"/>
<path id="type_CompoundPathItem_9" data-name="type &lt;CompoundPathItem&gt;" d="M29.1,61.551a.821.821,0,0,0-.869-.763c-.575,0-.888.375-.888,1.307v.55c0,.919.313,1.307.888,1.307a.81.81,0,0,0,.869-.763H29.8a1.445,1.445,0,0,1-1.564,1.395c-.963,0-1.614-.582-1.614-1.939V62.1c0-1.357.65-1.939,1.614-1.939a1.47,1.47,0,0,1,1.57,1.395Z" transform="translate(-17.321 -39.143)" fill="#aeadae"/>
<path id="type_CompoundPathItem_10" data-name="type &lt;CompoundPathItem&gt;" d="M38.021,64.633l1.5-4.316h.688l1.5,4.316h-.769l-.294-.894H39.091l-.294.894Zm1.27-1.52h1.151l-.575-1.745Z" transform="translate(-24.74 -39.248)" fill="#aeadae"/>
<path id="type_CompoundPathItem_11" data-name="type &lt;CompoundPathItem&gt;" d="M52.512,64.008h1.92v.626H51.787V60.317h.726Z" transform="translate(-33.697 -39.248)" fill="#aeadae"/>
<path id="type_CompoundPathItem_12" data-name="type &lt;CompoundPathItem&gt;" d="M65.226,60.317v.632h-1.92v1.138h1.733v.625H63.306v1.295h1.92v.626H62.581V60.317Z" transform="translate(-40.72 -39.248)" fill="#aeadae"/>
</g>
</g>
</svg>


@@ -0,0 +1,57 @@
#!/usr/bin/python3
import os
import sys
import yaml
from migration_helpers.resources import migrate_resources
from migration_helpers.storage import migrate_storage_item
def migrate(values):
config = values.get("helm_secret", {}).get("config", {})
if not config:
raise ValueError("No config found in values")
new_values = {
"TZ": config["TZ"],
"syncthing": {
"additional_envs": config.get("additionalEnvs", []),
},
"run_as": {
"user": config["syncthingID"].get("user", 568),
"group": config["syncthingID"].get("group", 568),
},
"network": {
"host_network": config["syncthingNetwork"]["hostNetwork"],
"web_port": config["syncthingNetwork"]["webPort"],
"tcp_port": config["syncthingNetwork"].get("tcpPort", 22000),
"quic_port": config["syncthingNetwork"].get("quicPort", 22000),
"certificate_id": config["syncthingNetwork"].get("certificateID", None),
},
"storage": {
"home": migrate_storage_item(config["syncthingStorage"]["home"]),
"additional_storage": [],
},
"resources": migrate_resources(config["resources"]),
}
for old_item in config["syncthingStorage"].get("additionalStorages", []):
new_item = migrate_storage_item(old_item, include_read_only=True)
if new_item["type"] == "cifs":
new_migration_mode = old_item.get("smbConfig", {}).get("migrationMode", False)
assert isinstance(new_item["cifs_config"], dict), "something went wrong"
new_item["cifs_config"].update({"migration_mode": new_migration_mode})
new_values["storage"]["additional_storage"].append(new_item)
return new_values
if __name__ == "__main__":
if len(sys.argv) != 2:
exit(1)
if os.path.exists(sys.argv[1]):
with open(sys.argv[1], "r") as f:
print(yaml.dump(migrate(yaml.safe_load(f.read()))))
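
A hedged sketch of driving migrate() directly with a minimal old-style values dict (all field values below are hypothetical; assumes the imports and definitions above are in scope):

old_values = {
    "helm_secret": {
        "config": {
            "TZ": "Etc/UTC",
            "syncthingID": {"user": 568, "group": 568},
            "syncthingNetwork": {"hostNetwork": True, "webPort": 20910},
            "syncthingStorage": {
                "home": {"type": "ixVolume", "ixVolumeConfig": {"datasetName": "home"}},
                "additionalStorages": [],
            },
            "resources": {"limits": {"cpu": "4000m", "memory": "8Gi"}},
        }
    }
}
print(yaml.dump(migrate(old_values)))  # emits the new values schema used by questions.yaml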


@@ -0,0 +1,27 @@
import math
import re
import os
CPU_COUNT = os.cpu_count()
NUMBER_REGEX = re.compile(r"^[1-9][0-9]*$")
FLOAT_REGEX = re.compile(r"^[0-9]+\.[0-9]+$")
MILI_CPU_REGEX = re.compile(r"^[0-9]+m$")
def transform_cpu(cpu) -> int:
result = 2
if NUMBER_REGEX.match(cpu):
result = int(cpu)
elif FLOAT_REGEX.match(cpu):
result = int(math.ceil(float(cpu)))
elif MILI_CPU_REGEX.match(cpu):
num = int(cpu[:-1])
num = num / 1000
result = int(math.ceil(num))
if CPU_COUNT is not None:
# Do not exceed the actual CPU count
result = min(result, CPU_COUNT)
return result
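
A few examples of what transform_cpu produces (results are additionally capped at the host's CPU count, so they may differ on small machines):

print(transform_cpu("4"))      # 4
print(transform_cpu("1.5"))    # 2 (floats are rounded up)
print(transform_cpu("2500m"))  # 3 (2500 millicores -> 2.5 cores, rounded up)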


@@ -0,0 +1,9 @@
def migrate_dns_config(dns_config):
if not dns_config:
return []
dns_opts = []
for opt in dns_config.get("options", []):
dns_opts.append(f"{opt['name']}:{opt['value']}")
return dns_opts
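
For example, converting an old-style DNS config (hypothetical input) to compose-style dns options:

print(migrate_dns_config({"options": [{"name": "ndots", "value": "2"}]}))  # ['ndots:2']
print(migrate_dns_config(None))  # []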


@@ -0,0 +1,15 @@
def get_value_from_secret(secrets={}, secret_name="", key=""):
if not secrets or not secret_name or not key:
raise ValueError("Expected [secrets], [secret_name] and [key] to be set")
for secret in secrets.items():
curr_secret_name = secret[0]
curr_data = secret[1]
if curr_secret_name.endswith(secret_name):
if not curr_data.get(key, None):
raise ValueError(
f"Expected [{key}] to be set in secret [{curr_secret_name}]"
)
return curr_data[key]
raise ValueError(f"Secret [{secret_name}] not found")


@@ -0,0 +1,49 @@
import re
import math
import psutil
TOTAL_MEM = psutil.virtual_memory().total
SINGLE_SUFFIX_REGEX = re.compile(r"^[1-9][0-9]*([EPTGMK])$")
DOUBLE_SUFFIX_REGEX = re.compile(r"^[1-9][0-9]*([EPTGMK])i$")
BYTES_INTEGER_REGEX = re.compile(r"^[1-9][0-9]*$")
EXPONENT_REGEX = re.compile(r"^[1-9][0-9]*e[0-9]+$")
SUFFIX_MULTIPLIERS = {
"K": 10**3,
"M": 10**6,
"G": 10**9,
"T": 10**12,
"P": 10**15,
"E": 10**18,
}
DOUBLE_SUFFIX_MULTIPLIERS = {
"Ki": 2**10,
"Mi": 2**20,
"Gi": 2**30,
"Ti": 2**40,
"Pi": 2**50,
"Ei": 2**60,
}
def transform_memory(memory):
result = 4096 * 1024 * 1024  # Default to 4 GiB (kept in bytes until the conversion below)
if re.match(SINGLE_SUFFIX_REGEX, memory):
suffix = memory[-1]
result = int(memory[:-1]) * SUFFIX_MULTIPLIERS[suffix]
elif re.match(DOUBLE_SUFFIX_REGEX, memory):
suffix = memory[-2:]
result = int(memory[:-2]) * DOUBLE_SUFFIX_MULTIPLIERS[suffix]
elif re.match(BYTES_INTEGER_REGEX, memory):
result = int(memory)
elif re.match(EXPONENT_REGEX, memory):
result = int(float(memory))
result = math.ceil(result)
result = min(result, TOTAL_MEM)
# Convert to Megabytes
result = result / 1024 / 1024
return int(result)
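
Examples of what transform_memory returns on a host with more RAM than the inputs (results are capped at the host's total memory):

print(transform_memory("4G"))          # 3814 (4 * 10**9 bytes, converted to MiB)
print(transform_memory("4Gi"))         # 4096
print(transform_memory("8589934592"))  # 8192 (raw bytes input)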


@@ -0,0 +1,59 @@
from .memory import transform_memory, TOTAL_MEM
from .cpu import transform_cpu, CPU_COUNT
def migrate_resources(resources, gpus=None, system_gpus=None):
gpus = gpus or {}
system_gpus = system_gpus or []
result = {
"limits": {
"cpus": (CPU_COUNT or 2) / 2,
"memory": {TOTAL_MEM / 1024 / 1024},
}
}
if resources.get("limits", {}).get("cpu", ""):
result["limits"].update(
{"cpus": transform_cpu(resources.get("limits", {}).get("cpu", ""))}
)
if resources.get("limits", {}).get("memory", ""):
result["limits"].update(
{"memory": transform_memory(resources.get("limits", {}).get("memory", ""))}
)
gpus_result = {}
for gpu in gpus.items() if gpus else []:
kind = gpu[0].lower() # Kind of gpu (amd, nvidia, intel)
count = gpu[1] # Number of gpus user requested
if count == 0:
continue
if "amd" in kind or "intel" in kind:
gpus_result.update({"use_all_gpus": True})
elif "nvidia" in kind:
sys_gpus = [
gpu_item
for gpu_item in system_gpus
if gpu_item.get("error") is None
and gpu_item.get("vendor", None) is not None
and gpu_item.get("vendor", "").upper() == "NVIDIA"
]
for sys_gpu in sys_gpus:
if count == 0: # We passed # of gpus that user previously requested
break
guid = sys_gpu.get("vendor_specific_config", {}).get("uuid", "")
pci_slot = sys_gpu.get("pci_slot", "")
if not guid or not pci_slot:
continue
gpus_result.setdefault("nvidia_gpu_selection", {}).update(
{pci_slot: {"uuid": guid, "use_gpu": True}}
)
count -= 1
if gpus_result:
result.update({"gpus": gpus_result})
return result
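
For instance, with CPU and memory limits set and no GPUs (assuming a host with at least 2 cores and more than 4 GiB of RAM):

print(migrate_resources({"limits": {"cpu": "2000m", "memory": "4Gi"}}))
# {'limits': {'cpus': 2, 'memory': 4096}}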


@@ -0,0 +1,115 @@
def migrate_storage_item(storage_item, include_read_only=False):
if not storage_item:
raise ValueError("Expected [storage_item] to be set")
result = {}
if storage_item["type"] == "ixVolume":
result = migrate_ix_volume_type(storage_item)
elif storage_item["type"] == "hostPath":
result = migrate_host_path_type(storage_item)
elif storage_item["type"] == "emptyDir":
result = migrate_empty_dir_type(storage_item)
elif storage_item["type"] == "smb-pv-pvc":
result = migrate_smb_pv_pvc_type(storage_item)
mount_path = storage_item.get("mountPath", "")
if mount_path:
result.update({"mount_path": mount_path})
if include_read_only:
result.update({"read_only": storage_item.get("readOnly", False)})
return result
def migrate_smb_pv_pvc_type(smb_pv_pvc):
smb_config = smb_pv_pvc.get("smbConfig", {})
if not smb_config:
raise ValueError("Expected [smb_pv_pvc] to have [smbConfig] set")
return {
"type": "cifs",
"cifs_config": {
"server": smb_config["server"],
"path": smb_config["share"],
"domain": smb_config.get("domain", ""),
"username": smb_config["username"],
"password": smb_config["password"],
},
}
def migrate_empty_dir_type(empty_dir):
empty_dir_config = empty_dir.get("emptyDirConfig", {})
if not empty_dir_config:
raise ValueError("Expected [empty_dir] to have [emptyDirConfig] set")
if empty_dir_config.get("medium", "") == "Memory":
# Convert Gi to Mi
size = empty_dir_config.get("size", 0.5) * 1024
return {
"type": "tmpfs",
"tmpfs_config": {"size": size},
}
return {"type": "temporary"}
def migrate_ix_volume_type(ix_volume):
vol_config = ix_volume.get("ixVolumeConfig", {})
if not vol_config:
raise ValueError("Expected [ix_volume] to have [ixVolumeConfig] set")
result = {
"type": "ix_volume",
"ix_volume_config": {
"acl_enable": vol_config.get("aclEnable", False),
"dataset_name": vol_config.get("datasetName", ""),
},
}
if vol_config.get("aclEnable", False):
result["ix_volume_config"].update(
{"acl_entries": migrate_acl_entries(vol_config["aclEntries"])}
)
return result
def migrate_host_path_type(host_path):
path_config = host_path.get("hostPathConfig", {})
if not path_config:
raise ValueError("Expected [host_path] to have [hostPathConfig] set")
result = {
"type": "host_path",
"host_path_config": {
"acl_enable": path_config.get("aclEnable", False),
},
}
if path_config.get("aclEnable", False):
result["host_path_config"].update(
{"acl": migrate_acl_entries(path_config.get("acl", {}))}
)
else:
result["host_path_config"].update({"path": path_config["hostPath"]})
return result
def migrate_acl_entries(acl_entries: dict) -> dict:
entries = []
for entry in acl_entries.get("entries", []):
entries.append(
{
"access": entry["access"],
"id": entry["id"],
"id_type": entry["id_type"],
}
)
return {
"entries": entries,
"options": {"force": acl_entries.get("force", False)},
"path": acl_entries["path"],
}
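
A hedged example of migrating a single old hostPath item (the input shape follows the helpers above; paths are hypothetical):

item = {
    "type": "hostPath",
    "mountPath": "/data",
    "hostPathConfig": {"hostPath": "/mnt/tank/data"},
}
print(migrate_storage_item(item, include_read_only=True))
# {'type': 'host_path', 'host_path_config': {'acl_enable': False, 'path': '/mnt/tank/data'},
#  'mount_path': '/data', 'read_only': False}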


@@ -0,0 +1,361 @@
groups:
- name: Syncthing Configuration
description: Configure Syncthing
- name: User and Group Configuration
description: Configure User and Group for Syncthing
- name: Network Configuration
description: Configure Network for Syncthing
- name: Storage Configuration
description: Configure Storage for Syncthing
- name: Resources Configuration
description: Configure Resources for Syncthing
questions:
- variable: TZ
group: Syncthing Configuration
label: Timezone
schema:
type: string
default: Etc/UTC
required: true
$ref:
- definitions/timezone
- variable: syncthing
label: ""
group: Syncthing Configuration
schema:
type: dict
attrs:
- variable: additional_envs
label: Additional Environment Variables
description: Configure additional environment variables for Syncthing.
schema:
type: list
default: []
items:
- variable: env
label: Environment Variable
schema:
type: dict
attrs:
- variable: name
label: Name
schema:
type: string
required: true
- variable: value
label: Value
schema:
type: string
required: true
- variable: run_as
label: ""
group: User and Group Configuration
schema:
type: dict
attrs:
- variable: user
label: User ID
description: The user id that Syncthing will run as.
schema:
type: int
min: 568
default: 568
required: true
- variable: group
label: Group ID
description: The group id that Syncthing will run as.
schema:
type: int
min: 568
default: 568
required: true
- variable: network
label: ""
group: Network Configuration
schema:
type: dict
attrs:
- variable: web_port
label: Web UI Port
description: The port for the Syncthing Web UI.
schema:
type: int
default: 8384
required: true
$ref:
- "definitions/port"
- variable: tcp_port
label: TCP Port (File Transfers)
description: The TCP port for Syncthing transfers.
schema:
type: int
default: 22000
show_if: [["host_network", "=", false]]
required: true
$ref:
- "definitions/port"
- variable: quic_port
label: QUIC (UDP) Port (File Transfers)
description: The QUIC (UDP) port for Syncthing transfers.
schema:
type: int
default: 22000
show_if: [["host_network", "=", false]]
required: true
$ref:
- "definitions/port"
- variable: host_network
label: Host Network
description: |
Bind to the host network.<br/><br/>
If this is disabled, you will need to add your local networks in CIDR format to the Syncthing WebUI.<br/>
In the Syncthing WebUI, go to Advanced Settings > Options > Always Local Nets.<br/>
Separate each CIDR network with a comma.<br/>
Example: 192.168.0.0/24,192.168.1.0/24
schema:
type: boolean
default: true
- variable: certificate_id
label: Certificate
description: The certificate to use for Syncthing
schema:
type: int
"null": true
$ref:
- "definitions/certificate"
- variable: storage
label: ""
group: Storage Configuration
schema:
type: dict
attrs:
- variable: home
label: Syncthing Home Storage
description: The path to store Syncthing Home.
schema:
type: dict
attrs:
- variable: type
label: Type
description: |
ixVolume: A dataset created automatically by the system.<br/>
Host Path: A path that already exists on the system.
schema:
type: string
required: true
immutable: true
default: "ix_volume"
enum:
- value: "host_path"
description: Host Path (Path that already exists on the system)
- value: "ix_volume"
description: ixVolume (Dataset created automatically by the system)
- variable: ix_volume_config
label: ixVolume Configuration
description: The configuration for the ixVolume dataset.
schema:
type: dict
show_if: [["type", "=", "ix_volume"]]
$ref:
- "normalize/ix_volume"
attrs:
- variable: acl_enable
label: Enable ACL
description: Enable ACL for the storage.
schema:
type: boolean
default: false
- variable: dataset_name
label: Dataset Name
description: The name of the dataset to use for storage.
schema:
type: string
required: true
immutable: true
hidden: true
default: "home"
- variable: acl_entries
label: ACL Configuration
schema:
type: dict
show_if: [["acl_enable", "=", true]]
attrs: []
- variable: host_path_config
label: Host Path Configuration
schema:
type: dict
show_if: [["type", "=", "host_path"]]
attrs:
- variable: acl_enable
label: Enable ACL
description: Enable ACL for the storage.
schema:
type: boolean
default: false
- variable: acl
label: ACL Configuration
schema:
type: dict
show_if: [["acl_enable", "=", true]]
attrs: []
$ref:
- "normalize/acl"
- variable: path
label: Host Path
description: The host path to use for storage.
schema:
type: hostpath
show_if: [["acl_enable", "=", false]]
required: true
- variable: additional_storage
label: Additional Storage
description: Additional storage for Syncthing.
schema:
type: list
default: []
items:
- variable: storageEntry
label: Storage Entry
schema:
type: dict
attrs:
- variable: type
label: Type
description: |
Host Path: A path that already exists on the system.<br/>
SMB Share: An SMB share that is mounted to a persistent volume claim.
schema:
type: string
required: true
default: "ixVolume"
immutable: true
enum:
- value: "host_path"
description: Host Path (Path that already exists on the system)
- value: "cifs"
description: SMB/CIFS Share (Mounts a persistent volume claim to an SMB share)
- variable: read_only
label: Read Only
description: Mount the volume as read only.
schema:
type: boolean
default: false
- variable: mount_path
label: Mount Path
description: The path inside the container to mount the storage.
schema:
type: path
required: true
- variable: host_path_config
label: Host Path Configuration
schema:
type: dict
show_if: [["type", "=", "host_path"]]
attrs:
- variable: acl_enable
label: Enable ACL
description: Enable ACL for the storage.
schema:
type: boolean
default: false
- variable: acl
label: ACL Configuration
schema:
type: dict
show_if: [["acl_enable", "=", true]]
attrs: []
$ref:
- "normalize/acl"
- variable: path
label: Host Path
description: The host path to use for storage.
schema:
type: hostpath
show_if: [["acl_enable", "=", false]]
required: true
- variable: cifs_config
label: SMB Configuration
description: The configuration for the SMB dataset.
schema:
type: dict
show_if: [["type", "=", "cifs"]]
attrs:
- variable: migration_mode
label: Migration Mode
description: |
NOTE: This only works properly with TrueNAS SCALE 24.04.0 or newer.<br/>
Enabling this will force the following:<br/>
- Read Only Mount, regardless of the value of the Read Only checkbox.<br/>
- SMB/CIFS Mount Options will be set to "vers=3.0", "cifsacl", "noperm"<br/>
This option is used to migrate data from third-party<br/>
NAS platforms onto TrueNAS SCALE.<br/>
Keep in mind that ACL preservation is not guaranteed when:<br/>
- Running in a non-AD environment<br/>
- The ACL or the remote server contains local users<br/>
schema:
type: boolean
default: false
- variable: server
label: Server
description: The server to mount the SMB share.
schema:
type: string
required: true
- variable: path
label: Path
description: The path to mount the SMB share.
schema:
type: string
required: true
- variable: username
label: Username
description: The username to use for the SMB share.
schema:
type: string
required: true
- variable: password
label: Password
description: The password to use for the SMB share.
schema:
type: string
required: true
private: true
- variable: domain
label: Domain
description: The domain to use for the SMB share.
schema:
type: string
- variable: resources
label: ""
group: Resources Configuration
schema:
type: dict
attrs:
- variable: limits
label: Limits
schema:
type: dict
attrs:
- variable: cpus
label: CPUs
description: CPUs limit for Syncthing.
schema:
type: int
default: 2
required: true
- variable: memory
label: Memory (in MB)
description: Memory limit for Syncthing.
schema:
type: int
default: 4096
required: true


@@ -0,0 +1,183 @@
{# Stores Syncthing storage items that contain info for volumes, vol mounts, perms dirs and perms mounts #}
{% set storage_items = namespace(items=[]) %}
{# Stores the Syncthing container volume mounts #}
{% set volume_mounts = namespace(items=[]) %}
{# Stores the top level volumes #}
{% set volumes = namespace(items={}) %}
{% do storage_items.items.append(ix_lib.base.storage.storage_item(data=dict(values.storage.home, **{"mount_path": values.consts.home_path}), values=values)) %}
{% do storage_items.items.append(ix_lib.base.storage.storage_item(data={"type":"anonymous", "mount_path": "/tmp"})) %}
{% for store in values.storage.additional_storage %}
{% if store.type == "cifs" and store.cifs_config.migration_mode %}
{% do store.update({"read_only": true}) %}
{% do store.cifs_config.update({"options": ["noperm", "cifsacl", "vers=3.0"]}) %}
{% endif %}
{% do storage_items.items.append(ix_lib.base.storage.storage_item(data=store, values=values)) %}
{% else %}
{% do ix_lib.base.utils.throw_error("Expected at least one storage item to be set for Syncthing") %}
{% endfor %}
{# Add each item to the above lists #}
{% for item in storage_items.items %}
{% if item.vol and volumes.items.update(item.vol) %}{% endif %}
{% if item.vol_mount and volume_mounts.items.append(item.vol_mount) %}{% endif %}
{% endfor %}
{# Configs #}
configs:
logo-horizontal-svg:
content: {{ values.consts.logo_horizontal_svg | tojson }}
{% if values.network.certificate_id %}
private:
content: {{ values.ix_certificates[values.network.certificate_id].privatekey | tojson }}
public:
content: {{ values.ix_certificates[values.network.certificate_id].certificate | tojson }}
{% endif %}
{% set caps = ix_lib.base.security.get_caps(add=["CHOWN", "DAC_OVERRIDE", "FOWNER", "SETGID", "SETUID", "SETFCAP", "SETPCAP", "SYS_ADMIN"]) %}
{% set app_env = {
"PCAP": ["cap_sys_admin", "cap_chown", "cap_dac_override", "cap_fowner"]|join(",") + "+ep",
"STNOUPGRADE": true,
"STGUIADDRESS": "0.0.0.0:%d" | format(values.network.web_port),
"STGUIASSETS": "/var/truenas/assets/gui",
} %}
{# Containers #}
services:
{{ values.consts.config_container_name }}:
image: {{ ix_lib.base.utils.get_image(images=values.images, name="image") }}
user: "0:0"
deploy:
resources: {{ ix_lib.base.resources.resources(values.resources) | tojson }}
devices: {{ ix_lib.base.resources.get_devices(values.resources) | tojson }}
configs:
- source: logo-horizontal-svg
target: {{ values.consts.logo_horizontal_svg_path }}
{% if values.network.certificate_id %}
- source: private
target: {{ "%s/config/https-key.pem" | format(values.consts.home_path) }}
- source: public
target: {{ "%s/config/https-cert.pem" | format(values.consts.home_path) }}
{% endif %}
{% set config_caps = ix_lib.base.security.get_caps(add=caps.add + ["KILL"]) %}
cap_add: {{ config_caps.add | tojson }}
cap_drop: {{ config_caps.drop | tojson }}
security_opt: {{ ix_lib.base.security.get_sec_opts(remove=["no-new-privileges"]) | tojson }}
healthcheck:
disable: true
entrypoint:
- /bin/sh
{% set config_dir = "%s/config"|format(values.consts.home_path) %}
{% set cli = "syncthing cli --home %s config"|format(config_dir) %}
{% set st = values.consts.settings %}
{% set settings = [
{"cmd": "options announce-lanaddresses", "value": 1 if st.announce_lan_addresses else 0, "quote": true},
{"cmd": "options global-ann-enabled", "value": 1 if st.global_discovery else 0, "quote": true},
{"cmd": "options local-ann-enabled", "value": 1 if st.local_discovery else 0, "quote": true},
{"cmd": "options natenabled", "value": 1 if st.nat_traversal else 0, "quote": true},
{"cmd": "options relays-enabled", "value": 1 if st.relaying else 0, "quote": true},
{"cmd": "options uraccepted", "value": 1 if st.telemetry else -1, "quote": true},
{"cmd": "options auto-upgrade-intervalh", "value": st.auto_upgrade_intervalh, "quote": true},
{"cmd": "defaults folder xattr-filter max-total-size", "value": st.xattr_filter_max_total_size, "quote": false},
{"cmd": "defaults folder xattr-filter max-single-entry-size", "value": st.xattr_filter_max_single_entry_size, "quote": true},
{"cmd": "defaults folder send-ownership", "value": 1 if st.send_ownership else 0, "quote": false},
{"cmd": "defaults folder sync-ownership", "value": 1 if st.sync_ownership else 0, "quote": false},
{"cmd": "defaults folder send-xattrs", "value": 1 if st.send_xattrs else 0, "quote": false},
{"cmd": "defaults folder sync-xattrs", "value": 1 if st.sync_xattrs else 0, "quote": false},
{"cmd": "defaults folder ignore-perms", "value": 1 if st.ignore_perms else 0, "quote": false},
{"cmd": "defaults folder path", "value": st.path, "quote": true},
] %}
command:
- -c
- |
set -e
trap cleanup EXIT TERM
cleanup() {
echo "Gracefully stopping Syncthing..."
if kill -0 $$SYNCTHING_PID > /dev/null 2>&1; then
kill -SIGTERM $$SYNCTHING_PID
wait $$SYNCTHING_PID
fi
echo "Syncthing stopped."
}
try_for() {
local max_tries=$$1
local sleep_time=$$2
local cmd=$$3
tries=0
until eval "$$cmd"; do
[ $$tries -ge $$max_tries ] && return 1
tries=$$((tries+1))
sleep $$sleep_time
done
}
echo "Starting Syncthing in the background"
/bin/entrypoint.sh /bin/syncthing &
SYNCTHING_PID=$$!
echo "Syncthing started with PID [$$SYNCTHING_PID]"
echo "Waiting for Syncthing to be ready..."
try_for 15 2 "[ -f '{{ config_dir }}/config.xml' ]" || { echo "Syncthing did not become ready in time. Exiting..."; exit 1; }
try_for 15 2 "curl --silent --output /dev/null http://127.0.0.1:{{ values.network.web_port }}/rest/noauth/health" || { echo "Syncthing did not become ready in time. Exiting..."; exit 1; }
echo "Syncthing is ready."
{% for cfg in settings %}
echo 'Using subcommand [{{ cfg.cmd }}] to set value [{{ '\"%s\"' | format(cfg.value) if cfg.quote else cfg.value }}]'
{{ cli }} {{ cfg.cmd }} set -- {{ '"%s"' | format(cfg.value) if cfg.quote else cfg.value }} || { echo "Failed to apply. Exiting..."; exit 1; }
{% endfor %}
echo "Gracefully stopping Syncthing..."
kill -SIGTERM $$SYNCTHING_PID
wait $$SYNCTHING_PID
echo "Syncthing stopped."
environment: {{ ix_lib.base.environment.envs(app=app_env, user=values.syncthing.additional_envs, values=values) | tojson }}
volumes: {{ volume_mounts.items | tojson }}
{{ values.consts.syncthing_container_name }}:
image: {{ ix_lib.base.utils.get_image(images=values.images, name="image") }}
user: "0:0"
restart: unless-stopped
deploy:
resources: {{ ix_lib.base.resources.resources(values.resources) | tojson }}
devices: {{ ix_lib.base.resources.get_devices(values.resources) | tojson }}
cap_add: {{ caps.add | tojson }}
cap_drop: {{ caps.drop | tojson }}
security_opt: {{ ix_lib.base.security.get_sec_opts(remove=["no-new-privileges"]) | tojson }}
{% if values.network.host_network %}
network_mode: host
{% endif %}
depends_on:
{{ values.consts.config_container_name }}:
condition: service_completed_successfully
{% if values.network.dns_opts %}
dns_opt: {{ ix_lib.base.network.dns_opts(values.network.dns_opts) | tojson }}
{% endif %}
configs:
- source: logo-horizontal-svg
target: {{ values.consts.logo_horizontal_svg_path }}
{% if values.network.certificate_id %}
- source: private
target: {{ "%s/config/https-key.pem" | format(values.consts.home_path) }}
- source: public
target: {{ "%s/config/https-cert.pem" | format(values.consts.home_path) }}
{% endif %}
{% set test = ix_lib.base.healthchecks.wget_test(port=values.network.web_port, path="/rest/noauth/health") %}
healthcheck: {{ ix_lib.base.healthchecks.check_health(test) | tojson }}
volumes: {{ volume_mounts.items | tojson }}
environment: {{ ix_lib.base.environment.envs(app=app_env, user=values.syncthing.additional_envs, values=values) | tojson }}
{% if not values.network.host_network %}
ports:
- {{ ix_lib.base.ports.get_port(port={"target": values.network.web_port, "published": values.network.web_port}) | tojson }}
- {{ ix_lib.base.ports.get_port(port={"target": 22000, "published": values.network.tcp_port}) | tojson }}
- {{ ix_lib.base.ports.get_port(port={"target": 22000, "published": values.network.quic_port, "protocol": "udp"}) | tojson }}
{% if values.consts.settings.local_discovery %}
- {{ ix_lib.base.ports.get_port(port={"target": 27017 , "published": values.network.local_discover_port, "protocol": "udp"}) | tojson }}
{% endif %}
{% endif %}
{% if volumes.items %}
volumes: {{ volumes.items | tojson }}
{% endif %}
x-portals: {{ ix_lib.base.metadata.get_portals([{"port": values.network.web_port, "scheme": "https" if values.network.certificate_id else "http"}]) | tojson }}
x-notes: {{ ix_lib.base.metadata.get_notes("Syncthing") | tojson }}


@@ -0,0 +1,90 @@
from . import utils
from .resources import get_nvidia_gpus_reservations
def envs(app: dict | None = None, user: list | None = None, values: dict | None = None):
app = app or {}
user = user or []
values = values or {}
result = {}
if not values:
utils.throw_error("Values cannot be empty in environment.py")
if not isinstance(user, list):
utils.throw_error(
f"Unsupported type for user environment variables [{type(user)}]"
)
# Always set TZ
result.update({"TZ": values.get("TZ", "Etc/UTC")})
# Update envs with nvidia variables
if values.get("resources", {}).get("gpus", {}):
result.update(get_nvidia_env(values.get("resources", {}).get("gpus", {})))
# Update envs with run_as variables
if values.get("run_as"):
result.update(get_run_as_envs(values.get("run_as", {})))
# Make sure we don't manually set any of the above
for item in app.items():
if not item[0]:
utils.throw_error("Environment variable name cannot be empty.")
if item[0] in result:
utils.throw_error(
f"Environment variable [{item[0]}] is already defined automatically from the library."
)
result[item[0]] = item[1]
for item in user:
if not item.get("name"):
utils.throw_error("Environment variable name cannot be empty.")
if item.get("name") in result:
utils.throw_error(
f"Environment variable [{item['name']}] is already defined from the application developer."
)
result[item["name"]] = item.get("value")
return result
# Sets some common variables that most applications use
def get_run_as_envs(run_as: dict) -> dict:
result = {}
user = run_as.get("user")
group = run_as.get("group")
if user:
result.update(
{
"PUID": user,
"UID": user,
"USER_ID": user,
}
)
if group:
result.update(
{
"PGID": group,
"GID": group,
"GROUP_ID": group,
}
)
return result
def get_nvidia_env(gpus: dict) -> dict:
reservations = get_nvidia_gpus_reservations(gpus)
if not reservations.get("device_ids"):
return {
"NVIDIA_VISIBLE_DEVICES": "void",
}
return {
"NVIDIA_VISIBLE_DEVICES": (
",".join(reservations["device_ids"])
if reservations.get("device_ids")
else "void"
),
"NVIDIA_DRIVER_CAPABILITIES": "all",
}
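
For example, the run_as values are fanned out to the common PUID/PGID-style names:

print(get_run_as_envs({"user": 568, "group": 568}))
# {'PUID': 568, 'UID': 568, 'USER_ID': 568, 'PGID': 568, 'GID': 568, 'GROUP_ID': 568}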


@@ -0,0 +1,110 @@
from . import utils
def check_health(test, interval=10, timeout=10, retries=5, start_period=30):
if not test:
utils.throw_error("Expected [test] to be set")
return {
"test": test,
"interval": f"{interval}s",
"timeout": f"{timeout}s",
"retries": retries,
"start_period": f"{start_period}s",
}
def pg_test(user, db, config=None):
config = config or {}
if not user or not db:
utils.throw_error("Postgres container: [user] and [db] must be set")
host = config.get("host", "127.0.0.1")
port = config.get("port", 5432)
return f"pg_isready -h {host} -p {port} -d {db} -U {user}"
def redis_test(config=None):
config = config or {}
host = config.get("host", "127.0.0.1")
port = config.get("port", 6379)
password = "$$REDIS_PASSWORD"
return f"redis-cli -h {host} -p {port} -a {password} ping | grep -q PONG"
def curl_test(port, path, config=None):
config = config or {}
if not port or not path:
utils.throw_error("Expected [port] and [path] to be set")
scheme = config.get("scheme", "http")
host = config.get("host", "127.0.0.1")
headers = config.get("headers", [])
opts = []
if scheme == "https":
opts.append("--insecure")
for header in headers:
if not header[0] or not header[1]:
utils.throw_error("Expected [header] to be a list of two items")
opts.append(f'--header "{header[0]}: {header[1]}"')
return f"curl --silent --output /dev/null --show-error --fail {' '.join(opts)} {scheme}://{host}:{port}{path}"
def wget_test(port, path, config=None):
config = config or {}
if not port or not path:
utils.throw_error("Expected [port] and [path] to be set")
scheme = config.get("scheme", "http")
host = config.get("host", "127.0.0.1")
headers = config.get("headers", [])
opts = []
if scheme == "https":
opts.append("--no-check-certificate")
for header in headers:
if not header[0] or not header[1]:
utils.throw_error("Expected [header] to be a list of two items")
opts.append(f'--header "{header[0]}: {header[1]}"')
return f"wget --spider --quiet {' '.join(opts)} {scheme}://{host}:{port}{path}"
def http_test(port, path, config=None):
config = config or {}
if not port or not path:
utils.throw_error("Expected [port] and [path] to be set")
host = config.get("host", "127.0.0.1")
return (
f"/bin/bash -c 'exec {{health_check_fd}}<>/dev/tcp/{host}/{port} && echo -e \"GET {path} HTTP/1.1\\r\\nHost: "
+ f"{host}\\r\\nConnection: close\\r\\n\\r\\n\" >&$${{health_check_fd}} && cat <&$${{health_check_fd}}'"
)
def netcat_test(port, config=None):
config = config or {}
if not port:
utils.throw_error("Expected [port] to be set")
host = config.get("host", "127.0.0.1")
return f"nc -z -w 1 {host} {port}"
def tcp_test(port, config=None):
config = config or {}
if not port:
utils.throw_error("Expected [port] to be set")
host = config.get("host", "127.0.0.1")
return f"timeout 1 bash -c 'cat < /dev/null > /dev/tcp/{host}/{port}'"


@@ -0,0 +1,71 @@
from . import utils
def get_header(app_name: str):
return f"""# Welcome to TrueNAS SCALE
Thank you for installing {app_name}!
"""
def get_footer(app_name: str):
return f"""## Documentation
Documentation for {app_name} can be found at https://www.truenas.com/docs.
## Bug reports
If you find a bug in this app, please file an issue at
https://ixsystems.atlassian.net or https://github.com/truenas/apps
## Feature requests or improvements
If you find a feature request for this app, please file an issue at
https://ixsystems.atlassian.net or https://github.com/truenas/apps
"""
def get_notes(app_name: str, body: str = ""):
if not app_name:
utils.throw_error("Expected [app_name] to be set")
return f"{get_header(app_name)}\n\n{body}\n\n{get_footer(app_name)}"
def get_portals(portals: list):
valid_schemes = ["http", "https"]
result = []
for portal in portals:
# Most apps have a single portal, let's default to a standard name
name = portal.get("name", "Web UI")
scheme = portal.get("scheme", "http")
path = portal.get("path", "/")
if not name:
utils.throw_error("Expected [portal.name] to be set")
if name in [p["name"] for p in result]:
utils.throw_error(
f"Expected [portal.name] to be unique, got [{', '.join([p['name'] for p in result]+[name])}]"
)
if scheme not in valid_schemes:
utils.throw_error(
f"Expected [portal.scheme] to be one of [{', '.join(valid_schemes)}], got [{portal['scheme']}]"
)
if not portal.get("port"):
utils.throw_error("Expected [portal.port] to be set")
if not path.startswith("/"):
utils.throw_error(
f"Expected [portal.path] to start with /, got [{portal['path']}]"
)
result.append(
{
"name": name,
"scheme": scheme,
"host": portal.get("host", "0.0.0.0"),
"port": portal["port"],
"path": path,
}
)
return result
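
With only a port given, the defaults fill in the rest:

print(get_portals([{"port": 8384}]))
# [{'name': 'Web UI', 'scheme': 'http', 'host': '0.0.0.0', 'port': 8384, 'path': '/'}]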


@@ -0,0 +1,21 @@
from . import utils
def dns_opts(dns_options=None):
dns_options = dns_options or []
if not dns_options:
return []
tracked = {}
disallowed_opts = []
for opt in dns_options:
key = opt.split(":")[0]
if key in tracked:
utils.throw_error(
f"Expected [dns_opts] to be unique, got [{', '.join([d.split(':')[0] for d in tracked])}]"
)
if key in disallowed_opts:
utils.throw_error(f"Expected [dns_opts] to not contain [{key}] key.")
tracked[key] = opt
return dns_options


@@ -0,0 +1,42 @@
import ipaddress
from . import utils
def must_valid_port(num: int):
if num < 1 or num > 65535:
utils.throw_error(f"Expected a valid port number, got [{num}]")
def must_valid_ip(ip: str):
try:
ipaddress.ip_address(ip)
except ValueError:
utils.throw_error(f"Expected a valid IP address, got [{ip}]")
def must_valid_protocol(protocol: str):
if protocol not in ["tcp", "udp"]:
utils.throw_error(f"Expected a valid protocol, got [{protocol}]")
def must_valid_mode(mode: str):
if mode not in ["ingress", "host"]:
utils.throw_error(f"Expected a valid mode, got [{mode}]")
def get_port(port=None):
port = port or {}
must_valid_port(port["published"])
must_valid_port(port["target"])
must_valid_ip(port.get("host_ip", "0.0.0.0"))
must_valid_protocol(port.get("protocol", "tcp"))
must_valid_mode(port.get("mode", "ingress"))
return {
"target": port["target"],
"published": port["published"],
"protocol": port.get("protocol", "tcp"),
"mode": port.get("mode", "ingress"),
"host_ip": port.get("host_ip", "0.0.0.0"),
}
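
For example, the UDP transfer port used by the Syncthing compose template above validates and normalizes to:

print(get_port(port={"target": 22000, "published": 22000, "protocol": "udp"}))
# {'target': 22000, 'published': 22000, 'protocol': 'udp', 'mode': 'ingress', 'host_ip': '0.0.0.0'}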


@@ -0,0 +1,77 @@
from . import utils
from .security import get_caps, get_sec_opts
from .network import dns_opts
from .healthchecks import pg_test, check_health
from .resources import resources
def pg_url(variant, host, user, password, dbname, port=5432):
if not host:
utils.throw_error("Expected [host] to be set")
if not user:
utils.throw_error("Expected [user] to be set")
if not password:
utils.throw_error("Expected [password] to be set")
if not dbname:
utils.throw_error("Expected [dbname] to be set")
if variant == "postgresql":
return f"postgresql://{user}:{password}@{host}:{port}/{dbname}?sslmode=disable"
elif variant == "postgres":
return f"postgres://{user}:{password}@{host}:{port}/{dbname}?sslmode=disable"
else:
utils.throw_error(
f"Expected [variant] to be one of [postgresql, postgres], got [{variant}]"
)
def pg_env(user, password, dbname, port=5432):
if not user:
utils.throw_error("Expected [user] to be set for postgres")
if not password:
utils.throw_error("Expected [password] to be set for postgres")
if not dbname:
utils.throw_error("Expected [dbname] to be set for postgres")
return {
"POSTGRES_USER": user,
"POSTGRES_PASSWORD": password,
"POSTGRES_DB": dbname,
"POSTGRES_PORT": port,
}
def pg_container(data={}):
req_keys = ["db_user", "db_password", "db_name", "volumes", "resources"]
for key in req_keys:
if not data.get(key):
utils.throw_error(f"Expected [{key}] to be set for postgres")
pg_user = data["db_user"]
pg_password = data["db_password"]
pg_dbname = data["db_name"]
pg_port = data.get("port", 5432)
depends = data.get("depends_on", {})
depends_on = {}
for key in depends:
depends_on[key] = {
"condition": depends[key].get("condition", "service_completed_successfully")
}
return {
"image": f"{data.get('image', 'postgres:15')}",
"user": f"{data.get('user', '999')}:{data.get('group', '999')}",
"restart": "unless-stopped",
"cap_drop": get_caps()["drop"],
"security_opt": get_sec_opts(),
**({"dns_opts": dns_opts(data["dns_opts"])} if data.get("dns_opts") else {}),
"healthcheck": check_health(pg_test(user=pg_user, db=pg_dbname)),
"environment": pg_env(
user=pg_user,
password=pg_password,
dbname=pg_dbname,
port=pg_port,
),
"volumes": data["volumes"],
"depends_on": depends_on,
"deploy": {"resources": resources(data["resources"])},
}


@@ -0,0 +1,49 @@
from . import utils
from .security import get_caps, get_sec_opts
from .network import dns_opts
from .healthchecks import redis_test, check_health
from .resources import resources
def redis_container(data={}):
req_keys = ["password", "volumes", "resources"]
for key in req_keys:
if not data.get(key):
utils.throw_error(f"Expected [{key}] to be set for postgres")
redis_password = data["password"]
redis_port = data.get("port", 6379)
depends = data.get("depends_on", {})
depends_on = {}
for key in depends:
depends_on[key] = {
"condition": depends[key].get("condition", "service_completed_successfully")
}
return {
"image": f"{data.get('image', 'bitnami/redis:7.0.11')}",
"user": f"{data.get('user', '1001')}:{data.get('group', '0')}",
"restart": "unless-stopped",
"cap_drop": get_caps()["drop"],
"security_opt": get_sec_opts(),
**({"dns_opts": dns_opts(data["dns_opts"])} if data.get("dns_opts") else {}),
"healthcheck": check_health(redis_test(config={"port": redis_port})),
"environment": redis_env(
password=redis_password,
port=redis_port,
),
"volumes": data["volumes"],
"depends_on": depends_on,
"deploy": {"resources": resources(data["resources"])},
}
def redis_env(password, port=6379):
if not password:
utils.throw_error("Expected [password] to be set for redis")
return {
"ALLOW_EMPTY_PASSWORD": "no",
"REDIS_PASSWORD": password,
"REDIS_PORT_NUMBER": port,
}


@@ -0,0 +1,87 @@
import re
from . import utils
def resources(resources):
gpus = resources.get("gpus", {})
cpus = str(resources.get("limits", {}).get("cpus", 2.0))
memory = str(resources.get("limits", {}).get("memory", 4096))
if not re.match(r"^[1-9][0-9]*(\.[0-9]+)?$", cpus):
utils.throw_error(f"Expected cpus to be a number or a float, got [{cpus}]")
if not re.match(r"^[1-9][0-9]*$", memory):
raise ValueError(f"Expected memory to be a number, got [{memory}]")
result = {
"limits": {"cpus": cpus, "memory": f"{memory}M"},
"reservations": {"devices": []},
}
if gpus:
gpu_result = get_nvidia_gpus_reservations(gpus)
if gpu_result:
# Appending to devices, as we can later extend this to support other types of devices. Eg. TPUs.
result["reservations"]["devices"].append(get_nvidia_gpus_reservations(gpus))
# Docker does not like empty "things" all around.
if not result["reservations"]["devices"]:
del result["reservations"]
return result
def get_nvidia_gpus_reservations(gpus: dict) -> dict:
"""
Input:
{
"nvidia_gpu_selection": {
"pci_slot_0": {"uuid": "uuid_0", "use_gpu": True},
"pci_slot_1": {"uuid": "uuid_1", "use_gpu": True},
},
}
"""
if not gpus:
return {}
device_ids = []
for gpu in gpus.get("nvidia_gpu_selection", {}).values():
if gpu["use_gpu"]:
device_ids.append(gpu["uuid"])
if not device_ids:
return {}
return {
"capabilities": ["gpu"],
"driver": "nvidia",
"device_ids": device_ids,
}
disallowed_devices = ["/dev/dri"]
# Returns the top level devices list
# Accepting other_devices to allow manually adding devices
# directly to the list. (Eg sound devices)
def get_devices(resources: dict, other_devices: list = []) -> list:
devices = []
if resources.get("gpus", {}).get("use_all_gpus", False):
devices.append("/dev/dri:/dev/dri")
added_host_devices: list = []
for device in other_devices:
host_device = device.get("host_device", "").rstrip("/")
container_device = device.get("container_device", "") or host_device
if not host_device:
utils.throw_error(f"Expected [host_device] to be set for device [{device}]")
if not utils.valid_path(host_device):
utils.throw_error(f"Expected [host_device] to be a valid path for device [{device}]")
if host_device in disallowed_devices:
utils.throw_error(f"Device [{host_device}] is not allowed to be manually added.")
if host_device in added_host_devices:
utils.throw_error(f"Expected devices to be unique, but [{host_device}] was already added.")
devices.append(f"{host_device}:{container_device}")
added_host_devices.append(host_device)
return devices


@@ -0,0 +1,27 @@
from base64 import b64encode
def get_caps(add=None, drop=None):
add = add or []
drop = drop or ["ALL"]
result = {"drop": drop}
if add:
result["add"] = add
return result
def get_sec_opts(add=None, remove=None):
add = add or []
remove = remove or []
result = ["no-new-privileges"]
for opt in add:
if opt not in result:
result.append(opt)
for opt in remove:
if opt in result:
result.remove(opt)
return result
def htpasswd(username, password):
return b64encode(f"{username}:{password}".encode("utf-8")).decode("utf-8")
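
Quick examples of the defaults these helpers produce:

print(get_caps(add=["CHOWN"]))  # {'drop': ['ALL'], 'add': ['CHOWN']}
print(get_sec_opts())           # ['no-new-privileges']
print(get_sec_opts(remove=["no-new-privileges"]))  # []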


@@ -0,0 +1,377 @@
import re
import json
import hashlib
from . import utils
BIND_TYPES = ["host_path", "ix_volume"]
VOL_TYPES = ["volume", "nfs", "cifs", "temporary"]
ALL_TYPES = BIND_TYPES + VOL_TYPES + ["tmpfs", "anonymous"]
PROPAGATION_TYPES = ["shared", "slave", "private", "rshared", "rslave", "rprivate"]
def _get_name_for_temporary(data):
if not data.get("mount_path"):
utils.throw_error("Expected [mount_path] to be set for temporary volume")
return (
data["mount_path"]
.lstrip("/")
.lower()
.replace("/", "_")
.replace(".", "_")
.replace(" ", "_")
)
# Returns a volume mount object (Used in container's "volumes" level)
def vol_mount(data, values=None):
values = values or {}
ix_volumes = values.get("ix_volumes") or []
vol_type = _get_docker_vol_type(data)
volume = {
"type": vol_type,
"target": utils.valid_path(data.get("mount_path", "")),
"read_only": data.get("read_only", False),
}
if vol_type == "bind": # Default create_host_path is true in short-syntax
volume.update(_get_bind_vol_config(data, ix_volumes))
elif vol_type == "volume":
volume.update(_get_volume_vol_config(data))
elif vol_type == "tmpfs":
volume.update(_get_tmpfs_vol_config(data))
elif vol_type == "temporary":
volume["type"] = "volume"
volume.update(_get_volume_vol_config(data))
elif vol_type == "anonymous":
volume["type"] = "volume"
volume.update(_get_anonymous_vol_config(data))
return volume
def storage_item(data, values=None, perm_opts=None):
values = values or {}
perm_opts = perm_opts or {}
if data.get("type") == "temporary":
data.update({"volume_name": _get_name_for_temporary(data)})
return {
"vol_mount": vol_mount(data, values),
"vol": vol(data),
"perms_item": perms_item(data, values, perm_opts) if perm_opts else {},
}
def perms_item(data, values=None, opts=None):
opts = opts or {}
values = values or {}
ix_context = values.get("ix_context") or {}
vol_type = data.get("type", "")
    # Temporary volumes always get auto permissions
if vol_type == "temporary":
data.update({"auto_permissions": True})
    # If it's an ix_volume and we are installing, we need to set auto permissions
if vol_type == "ix_volume" and ix_context.get("is_install", False):
data.update({"auto_permissions": True})
if not data.get("auto_permissions"):
return {}
if vol_type == "host_path":
if data.get("host_path_config", {}).get("acl_enable", False):
return {}
if vol_type == "ix_volume":
if data.get("ix_volume_config", {}).get("acl_enable", False):
return {}
req_keys = ["mount_path", "mode", "uid", "gid"]
for key in req_keys:
if opts.get(key, None) is None:
utils.throw_error(f"Expected opts passed to [perms_item] to have [{key}] key")
data.update({"mount_path": opts["mount_path"]})
volume_mount = vol_mount(data, values)
return {
"vol_mount": volume_mount,
"perm_dir": {
"dir": volume_mount["target"],
"mode": opts["mode"],
"uid": opts["uid"],
"gid": opts["gid"],
"chmod": opts.get("chmod", "false"),
"is_temporary": data["type"] == "temporary",
},
}
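# Example (illustrative opts; uid/gid 568 is the default TrueNAS apps user):
#   perms_item(
#       {"type": "temporary", "volume_name": "tmp-sessions", "mount_path": "/tmp/sessions"},
#       values={},
#       opts={"mount_path": "/mnt/permission/sessions", "mode": "check", "uid": 568, "gid": 568},
#   )
#   returns the volume mount plus a perm_dir entry consumed by the perms container macro.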
def _get_bind_vol_config(data, ix_volumes=None):
    ix_volumes = ix_volumes or {}
path = host_path(data, ix_volumes)
if data.get("propagation", "rprivate") not in PROPAGATION_TYPES:
utils.throw_error(
f"Expected [propagation] to be one of [{', '.join(PROPAGATION_TYPES)}], got [{data['propagation']}]"
)
# https://docs.docker.com/storage/bind-mounts/#configure-bind-propagation
return {
"source": path,
"bind": {
"create_host_path": data.get("host_path_config", {}).get(
"create_host_path", True
),
"propagation": _get_valid_propagation(data),
},
}
def _get_volume_vol_config(data):
if data.get("type") in ["nfs", "cifs"]:
if data.get("volume_name"):
utils.throw_error("Expected [volume_name] to be empty for [nfs, cifs] type")
data.update({"volume_name": _get_name_for_external_volume(data)})
if not data.get("volume_name"):
utils.throw_error("Expected [volume_name] to be set for [volume] type")
return {"source": data["volume_name"], "volume": _process_volume_config(data)}
def _get_anonymous_vol_config(data):
return {"volume": _process_volume_config(data)}
mode_regex = re.compile(r"^0[0-7]{3}$")
def _get_tmpfs_vol_config(data):
tmpfs = {}
config = data.get("tmpfs_config", {})
if config.get("size"):
if not isinstance(config["size"], int):
utils.throw_error("Expected [size] to be an integer for [tmpfs] type")
if not config["size"] > 0:
utils.throw_error("Expected [size] to be greater than 0 for [tmpfs] type")
# Convert Mebibytes to Bytes
tmpfs.update({"size": config["size"] * 1024 * 1024})
if config.get("mode"):
if not mode_regex.match(str(config["mode"])):
utils.throw_error(
f"Expected [mode] to be a octal string for [tmpfs] type, got [{config['mode']}]"
)
tmpfs.update({"mode": int(config["mode"], 8)})
return {"tmpfs": tmpfs}
# We generate a unique name for the volume based on its config.
# Docker will not update a volume after creation, so hashing the config
# ensures that changing any value (e.g. the server address) results in a new volume
def _get_name_for_external_volume(data):
config_hash = hashlib.sha256(json.dumps(data).encode("utf-8")).hexdigest()
return f"{data['type']}_{config_hash}"
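# Example: identical configs always hash to the same name, so re-renders reuse
# the volume, while any config change creates a new one:
#   _get_name_for_external_volume({"type": "nfs", "nfs_config": {"server": "10.0.0.1", "path": "/mnt/tank"}})
#   -> "nfs_<sha256 hex digest of the config JSON>"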
# Returns a volume object (Used in top "volumes" level)
def vol(data):
if not data or _get_docker_vol_type(data) != "volume":
return {}
if not data.get("volume_name"):
utils.throw_error("Expected [volume_name] to be set for [volume] type")
if data["type"] == "nfs":
return {data["volume_name"]: _process_nfs(data)}
elif data["type"] == "cifs":
return {data["volume_name"]: _process_cifs(data)}
else:
return {data["volume_name"]: {}}
def _is_host_path(data):
return data.get("type") == "host_path"
def _get_valid_propagation(data):
if not data.get("propagation"):
return "rprivate"
if not data["propagation"] in PROPAGATION_TYPES:
utils.throw_error(
f"Expected [propagation] to be one of [{', '.join(PROPAGATION_TYPES)}], got [{data['propagation']}]"
)
return data["propagation"]
def _is_ix_volume(data):
return data.get("type") == "ix_volume"
# Returns the host path for either a host_path or an ix_volume
def host_path(data, ix_volumes=None):
    ix_volumes = ix_volumes or {}
path = ""
if _is_host_path(data):
path = _process_host_path_config(data)
elif _is_ix_volume(data):
path = _process_ix_volume_config(data, ix_volumes)
else:
utils.throw_error(
f"Expected [host_path()] to be called only for types [host_path, ix_volume], got [{data['type']}]"
)
return utils.valid_path(path)
# Returns the type of storage as used in docker-compose
def _get_docker_vol_type(data):
if not data.get("type"):
utils.throw_error("Expected [type] to be set for storage")
if data["type"] not in ALL_TYPES:
utils.throw_error(
f"Expected storage [type] to be one of {ALL_TYPES}, got [{data['type']}]"
)
if data["type"] in BIND_TYPES:
return "bind"
elif data["type"] in VOL_TYPES:
return "volume"
else:
return data["type"]
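# Mapping sketch:
#   host_path, ix_volume          -> "bind"
#   volume, nfs, cifs, temporary  -> "volume"
#   tmpfs, anonymous              -> unchanged ("tmpfs" / "anonymous")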
def _process_host_path_config(data):
if data.get("host_path_config", {}).get("acl_enable", False):
if not data["host_path_config"].get("acl", {}).get("path"):
utils.throw_error(
"Expected [host_path_config.acl.path] to be set for [host_path] type with ACL enabled"
)
return data["host_path_config"]["acl"]["path"]
if not data.get("host_path_config", {}).get("path"):
utils.throw_error(
"Expected [host_path_config.path] to be set for [host_path] type"
)
return data["host_path_config"]["path"]
def _process_volume_config(data):
return {"nocopy": data.get("volume_config", {}).get("nocopy", False)}
def _process_ix_volume_config(data, ix_volumes):
path = ""
if not data.get("ix_volume_config", {}).get("dataset_name"):
utils.throw_error(
"Expected [ix_volume_config.dataset_name] to be set for [ix_volume] type"
)
if not ix_volumes:
utils.throw_error("Expected [ix_volumes] to be set for [ix_volume] type")
ds = data["ix_volume_config"]["dataset_name"]
path = ix_volumes.get(ds, None)
if not path:
utils.throw_error(f"Expected the key [{ds}] to be set in [ix_volumes]")
return path
# Constructs a volume object for a cifs type
def _process_cifs(data):
if not data.get("cifs_config"):
utils.throw_error("Expected [cifs_config] to be set for [cifs] type")
required_keys = ["server", "path", "username", "password"]
for key in required_keys:
if not data["cifs_config"].get(key):
utils.throw_error(f"Expected [{key}] to be set for [cifs] type")
opts = [
f"user={data['cifs_config']['username']}",
f"password={data['cifs_config']['password']}",
]
if data["cifs_config"].get("domain"):
opts.append(f'domain={data["cifs_config"]["domain"]}')
if data["cifs_config"].get("options"):
if not isinstance(data["cifs_config"]["options"], list):
utils.throw_error(
"Expected [cifs_config.options] to be a list for [cifs] type"
)
disallowed_opts = ["user", "password", "domain"]
for opt in data["cifs_config"]["options"]:
if not isinstance(opt, str):
utils.throw_error(
"Expected [cifs_config.options] to be a list of strings for [cifs] type"
)
key = opt.split("=")[0]
for disallowed in disallowed_opts:
if key == disallowed:
utils.throw_error(
f"Expected [cifs_config.options] to not start with [{disallowed}] for [cifs] type"
)
opts.append(opt)
server = data["cifs_config"]["server"].lstrip("/")
path = data["cifs_config"]["path"].strip("/")
volume = {
"driver_opts": {
"type": "cifs",
"device": f"//{server}/{path}",
"o": f"{','.join(opts)}",
},
}
return volume
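# Example (made-up share and credentials):
#   _process_cifs({"type": "cifs", "cifs_config": {
#       "server": "192.168.1.1", "path": "/share", "username": "u", "password": "p"}})
#   -> {"driver_opts": {"type": "cifs", "device": "//192.168.1.1/share", "o": "user=u,password=p"}}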
# Constructs a volume object for an nfs type
def _process_nfs(data):
if not data.get("nfs_config"):
utils.throw_error("Expected [nfs_config] to be set for [nfs] type")
required_keys = ["server", "path"]
for key in required_keys:
if not data["nfs_config"].get(key):
utils.throw_error(f"Expected [{key}] to be set for [nfs] type")
opts = [f"addr={data['nfs_config']['server']}"]
if data["nfs_config"].get("options"):
if not isinstance(data["nfs_config"]["options"], list):
utils.throw_error("Expected [nfs_config.options] to be a list for [nfs] type")
disallowed_opts = ["addr"]
for opt in data["nfs_config"]["options"]:
if not isinstance(opt, str):
utils.throw_error(
"Expected [nfs_config.options] to be a list of strings for [nfs] type"
)
key = opt.split("=")[0]
for disallowed in disallowed_opts:
if key == disallowed:
utils.throw_error(
f"Expected [nfs_config.options] to not start with [{disallowed}] for [nfs] type"
)
opts.append(opt)
volume = {
"driver_opts": {
"type": "nfs",
"device": f":{data['nfs_config']['path']}",
"o": f"{','.join(opts)}",
},
}
return volume
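# Example (made-up server):
#   _process_nfs({"type": "nfs", "nfs_config": {"server": "10.0.0.1", "path": "/mnt/tank"}})
#   -> {"driver_opts": {"type": "nfs", "device": ":/mnt/tank", "o": "addr=10.0.0.1"}}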

View File

@@ -0,0 +1,83 @@
import hashlib
import secrets
import sys
from . import security
class TemplateException(Exception):
pass
def throw_error(message):
# When throwing a known error, hide the traceback
# This is because the error is also shown in the UI
    # and having a traceback makes it hard for the user to read
sys.tracebacklimit = 0
raise TemplateException(message)
def secure_string(length):
    # Note: token_urlsafe(length) returns roughly 4/3 * length characters, not exactly [length]
    return secrets.token_urlsafe(length)
def basic_auth_header(username, password):
return f"Basic {security.htpasswd(username, password)}"
def merge_dicts(*dicts):
merged_dict = {}
for dictionary in dicts:
merged_dict.update(dictionary)
return merged_dict
# Basic validation for a path (Expand later)
def valid_path(path=""):
if not path.startswith("/"):
throw_error(f"Expected path [{path}] to start with /")
# There is no reason to allow / as a path, either on host or in a container
if path == "/":
throw_error(f"Expected path [{path}] to not be /")
return path
# Note: despite its name, this returns Title Case via str.title() (e.g. "foo bar" -> "Foo Bar")
def camel_case(string):
    return string.title()
def is_boolean(string):
return string.lower() in ["true", "false"]
def is_number(string):
try:
float(string)
return True
except ValueError:
return False
def get_image(images={}, name=""):
if not images:
throw_error("Expected [images] to be set")
if name not in images:
throw_error(f"Expected [images.{name}] to be set")
if not images[name].get("repository") or not images[name].get("tag"):
throw_error(
f"Expected [images.{name}.repository] and [images.{name}.tag] to be set"
)
return f"{images[name]['repository']}:{images[name]['tag']}"
def hash_data(data=""):
if not data:
throw_error("Expected [data] to be set")
return hashlib.sha256(data.encode("utf-8")).hexdigest()
def get_image_with_hashed_data(images={}, name="", data=""):
return f"ix-{get_image(images, name)}-{hash_data(data)}"
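# Usage sketch (illustrative images dict and tag):
#   images = {"image": {"repository": "syncthing/syncthing", "tag": "1.27"}}
#   get_image(images, "image")                          -> "syncthing/syncthing:1.27"
#   get_image_with_hashed_data(images, "image", "cfg")  -> "ix-syncthing/syncthing:1.27-<sha256 of "cfg">"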

View File

@@ -0,0 +1,48 @@
{% from "macros/global/perms/script.sh.jinja" import process_dir_func %}
{# Takes a list of items to process #}
{# Each item is a dictionary with the following keys: #}
{# - dir: directory to process #}
{# - mode: one of [always, check]. (
     always: Always changes ownership and permissions.
     check: Checks the top-level dir and applies changes only on a mismatch.
   ) #}
{# - uid: uid to change to #}
{# - gid: gid to change to #}
{# - chmod: chmod to change to (Optional, default is no change) #}
{% macro perms_container(items=[]) %}
image: bash
user: root
deploy:
resources:
limits:
cpus: "1.0"
memory: 512m
entrypoint:
- bash
- -c
command:
- |
{{- process_dir_func() | indent(4) }}
{%- for item in items %}
    process_dir "{{ item.dir }}" "{{ item.mode }}" "{{ item.uid }}" "{{ item.gid }}" "{{ item.chmod }}" "{{ item.is_temporary|lower }}"
{%- endfor %}
{% endmacro %}
{# Examples #}
{# perms_container([
{
"dir": "/mnt/directories/dir1",
"mode": "always",
"uid": 500,
"gid": 500,
"chmod": "755",
},
{
"dir": "/mnt/directories/dir2",
"mode": "check",
"uid": 500,
"gid": 500,
"chmod": "755",
},
]) #}

View File

@@ -0,0 +1,75 @@
{#
Don't forget to use double $ for shell variables,
otherwise docker-compose will try to expand them
#}
{% macro process_dir_func() %}
function process_dir() {
local dir=$$1
local mode=$$2
local uid=$$3
local gid=$$4
local chmod=$$5
local is_temporary=$$6
local fix_owner="false"
local fix_perms="false"
    if [ ! -d "$$dir" ]; then
        echo "Path [$$dir] is not a directory, skipping..."
        # Return (not exit) so any remaining directories still get processed
        return 0
    fi
if [ "$$is_temporary" = "true" ]; then
echo "Path [$$dir] is a temporary directory, ensuring it is empty..."
        # Brace expansion does not happen inside double quotes, so "$$dir/{*,.*}"
        # would match nothing; expand the globs outside the quoted path instead
        # (rm -f silently ignores the literal patterns when nothing matches)
        rm -rf "$$dir"/* "$$dir"/.[!.]* "$$dir"/..?*
fi
echo "Current Ownership and Permissions on [$$dir]:"
echo "chown: $$(stat -c "%u %g" "$$dir")"
echo "chmod: $$(stat -c "%a" "$$dir")"
if [ "$$mode" = "always" ]; then
fix_owner="true"
fix_perms="true"
fi
if [ "$$mode" = "check" ]; then
if [ $$(stat -c %u "$$dir") -eq $$uid ] && [ $$(stat -c %g "$$dir") -eq $$gid ]; then
echo "Ownership is correct. Skipping..."
fix_owner="false"
else
echo "Ownership is incorrect. Fixing..."
fix_owner="true"
fi
if [ "$$chmod" = "false" ]; then
echo "Skipping permissions check, chmod is false"
elif [ -n "$$chmod" ]; then
if [ $$(stat -c %a "$$dir") -eq $$chmod ]; then
echo "Permissions are correct. Skipping..."
fix_perms="false"
else
echo "Permissions are incorrect. Fixing..."
fix_perms="true"
fi
fi
fi
if [ "$$fix_owner" = "true" ]; then
echo "Changing ownership to $$uid:$$gid on: [$$dir]"
chown -R "$$uid:$$gid" "$$dir"
echo "Finished changing ownership"
echo "Ownership after changes:"
stat -c "%u %g" "$$dir"
fi
if [ -n "$$chmod" ] && [ "$$fix_perms" = "true" ]; then
echo "Changing permissions to $$chmod on: [$$dir]"
chmod -R "$$chmod" "$$dir"
echo "Finished changing permissions"
echo "Permissions after changes:"
stat -c "%a" "$$dir"
fi
}
{% endmacro %}

View File

@@ -0,0 +1,43 @@
resources:
limits:
cpus: 2.0
memory: 4096
syncthing:
additional_envs: []
network:
web_port: 9000
tcp_port: 22000
quic_port: 22000
local_discover_port: 27017
certificate_id: null
host_network: false
run_as:
user: 568
group: 568
storage:
home:
type: volume
volume_name: syncthing-home
auto_permissions: true
additional_storage:
- type: volume
mount_path: /mnt/test/data1
volume_name: test-data1
- type: volume
mount_path: /mnt/test/data2
volume_name: test-data2
# Manual test for cifs rendering (migration_mode must add extra options)
# - type: cifs
# mount_path: /mnt/test/data3
# volume_name: test-data3
# cifs_config:
# server: 192.168.1.1
# path: /test
# domain: WORKGROUP
# username: test
# password: test
# migration_mode: true

View File

@@ -0,0 +1,129 @@
resources:
limits:
cpus: 2.0
memory: 4096
syncthing:
additional_envs: []
network:
web_port: 9000
tcp_port: 22000
quic_port: 22000
local_discover_port: 27017
certificate_id: "1"
host_network: false
run_as:
user: 568
group: 568
storage:
home:
type: volume
volume_name: syncthing-home
auto_permissions: true
additional_storage:
- type: volume
mount_path: /mnt/test/data1
volume_name: test-data1
- type: volume
mount_path: /mnt/test/data2
volume_name: test-data2
# Manual test for cifs rendering (migration_mode must add extra options)
# - type: cifs
# mount_path: /mnt/test/data3
# volume_name: test-data3
# cifs_config:
# server: 192.168.1.1
# path: /test
# domain: WORKGROUP
# username: test
# password: test
# migration_mode: true
ix_certificates:
"1":
certificate: |
-----BEGIN CERTIFICATE-----
MIIEdjCCA16gAwIBAgIDYFMYMA0GCSqGSIb3DQEBCwUAMGwxDDAKBgNVBAMMA2Fz
ZDELMAkGA1UEBhMCVVMxDTALBgNVBAgMBGFzZGYxCzAJBgNVBAcMAmFmMQ0wCwYD
VQQKDARhc2RmMQwwCgYDVQQLDANhc2QxFjAUBgkqhkiG9w0BCQEWB2FAYS5jb20w
HhcNMjEwODMwMjMyMzU0WhcNMjMxMjAzMjMyMzU0WjBuMQswCQYDVQQDDAJhZDEL
MAkGA1UEBhMCVVMxDTALBgNVBAgMBGFzZGYxDTALBgNVBAcMBGFzZGYxDTALBgNV
BAoMBGFkc2YxDTALBgNVBAsMBGFzZGYxFjAUBgkqhkiG9w0BCQEWB2FAYS5jb20w
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC7+1xOHRQyOnQTHFcrdasX
Zl0gzutVlA890a1wiQpdD5dOtCLo7+eqVYjqVKo9W8RUIArXWmBu/AbkH7oVFWC1
P973W1+ArF5sA70f7BZgqRKJTIisuIFIlRETgfnP2pfQmHRZtGaIJRZI4vQCdYgW
2g0KOvvNcZJCVq1OrhKiNiY1bWCp66DGg0ic6OEkZFHTm745zUNQaf2dNgsxKU0H
PGjVLJI//yrRFAOSBUqgD4c50krnMF7fU/Fqh+UyOu8t6Y/HsySh3urB+Zie331t
AzV6QV39KKxRflNx/yuWrtIEslGTm+xHKoCYJEk/nZ3mX8Y5hG6wWAb7A/FuDVg3
AgMBAAGjggEdMIIBGTAnBgNVHREEIDAehwTAqAADhwTAqAAFhwTAqAC2hwTAqACB
hwTAqACSMB0GA1UdDgQWBBQ4G2ff4tgZl4vmo4xCfqmJhdqShzAMBgNVHRMBAf8E
AjAAMIGYBgNVHSMEgZAwgY2AFLlYf9L99nxJDcpCM/LT3V5hQ/a3oXCkbjBsMQww
CgYDVQQDDANhc2QxCzAJBgNVBAYTAlVTMQ0wCwYDVQQIDARhc2RmMQswCQYDVQQH
DAJhZjENMAsGA1UECgwEYXNkZjEMMAoGA1UECwwDYXNkMRYwFAYJKoZIhvcNAQkB
FgdhQGEuY29tggNgUxcwFgYDVR0lAQH/BAwwCgYIKwYBBQUHAwEwDgYDVR0PAQH/
BAQDAgWgMA0GCSqGSIb3DQEBCwUAA4IBAQA6FpOInEHB5iVk3FP67GybJ29vHZTD
KQHbQgmg8s4L7qIsA1HQ+DMCbdylpA11x+t/eL/n48BvGw2FNXpN6uykhLHJjbKR
h8yITa2KeD3LjLYhScwIigXmTVYSP3km6s8jRL6UKT9zttnIHyXVpBDya6Q4WTMx
fmfC6O7t1PjQ5ZyVtzizIUP8ah9n4TKdXU4A3QIM6WsJXpHb+vqp1WDWJ7mKFtgj
x5TKv3wcPnktx0zMPfLb5BTSE9rc9djcBG0eIAsPT4FgiatCUChe7VhuMnqskxEz
MymJLoq8+mzucRwFkOkR2EIt1x+Irl2mJVMeBow63rVZfUQBD8h++LqB
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIEhDCCA2ygAwIBAgIDYFMXMA0GCSqGSIb3DQEBCwUAMGwxDDAKBgNVBAMMA2Fz
ZDELMAkGA1UEBhMCVVMxDTALBgNVBAgMBGFzZGYxCzAJBgNVBAcMAmFmMQ0wCwYD
VQQKDARhc2RmMQwwCgYDVQQLDANhc2QxFjAUBgkqhkiG9w0BCQEWB2FAYS5jb20w
HhcNMjEwODMwMjMyMDQ1WhcNMzEwODI4MjMyMDQ1WjBsMQwwCgYDVQQDDANhc2Qx
CzAJBgNVBAYTAlVTMQ0wCwYDVQQIDARhc2RmMQswCQYDVQQHDAJhZjENMAsGA1UE
CgwEYXNkZjEMMAoGA1UECwwDYXNkMRYwFAYJKoZIhvcNAQkBFgdhQGEuY29tMIIB
IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAq//c0hEEr83CS1pMgsHX50jt
2MqIbcf63UUNJTiYpUUvUQSFJFc7m/dr+RTZvu97eDCnD5K2qkHHvTPaPZwY+Djf
iy7N641Sz6u/y3Yo3xxs1Aermsfedh48vusJpjbkT2XS44VjbkrpKcWDNVpp3Evd
M7oJotXeUsZ+imiyVCfr4YhoY5gbGh/r+KN9Wf9YKoUyfLLZGwdZkhtX2zIbidsL
Thqi9YTaUHttGinjiBBum234u/CfvKXsfG3yP2gvBGnlvZnM9ktv+lVffYNqlf7H
VmB1bKKk84HtzuW5X76SGAgOG8eHX4x5ZLI1WQUuoQOVRl1I0UCjBtbz8XhwvQID
AQABo4IBLTCCASkwLQYDVR0RBCYwJIcEwKgABYcEwKgAA4cEwKgAkocEwKgAtYcE
wKgAgYcEwKgAtjAdBgNVHQ4EFgQUuVh/0v32fEkNykIz8tPdXmFD9rcwDwYDVR0T
AQH/BAUwAwEB/zCBmAYDVR0jBIGQMIGNgBS5WH/S/fZ8SQ3KQjPy091eYUP2t6Fw
pG4wbDEMMAoGA1UEAwwDYXNkMQswCQYDVQQGEwJVUzENMAsGA1UECAwEYXNkZjEL
MAkGA1UEBwwCYWYxDTALBgNVBAoMBGFzZGYxDDAKBgNVBAsMA2FzZDEWMBQGCSqG
SIb3DQEJARYHYUBhLmNvbYIDYFMXMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEF
BQcDAjAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggEBAKEocOmVuWlr
zegtKYMe8NhHIkFY9oVn5ym6RHNOJpPH4QF8XYC3Z5+iC5yGh4P/jVe/4I4SF6Ql
PtofU0jNq5vzapt/y+m008eXqPQFmoUOvu+JavoRVcRx2LIP5AgBA1mF56CSREsX
TkuJAA9IUQ8EjnmAoAeKINuPaKxGDuU8BGCMqr/qd564MKNf9XYL+Fb2rlkA0O2d
2No34DQLgqSmST/LAvPM7Cbp6knYgnKmGr1nETCXasg1cueHLnWWTvps2HiPp2D/
+Fq0uqcZLu4Mdo0CPs4e5sHRyldEnRSKh0DVLprq9zr/GMipmPLJUsT5Jed3sj0w
M7Y3vwxshpo=
-----END CERTIFICATE-----
privatekey: |
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC7+1xOHRQyOnQT
HFcrdasXZl0gzutVlA890a1wiQpdD5dOtCLo7+eqVYjqVKo9W8RUIArXWmBu/Abk
H7oVFWC1P973W1+ArF5sA70f7BZgqRKJTIisuIFIlRETgfnP2pfQmHRZtGaIJRZI
4vQCdYgW2g0KOvvNcZJCVq1OrhKiNiY1bWCp66DGg0ic6OEkZFHTm745zUNQaf2d
NgsxKU0HPGjVLJI//yrRFAOSBUqgD4c50krnMF7fU/Fqh+UyOu8t6Y/HsySh3urB
+Zie331tAzV6QV39KKxRflNx/yuWrtIEslGTm+xHKoCYJEk/nZ3mX8Y5hG6wWAb7
A/FuDVg3AgMBAAECggEAapt30rj9DitGTtxAt13pJMEhyYxvvD3WkvmJwguF/Bbu
eW0Ba1c668fMeRCA54FWi1sMqusPS4HUqqUvk+tmyAOsAF4qgD/A4MMSC7uJSVI5
N/JWhJWyhCY94/FPakiO1nbPbVw41bcqtzU2qvparpME2CtxSCbDiqm7aaag3Kqe
EF0fGSUdZ+TYl9JM05+eIyiX+UY19Fg0OjTHMn8nGpxcNTfDBdQ68TKvdo/dtIKL
PLKzJUNNdM8odC4CvQtfGMqaslwZwXkiOl5VJcW21ncj/Y0ngEMKeD/i65ZoqGdR
0FKCQYEAGtM2FvJcZQ92Wsw7yj2bK2MSegVUyLK32QKBgQDe8syVCepPzRsfjfxA
6TZlWcGuTZLhwIx97Ktw3VcQ1f4rLoEYlv0xC2VWBORpzIsJo4I/OLmgp8a+Ga8z
FkVRnq90dV3t4NP9uJlHgcODHnOardC2UUka4olBSCG6zmK4Jxi34lOxhGRkshOo
L4IBeOIB5g+ZrEEXkzfYJHESRQKBgQDX2YhFhGIrT8BAnC5BbXbhm8h6Bhjz8DYL
d+qhVJjef7L/aJxViU0hX9Ba2O8CLK3FZeREFE3hJPiJ4TZSlN4evxs5p+bbNDcA
0mhRI/o3X4ac6IxdRebyYnCOB/Cu94/MzppcZcotlCekKNike7eorCcX4Qavm7Pu
MUuQ+ifmSwKBgEnchoqZzlbBzMqXb4rRuIO7SL9GU/MWp3TQg7vQmJerTZlgvsQ2
wYsOC3SECmhCq4117iCj2luvOdihCboTFsQDnn0mpQe6BIF6Ns3J38wAuqv0CcFd
DKsrge1uyD3rQilgSoAhKzkUc24o0PpXQurZ8YZPgbuXpbj5vPaOnCdBAoGACYc7
wb3XS4wos3FxhUfcwJbM4b4VKeeHqzfu7pI6cU/3ydiHVitKcVe2bdw3qMPqI9Wc
nvi6e17Tbdq4OCsEJx1OiVwFD9YdO3cOTc6lw/3+hjypvZBRYo+/4jUthbu96E+S
dtOzehGZMmDvN0uSzupSi3ZOgkAAUFpyuIKickMCgYAId0PCRjonO2thn/R0rZ7P
//L852uyzYhXKw5/fjFGhQ6LbaLgIRFaCZ0L2809u0HFnNvJjHv4AKP6j+vFQYYY
qQ+66XnfsA9G/bu4MDS9AX83iahD9IdLXQAy8I19prAbpVumKegPbMnNYNB/TYEc
3G15AKCXo7jjOUtHY01DCQ==
-----END PRIVATE KEY-----

View File

@@ -1,4 +1,6 @@
import re
import json
import hashlib
from . import utils
@@ -128,6 +130,10 @@ def _get_bind_vol_config(data, ix_volumes=None):
def _get_volume_vol_config(data):
if data.get("type") in ["nfs", "cifs"]:
if data.get("volume_name"):
utils.throw_error("Expected [volume_name] to be empty for [nfs, cifs] type")
data.update({"volume_name": _get_name_for_external_volume(data)})
if not data.get("volume_name"):
utils.throw_error("Expected [volume_name] to be set for [volume] type")
@@ -163,6 +169,14 @@ def _get_tmpfs_vol_config(data):
return {"tmpfs": tmpfs}
# We generate a unique name for the volume based on the config
# Docker will not update any volume after creation. This is to ensure
# that changing any value (eg server address) in the config will result in a new volume
def _get_name_for_external_volume(data):
config_hash = hashlib.sha256(json.dumps(data).encode("utf-8")).hexdigest()
return f"{data['type']}_{config_hash}"
# Returns a volume object (Used in top "volumes" level)
def vol(data):
if not data or _get_docker_vol_type(data) != "volume":
@@ -309,7 +323,7 @@ def _process_cifs(data):
opts.append(opt)
server = data["cifs_config"]["server"].lstrip("/")
path = data["cifs_config"]["path"]
path = data["cifs_config"]["path"].strip("/")
volume = {
"driver_opts": {
"type": "cifs",

View File

@@ -1,2 +1,2 @@
0.0.1: f074617a82a86d2a6cc78a4c8a4296fc9d168e456f12713e50c696557b302133
1.0.0: 317726af1e56541666942aeebcc7543e6f0946f96c322d35b612c0f2f7189a88
1.0.0: c99e9b78fa296d9796875a1373d75fb779ae5b78ba7c4d01b1a23d564e0075c0

View File

@@ -30,7 +30,7 @@ def migrate_smb_pv_pvc_type(smb_pv_pvc):
"type": "cifs",
"cifs_config": {
"server": smb_config["server"],
"share": smb_config["share"],
"path": smb_config["share"],
"domain": smb_config.get("domain", ""),
"username": smb_config["username"],
"password": smb_config["password"],