Mirror of https://github.com/MAGICGrants/truenas-apps.git (synced 2026-01-08 20:18:01 -05:00)
add signal-cli-rest-api (#3611)
* add signal-cli-rest-api
* fixed test port
* fixed label
* fixed icon
* remove old libs
* new lib
* Update title
* fix port and uid/gid, add missing sections in questions, category
* add mode
* icon
* icn
* caps

Co-authored-by: Stavros Kois <s.kois@outlook.com>
Co-authored-by: Stavros Kois <47820033+stavros-k@users.noreply.github.com>
ix-dev/community/signal-cli-rest-api/README.md (new file, +4)
@@ -0,0 +1,4 @@
# Signal Messenger REST API

[Signal Messenger REST API](https://github.com/bbernhard/signal-cli-rest-api) creates a small dockerized
REST API around signal-cli.
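For quick verification after deployment, the REST API can be probed over HTTP. Below is a minimal sketch, assuming the app is published on the default port `30295` of your TrueNAS host; the `/v1/health` path is the same endpoint the app's healthcheck uses, and `truenas.local` is a placeholder hostname.

```python
# Minimal health probe against a deployed signal-cli-rest-api instance.
# Assumes the default published port (30295); replace "truenas.local"
# with your host's address.
import urllib.request

BASE_URL = "http://truenas.local:30295"  # placeholder host, default app port

with urllib.request.urlopen(f"{BASE_URL}/v1/health", timeout=5) as resp:
    # The app template's healthcheck hits this same path; any 2xx status
    # means the container is up and signal-cli is responding.
    print("health status:", resp.status)

# The interactive Swagger UI (used as the app's web portal) lives at:
print("swagger UI:", f"{BASE_URL}/swagger/index.html")
```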
ix-dev/community/signal-cli-rest-api/app.yaml (new file, +41)
@@ -0,0 +1,41 @@
app_version: '0.95'
capabilities:
  - description: Signal CLI REST API is able to change file ownership arbitrarily
    name: CHOWN
  - description: Signal CLI REST API is able to bypass file permission checks
    name: DAC_OVERRIDE
  - description: Signal CLI REST API is able to bypass permission checks for file operations
    name: FOWNER
  - description: Signal CLI REST API is able to change group ID of processes
    name: SETGID
  - description: Signal CLI REST API is able to change user ID of processes
    name: SETUID
categories:
  - productivity
changelog_url: https://github.com/bbernhard/signal-cli-rest-api/releases
date_added: '2025-11-14'
description: This project creates a small dockerized REST API around signal-cli.
home: https://github.com/bbernhard/signal-cli-rest-api
host_mounts: []
icon: https://media.sys.truenas.net/apps/signal-cli-rest-api/icons/icon.svg
keywords:
  - signal
lib_version: 2.1.62
lib_version_hash: 5d04ea326fca3ee8d6bbdd7ae966453f45e80ba8c83a81dfdae14ff99f8e70d5
maintainers:
  - email: dev@truenas.com
    name: truenas
    url: https://www.truenas.com/
name: signal-cli-rest-api
run_as_context:
  - description: Signal CLI REST API runs as root user.
    gid: 0
    group_name: root
    uid: 0
    user_name: root
screenshots: []
sources:
  - https://github.com/bbernhard/signal-cli-rest-api
title: Signal CLI REST API
train: community
version: 1.0.0
ix-dev/community/signal-cli-rest-api/item.yaml (new file, +6)
@@ -0,0 +1,6 @@
categories:
  - productivity
icon_url: https://media.sys.truenas.net/apps/signal-cli-rest-api/icons/icon.svg
screenshots: []
tags:
  - signal
ix-dev/community/signal-cli-rest-api/ix_values.yaml (new file, +8)
@@ -0,0 +1,8 @@
images:
  image:
    repository: bbernhard/signal-cli-rest-api
    tag: 0.95

consts:
  signal_cli_rest_api_container_name: signal-cli-rest-api
  config_path: /config
ix-dev/community/signal-cli-rest-api/questions.yaml (new file, +449)
@@ -0,0 +1,449 @@
|
||||
groups:
|
||||
- name: Signal CLI REST API Configuration
|
||||
description: Configure Signal CLI REST API
|
||||
- name: User and Group Configuration
|
||||
description: Configure User and Group for Signal CLI REST API
|
||||
- name: Network Configuration
|
||||
description: Configure Network for Signal CLI REST API
|
||||
- name: Storage Configuration
|
||||
description: Configure Storage for Signal CLI REST API
|
||||
- name: Labels Configuration
|
||||
description: Configure Labels for Signal CLI REST API
|
||||
- name: Resources Configuration
|
||||
description: Configure Resources for Signal CLI REST API
|
||||
|
||||
questions:
|
||||
- variable: TZ
|
||||
group: Signal CLI REST API Configuration
|
||||
label: Timezone
|
||||
schema:
|
||||
type: string
|
||||
default: Etc/UTC
|
||||
required: true
|
||||
$ref:
|
||||
- definitions/timezone
|
||||
|
||||
- variable: signal_cli_rest_api
|
||||
label: ""
|
||||
group: Signal CLI REST API Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: mode
|
||||
label: Mode
|
||||
description: |
|
||||
The mode of the Signal CLI REST API.</br>
|
||||
https://github.com/bbernhard/signal-cli-rest-api?tab=readme-ov-file#execution-modes
|
||||
schema:
|
||||
type: string
|
||||
default: native
|
||||
required: true
|
||||
enum:
|
||||
- value: normal
|
||||
description: Normal
|
||||
- value: native
|
||||
description: Native
|
||||
- value: json-rpc
|
||||
description: JSON-RPC
|
||||
- variable: additional_envs
|
||||
label: Additional Environment Variables
|
||||
schema:
|
||||
type: list
|
||||
default: []
|
||||
items:
|
||||
- variable: env
|
||||
label: Environment Variable
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: name
|
||||
label: Name
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: value
|
||||
label: Value
|
||||
schema:
|
||||
type: string
|
||||
- variable: run_as
|
||||
label: ""
|
||||
group: User and Group Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: user
|
||||
label: User ID
|
||||
description: The user id that Signal CLI REST API files will be owned by.
|
||||
schema:
|
||||
type: int
|
||||
min: 568
|
||||
default: 568
|
||||
required: true
|
||||
- variable: group
|
||||
label: Group ID
|
||||
description: The group id that Signal CLI REST API files will be owned by.
|
||||
schema:
|
||||
type: int
|
||||
min: 568
|
||||
default: 568
|
||||
required: true
|
||||
|
||||
- variable: network
|
||||
label: ""
|
||||
group: Network Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: rest_port
|
||||
label: REST Port
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: bind_mode
|
||||
label: Port Bind Mode
|
||||
description: |
|
||||
The port bind mode.</br>
|
||||
- Publish: The port will be published on the host for external access.</br>
|
||||
- Expose: The port will be exposed for inter-container communication.</br>
|
||||
- None: The port will not be exposed or published.</br>
|
||||
Note: If the Dockerfile defines an EXPOSE directive,
|
||||
the port will still be exposed for inter-container communication regardless of this setting.
|
||||
schema:
|
||||
type: string
|
||||
default: "published"
|
||||
enum:
|
||||
- value: "published"
|
||||
description: Publish port on the host for external access
|
||||
- value: "exposed"
|
||||
description: Expose port for inter-container communication
|
||||
- value: ""
|
||||
description: None
|
||||
- variable: port_number
|
||||
label: Port Number
|
||||
schema:
|
||||
type: int
|
||||
default: 30295
|
||||
min: 1
|
||||
max: 65535
|
||||
required: true
|
||||
- variable: host_ips
|
||||
label: Host IPs
|
||||
description: IPs on the host to bind this port
|
||||
schema:
|
||||
type: list
|
||||
show_if: [["bind_mode", "=", "published"]]
|
||||
default: []
|
||||
items:
|
||||
- variable: host_ip
|
||||
label: Host IP
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
$ref:
|
||||
- definitions/node_bind_ip
|
||||
- variable: host_network
|
||||
label: Host Network
|
||||
description: |
|
||||
Bind to the host network. It's recommended to keep this disabled.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
- variable: storage
|
||||
label: ""
|
||||
group: Storage Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: config
|
||||
label: Config Storage
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: type
|
||||
label: Type
|
||||
description: |
|
||||
ixVolume: Is dataset created automatically by the system.</br>
|
||||
Host Path: Is a path that already exists on the system.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
default: "ix_volume"
|
||||
enum:
|
||||
- value: "host_path"
|
||||
description: Host Path (Path that already exists on the system)
|
||||
- value: "ix_volume"
|
||||
description: ixVolume (Dataset created automatically by the system)
|
||||
- variable: ix_volume_config
|
||||
label: ixVolume Configuration
|
||||
description: The configuration for the ixVolume dataset.
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "ix_volume"]]
|
||||
$ref:
|
||||
- "normalize/ix_volume"
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: dataset_name
|
||||
label: Dataset Name
|
||||
description: The name of the dataset to use for storage.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
hidden: true
|
||||
default: "config"
|
||||
- variable: acl_entries
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
- variable: host_path_config
|
||||
label: Host Path Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "host_path"]]
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: acl
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
$ref:
|
||||
- "normalize/acl"
|
||||
- variable: path
|
||||
label: Host Path
|
||||
description: The host path to use for storage.
|
||||
schema:
|
||||
type: hostpath
|
||||
show_if: [["acl_enable", "=", false]]
|
||||
required: true
|
||||
- variable: additional_storage
|
||||
label: Additional Storage
|
||||
schema:
|
||||
type: list
|
||||
default: []
|
||||
items:
|
||||
- variable: storageEntry
|
||||
label: Storage Entry
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: type
|
||||
label: Type
|
||||
description: |
|
||||
ixVolume: Is dataset created automatically by the system.</br>
|
||||
Host Path: Is a path that already exists on the system.</br>
|
||||
SMB Share: Is a SMB share that is mounted to as a volume.</br>
|
||||
NFS Share: Is a NFS share that is mounted to as a volume.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
default: "ix_volume"
|
||||
enum:
|
||||
- value: "host_path"
|
||||
description: Host Path (Path that already exists on the system)
|
||||
- value: "ix_volume"
|
||||
description: ixVolume (Dataset created automatically by the system)
|
||||
- value: "cifs"
|
||||
description: SMB/CIFS Share (Mounts a volume to a SMB share)
|
||||
- value: "nfs"
|
||||
description: NFS Share (Mounts a volume to a NFS share)
|
||||
- variable: read_only
|
||||
label: Read Only
|
||||
description: Mount the volume as read only.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: mount_path
|
||||
label: Mount Path
|
||||
description: The path inside the container to mount the storage.
|
||||
schema:
|
||||
type: path
|
||||
required: true
|
||||
- variable: host_path_config
|
||||
label: Host Path Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "host_path"]]
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: acl
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
$ref:
|
||||
- "normalize/acl"
|
||||
- variable: path
|
||||
label: Host Path
|
||||
description: The host path to use for storage.
|
||||
schema:
|
||||
type: hostpath
|
||||
show_if: [["acl_enable", "=", false]]
|
||||
required: true
|
||||
- variable: ix_volume_config
|
||||
label: ixVolume Configuration
|
||||
description: The configuration for the ixVolume dataset.
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "ix_volume"]]
|
||||
$ref:
|
||||
- "normalize/ix_volume"
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: dataset_name
|
||||
label: Dataset Name
|
||||
description: The name of the dataset to use for storage.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
default: "storage_entry"
|
||||
- variable: acl_entries
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
$ref:
|
||||
- "normalize/acl"
|
||||
- variable: cifs_config
|
||||
label: SMB Configuration
|
||||
description: The configuration for the SMB dataset.
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "cifs"]]
|
||||
attrs:
|
||||
- variable: server
|
||||
label: Server
|
||||
description: The server to mount the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: path
|
||||
label: Path
|
||||
description: The path to mount the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: username
|
||||
label: Username
|
||||
description: The username to use for the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: password
|
||||
label: Password
|
||||
description: The password to use for the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
private: true
|
||||
- variable: domain
|
||||
label: Domain
|
||||
description: The domain to use for the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
- variable: nfs_config
|
||||
label: NFS Configuration
|
||||
description: The configuration for the NFS dataset.
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "nfs"]]
|
||||
attrs:
|
||||
- variable: server
|
||||
label: Server
|
||||
description: The server to mount the NFS share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: path
|
||||
label: Path
|
||||
description: The path to mount the NFS share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: labels
|
||||
label: ""
|
||||
group: Labels Configuration
|
||||
schema:
|
||||
type: list
|
||||
default: []
|
||||
items:
|
||||
- variable: label
|
||||
label: Label
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: key
|
||||
label: Key
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: value
|
||||
label: Value
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: containers
|
||||
label: Containers
|
||||
description: Containers where the label should be applied
|
||||
schema:
|
||||
type: list
|
||||
items:
|
||||
- variable: container
|
||||
label: Container
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
enum:
|
||||
- value: signal-cli-rest-api
|
||||
description: signal-cli-rest-api
|
||||
- variable: resources
|
||||
label: ""
|
||||
group: Resources Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: limits
|
||||
label: Limits
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: cpus
|
||||
label: CPUs
|
||||
description: CPUs limit for Signal CLI REST API.
|
||||
schema:
|
||||
type: int
|
||||
default: 2
|
||||
required: true
|
||||
- variable: memory
|
||||
label: Memory (in MB)
|
||||
description: Memory limit for Signal CLI REST API.
|
||||
schema:
|
||||
type: int
|
||||
default: 4096
|
||||
required: true
|
||||
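The `bind_mode` choices described in the REST Port question above map onto the generated compose file roughly as follows. This is a simplified, illustrative sketch of the logic the bundled library applies in `Container.add_port` (included later in this diff), not the library code itself:

```python
# Simplified illustration of how a port question's bind_mode is translated.
# "published" -> host port mappings (one per host IP, defaulting to all IPs),
# "exposed"   -> an expose entry for inter-container traffic only,
# ""          -> the port is skipped entirely.
def translate_port(config: dict) -> dict:
    bind_mode = config.get("bind_mode", "")
    if not bind_mode:
        return {}

    host_port = config.get("port_number", 0)
    container_port = config.get("container_port", 0) or host_port
    protocol = config.get("protocol", "tcp")
    host_ips = config.get("host_ips") or ["0.0.0.0", "::"]

    if bind_mode == "published":
        return {
            "ports": [
                {"published": host_port, "target": container_port,
                 "protocol": protocol, "host_ip": ip}
                for ip in host_ips
            ]
        }
    # bind_mode == "exposed"
    return {"expose": [f"{container_port}/{protocol}"]}


# Example with the app's default REST port question values:
print(translate_port({"bind_mode": "published", "port_number": 30295}))
```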
@@ -0,0 +1,24 @@
{% set tpl = ix_lib.base.render.Render(values) %}

{% set c1 = tpl.add_container(values.consts.signal_cli_rest_api_container_name, "image") %}

{% do c1.add_caps(["CHOWN", "FOWNER", "DAC_OVERRIDE", "SETUID", "SETGID"]) %}
{% do c1.healthcheck.set_test("curl", {"port": values.network.rest_port.port_number, "path": "/v1/health"}) %}
{% do c1.environment.add_env("SIGNAL_CLI_UID", values.run_as.user) %}
{% do c1.environment.add_env("SIGNAL_CLI_GID", values.run_as.group) %}
{% do c1.environment.add_env("SIGNAL_CLI_CONFIG_DIR", values.consts.config_path) %}
{% do c1.environment.add_env("MODE", values.signal_cli_rest_api.mode) %}
{% do c1.environment.add_env("PORT", values.network.rest_port.port_number) %}
{% do c1.environment.add_user_envs(values.signal_cli_rest_api.additional_envs) %}

{% do c1.add_port(values.network.rest_port) %}

{% do c1.add_storage(values.consts.config_path, values.storage.config) %}

{% for store in values.storage.additional_storage %}
{% do c1.add_storage(store.mount_path, store) %}
{% endfor %}

{% do tpl.portals.add(values.network.rest_port, {"path": "/swagger/index.html"}) %}

{{ tpl.render() | tojson }}
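For orientation, `tpl.render() | tojson` above emits a full compose project definition. The fragment below is an illustrative, hand-written Python dict of roughly what the `signal-cli-rest-api` service entry looks like with default question values; the real renderer emits additional keys (healthcheck command, restart policy, networks, volume definitions) and its exact environment/ports shape may differ.

```python
# Illustrative (not generated) sketch of the service entry produced by the
# template for default values. Field names follow the Container.render()
# implementation included later in this diff; omitted parts are marked.
example_service = {
    "image": "bbernhard/signal-cli-rest-api:0.95",   # from ix_values.yaml
    "platform": "linux/amd64",
    "cap_drop": ["ALL"],                              # library default
    "cap_add": ["CHOWN", "DAC_OVERRIDE", "FOWNER", "SETGID", "SETUID"],
    "environment": {                                  # rendered shape may differ
        "MODE": "native",
        "PORT": 30295,
        "SIGNAL_CLI_UID": 568,
        "SIGNAL_CLI_GID": 568,
        "SIGNAL_CLI_CONFIG_DIR": "/config",
    },
    "ports": [                                        # one entry per host IP
        {"published": 30295, "target": 30295, "protocol": "tcp", "host_ip": "0.0.0.0"},
        {"published": 30295, "target": 30295, "protocol": "tcp", "host_ip": "::"},
    ],
    # "volumes", "healthcheck", "restart", "labels", ... omitted for brevity
}
```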
@@ -0,0 +1,70 @@
|
||||
import os
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
|
||||
|
||||
def is_truenas_system():
|
||||
"""Check if we're running on a TrueNAS system"""
|
||||
return "truenas" in os.uname().release
|
||||
|
||||
|
||||
# Import based on system detection
|
||||
if is_truenas_system():
|
||||
from truenas_api_client import Client as TrueNASClient
|
||||
|
||||
try:
|
||||
# 25.04 and later
|
||||
from truenas_api_client.exc import ValidationErrors
|
||||
except ImportError:
|
||||
# 24.10 and earlier
|
||||
from truenas_api_client import ValidationErrors
|
||||
else:
|
||||
# Mock classes for non-TrueNAS systems
|
||||
class TrueNASClient:
|
||||
def call(self, *args, **kwargs):
|
||||
return None
|
||||
|
||||
class ValidationErrors(Exception):
|
||||
def __init__(self, errors):
|
||||
self.errors = errors
|
||||
|
||||
|
||||
class Client:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self.client = TrueNASClient()
|
||||
self._render_instance = render_instance
|
||||
self._app_name: str = self._render_instance.values.get("ix_context", {}).get("app_name", "") or "unknown"
|
||||
|
||||
def validate_ip_port_combo(self, ip: str, port: int) -> None:
|
||||
# Examples of error messages:
|
||||
# The port is being used by following services: 1) "0.0.0.0:80" used by WebUI Service
|
||||
# The port is being used by following services: 1) "0.0.0.0:9998" used by Applications ('$app_name' application)
|
||||
try:
|
||||
self.client.call("port.validate_port", f"render.{self._app_name}.schema", port, ip, None, True)
|
||||
except ValidationErrors as e:
|
||||
err_str = str(e)
|
||||
# If the IP:port combo appears more than once in the error message,
# it means that the port is used by more than one service/app.
# This shouldn't happen in a well-configured system.
# Note that the IP portion is not included in the check,
# because the input might be a specific IP, while another service or app
# might be using the same port on a wildcard IP.
|
||||
if err_str.count(f':{port}" used by') > 1:
|
||||
raise RenderError(err_str) from None
|
||||
|
||||
# If the error complains about the current app, we ignore it
|
||||
# This is to handle cases where the app is being updated or edited
|
||||
if f"Applications ('{self._app_name}' application)" in err_str:
|
||||
# During upgrade, we want to ignore the error if it is related to the current app
|
||||
return
|
||||
|
||||
raise RenderError(err_str) from None
|
||||
except Exception:
|
||||
pass
|
||||
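Usage of this port-validation helper is straightforward. Below is a minimal sketch, assuming a `Render` instance named `render` already exists in the module context (on non-TrueNAS systems the mocked `TrueNASClient.call` returns `None`, so the check silently passes):

```python
# Sketch: check that the chosen host port is free before rendering.
# `render` is an assumed, pre-built Render instance; Client and RenderError
# come from the library modules in this diff.
client = Client(render)

try:
    # Raises RenderError if another service or app already binds 0.0.0.0:30295
    # (conflicts reported against this same app are deliberately ignored).
    client.validate_ip_port_combo("0.0.0.0", 30295)
except RenderError as err:
    print(f"port conflict: {err}")
```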
@@ -0,0 +1,86 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .formatter import escape_dollar
|
||||
from .validations import valid_octal_mode_or_raise, valid_fs_path_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from formatter import escape_dollar
|
||||
from validations import valid_octal_mode_or_raise, valid_fs_path_or_raise
|
||||
|
||||
|
||||
class Configs:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._configs: dict[str, dict] = {}
|
||||
|
||||
def add(self, name: str, data: str):
|
||||
if not isinstance(data, str):
|
||||
raise RenderError(f"Expected [data] to be a string, got [{type(data)}]")
|
||||
|
||||
if name not in self._configs:
|
||||
self._configs[name] = {"name": name, "data": data}
|
||||
return
|
||||
|
||||
if data == self._configs[name]["data"]:
|
||||
return
|
||||
|
||||
raise RenderError(f"Config [{name}] already added with different data")
|
||||
|
||||
def has_configs(self):
|
||||
return bool(self._configs)
|
||||
|
||||
def render(self):
|
||||
return {
|
||||
c["name"]: {"content": escape_dollar(c["data"])}
|
||||
for c in sorted(self._configs.values(), key=lambda c: c["name"])
|
||||
}
|
||||
|
||||
|
||||
class ContainerConfigs:
|
||||
def __init__(self, render_instance: "Render", configs: Configs):
|
||||
self._render_instance = render_instance
|
||||
self.top_level_configs: Configs = configs
|
||||
self.container_configs: set[ContainerConfig] = set()
|
||||
|
||||
def add(self, name: str, data: str, target: str, mode: str = ""):
|
||||
self.top_level_configs.add(name, data)
|
||||
|
||||
if target == "":
|
||||
raise RenderError(f"Expected [target] to be set for config [{name}]")
|
||||
if mode != "":
|
||||
mode = valid_octal_mode_or_raise(mode)
|
||||
|
||||
if target in [c.target for c in self.container_configs]:
|
||||
raise RenderError(f"Target [{target}] already used for another config")
|
||||
target = valid_fs_path_or_raise(target)
|
||||
self.container_configs.add(ContainerConfig(self._render_instance, name, target, mode))
|
||||
|
||||
def has_configs(self):
|
||||
return bool(self.container_configs)
|
||||
|
||||
def render(self):
|
||||
return [c.render() for c in sorted(self.container_configs, key=lambda c: c.source)]
|
||||
|
||||
|
||||
class ContainerConfig:
|
||||
def __init__(self, render_instance: "Render", source: str, target: str, mode: str):
|
||||
self._render_instance = render_instance
|
||||
self.source = source
|
||||
self.target = target
|
||||
self.mode = mode
|
||||
|
||||
def render(self):
|
||||
result: dict[str, str | int] = {
|
||||
"source": self.source,
|
||||
"target": self.target,
|
||||
}
|
||||
|
||||
if self.mode:
|
||||
result["mode"] = int(self.mode, 8)
|
||||
|
||||
return result
|
||||
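A short sketch of how these classes are driven from a container object: `c.configs` is a `ContainerConfigs` bound to the render-wide `Configs` registry. The config name, content, and target path below are illustrative.

```python
# Sketch: inject a small config file into a container at render time.
# `c` is assumed to be a Container created via tpl.add_container(...).
c.configs.add(
    name="app-conf",                 # illustrative name
    data="key = value\n",            # file content; "$" is escaped on render
    target="/etc/app/app.conf",      # mount path inside the container
    mode="0440",                     # optional octal mode, validated above
)

# Top-level `configs:` section (content keyed by name):
#   {"app-conf": {"content": "key = value\n"}}
# Per-container entry (mode stored as an integer, int("0440", 8) == 288):
#   [{"source": "app-conf", "target": "/etc/app/app.conf", "mode": 288}]
```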
@@ -0,0 +1,465 @@
|
||||
from typing import Any, TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
try:
|
||||
from .configs import ContainerConfigs
|
||||
from .depends import Depends
|
||||
from .deploy import Deploy
|
||||
from .device_cgroup_rules import DeviceCGroupRules
|
||||
from .devices import Devices
|
||||
from .dns import Dns
|
||||
from .environment import Environment
|
||||
from .error import RenderError
|
||||
from .expose import Expose
|
||||
from .extra_hosts import ExtraHosts
|
||||
from .formatter import escape_dollar, get_image_with_hashed_data
|
||||
from .healthcheck import Healthcheck
|
||||
from .labels import Labels
|
||||
from .ports import Ports
|
||||
from .restart import RestartPolicy
|
||||
from .tmpfs import Tmpfs
|
||||
from .validations import (
|
||||
valid_cap_or_raise,
|
||||
valid_cgroup_or_raise,
|
||||
valid_ipc_mode_or_raise,
|
||||
valid_network_mode_or_raise,
|
||||
valid_pid_mode_or_raise,
|
||||
valid_port_bind_mode_or_raise,
|
||||
valid_port_mode_or_raise,
|
||||
valid_pull_policy_or_raise,
|
||||
)
|
||||
from .security_opts import SecurityOpts
|
||||
from .storage import Storage
|
||||
from .sysctls import Sysctls
|
||||
except ImportError:
|
||||
from configs import ContainerConfigs
|
||||
from depends import Depends
|
||||
from deploy import Deploy
|
||||
from device_cgroup_rules import DeviceCGroupRules
|
||||
from devices import Devices
|
||||
from dns import Dns
|
||||
from environment import Environment
|
||||
from error import RenderError
|
||||
from expose import Expose
|
||||
from extra_hosts import ExtraHosts
|
||||
from formatter import escape_dollar, get_image_with_hashed_data
|
||||
from healthcheck import Healthcheck
|
||||
from labels import Labels
|
||||
from ports import Ports
|
||||
from restart import RestartPolicy
|
||||
from tmpfs import Tmpfs
|
||||
from validations import (
|
||||
valid_cap_or_raise,
|
||||
valid_cgroup_or_raise,
|
||||
valid_ipc_mode_or_raise,
|
||||
valid_network_mode_or_raise,
|
||||
valid_pid_mode_or_raise,
|
||||
valid_port_bind_mode_or_raise,
|
||||
valid_port_mode_or_raise,
|
||||
valid_pull_policy_or_raise,
|
||||
)
|
||||
from security_opts import SecurityOpts
|
||||
from storage import Storage
|
||||
from sysctls import Sysctls
|
||||
|
||||
|
||||
class Container:
|
||||
def __init__(self, render_instance: "Render", name: str, image: str):
|
||||
self._render_instance = render_instance
|
||||
|
||||
self._name: str = name
|
||||
self._image: str = self._resolve_image(image)
|
||||
self._build_image: str = ""
|
||||
self._pull_policy: str = ""
|
||||
self._user: str = ""
|
||||
self._tty: bool = False
|
||||
self._stdin_open: bool = False
|
||||
self._init: bool | None = None
|
||||
self._read_only: bool | None = None
|
||||
self._extra_hosts: ExtraHosts = ExtraHosts(self._render_instance)
|
||||
self._hostname: str = ""
|
||||
self._cap_drop: set[str] = set(["ALL"]) # Drop all capabilities by default and add caps granularly
|
||||
self._cap_add: set[str] = set()
|
||||
self._security_opt: SecurityOpts = SecurityOpts(self._render_instance)
|
||||
self._privileged: bool = False
|
||||
self._group_add: set[int | str] = set()
|
||||
self._network_mode: str = ""
|
||||
self._entrypoint: list[str] = []
|
||||
self._command: list[str] = []
|
||||
self._grace_period: int | None = None
|
||||
self._shm_size: int | None = None
|
||||
self._storage: Storage = Storage(self._render_instance, self)
|
||||
self._tmpfs: Tmpfs = Tmpfs(self._render_instance, self)
|
||||
self._ipc_mode: str | None = None
|
||||
self._pid_mode: str | None = None
|
||||
self._cgroup: str | None = None
|
||||
self._device_cgroup_rules: DeviceCGroupRules = DeviceCGroupRules(self._render_instance)
|
||||
self.sysctls: Sysctls = Sysctls(self._render_instance, self)
|
||||
self.configs: ContainerConfigs = ContainerConfigs(self._render_instance, self._render_instance.configs)
|
||||
self.deploy: Deploy = Deploy(self._render_instance)
|
||||
self.networks: set[str] = set()
|
||||
self.devices: Devices = Devices(self._render_instance)
|
||||
self.environment: Environment = Environment(self._render_instance, self.deploy.resources)
|
||||
self.dns: Dns = Dns(self._render_instance)
|
||||
self.depends: Depends = Depends(self._render_instance)
|
||||
self.healthcheck: Healthcheck = Healthcheck(self._render_instance)
|
||||
self.labels: Labels = Labels(self._render_instance)
|
||||
self.restart: RestartPolicy = RestartPolicy(self._render_instance)
|
||||
self.ports: Ports = Ports(self._render_instance)
|
||||
self.expose: Expose = Expose(self._render_instance)
|
||||
|
||||
self._auto_set_network_mode()
|
||||
self._auto_add_labels()
|
||||
self._auto_add_groups()
|
||||
|
||||
def _auto_add_groups(self):
|
||||
self.add_group(568)
|
||||
|
||||
def _auto_set_network_mode(self):
|
||||
if self._render_instance.values.get("network", {}).get("host_network", False):
|
||||
self.set_network_mode("host")
|
||||
|
||||
def _auto_add_labels(self):
|
||||
labels = self._render_instance.values.get("labels", [])
|
||||
if not labels:
|
||||
return
|
||||
|
||||
for label in labels:
|
||||
containers = label.get("containers", [])
|
||||
if not containers:
|
||||
raise RenderError(f'Label [{label.get("key", "")}] must have at least one container')
|
||||
|
||||
if self._name in containers:
|
||||
self.labels.add_label(label["key"], label["value"])
|
||||
|
||||
def _resolve_image(self, image: str):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(
|
||||
f"Image [{image}] not found in values. " f"Available images: [{', '.join(images.keys())}]"
|
||||
)
|
||||
repo = images[image].get("repository", "")
|
||||
tag = images[image].get("tag", "")
|
||||
|
||||
if not repo:
|
||||
raise RenderError(f"Repository not found for image [{image}]")
|
||||
if not tag:
|
||||
raise RenderError(f"Tag not found for image [{image}]")
|
||||
|
||||
return f"{repo}:{tag}"
|
||||
|
||||
def name(self) -> str:
|
||||
return self._name
|
||||
|
||||
def build_image(self, content: list[str | None]):
|
||||
dockerfile = f"FROM {self._image}\n"
|
||||
for line in content:
|
||||
line = line.strip() if line else ""
|
||||
if not line:
|
||||
continue
|
||||
if line.startswith("FROM"):
|
||||
# TODO: This will also block multi-stage builds
|
||||
# We can revisit this later if we need it
|
||||
raise RenderError(
|
||||
"FROM cannot be used in build image. Define the base image when creating the container."
|
||||
)
|
||||
dockerfile += line + "\n"
|
||||
|
||||
self._build_image = dockerfile
|
||||
self._image = get_image_with_hashed_data(self._image, dockerfile)
|
||||
|
||||
def set_pull_policy(self, pull_policy: str):
|
||||
self._pull_policy = valid_pull_policy_or_raise(pull_policy)
|
||||
|
||||
def set_user(self, user: int, group: int):
|
||||
for i in (user, group):
|
||||
if not isinstance(i, int) or i < 0:
|
||||
raise RenderError(f"User/Group [{i}] is not valid")
|
||||
self._user = f"{user}:{group}"
|
||||
|
||||
def add_extra_host(self, host: str, ip: str):
|
||||
self._extra_hosts.add_host(host, ip)
|
||||
|
||||
def add_group(self, group: int | str):
|
||||
if isinstance(group, str):
|
||||
group = str(group).strip()
|
||||
if group.isdigit():
|
||||
raise RenderError(f"Group is a number [{group}] but passed as a string")
|
||||
|
||||
if group in self._group_add:
|
||||
raise RenderError(f"Group [{group}] already added")
|
||||
self._group_add.add(group)
|
||||
|
||||
def get_additional_groups(self) -> list[int | str]:
|
||||
result = []
|
||||
if self.deploy.resources.has_gpus() or self.devices.has_gpus():
|
||||
result.append(44) # video
|
||||
result.append(107) # render
|
||||
return result
|
||||
|
||||
def get_current_groups(self) -> list[str]:
|
||||
result = [str(g) for g in self._group_add]
|
||||
result.extend([str(g) for g in self.get_additional_groups()])
|
||||
return result
|
||||
|
||||
def set_tty(self, enabled: bool = False):
|
||||
self._tty = enabled
|
||||
|
||||
def set_stdin(self, enabled: bool = False):
|
||||
self._stdin_open = enabled
|
||||
|
||||
def set_ipc_mode(self, ipc_mode: str):
|
||||
self._ipc_mode = valid_ipc_mode_or_raise(ipc_mode, self._render_instance.container_names())
|
||||
|
||||
def set_pid_mode(self, mode: str = ""):
|
||||
self._pid_mode = valid_pid_mode_or_raise(mode, self._render_instance.container_names())
|
||||
|
||||
def add_device_cgroup_rule(self, dev_grp_rule: str):
|
||||
self._device_cgroup_rules.add_rule(dev_grp_rule)
|
||||
|
||||
def set_cgroup(self, cgroup: str):
|
||||
self._cgroup = valid_cgroup_or_raise(cgroup)
|
||||
|
||||
def set_init(self, enabled: bool = False):
|
||||
self._init = enabled
|
||||
|
||||
def set_read_only(self, enabled: bool = False):
|
||||
self._read_only = enabled
|
||||
|
||||
def set_hostname(self, hostname: str):
|
||||
self._hostname = hostname
|
||||
|
||||
def set_grace_period(self, grace_period: int):
|
||||
if grace_period < 0:
|
||||
raise RenderError(f"Grace period [{grace_period}] cannot be negative")
|
||||
self._grace_period = grace_period
|
||||
|
||||
def set_privileged(self, enabled: bool = False):
|
||||
self._privileged = enabled
|
||||
|
||||
def clear_caps(self):
|
||||
self._cap_add.clear()
|
||||
self._cap_drop.clear()
|
||||
|
||||
def add_caps(self, caps: list[str]):
|
||||
for c in caps:
|
||||
if c in self._cap_add:
|
||||
raise RenderError(f"Capability [{c}] already added")
|
||||
self._cap_add.add(valid_cap_or_raise(c))
|
||||
|
||||
def add_security_opt(self, key: str, value: str | bool | None = None, arg: str | None = None):
|
||||
self._security_opt.add_opt(key, value, arg)
|
||||
|
||||
def remove_security_opt(self, key: str):
|
||||
self._security_opt.remove_opt(key)
|
||||
|
||||
def set_network_mode(self, mode: str):
|
||||
self._network_mode = valid_network_mode_or_raise(mode, self._render_instance.container_names())
|
||||
|
||||
def add_port(self, port_config: dict | None = None, dev_config: dict | None = None):
|
||||
port_config = port_config or {}
|
||||
dev_config = dev_config or {}
|
||||
# Merge port_config and dev_config (dev_config has precedence)
|
||||
config = port_config | dev_config
|
||||
bind_mode = valid_port_bind_mode_or_raise(config.get("bind_mode", ""))
|
||||
# Skip the port if it's neither published nor exposed
|
||||
if not bind_mode:
|
||||
return
|
||||
|
||||
# Collect port config
|
||||
mode = valid_port_mode_or_raise(config.get("mode", "ingress"))
|
||||
host_port = config.get("port_number", 0)
|
||||
container_port = config.get("container_port", 0) or host_port
|
||||
protocol = config.get("protocol", "tcp")
|
||||
host_ips = config.get("host_ips") or ["0.0.0.0", "::"]
|
||||
if not isinstance(host_ips, list):
|
||||
raise RenderError(f"Expected [host_ips] to be a list, got [{host_ips}]")
|
||||
|
||||
if bind_mode == "published":
|
||||
for host_ip in host_ips:
|
||||
self.ports._add_port(
|
||||
host_port, container_port, {"protocol": protocol, "host_ip": host_ip, "mode": mode}
|
||||
)
|
||||
elif bind_mode == "exposed":
|
||||
self.expose.add_port(container_port, protocol)
|
||||
|
||||
def set_entrypoint(self, entrypoint: list[str]):
|
||||
self._entrypoint = [escape_dollar(str(e)) for e in entrypoint]
|
||||
|
||||
def set_command(self, command: list[str]):
|
||||
self._command = [escape_dollar(str(e)) for e in command]
|
||||
|
||||
def add_storage(self, mount_path: str, config: "IxStorage"):
|
||||
if config.get("type", "") == "tmpfs":
|
||||
self._tmpfs.add(mount_path, config)
|
||||
else:
|
||||
self._storage.add(mount_path, config)
|
||||
|
||||
def add_docker_socket(self, read_only: bool = True, mount_path: str = "/var/run/docker.sock"):
|
||||
self.add_group(999)
|
||||
self._storage._add_docker_socket(read_only, mount_path)
|
||||
|
||||
def add_udev(self, read_only: bool = True, mount_path: str = "/run/udev"):
|
||||
self._storage._add_udev(read_only, mount_path)
|
||||
|
||||
def add_tun_device(self):
|
||||
self.devices._add_tun_device()
|
||||
|
||||
def add_snd_device(self):
|
||||
self.add_group(29)
|
||||
self.devices._add_snd_device()
|
||||
|
||||
def add_usb_bus(self):
|
||||
self.devices.add_usb_bus()
|
||||
|
||||
def setup_as_helper(self, profile: str = "low", disable_network: bool = True):
|
||||
self.restart.set_policy("on-failure", 1)
|
||||
self.healthcheck.disable()
|
||||
self.remove_devices()
|
||||
if profile:
|
||||
self.deploy.resources.set_profile(profile)
|
||||
if disable_network:
|
||||
self.set_network_mode("none")
|
||||
|
||||
def set_shm_size_mb(self, size: int):
|
||||
self._shm_size = size
|
||||
|
||||
# Easily remove devices from the container
|
||||
# Useful in dependencies like postgres and redis
|
||||
# where there is no need to pass devices to them
|
||||
def remove_devices(self):
|
||||
self.deploy.resources.remove_devices()
|
||||
self.devices.remove_devices()
|
||||
|
||||
@property
|
||||
def storage(self):
|
||||
return self._storage
|
||||
|
||||
def render(self) -> dict[str, Any]:
|
||||
if self._network_mode and self.networks:
|
||||
raise RenderError("Cannot set both [network_mode] and [networks]")
|
||||
|
||||
result = {
|
||||
"image": self._image,
|
||||
"platform": "linux/amd64",
|
||||
"tty": self._tty,
|
||||
"stdin_open": self._stdin_open,
|
||||
"restart": self.restart.render(),
|
||||
}
|
||||
|
||||
if self._pull_policy:
|
||||
result["pull_policy"] = self._pull_policy
|
||||
|
||||
if self.healthcheck.has_healthcheck():
|
||||
result["healthcheck"] = self.healthcheck.render()
|
||||
|
||||
if self._hostname:
|
||||
result["hostname"] = self._hostname
|
||||
|
||||
if self._build_image:
|
||||
result["build"] = {"tags": [self._image], "dockerfile_inline": self._build_image}
|
||||
|
||||
if self.configs.has_configs():
|
||||
result["configs"] = self.configs.render()
|
||||
|
||||
if self._ipc_mode is not None:
|
||||
result["ipc"] = self._ipc_mode
|
||||
|
||||
if self._pid_mode is not None:
|
||||
result["pid"] = self._pid_mode
|
||||
|
||||
if self._device_cgroup_rules.has_rules():
|
||||
result["device_cgroup_rules"] = self._device_cgroup_rules.render()
|
||||
|
||||
if self._cgroup is not None:
|
||||
result["cgroup"] = self._cgroup
|
||||
|
||||
if self._extra_hosts.has_hosts():
|
||||
result["extra_hosts"] = self._extra_hosts.render()
|
||||
|
||||
if self._init is not None:
|
||||
result["init"] = self._init
|
||||
|
||||
if self._read_only is not None:
|
||||
result["read_only"] = self._read_only
|
||||
|
||||
if self._grace_period is not None:
|
||||
result["stop_grace_period"] = f"{self._grace_period}s"
|
||||
|
||||
if self._user:
|
||||
result["user"] = self._user
|
||||
|
||||
for g in self.get_additional_groups():
|
||||
self.add_group(g)
|
||||
|
||||
if self._group_add:
|
||||
result["group_add"] = sorted(self._group_add, key=lambda g: (isinstance(g, str), g))
|
||||
|
||||
if self._shm_size is not None:
|
||||
result["shm_size"] = f"{self._shm_size}M"
|
||||
|
||||
if self._privileged is not None:
|
||||
result["privileged"] = self._privileged
|
||||
|
||||
if self._cap_drop:
|
||||
result["cap_drop"] = sorted(self._cap_drop)
|
||||
|
||||
if self._cap_add:
|
||||
result["cap_add"] = sorted(self._cap_add)
|
||||
|
||||
if self._security_opt.has_opts():
|
||||
result["security_opt"] = self._security_opt.render()
|
||||
|
||||
if self._network_mode:
|
||||
result["network_mode"] = self._network_mode
|
||||
|
||||
if self.sysctls.has_sysctls():
|
||||
result["sysctls"] = self.sysctls.render()
|
||||
|
||||
if self._network_mode != "host":
|
||||
if self.ports.has_ports():
|
||||
result["ports"] = self.ports.render()
|
||||
|
||||
if self.expose.has_ports():
|
||||
result["expose"] = self.expose.render()
|
||||
|
||||
if self._entrypoint:
|
||||
result["entrypoint"] = self._entrypoint
|
||||
|
||||
if self._command:
|
||||
result["command"] = self._command
|
||||
|
||||
if self.devices.has_devices():
|
||||
result["devices"] = self.devices.render()
|
||||
|
||||
if self.deploy.has_deploy():
|
||||
result["deploy"] = self.deploy.render()
|
||||
|
||||
if self.environment.has_variables():
|
||||
result["environment"] = self.environment.render()
|
||||
|
||||
if self.labels.has_labels():
|
||||
result["labels"] = self.labels.render()
|
||||
|
||||
if self.dns.has_dns_nameservers():
|
||||
result["dns"] = self.dns.render_dns_nameservers()
|
||||
|
||||
if self.dns.has_dns_searches():
|
||||
result["dns_search"] = self.dns.render_dns_searches()
|
||||
|
||||
if self.dns.has_dns_opts():
|
||||
result["dns_opt"] = self.dns.render_dns_opts()
|
||||
|
||||
if self.depends.has_dependencies():
|
||||
result["depends_on"] = self.depends.render()
|
||||
|
||||
if self._storage.has_mounts():
|
||||
result["volumes"] = self._storage.render()
|
||||
|
||||
if self._tmpfs.has_tmpfs():
|
||||
result["tmpfs"] = self._tmpfs.render()
|
||||
|
||||
return result
|
||||
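One less obvious capability above is `build_image`, which turns a list of extra Dockerfile lines into an inline build on top of the configured image (and rejects `FROM`, so multi-stage builds are not possible). A minimal sketch, with the installed package purely illustrative:

```python
# Sketch: extend the base image with an extra tool at deploy time.
# `c1` is assumed to be a Container created from the "image" entry in values.
c1.build_image([
    "RUN apt-get update && apt-get install -y --no-install-recommends curl",
    "",                      # empty/None lines are skipped
])

# After this call the rendered service gains:
#     build: {"tags": ["<hashed image name>"], "dockerfile_inline": "FROM ...\nRUN ..."}
# where the image name is re-tagged via get_image_with_hashed_data().
# Passing a line that starts with "FROM" raises RenderError.
```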
@@ -0,0 +1,34 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_depend_condition_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_depend_condition_or_raise
|
||||
|
||||
|
||||
class Depends:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._dependencies: dict[str, str] = {}
|
||||
|
||||
def add_dependency(self, name: str, condition: str):
|
||||
condition = valid_depend_condition_or_raise(condition)
|
||||
if name in self._dependencies.keys():
|
||||
raise RenderError(f"Dependency [{name}] already added")
|
||||
if name not in self._render_instance.container_names():
|
||||
raise RenderError(
|
||||
f"Dependency [{name}] not found in defined containers. "
|
||||
f"Available containers: [{', '.join(self._render_instance.container_names())}]"
|
||||
)
|
||||
self._dependencies[name] = condition
|
||||
|
||||
def has_dependencies(self):
|
||||
return len(self._dependencies) > 0
|
||||
|
||||
def render(self):
|
||||
return {d: {"condition": c} for d, c in self._dependencies.items()}
|
||||
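A minimal usage sketch for the dependency tracker; the container names are illustrative and the condition string follows the example used in the library's own comments (`service_started`):

```python
# Sketch: make the app container wait for a database container.
# `c1` is assumed to be a Container; "postgres" must be a container name
# that was also added to the same Render instance.
c1.depends.add_dependency("postgres", "service_started")

# depends.render() then contributes:
#   {"postgres": {"condition": "service_started"}}
# Adding the same name twice, or a name that is not a defined container,
# raises RenderError.
```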
@@ -0,0 +1,24 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .resources import Resources
|
||||
except ImportError:
|
||||
from resources import Resources
|
||||
|
||||
|
||||
class Deploy:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self.resources: Resources = Resources(self._render_instance)
|
||||
|
||||
def has_deploy(self):
|
||||
return self.resources.has_resources()
|
||||
|
||||
def render(self):
|
||||
if self.resources.has_resources():
|
||||
return {"resources": self.resources.render()}
|
||||
|
||||
return {}
|
||||
@@ -0,0 +1,57 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .deps_elastic import ElasticSearchContainer, ElasticConfig
|
||||
from .deps_mariadb import MariadbContainer, MariadbConfig
|
||||
from .deps_meilisearch import MeilisearchContainer, MeiliConfig
|
||||
from .deps_mongodb import MongoDBContainer, MongoDBConfig
|
||||
from .deps_perms import PermsContainer
|
||||
from .deps_postgres import PostgresContainer, PostgresConfig
|
||||
from .deps_redis import RedisContainer, RedisConfig
|
||||
from .deps_solr import SolrContainer, SolrConfig
|
||||
from .deps_tika import TikaContainer, TikaConfig
|
||||
except ImportError:
|
||||
from deps_elastic import ElasticSearchContainer, ElasticConfig
|
||||
from deps_mariadb import MariadbContainer, MariadbConfig
|
||||
from deps_meilisearch import MeilisearchContainer, MeiliConfig
|
||||
from deps_mongodb import MongoDBContainer, MongoDBConfig
|
||||
from deps_perms import PermsContainer
|
||||
from deps_postgres import PostgresContainer, PostgresConfig
|
||||
from deps_redis import RedisContainer, RedisConfig
|
||||
from deps_solr import SolrContainer, SolrConfig
|
||||
from deps_tika import TikaContainer, TikaConfig
|
||||
|
||||
|
||||
class Deps:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
|
||||
def perms(self, name: str):
|
||||
return PermsContainer(self._render_instance, name)
|
||||
|
||||
def postgres(self, name: str, image: str, config: PostgresConfig, perms_instance: PermsContainer):
|
||||
return PostgresContainer(self._render_instance, name, image, config, perms_instance)
|
||||
|
||||
def redis(self, name: str, image: str, config: RedisConfig, perms_instance: PermsContainer):
|
||||
return RedisContainer(self._render_instance, name, image, config, perms_instance)
|
||||
|
||||
def mariadb(self, name: str, image: str, config: MariadbConfig, perms_instance: PermsContainer):
|
||||
return MariadbContainer(self._render_instance, name, image, config, perms_instance)
|
||||
|
||||
def mongodb(self, name: str, image: str, config: MongoDBConfig, perms_instance: PermsContainer):
|
||||
return MongoDBContainer(self._render_instance, name, image, config, perms_instance)
|
||||
|
||||
def meilisearch(self, name: str, image: str, config: MeiliConfig, perms_instance: PermsContainer):
|
||||
return MeilisearchContainer(self._render_instance, name, image, config, perms_instance)
|
||||
|
||||
def elasticsearch(self, name: str, image: str, config: ElasticConfig, perms_instance: PermsContainer):
|
||||
return ElasticSearchContainer(self._render_instance, name, image, config, perms_instance)
|
||||
|
||||
def solr(self, name: str, image: str, config: SolrConfig, perms_instance: PermsContainer):
|
||||
return SolrContainer(self._render_instance, name, image, config, perms_instance)
|
||||
|
||||
def tika(self, name: str, image: str, config: TikaConfig):
|
||||
return TikaContainer(self._render_instance, name, image, config)
|
||||
@@ -0,0 +1,95 @@
|
||||
from typing import TYPE_CHECKING, TypedDict, NotRequired
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .deps_perms import PermsContainer
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from deps_perms import PermsContainer
|
||||
|
||||
|
||||
class ElasticConfig(TypedDict):
|
||||
password: str
|
||||
node_name: str
|
||||
port: NotRequired[int]
|
||||
volume: "IxStorage"
|
||||
|
||||
|
||||
class ElasticSearchContainer:
|
||||
def __init__(
|
||||
self, render_instance: "Render", name: str, image: str, config: ElasticConfig, perms_instance: PermsContainer
|
||||
):
|
||||
self._render_instance = render_instance
|
||||
self._name = name
|
||||
self._config = config
|
||||
self._data_dir = "/usr/share/elasticsearch/data"
|
||||
|
||||
for key in ("password", "node_name", "volume"):
|
||||
if key not in config:
|
||||
raise RenderError(f"Expected [{key}] to be set for ElasticSearch")
|
||||
|
||||
c = self._render_instance.add_container(name, image)
|
||||
|
||||
c.set_user(1000, 1000)
|
||||
basic_auth_header = self._render_instance.funcs["basic_auth_header"]("elastic", config["password"])
|
||||
c.healthcheck.set_test(
|
||||
"curl",
|
||||
{
|
||||
"port": self.get_port(),
|
||||
"path": "/_cluster/health?local=true",
|
||||
"headers": [("Authorization", basic_auth_header)],
|
||||
},
|
||||
)
|
||||
c.remove_devices()
|
||||
c.add_storage(self._data_dir, config["volume"])
|
||||
|
||||
c.environment.add_env("ELASTIC_PASSWORD", config["password"])
|
||||
c.environment.add_env("http.port", self.get_port())
|
||||
c.environment.add_env("path.data", self._data_dir)
|
||||
c.environment.add_env("path.repo", self.get_snapshots_dir())
|
||||
c.environment.add_env("node.name", config["node_name"])
|
||||
c.environment.add_env("discovery.type", "single-node")
|
||||
c.environment.add_env("xpack.security.enabled", True)
|
||||
c.environment.add_env("xpack.security.transport.ssl.enabled", False)
|
||||
|
||||
perms_instance.add_or_skip_action(
|
||||
f"{self._name}_elastic_data", config["volume"], {"uid": 1000, "gid": 1000, "mode": "check"}
|
||||
)
|
||||
|
||||
self._get_repo(image, ("docker.elastic.co/elasticsearch/elasticsearch"))
|
||||
|
||||
# Store container for further configuration
|
||||
# For example: c.depends.add_dependency("other_container", "service_started")
|
||||
self._container = c
|
||||
|
||||
@property
|
||||
def container(self):
|
||||
return self._container
|
||||
|
||||
def _get_repo(self, image, supported_repos):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
repo = images[image].get("repository")
|
||||
if not repo:
|
||||
raise RenderError("Could not determine repo")
|
||||
if repo not in supported_repos:
|
||||
raise RenderError(
|
||||
f"Unsupported repo [{repo}] for elastic search. Supported repos: {', '.join(supported_repos)}"
|
||||
)
|
||||
return repo
|
||||
|
||||
def get_port(self):
|
||||
return self._config.get("port") or 9200
|
||||
|
||||
def get_url(self):
|
||||
return f"http://{self._name}:{self.get_port()}"
|
||||
|
||||
def get_snapshots_dir(self):
|
||||
return f"{self._data_dir}/snapshots"
|
||||
@@ -0,0 +1,91 @@
|
||||
from typing import TYPE_CHECKING, TypedDict, NotRequired
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .deps_perms import PermsContainer
|
||||
from .validations import valid_port_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from deps_perms import PermsContainer
|
||||
from validations import valid_port_or_raise
|
||||
|
||||
|
||||
class MariadbConfig(TypedDict):
|
||||
user: str
|
||||
password: str
|
||||
database: str
|
||||
root_password: NotRequired[str]
|
||||
port: NotRequired[int]
|
||||
auto_upgrade: NotRequired[bool]
|
||||
volume: "IxStorage"
|
||||
|
||||
|
||||
class MariadbContainer:
|
||||
def __init__(
|
||||
self, render_instance: "Render", name: str, image: str, config: MariadbConfig, perms_instance: PermsContainer
|
||||
):
|
||||
self._render_instance = render_instance
|
||||
self._name = name
|
||||
self._config = config
|
||||
|
||||
for key in ("user", "password", "database", "volume"):
|
||||
if key not in config:
|
||||
raise RenderError(f"Expected [{key}] to be set for mariadb")
|
||||
|
||||
port = valid_port_or_raise(self.get_port())
|
||||
root_password = config.get("root_password") or config["password"]
|
||||
auto_upgrade = config.get("auto_upgrade", True)
|
||||
|
||||
self._get_repo(image, ("mariadb"))
|
||||
c = self._render_instance.add_container(name, image)
|
||||
c.set_user(999, 999)
|
||||
c.healthcheck.set_test("mariadb", {"password": root_password})
|
||||
c.remove_devices()
|
||||
|
||||
c.add_storage("/var/lib/mysql", config["volume"])
|
||||
perms_instance.add_or_skip_action(
|
||||
f"{self._name}_mariadb_data", config["volume"], {"uid": 999, "gid": 999, "mode": "check"}
|
||||
)
|
||||
|
||||
c.environment.add_env("MARIADB_USER", config["user"])
|
||||
c.environment.add_env("MARIADB_PASSWORD", config["password"])
|
||||
c.environment.add_env("MARIADB_ROOT_PASSWORD", root_password)
|
||||
c.environment.add_env("MARIADB_DATABASE", config["database"])
|
||||
c.environment.add_env("MARIADB_AUTO_UPGRADE", str(auto_upgrade).lower())
|
||||
c.set_command(["--port", str(port)])
|
||||
|
||||
# Store container for further configuration
|
||||
# For example: c.depends.add_dependency("other_container", "service_started")
|
||||
self._container = c
|
||||
|
||||
def _get_repo(self, image, supported_repos):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
repo = images[image].get("repository")
|
||||
if not repo:
|
||||
raise RenderError("Could not determine repo")
|
||||
if repo not in supported_repos:
|
||||
raise RenderError(f"Unsupported repo [{repo}] for mariadb. Supported repos: {', '.join(supported_repos)}")
|
||||
return repo
|
||||
|
||||
def get_url(self, variant: str):
|
||||
addr = f"{self._name}:{self.get_port()}"
|
||||
urls = {
|
||||
"jdbc": f"jdbc:mariadb://{addr}/{self._config['database']}",
|
||||
}
|
||||
|
||||
if variant not in urls:
|
||||
raise RenderError(f"Expected [variant] to be one of [{', '.join(urls.keys())}], got [{variant}]")
|
||||
return urls[variant]
|
||||
|
||||
def get_port(self):
|
||||
return self._config.get("port") or 3306
|
||||
|
||||
@property
|
||||
def container(self):
|
||||
return self._container
|
||||
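A sketch of how this dependency is wired up from a template, using the `Deps` factory shown earlier in this diff. The container name, image key, credentials, and volume shape are illustrative; the volume dict mirrors the storage question format used in `questions.yaml`, and `"mariadb_image"` is assumed to exist under `values["images"]`.

```python
# Sketch: add a MariaDB dependency plus the permissions helper container.
# `tpl` is assumed to be the Render instance used in the compose template.
deps = Deps(tpl)
perms = deps.perms("app-perms")

db = deps.mariadb(
    "mariadb",                # dependency container name
    "mariadb_image",          # key under values["images"] (assumed to exist)
    {
        "user": "app",
        "password": "changeme",
        "database": "app",
        "volume": {"type": "ix_volume",
                   "ix_volume_config": {"dataset_name": "mariadb_data"}},
    },
    perms,
)

print(db.get_port())          # 3306 unless "port" is set in the config
print(db.get_url("jdbc"))     # jdbc:mariadb://mariadb:3306/app
```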
@@ -0,0 +1,85 @@
|
||||
from typing import TYPE_CHECKING, TypedDict, NotRequired
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .deps_perms import PermsContainer
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from deps_perms import PermsContainer
|
||||
|
||||
|
||||
class MeiliConfig(TypedDict):
|
||||
master_key: str
|
||||
port: NotRequired[int]
|
||||
volume: "IxStorage"
|
||||
|
||||
|
||||
class MeilisearchContainer:
|
||||
def __init__(
|
||||
self, render_instance: "Render", name: str, image: str, config: MeiliConfig, perms_instance: PermsContainer
|
||||
):
|
||||
self._render_instance = render_instance
|
||||
self._name = name
|
||||
self._config = config
|
||||
self._data_dir = "/meili_data"
|
||||
|
||||
for key in ("master_key", "volume"):
|
||||
if key not in config:
|
||||
raise RenderError(f"Expected [{key}] to be set for meilisearch")
|
||||
|
||||
c = self._render_instance.add_container(name, image)
|
||||
|
||||
user, group = 568, 568
|
||||
run_as = self._render_instance.values.get("run_as")
|
||||
if run_as:
|
||||
user = run_as["user"] or user # Avoids running as root
|
||||
group = run_as["group"] or group # Avoids running as root
|
||||
|
||||
c.set_user(user, group)
|
||||
c.healthcheck.set_test("curl", {"port": self.get_port(), "path": "/health"})
|
||||
c.remove_devices()
|
||||
c.add_storage(self._data_dir, config["volume"])
|
||||
|
||||
c.environment.add_env("MEILI_HTTP_ADDR", f"0.0.0.0:{self.get_port()}")
|
||||
c.environment.add_env("MEILI_NO_ANALYTICS", True)
|
||||
c.environment.add_env("MEILI_EXPERIMENTAL_DUMPLESS_UPGRADE", True)
|
||||
c.environment.add_env("MEILI_MASTER_KEY", config["master_key"])
|
||||
|
||||
perms_instance.add_or_skip_action(
|
||||
f"{self._name}_meili_data", config["volume"], {"uid": user, "gid": group, "mode": "check"}
|
||||
)
|
||||
|
||||
self._get_repo(image, ("getmeili/meilisearch",))
|
||||
|
||||
# Store container for further configuration
|
||||
# For example: c.depends.add_dependency("other_container", "service_started")
|
||||
self._container = c
|
||||
|
||||
@property
|
||||
def container(self):
|
||||
return self._container
|
||||
|
||||
def _get_repo(self, image, supported_repos):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
repo = images[image].get("repository")
|
||||
if not repo:
|
||||
raise RenderError("Could not determine repo")
|
||||
if repo not in supported_repos:
|
||||
raise RenderError(
|
||||
f"Unsupported repo [{repo}] for meilisearch. Supported repos: {', '.join(supported_repos)}"
|
||||
)
|
||||
return repo
|
||||
|
||||
def get_port(self):
|
||||
return self._config.get("port") or 7700
|
||||
|
||||
def get_url(self):
|
||||
return f"http://{self._name}:{self.get_port()}"
|
||||
@@ -0,0 +1,97 @@
|
||||
import urllib.parse
|
||||
from typing import TYPE_CHECKING, TypedDict
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .deps_perms import PermsContainer
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from deps_perms import PermsContainer
|
||||
|
||||
|
||||
class MongoDBConfig(TypedDict):
|
||||
user: str
|
||||
password: str
|
||||
database: str
|
||||
volume: "IxStorage"
|
||||
|
||||
|
||||
class MongoDBContainer:
|
||||
def __init__(
|
||||
self, render_instance: "Render", name: str, image: str, config: MongoDBConfig, perms_instance: PermsContainer
|
||||
):
|
||||
self._render_instance = render_instance
|
||||
self._name = name
|
||||
self._config = config
|
||||
self._data_dir = "/data/db"
|
||||
|
||||
for key in ("user", "password", "database", "volume"):
|
||||
if key not in config:
|
||||
raise RenderError(f"Expected [{key}] to be set for mongodb")
|
||||
|
||||
c = self._render_instance.add_container(name, image)
|
||||
|
||||
user, group = 568, 568
|
||||
run_as = self._render_instance.values.get("run_as")
|
||||
if run_as:
|
||||
user = run_as["user"] or user # Avoids running as root
|
||||
group = run_as["group"] or group # Avoids running as root
|
||||
|
||||
c.set_user(user, group)
|
||||
c.healthcheck.set_test("mongodb", {"db": config["database"]})
|
||||
c.remove_devices()
|
||||
c.add_storage(self._data_dir, config["volume"])
|
||||
|
||||
c.environment.add_env("MONGO_INITDB_ROOT_USERNAME", config["user"])
|
||||
c.environment.add_env("MONGO_INITDB_ROOT_PASSWORD", config["password"])
|
||||
c.environment.add_env("MONGO_INITDB_DATABASE", config["database"])
|
||||
|
||||
perms_instance.add_or_skip_action(
|
||||
f"{self._name}_mongodb_data", config["volume"], {"uid": user, "gid": group, "mode": "check"}
|
||||
)
|
||||
|
||||
self._get_repo(image, ("mongodb"))
|
||||
|
||||
# Store container for further configuration
|
||||
# For example: c.depends.add_dependency("other_container", "service_started")
|
||||
self._container = c
|
||||
|
||||
@property
|
||||
def container(self):
|
||||
return self._container
|
||||
|
||||
def _get_repo(self, image, supported_repos):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
repo = images[image].get("repository")
|
||||
if not repo:
|
||||
raise RenderError("Could not determine repo")
|
||||
if repo not in supported_repos:
|
||||
raise RenderError(f"Unsupported repo [{repo}] for mongodb. Supported repos: {', '.join(supported_repos)}")
|
||||
return repo
|
||||
|
||||
def get_port(self):
|
||||
return self._config.get("port") or 27017
|
||||
|
||||
def get_url(self, variant: str):
|
||||
user = urllib.parse.quote_plus(self._config["user"])
|
||||
password = urllib.parse.quote_plus(self._config["password"])
|
||||
creds = f"{user}:{password}"
|
||||
addr = f"{self._name}:{self.get_port()}"
|
||||
db = self._config["database"]
|
||||
|
||||
urls = {
|
||||
"mongodb": f"mongodb://{creds}@{addr}/{db}",
|
||||
"host_port": addr,
|
||||
}
|
||||
|
||||
if variant not in urls:
|
||||
raise RenderError(f"Expected [variant] to be one of [{', '.join(urls.keys())}], got [{variant}]")
|
||||
return urls[variant]
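A standalone illustration (not part of the commit) of how get_url("mongodb") above assembles the connection string; the credentials and database name are made up.

import urllib.parse

user = urllib.parse.quote_plus("app")
password = urllib.parse.quote_plus("p@ss:word")
addr = "mongodb:27017"  # f"{self._name}:{self.get_port()}"
print(f"mongodb://{user}:{password}@{addr}/appdb")
# -> mongodb://app:p%40ss%3Aword@mongodb:27017/appdb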
|
||||
@@ -0,0 +1,259 @@
|
||||
import json
|
||||
import pathlib
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_octal_mode_or_raise, valid_fs_path_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_octal_mode_or_raise, valid_fs_path_or_raise
|
||||
|
||||
|
||||
class PermsContainer:
|
||||
def __init__(self, render_instance: "Render", name: str):
|
||||
self._render_instance = render_instance
|
||||
self._name = name
|
||||
self.actions: set[str] = set()
|
||||
self.parsed_configs: list[dict] = []
|
||||
|
||||
def add_or_skip_action(self, identifier: str, volume_config: "IxStorage", action_config: dict):
|
||||
identifier = self.normalize_identifier_for_path(identifier)
|
||||
if identifier in self.actions:
|
||||
raise RenderError(f"Action with id [{identifier}] already used for another permission action")
|
||||
|
||||
parsed_action = self.parse_action(identifier, volume_config, action_config)
|
||||
if parsed_action:
|
||||
self.parsed_configs.append(parsed_action)
|
||||
self.actions.add(identifier)
|
||||
|
||||
def parse_action(self, identifier: str, volume_config: "IxStorage", action_config: dict):
|
||||
valid_modes = [
|
||||
"always", # Always set permissions, without checking.
|
||||
"check", # Checks if permissions are correct, and set them if not.
|
||||
]
|
||||
mode = action_config.get("mode", "check")
|
||||
uid = action_config.get("uid", None)
|
||||
gid = action_config.get("gid", None)
|
||||
chmod = action_config.get("chmod", None)
|
||||
recursive = action_config.get("recursive", False)
|
||||
mount_path = pathlib.Path("/mnt/permission", identifier).as_posix()
|
||||
read_only = volume_config.get("read_only", False)
|
||||
is_temporary = False
|
||||
|
||||
vol_type = volume_config.get("type", "")
|
||||
match vol_type:
|
||||
case "temporary":
|
||||
# If it is a temporary volume, we force auto permissions
|
||||
# and set is_temporary to True, so it will be cleaned up
|
||||
is_temporary = True
|
||||
recursive = True
|
||||
case "volume":
|
||||
if not volume_config.get("volume_config", {}).get("auto_permissions", False):
|
||||
return None
|
||||
case "host_path":
|
||||
host_path_config = volume_config.get("host_path_config", {})
|
||||
# Skip when ACL enabled
|
||||
if host_path_config.get("acl_enable", False):
|
||||
return None
|
||||
if not host_path_config.get("auto_permissions", False):
|
||||
return None
|
||||
case "ix_volume":
|
||||
ix_vol_config = volume_config.get("ix_volume_config", {})
|
||||
# Skip when ACL enabled
|
||||
if ix_vol_config.get("acl_enable", False):
|
||||
return None
|
||||
# For ix_volumes, we default to auto_permissions = True
|
||||
if not ix_vol_config.get("auto_permissions", True):
|
||||
return None
|
||||
case _:
|
||||
# Skip for other types
|
||||
return None
|
||||
|
||||
if mode not in valid_modes:
|
||||
raise RenderError(f"Expected [mode] to be one of [{', '.join(valid_modes)}], got [{mode}]")
|
||||
if not isinstance(uid, int) or not isinstance(gid, int):
|
||||
raise RenderError("Expected [uid] and [gid] to be set when [auto_permissions] is enabled")
|
||||
if chmod is not None:
|
||||
chmod = valid_octal_mode_or_raise(chmod)
|
||||
|
||||
mount_path = valid_fs_path_or_raise(mount_path)
|
||||
return {
|
||||
"mount_path": mount_path,
|
||||
"volume_config": volume_config,
|
||||
"action_data": {
|
||||
"read_only": read_only,
|
||||
"mount_path": mount_path,
|
||||
"is_temporary": is_temporary,
|
||||
"identifier": identifier,
|
||||
"recursive": recursive,
|
||||
"mode": mode,
|
||||
"uid": uid,
|
||||
"gid": gid,
|
||||
"chmod": chmod,
|
||||
},
|
||||
}
|
||||
|
||||
def normalize_identifier_for_path(self, identifier: str):
|
||||
return identifier.rstrip("/").lstrip("/").lower().replace("/", "_").replace(".", "-").replace(" ", "-")
|
||||
|
||||
def has_actions(self):
|
||||
return bool(self.actions)
|
||||
|
||||
def activate(self):
|
||||
if len(self.parsed_configs) != len(self.actions):
|
||||
raise RenderError("Number of actions and parsed configs does not match")
|
||||
|
||||
if not self.has_actions():
|
||||
raise RenderError("No actions added. Check if there are actions before activating")
|
||||
|
||||
# Add the container and set it up
|
||||
c = self._render_instance.add_container(self._name, "python_permissions_image")
|
||||
c.set_user(0, 0)
|
||||
c.add_caps(["CHOWN", "FOWNER", "DAC_OVERRIDE"])
|
||||
c.set_network_mode("none")
|
||||
|
||||
# Don't attach any devices
|
||||
c.remove_devices()
|
||||
|
||||
c.deploy.resources.set_profile("medium")
|
||||
c.restart.set_policy("on-failure", maximum_retry_count=1)
|
||||
c.healthcheck.disable()
|
||||
|
||||
c.set_entrypoint(["python3", "/script/run.py"])
|
||||
script = "#!/usr/bin/env python3\n"
|
||||
script += get_script()
|
||||
c.configs.add("permissions_run_script", script, "/script/run.py", "0700")
|
||||
|
||||
actions_data: list[dict] = []
|
||||
for parsed in self.parsed_configs:
|
||||
if not parsed["action_data"]["read_only"]:
|
||||
c.add_storage(parsed["mount_path"], parsed["volume_config"])
|
||||
actions_data.append(parsed["action_data"])
|
||||
|
||||
actions_data_json = json.dumps(actions_data)
|
||||
c.configs.add("permissions_actions_data", actions_data_json, "/script/actions.json", "0500")
|
||||
|
||||
|
||||
def get_script():
|
||||
return """
|
||||
import os
|
||||
import json
|
||||
import time
|
||||
import shutil
|
||||
|
||||
with open("/script/actions.json", "r") as f:
|
||||
actions_data = json.load(f)
|
||||
|
||||
if not actions_data:
|
||||
# If this script is called, there should be actions data
|
||||
raise ValueError("No actions data found")
|
||||
|
||||
def fix_perms(path, chmod, recursive=False):
|
||||
print(f"Changing permissions{' recursively ' if recursive else ' '}to {chmod} on: [{path}]")
|
||||
os.chmod(path, int(chmod, 8))
|
||||
if recursive:
|
||||
for root, dirs, files in os.walk(path):
|
||||
for f in files:
|
||||
os.chmod(os.path.join(root, f), int(chmod, 8))
|
||||
print("Permissions after changes:")
|
||||
print_chmod_stat()
|
||||
|
||||
def fix_owner(path, uid, gid, recursive=False):
|
||||
print(f"Changing ownership{' recursively ' if recursive else ' '}to {uid}:{gid} on: [{path}]")
|
||||
os.chown(path, uid, gid)
|
||||
if recursive:
|
||||
for root, dirs, files in os.walk(path):
|
||||
for f in files:
|
||||
os.chown(os.path.join(root, f), uid, gid)
|
||||
print("Ownership after changes:")
|
||||
print_chown_stat()
|
||||
|
||||
def print_chown_stat():
|
||||
curr_stat = os.stat(action["mount_path"])
|
||||
print(f"Ownership: [{curr_stat.st_uid}:{curr_stat.st_gid}]")
|
||||
|
||||
def print_chmod_stat():
|
||||
curr_stat = os.stat(action["mount_path"])
|
||||
print(f"Permissions: [{oct(curr_stat.st_mode)[3:]}]")
|
||||
|
||||
def print_chown_diff(curr_stat, uid, gid):
|
||||
print(f"Ownership: wanted [{uid}:{gid}], got [{curr_stat.st_uid}:{curr_stat.st_gid}].")
|
||||
|
||||
def print_chmod_diff(curr_stat, mode):
|
||||
print(f"Permissions: wanted [{mode}], got [{oct(curr_stat.st_mode)[3:]}].")
|
||||
|
||||
def perform_action(action):
|
||||
if action["read_only"]:
|
||||
print(f"Path for action [{action['identifier']}] is read-only, skipping...")
|
||||
return
|
||||
|
||||
start_time = time.time()
|
||||
print(f"=== Applying configuration on volume with identifier [{action['identifier']}] ===")
|
||||
|
||||
if not os.path.isdir(action["mount_path"]):
|
||||
print(f"Path [{action['mount_path']}] is not a directory, skipping...")
|
||||
return
|
||||
|
||||
if action["is_temporary"]:
|
||||
print(f"Path [{action['mount_path']}] is a temporary directory, ensuring it is empty...")
|
||||
for item in os.listdir(action["mount_path"]):
|
||||
item_path = os.path.join(action["mount_path"], item)
|
||||
|
||||
# Exclude the "ix-safe" directory, which is used to mount files temporarily
|
||||
if os.path.basename(item_path) == "ix-safe":
|
||||
continue
|
||||
if os.path.isdir(item_path):
|
||||
shutil.rmtree(item_path)
|
||||
else:
|
||||
os.remove(item_path)
|
||||
|
||||
if not action["is_temporary"] and os.listdir(action["mount_path"]):
|
||||
print(f"Path [{action['mount_path']}] is not empty, skipping...")
|
||||
return
|
||||
|
||||
print(f"Current Ownership and Permissions on [{action['mount_path']}]:")
|
||||
curr_stat = os.stat(action["mount_path"])
|
||||
print_chown_diff(curr_stat, action["uid"], action["gid"])
|
||||
print_chmod_diff(curr_stat, action["chmod"])
|
||||
print("---")
|
||||
|
||||
if action["mode"] == "always":
|
||||
fix_owner(action["mount_path"], action["uid"], action["gid"], action["recursive"])
|
||||
if not action["chmod"]:
|
||||
print("Skipping permissions check, chmod is falsy")
|
||||
else:
|
||||
fix_perms(action["mount_path"], action["chmod"], action["recursive"])
|
||||
return
|
||||
|
||||
elif action["mode"] == "check":
|
||||
if curr_stat.st_uid != action["uid"] or curr_stat.st_gid != action["gid"]:
|
||||
print("Ownership is incorrect. Fixing...")
|
||||
fix_owner(action["mount_path"], action["uid"], action["gid"], action["recursive"])
|
||||
else:
|
||||
print("Ownership is correct. Skipping...")
|
||||
|
||||
if not action["chmod"]:
|
||||
print("Skipping permissions check, chmod is falsy")
|
||||
else:
|
||||
if oct(curr_stat.st_mode)[3:] != action["chmod"]:
|
||||
print("Permissions are incorrect. Fixing...")
|
||||
fix_perms(action["mount_path"], action["chmod"], action["recursive"])
|
||||
else:
|
||||
print("Permissions are correct. Skipping...")
|
||||
|
||||
print(f"Time taken: {(time.time() - start_time) * 1000:.2f}ms")
|
||||
print(f"=== Finished applying configuration on volume with identifier [{action['identifier']}] ==")
|
||||
print()
|
||||
|
||||
if __name__ == "__main__":
|
||||
start_time = time.time()
|
||||
for action in actions_data:
|
||||
perform_action(action)
|
||||
print(f"Total time taken: {(time.time() - start_time) * 1000:.2f}ms")
|
||||
"""
|
||||
@@ -0,0 +1,169 @@
|
||||
import urllib.parse
|
||||
from typing import TYPE_CHECKING, TypedDict, NotRequired
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .deps_perms import PermsContainer
|
||||
from .validations import valid_port_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from deps_perms import PermsContainer
|
||||
from validations import valid_port_or_raise
|
||||
|
||||
|
||||
class PostgresConfig(TypedDict):
|
||||
user: str
|
||||
password: str
|
||||
database: str
|
||||
port: NotRequired[int]
|
||||
volume: "IxStorage"
|
||||
additional_options: NotRequired[dict[str, str]]
|
||||
|
||||
|
||||
MAX_POSTGRES_VERSION = 17
|
||||
|
||||
|
||||
class PostgresContainer:
|
||||
def __init__(
|
||||
self, render_instance: "Render", name: str, image: str, config: PostgresConfig, perms_instance: PermsContainer
|
||||
):
|
||||
self._render_instance = render_instance
|
||||
self._name = name
|
||||
self._config = config
|
||||
self._data_dir = "/var/lib/postgresql/data"
|
||||
self._upgrade_name = f"{self._name}_upgrade"
|
||||
self._upgrade_container = None
|
||||
|
||||
for key in ("user", "password", "database", "volume"):
|
||||
if key not in config:
|
||||
raise RenderError(f"Expected [{key}] to be set for postgres")
|
||||
|
||||
port = valid_port_or_raise(self.get_port())
|
||||
|
||||
c = self._render_instance.add_container(name, image)
|
||||
|
||||
c.set_user(999, 999)
|
||||
c.healthcheck.set_test("postgres", {"user": config["user"], "db": config["database"]})
|
||||
c.set_shm_size_mb(256)
|
||||
c.remove_devices()
|
||||
c.add_storage(self._data_dir, config["volume"])
|
||||
|
||||
opts = []
|
||||
for k, v in config.get("additional_options", {}).items():
|
||||
opts.extend(["-c", f"{k}={v}"])
|
||||
if opts:
|
||||
c.set_command(opts)
|
||||
|
||||
common_variables = {
|
||||
"POSTGRES_USER": config["user"],
|
||||
"POSTGRES_PASSWORD": config["password"],
|
||||
"POSTGRES_DB": config["database"],
|
||||
"PGPORT": port,
|
||||
}
|
||||
|
||||
for k, v in common_variables.items():
|
||||
c.environment.add_env(k, v)
|
||||
|
||||
perms_instance.add_or_skip_action(
|
||||
f"{self._name}_postgres_data", config["volume"], {"uid": 999, "gid": 999, "mode": "check"}
|
||||
)
|
||||
|
||||
repo = self._get_repo(
|
||||
image,
|
||||
(
|
||||
"postgres",
|
||||
"postgis/postgis",
|
||||
"pgvector/pgvector",
|
||||
"tensorchord/pgvecto-rs",
|
||||
"ghcr.io/immich-app/postgres",
|
||||
),
|
||||
)
|
||||
# Only plain postgres images get an upgrade container; e.g. we don't want to handle upgrades of pg_vector at the moment
|
||||
if repo == "postgres":
|
||||
target_major_version = self._get_target_version(image)
|
||||
upg = self._render_instance.add_container(self._upgrade_name, "postgres_upgrade_image")
|
||||
upg.set_entrypoint(["/bin/bash", "-c", "/upgrade.sh"])
|
||||
upg.restart.set_policy("on-failure", 1)
|
||||
upg.set_user(999, 999)
|
||||
upg.healthcheck.disable()
|
||||
upg.remove_devices()
|
||||
upg.add_storage(self._data_dir, config["volume"])
|
||||
for k, v in common_variables.items():
|
||||
upg.environment.add_env(k, v)
|
||||
|
||||
upg.environment.add_env("TARGET_VERSION", target_major_version)
|
||||
upg.environment.add_env("DATA_DIR", self._data_dir)
|
||||
|
||||
self._upgrade_container = upg
|
||||
|
||||
c.depends.add_dependency(self._upgrade_name, "service_completed_successfully")
|
||||
|
||||
# Store container for further configuration
|
||||
# For example: c.depends.add_dependency("other_container", "service_started")
|
||||
self._container = c
|
||||
|
||||
@property
|
||||
def container(self):
|
||||
return self._container
|
||||
|
||||
def add_dependency(self, container_name: str, condition: str):
|
||||
self._container.depends.add_dependency(container_name, condition)
|
||||
if self._upgrade_container:
|
||||
self._upgrade_container.depends.add_dependency(container_name, condition)
|
||||
|
||||
def _get_repo(self, image, supported_repos):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
repo = images[image].get("repository")
|
||||
if not repo:
|
||||
raise RenderError("Could not determine repo")
|
||||
if repo not in supported_repos:
|
||||
raise RenderError(f"Unsupported repo [{repo}] for postgres. Supported repos: {', '.join(supported_repos)}")
|
||||
return repo
|
||||
|
||||
def _get_target_version(self, image):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
tag = images[image].get("tag", "")
|
||||
tag = str(tag) # Account for tags like 16.6
|
||||
target_major_version = tag.split(".")[0]
|
||||
|
||||
try:
|
||||
target_major_version = int(target_major_version)
|
||||
except ValueError:
|
||||
raise RenderError(f"Could not determine target major version from tag [{tag}]")
|
||||
|
||||
if target_major_version > MAX_POSTGRES_VERSION:
|
||||
raise RenderError(f"Postgres version [{target_major_version}] is not supported")
|
||||
|
||||
return target_major_version
|
||||
|
||||
def get_port(self):
|
||||
return self._config.get("port") or 5432
|
||||
|
||||
def get_url(self, variant: str):
|
||||
user = urllib.parse.quote_plus(self._config["user"])
|
||||
password = urllib.parse.quote_plus(self._config["password"])
|
||||
creds = f"{user}:{password}"
|
||||
addr = f"{self._name}:{self.get_port()}"
|
||||
db = self._config["database"]
|
||||
|
||||
urls = {
|
||||
"postgres": f"postgres://{creds}@{addr}/{db}?sslmode=disable",
|
||||
"postgresql": f"postgresql://{creds}@{addr}/{db}?sslmode=disable",
|
||||
"postgresql_no_creds": f"postgresql://{addr}/{db}?sslmode=disable",
|
||||
"jdbc": f"jdbc:postgresql://{addr}/{db}",
|
||||
"host_port": addr,
|
||||
}
|
||||
|
||||
if variant not in urls:
|
||||
raise RenderError(f"Expected [variant] to be one of [{', '.join(urls.keys())}], got [{variant}]")
|
||||
return urls[variant]
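Likewise, a hedged sketch of wiring the Postgres helper: the DSN from get_url("postgres") is handed to the app, which then waits on the database container. render, perms, values, and app are assumptions; the dependency condition is the standard compose one.

pg = PostgresContainer(
    render,
    "postgres",
    "postgres_image",
    {"user": "app", "password": "secret", "database": "app", "volume": values["storage"]["pgdata"]},
    perms,
)
app.environment.add_env("DATABASE_URL", pg.get_url("postgres"))
# -> postgres://app:secret@postgres:5432/app?sslmode=disable
app.depends.add_dependency("postgres", "service_healthy")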
|
||||
@@ -0,0 +1,90 @@
|
||||
import urllib.parse
|
||||
from typing import TYPE_CHECKING, TypedDict, NotRequired
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .deps_perms import PermsContainer
|
||||
from .validations import valid_port_or_raise, valid_redis_password_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from deps_perms import PermsContainer
|
||||
from validations import valid_port_or_raise, valid_redis_password_or_raise
|
||||
|
||||
|
||||
class RedisConfig(TypedDict):
|
||||
password: str
|
||||
port: NotRequired[int]
|
||||
volume: "IxStorage"
|
||||
|
||||
|
||||
class RedisContainer:
|
||||
def __init__(
|
||||
self, render_instance: "Render", name: str, image: str, config: RedisConfig, perms_instance: PermsContainer
|
||||
):
|
||||
self._render_instance = render_instance
|
||||
self._name = name
|
||||
self._config = config
|
||||
|
||||
for key in ("password", "volume"):
|
||||
if key not in config:
|
||||
raise RenderError(f"Expected [{key}] to be set for redis")
|
||||
|
||||
valid_redis_password_or_raise(config["password"])
|
||||
|
||||
port = valid_port_or_raise(self.get_port())
|
||||
self._get_repo(image, ("redis", "valkey/valkey"))
|
||||
|
||||
user, group = 568, 568
|
||||
run_as = self._render_instance.values.get("run_as")
|
||||
if run_as:
|
||||
user = run_as["user"] or user # Avoids running as root
|
||||
group = run_as["group"] or group # Avoids running as root
|
||||
c = self._render_instance.add_container(name, image)
|
||||
c.set_user(user, group)
|
||||
c.remove_devices()
|
||||
c.healthcheck.set_test("redis", {"password": config["password"]})
|
||||
|
||||
cmd = []
|
||||
cmd.extend(["--port", str(port)])
|
||||
cmd.extend(["--requirepass", config["password"]])
|
||||
c.environment.add_env("REDIS_PASSWORD", config["password"])
|
||||
c.set_command(cmd)
|
||||
|
||||
c.add_storage("/data", config["volume"])
|
||||
perms_instance.add_or_skip_action(
|
||||
f"{self._name}_redis_data", config["volume"], {"uid": user, "gid": group, "mode": "check"}
|
||||
)
|
||||
|
||||
# Store container for further configuration
|
||||
# For example: c.depends.add_dependency("other_container", "service_started")
|
||||
self._container = c
|
||||
|
||||
def _get_repo(self, image, supported_repos):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
repo = images[image].get("repository")
|
||||
if not repo:
|
||||
raise RenderError("Could not determine repo")
|
||||
if repo not in supported_repos:
|
||||
raise RenderError(f"Unsupported repo [{repo}] for redis. Supported repos: {', '.join(supported_repos)}")
|
||||
return repo
|
||||
|
||||
def get_port(self):
|
||||
return self._config.get("port") or 6379
|
||||
|
||||
def get_url(self, variant: str):
|
||||
addr = f"{self._name}:{self.get_port()}"
|
||||
password = urllib.parse.quote_plus(self._config["password"])
|
||||
|
||||
match variant:
|
||||
case "redis":
|
||||
return f"redis://default:{password}@{addr}"
|
||||
|
||||
@property
|
||||
def container(self):
|
||||
return self._container
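Briefly, the same wiring pattern for the Redis helper; the storage key is illustrative and the resulting URL follows get_url above.

redis = RedisContainer(render, "redis", "redis_image", {"password": "changeme", "volume": values["storage"]["redis_data"]}, perms)
app.environment.add_env("REDIS_URL", redis.get_url("redis"))  # redis://default:changeme@redis:6379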
|
||||
@@ -0,0 +1,85 @@
|
||||
from typing import TYPE_CHECKING, TypedDict, NotRequired, List
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .deps_perms import PermsContainer
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from deps_perms import PermsContainer
|
||||
|
||||
|
||||
class SolrConfig(TypedDict):
|
||||
core: str
|
||||
modules: NotRequired[List[str]]
|
||||
port: NotRequired[int]
|
||||
volume: "IxStorage"
|
||||
|
||||
|
||||
class SolrContainer:
|
||||
def __init__(
|
||||
self, render_instance: "Render", name: str, image: str, config: SolrConfig, perms_instance: PermsContainer
|
||||
):
|
||||
self._render_instance = render_instance
|
||||
self._name = name
|
||||
self._config = config
|
||||
self._data_dir = "/var/solr"
|
||||
|
||||
for key in ("core", "volume"):
|
||||
if key not in config:
|
||||
raise RenderError(f"Expected [{key}] to be set for solr")
|
||||
|
||||
c = self._render_instance.add_container(name, image)
|
||||
|
||||
user, group = 568, 568
|
||||
run_as = self._render_instance.values.get("run_as")
|
||||
if run_as:
|
||||
user = run_as["user"] or user # Avoids running as root
|
||||
group = run_as["group"] or group # Avoids running as root
|
||||
|
||||
c.set_user(user, group)
|
||||
c.healthcheck.set_test("curl", {"port": self.get_port(), "path": f"/solr/{config['core']}/admin/ping"})
|
||||
c.remove_devices()
|
||||
c.add_storage(self._data_dir, config["volume"])
|
||||
|
||||
c.set_command(["solr-precreate", config["core"]])
|
||||
|
||||
c.environment.add_env("SOLR_PORT", self.get_port())
|
||||
if modules := config.get("modules"):
|
||||
c.environment.add_env("SOLR_MODULES", ",".join(modules))
|
||||
|
||||
perms_instance.add_or_skip_action(
|
||||
f"{self._name}_solr_data", config["volume"], {"uid": user, "gid": group, "mode": "check"}
|
||||
)
|
||||
|
||||
self._get_repo(image, ("solr",))
|
||||
|
||||
# Store container for further configuration
|
||||
# For example: c.depends.add_dependency("other_container", "service_started")
|
||||
self._container = c
|
||||
|
||||
@property
|
||||
def container(self):
|
||||
return self._container
|
||||
|
||||
def _get_repo(self, image, supported_repos):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
repo = images[image].get("repository")
|
||||
if not repo:
|
||||
raise RenderError("Could not determine repo")
|
||||
if repo not in supported_repos:
|
||||
raise RenderError(f"Unsupported repo [{repo}] for solr. Supported repos: {', '.join(supported_repos)}")
|
||||
return repo
|
||||
|
||||
def get_port(self):
|
||||
return self._config.get("port") or 8983
|
||||
|
||||
def get_url(self):
|
||||
return f"http://{self._name}:{self.get_port()}/solr/{self._config['core']}"
|
||||
@@ -0,0 +1,63 @@
|
||||
from typing import TYPE_CHECKING, TypedDict, NotRequired
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
|
||||
|
||||
class TikaConfig(TypedDict):
|
||||
port: NotRequired[int]
|
||||
|
||||
|
||||
class TikaContainer:
|
||||
def __init__(self, render_instance: "Render", name: str, image: str, config: TikaConfig):
|
||||
self._render_instance = render_instance
|
||||
self._name = name
|
||||
self._config = config
|
||||
|
||||
c = self._render_instance.add_container(name, image)
|
||||
|
||||
user, group = 568, 568
|
||||
run_as = self._render_instance.values.get("run_as")
|
||||
if run_as:
|
||||
user = run_as["user"] or user # Avoids running as root
|
||||
group = run_as["group"] or group # Avoids running as root
|
||||
|
||||
c.set_user(user, group)
|
||||
c.healthcheck.set_test("wget", {"port": self.get_port(), "path": "/tika", "spider": False})
|
||||
c.remove_devices()
|
||||
|
||||
c.set_command(["--port", str(self.get_port())])
|
||||
|
||||
self._get_repo(image, ("apache/tika"))
|
||||
|
||||
# Store container for further configuration
|
||||
# For example: c.depends.add_dependency("other_container", "service_started")
|
||||
self._container = c
|
||||
|
||||
@property
|
||||
def container(self):
|
||||
return self._container
|
||||
|
||||
def _get_repo(self, image, supported_repos):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
repo = images[image].get("repository")
|
||||
if not repo:
|
||||
raise RenderError("Could not determine repo")
|
||||
if repo not in supported_repos:
|
||||
raise RenderError(f"Unsupported repo [{repo}] for tika. Supported repos: {', '.join(supported_repos)}")
|
||||
return repo
|
||||
|
||||
def get_port(self):
|
||||
return self._config.get("port") or 9998
|
||||
|
||||
def get_url(self):
|
||||
return f"http://{self._name}:{self.get_port()}"
|
||||
@@ -0,0 +1,31 @@
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_fs_path_or_raise, allowed_device_or_raise, valid_cgroup_perm_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_fs_path_or_raise, allowed_device_or_raise, valid_cgroup_perm_or_raise
|
||||
|
||||
|
||||
class Device:
|
||||
def __init__(self, host_device: str, container_device: str, cgroup_perm: str = "", allow_disallowed=False):
|
||||
hd = valid_fs_path_or_raise(host_device.rstrip("/"))
|
||||
cd = valid_fs_path_or_raise(container_device.rstrip("/"))
|
||||
if not hd or not cd:
|
||||
raise RenderError(
|
||||
"Expected [host_device] and [container_device] to be set. "
|
||||
f"Got host_device [{host_device}] and container_device [{container_device}]"
|
||||
)
|
||||
|
||||
cgroup_perm = valid_cgroup_perm_or_raise(cgroup_perm)
|
||||
if not allow_disallowed:
|
||||
hd = allowed_device_or_raise(hd)
|
||||
|
||||
self.cgroup_perm: str = cgroup_perm
|
||||
self.host_device: str = hd
|
||||
self.container_device: str = cd
|
||||
|
||||
def render(self):
|
||||
result = f"{self.host_device}:{self.container_device}"
|
||||
if self.cgroup_perm:
|
||||
result += f":{self.cgroup_perm}"
|
||||
return result
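A standalone illustration of the mapping string Device.render() produces. allow_disallowed=True skips the allow-list check so the example does not depend on the validator's device list, and "rwm" is the standard cgroup permission triplet.

d = Device("/dev/ttyUSB0", "/dev/ttyUSB0", "rwm", allow_disallowed=True)
print(d.render())  # -> /dev/ttyUSB0:/dev/ttyUSB0:rwm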
|
||||
@@ -0,0 +1,54 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_device_cgroup_rule_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_device_cgroup_rule_or_raise
|
||||
|
||||
|
||||
class DeviceCGroupRule:
|
||||
def __init__(self, rule: str):
|
||||
rule = valid_device_cgroup_rule_or_raise(rule)
|
||||
parts = rule.split(" ")
|
||||
major, minor = parts[1].split(":")
|
||||
|
||||
self._type = parts[0]
|
||||
self._major = major
|
||||
self._minor = minor
|
||||
self._permissions = parts[2]
|
||||
|
||||
def get_key(self):
|
||||
return f"{self._type}_{self._major}_{self._minor}"
|
||||
|
||||
def render(self):
|
||||
return f"{self._type} {self._major}:{self._minor} {self._permissions}"
|
||||
|
||||
|
||||
class DeviceCGroupRules:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._rules: set[DeviceCGroupRule] = set()
|
||||
self._track_rule_combos: set[str] = set()
|
||||
|
||||
def add_rule(self, rule: str):
|
||||
dev_group_rule = DeviceCGroupRule(rule)
|
||||
if dev_group_rule in self._rules:
|
||||
raise RenderError(f"Device Group Rule [{rule}] already added")
|
||||
|
||||
rule_key = dev_group_rule.get_key()
|
||||
if rule_key in self._track_rule_combos:
|
||||
raise RenderError(f"Device Group Rule [{rule}] has already been added for this device group")
|
||||
|
||||
self._rules.add(dev_group_rule)
|
||||
self._track_rule_combos.add(rule_key)
|
||||
|
||||
def has_rules(self):
|
||||
return len(self._rules) > 0
|
||||
|
||||
def render(self):
|
||||
return sorted([rule.render() for rule in self._rules])
|
||||
@@ -0,0 +1,71 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .device import Device
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from device import Device
|
||||
|
||||
|
||||
class Devices:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._devices: set[Device] = set()
|
||||
|
||||
# Tracks all container device paths to make sure they are not duplicated
|
||||
self._container_device_paths: set[str] = set()
|
||||
# Scan values for devices we should automatically add
|
||||
# for example /dev/dri for gpus
|
||||
self._auto_add_devices_from_values()
|
||||
|
||||
def _auto_add_devices_from_values(self):
|
||||
resources = self._render_instance.values.get("resources", {})
|
||||
|
||||
if resources.get("gpus", {}).get("use_all_gpus", False):
|
||||
self.add_device("/dev/dri", "/dev/dri", allow_disallowed=True)
|
||||
if resources["gpus"].get("kfd_device_exists", False):
|
||||
self.add_device("/dev/kfd", "/dev/kfd", allow_disallowed=True) # AMD ROCm
|
||||
|
||||
def add_device(self, host_device: str, container_device: str, cgroup_perm: str = "", allow_disallowed=False):
|
||||
# Host device can be mapped to multiple container devices,
|
||||
# so we only make sure container devices are not duplicated
|
||||
if container_device in self._container_device_paths:
|
||||
raise RenderError(f"Device with container path [{container_device}] already added")
|
||||
|
||||
self._devices.add(Device(host_device, container_device, cgroup_perm, allow_disallowed))
|
||||
self._container_device_paths.add(container_device)
|
||||
|
||||
def add_usb_bus(self):
|
||||
self.add_device("/dev/bus/usb", "/dev/bus/usb", allow_disallowed=True)
|
||||
|
||||
def _add_snd_device(self):
|
||||
self.add_device("/dev/snd", "/dev/snd", allow_disallowed=True)
|
||||
|
||||
def _add_tun_device(self):
|
||||
self.add_device("/dev/net/tun", "/dev/net/tun", allow_disallowed=True)
|
||||
|
||||
def has_devices(self):
|
||||
return len(self._devices) > 0
|
||||
|
||||
# Mainly used by dependency containers;
# there is no reason to pass devices to
# redis or postgres, for example
|
||||
def remove_devices(self):
|
||||
self._devices.clear()
|
||||
self._container_device_paths.clear()
|
||||
|
||||
# Check if there are any gpu devices
|
||||
# Used to determine if we should add groups
|
||||
# like 'video' to the container
|
||||
def has_gpus(self):
|
||||
for d in self._devices:
|
||||
if d.host_device == "/dev/dri":
|
||||
return True
|
||||
return False
|
||||
|
||||
def render(self) -> list[str]:
|
||||
return sorted([d.render() for d in self._devices])
|
||||
@@ -0,0 +1,79 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import allowed_dns_opt_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import allowed_dns_opt_or_raise
|
||||
|
||||
|
||||
class Dns:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._dns_options: set[str] = set()
|
||||
self._dns_searches: set[str] = set()
|
||||
self._dns_nameservers: set[str] = set()
|
||||
|
||||
self._auto_add_dns_opts_from_values()
|
||||
self._auto_add_dns_searches_from_values()
|
||||
self._auto_add_dns_nameservers_from_values()
|
||||
|
||||
def _get_dns_opt_keys(self):
|
||||
return [self._get_key_from_opt(opt) for opt in self._dns_options]
|
||||
|
||||
def _get_key_from_opt(self, opt):
|
||||
return opt.split(":")[0]
|
||||
|
||||
def _auto_add_dns_opts_from_values(self):
|
||||
values = self._render_instance.values
|
||||
for dns_opt in values.get("network", {}).get("dns_opts", []):
|
||||
self.add_dns_opt(dns_opt)
|
||||
|
||||
def _auto_add_dns_searches_from_values(self):
|
||||
values = self._render_instance.values
|
||||
for dns_search in values.get("network", {}).get("dns_searches", []):
|
||||
self.add_dns_search(dns_search)
|
||||
|
||||
def _auto_add_dns_nameservers_from_values(self):
|
||||
values = self._render_instance.values
|
||||
for dns_nameserver in values.get("network", {}).get("dns_nameservers", []):
|
||||
self.add_dns_nameserver(dns_nameserver)
|
||||
|
||||
def add_dns_search(self, dns_search):
|
||||
if dns_search in self._dns_searches:
|
||||
raise RenderError(f"DNS Search [{dns_search}] already added")
|
||||
self._dns_searches.add(dns_search)
|
||||
|
||||
def add_dns_nameserver(self, dns_nameserver):
|
||||
if dns_nameserver in self._dns_nameservers:
|
||||
raise RenderError(f"DNS Nameserver [{dns_nameserver}] already added")
|
||||
self._dns_nameservers.add(dns_nameserver)
|
||||
|
||||
def add_dns_opt(self, dns_opt):
|
||||
# e.g. attempts:3
|
||||
key = allowed_dns_opt_or_raise(self._get_key_from_opt(dns_opt))
|
||||
if key in self._get_dns_opt_keys():
|
||||
raise RenderError(f"DNS Option [{key}] already added")
|
||||
self._dns_options.add(dns_opt)
|
||||
|
||||
def has_dns_opts(self):
|
||||
return len(self._dns_options) > 0
|
||||
|
||||
def has_dns_searches(self):
|
||||
return len(self._dns_searches) > 0
|
||||
|
||||
def has_dns_nameservers(self):
|
||||
return len(self._dns_nameservers) > 0
|
||||
|
||||
def render_dns_searches(self):
|
||||
return sorted(self._dns_searches)
|
||||
|
||||
def render_dns_opts(self):
|
||||
return sorted(self._dns_options)
|
||||
|
||||
def render_dns_nameservers(self):
|
||||
return sorted(self._dns_nameservers)
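An illustrative run of the DNS helper above; option keys (the part before ":") must be unique, as the "attempts:3" example in the code hints. The render object is an assumption.

dns = Dns(render)                    # auto-adds anything under values["network"]
dns.add_dns_nameserver("1.1.1.1")
dns.add_dns_opt("attempts:3")
print(dns.render_dns_opts())         # -> ['attempts:3']
dns.add_dns_opt("attempts:5")        # raises RenderError: option key [attempts] already added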
|
||||
@@ -0,0 +1,119 @@
|
||||
from typing import Any, TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .formatter import escape_dollar
|
||||
from .resources import Resources
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from formatter import escape_dollar
|
||||
from resources import Resources
|
||||
|
||||
|
||||
class Environment:
|
||||
def __init__(self, render_instance: "Render", resources: Resources):
|
||||
self._render_instance = render_instance
|
||||
self._resources = resources
|
||||
# Stores variables that user defined
|
||||
self._user_vars: dict[str, Any] = {}
|
||||
# Stores variables that are automatically added (based on values)
|
||||
self._auto_variables: dict[str, Any] = {}
|
||||
# Stores variables that are added by the application developer
|
||||
self._app_dev_variables: dict[str, Any] = {}
|
||||
|
||||
self._skip_generic_variables: bool = render_instance.values.get("skip_generic_variables", False)
|
||||
self._skip_id_variables: bool = render_instance.values.get("skip_id_variables", False)
|
||||
|
||||
self._auto_add_variables_from_values()
|
||||
|
||||
def _auto_add_variables_from_values(self):
|
||||
if not self._skip_generic_variables:
|
||||
self._add_generic_variables()
|
||||
self._add_nvidia_variables()
|
||||
|
||||
def _add_generic_variables(self):
|
||||
self._auto_variables["TZ"] = self._render_instance.values.get("TZ", "Etc/UTC")
|
||||
self._auto_variables["UMASK"] = self._render_instance.values.get("UMASK", "002")
|
||||
self._auto_variables["UMASK_SET"] = self._render_instance.values.get("UMASK", "002")
|
||||
|
||||
run_as = self._render_instance.values.get("run_as", {})
|
||||
user = run_as.get("user")
|
||||
group = run_as.get("group")
|
||||
if user:
|
||||
self._auto_variables["PUID"] = user
|
||||
self._auto_variables["UID"] = user
|
||||
self._auto_variables["USER_ID"] = user
|
||||
if group:
|
||||
self._auto_variables["PGID"] = group
|
||||
self._auto_variables["GID"] = group
|
||||
self._auto_variables["GROUP_ID"] = group
|
||||
|
||||
def _add_nvidia_variables(self):
|
||||
if self._resources._nvidia_ids:
|
||||
self._auto_variables["NVIDIA_DRIVER_CAPABILITIES"] = "all"
|
||||
self._auto_variables["NVIDIA_VISIBLE_DEVICES"] = ",".join(sorted(self._resources._nvidia_ids))
|
||||
else:
|
||||
self._auto_variables["NVIDIA_VISIBLE_DEVICES"] = "void"
|
||||
|
||||
def _format_value(self, v: Any) -> str:
|
||||
value = str(v)
|
||||
|
||||
# str(bool) returns "True" or "False",
|
||||
# but we want "true" or "false"
|
||||
if isinstance(v, bool):
|
||||
value = value.lower()
|
||||
return value
|
||||
|
||||
def remove_auto_env(self, name: str):
|
||||
if name in self._auto_variables.keys():
|
||||
del self._auto_variables[name]
|
||||
return
|
||||
raise RenderError(f"Environment variable [{name}] is not defined.")
|
||||
|
||||
def add_env(self, name: str, value: Any):
|
||||
if not name:
|
||||
raise RenderError(f"Environment variable name cannot be empty. [{name}]")
|
||||
if name in self._app_dev_variables.keys():
|
||||
raise RenderError(
|
||||
f"Found duplicate environment variable [{name}] in application developer environment variables."
|
||||
)
|
||||
self._app_dev_variables[name] = value
|
||||
|
||||
def add_user_envs(self, user_env: list[dict]):
|
||||
for item in user_env:
|
||||
if not item.get("name"):
|
||||
raise RenderError(f"Environment variable name cannot be empty. [{item}]")
|
||||
if item["name"] in self._user_vars.keys():
|
||||
raise RenderError(
|
||||
f"Found duplicate environment variable [{item['name']}] in user environment variables."
|
||||
)
|
||||
self._user_vars[item["name"]] = item.get("value")
|
||||
|
||||
def has_variables(self):
|
||||
return len(self._auto_variables) > 0 or len(self._user_vars) > 0 or len(self._app_dev_variables) > 0
|
||||
|
||||
def render(self):
|
||||
result: dict[str, str] = {}
|
||||
|
||||
# Add envs from auto variables
|
||||
result.update({k: self._format_value(v) for k, v in self._auto_variables.items()})
|
||||
|
||||
# Track defined keys for faster lookup
|
||||
defined_keys = set(result.keys())
|
||||
|
||||
# Add envs from application developer (prohibit overwriting auto variables)
|
||||
for k, v in self._app_dev_variables.items():
|
||||
if k in defined_keys:
|
||||
raise RenderError(f"Environment variable [{k}] is already defined automatically from the library.")
|
||||
result[k] = self._format_value(v)
|
||||
defined_keys.add(k)
|
||||
|
||||
# Add envs from user (prohibit overwriting app developer envs and auto variables)
|
||||
for k, v in self._user_vars.items():
|
||||
if k in defined_keys:
|
||||
raise RenderError(f"Environment variable [{k}] is already defined from the application developer.")
|
||||
result[k] = self._format_value(v)
|
||||
|
||||
return {k: escape_dollar(v) for k, v in result.items()}
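To make the layering above concrete, a short hedged example; env stands for a container's Environment instance (c.environment) and the keys are illustrative.

env.add_env("APP_MODE", "server")                             # app developer variable
env.add_user_envs([{"name": "LOG_LEVEL", "value": "debug"}])  # user-supplied variable
rendered = env.render()
# rendered now contains TZ/UMASK/PUID/... plus APP_MODE and LOG_LEVEL, all as strings
# with "$" escaped. Re-defining an auto variable (e.g. add_env("TZ", "UTC")) is
# accepted at add time but makes render() raise RenderError.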
|
||||
@@ -0,0 +1,4 @@
|
||||
class RenderError(Exception):
|
||||
"""Base class for exceptions in this module."""
|
||||
|
||||
pass
|
||||
@@ -0,0 +1,31 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_port_or_raise, valid_port_protocol_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_port_or_raise, valid_port_protocol_or_raise
|
||||
|
||||
|
||||
class Expose:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._ports: set[str] = set()
|
||||
|
||||
def add_port(self, port: int, protocol: str = "tcp"):
|
||||
port = valid_port_or_raise(port)
|
||||
protocol = valid_port_protocol_or_raise(protocol)
|
||||
key = f"{port}/{protocol}"
|
||||
if key in self._ports:
|
||||
raise RenderError(f"Exposed port [{port}/{protocol}] already added")
|
||||
self._ports.add(key)
|
||||
|
||||
def has_ports(self):
|
||||
return len(self._ports) > 0
|
||||
|
||||
def render(self):
|
||||
return sorted(self._ports)
|
||||
@@ -0,0 +1,33 @@
|
||||
import ipaddress
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
|
||||
|
||||
class ExtraHosts:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._extra_hosts: dict[str, str] = {}
|
||||
|
||||
def add_host(self, host: str, ip: str):
|
||||
if not ip == "host-gateway":
|
||||
try:
|
||||
ipaddress.ip_address(ip)
|
||||
except ValueError:
|
||||
raise RenderError(f"Invalid IP address [{ip}] for host [{host}]")
|
||||
|
||||
if host in self._extra_hosts:
|
||||
raise RenderError(f"Host [{host}] already added with [{self._extra_hosts[host]}]")
|
||||
self._extra_hosts[host] = ip
|
||||
|
||||
def has_hosts(self):
|
||||
return len(self._extra_hosts) > 0
|
||||
|
||||
def render(self):
|
||||
return {host: ip for host, ip in self._extra_hosts.items()}
|
||||
@@ -0,0 +1,26 @@
|
||||
import json
|
||||
import hashlib
|
||||
|
||||
|
||||
def escape_dollar(text: str) -> str:
|
||||
return text.replace("$", "$$")
|
||||
|
||||
|
||||
def get_hashed_name_for_volume(prefix: str, config: dict):
|
||||
config_hash = hashlib.sha256(json.dumps(config).encode("utf-8")).hexdigest()
|
||||
return f"{prefix}_{config_hash}"
|
||||
|
||||
|
||||
def get_hash_with_prefix(prefix: str, data: str):
|
||||
return f"{prefix}_{hashlib.sha256(data.encode('utf-8')).hexdigest()}"
|
||||
|
||||
|
||||
def merge_dicts_no_overwrite(dict1, dict2):
|
||||
overlapping_keys = dict1.keys() & dict2.keys()
|
||||
if overlapping_keys:
|
||||
raise ValueError(f"Merging of dicts failed. Overlapping keys: {overlapping_keys}")
|
||||
return {**dict1, **dict2}
|
||||
|
||||
|
||||
def get_image_with_hashed_data(image: str, data: str):
|
||||
return get_hash_with_prefix(f"ix-{image}", data)
|
||||
@@ -0,0 +1,218 @@
|
||||
import re
|
||||
import copy
|
||||
import yaml
|
||||
import bcrypt
|
||||
import secrets
|
||||
import urllib.parse
|
||||
from base64 import b64encode
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .volume_sources import HostPathSource, IxVolumeSource
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from volume_sources import HostPathSource, IxVolumeSource
|
||||
|
||||
|
||||
class Functions:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
|
||||
def _to_yaml(self, data):
|
||||
return yaml.dump(data)
|
||||
|
||||
def _bcrypt_hash(self, password):
|
||||
hashed = bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()).decode("utf-8")
|
||||
return hashed
|
||||
|
||||
def _htpasswd(self, username, password):
|
||||
hashed = self._bcrypt_hash(password)
|
||||
return username + ":" + hashed
|
||||
|
||||
def _secure_string(self, length):
|
||||
return secrets.token_urlsafe(length)[:length]
|
||||
|
||||
def _basic_auth(self, username, password):
|
||||
return b64encode(f"{username}:{password}".encode("utf-8")).decode("utf-8")
|
||||
|
||||
def _basic_auth_header(self, username, password):
|
||||
return f"Basic {self._basic_auth(username, password)}"
|
||||
|
||||
def _fail(self, message):
|
||||
raise RenderError(message)
|
||||
|
||||
def _camel_case(self, string):
|
||||
return string.title()
|
||||
|
||||
def _auto_cast(self, value):
|
||||
lower_str_value = str(value).lower()
|
||||
if lower_str_value in ["true", "false"]:
|
||||
return lower_str_value == "true"
|
||||
|
||||
try:
|
||||
float_value = float(value)
|
||||
if float_value.is_integer():
|
||||
return int(float_value)
|
||||
else:
|
||||
return float_value
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
return value
|
||||
|
||||
def _match_regex(self, value, regex):
|
||||
if not re.match(regex, value):
|
||||
return False
|
||||
return True
|
||||
|
||||
def _must_match_regex(self, value, regex):
|
||||
if not self._match_regex(value, regex):
|
||||
raise RenderError(f"Expected [{value}] to match [{regex}]")
|
||||
return value
|
||||
|
||||
def _is_boolean(self, string):
|
||||
return string.lower() in ["true", "false"]
|
||||
|
||||
def _is_number(self, string):
|
||||
try:
|
||||
float(string)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
def _copy_dict(self, dict):
|
||||
return copy.deepcopy(dict)
|
||||
|
||||
def _merge_dicts(self, *dicts):
|
||||
merged_dict = {}
|
||||
for dictionary in dicts:
|
||||
merged_dict.update(dictionary)
|
||||
return merged_dict
|
||||
|
||||
def _disallow_chars(self, string: str, chars: list[str], key: str):
|
||||
for char in chars:
|
||||
if char in string:
|
||||
raise RenderError(f"Disallowed character [{char}] in [{key}]")
|
||||
return string
|
||||
|
||||
def _or_default(self, value, default):
|
||||
if not value:
|
||||
return default
|
||||
return value
|
||||
|
||||
def _url_to_dict(self, url: str, v6_brackets: bool = False):
|
||||
try:
|
||||
# Try parsing as-is first
|
||||
parsed = urllib.parse.urlparse(url)
|
||||
|
||||
# If we didn't get a hostname, try with http:// prefix
|
||||
if not parsed.hostname:
|
||||
parsed = urllib.parse.urlparse(f"http://{url}")
|
||||
|
||||
# Final check that we have a valid result
|
||||
if not parsed.hostname:
|
||||
raise RenderError(
|
||||
f"Failed to parse URL [{url}]. Ensure it is a valid URL with a hostname and optional port."
|
||||
)
|
||||
|
||||
result = {
|
||||
"netloc": parsed.netloc,
|
||||
"scheme": parsed.scheme,
|
||||
"host": parsed.hostname,
|
||||
"port": parsed.port,
|
||||
"path": parsed.path,
|
||||
}
|
||||
if v6_brackets and parsed.hostname and ":" in parsed.hostname:
|
||||
result["host"] = f"[{parsed.hostname}]"
|
||||
result["host_no_brackets"] = parsed.hostname
|
||||
|
||||
return result
|
||||
|
||||
except Exception:
|
||||
raise RenderError(
|
||||
f"Failed to parse URL [{url}]. Ensure it is a valid URL with a hostname and optional port."
|
||||
)
|
||||
|
||||
def _require_unique(self, values, key, split_char=""):
|
||||
new_values = []
|
||||
for value in values:
|
||||
new_values.append(value.split(split_char)[0] if split_char else value)
|
||||
|
||||
if len(new_values) != len(set(new_values)):
|
||||
raise RenderError(f"Expected values in [{key}] to be unique, but got [{', '.join(values)}]")
|
||||
|
||||
def _require_no_reserved(self, values, key, reserved, split_char="", starts_with=False):
|
||||
new_values = []
|
||||
for value in values:
|
||||
new_values.append(value.split(split_char)[0] if split_char else value)
|
||||
|
||||
if starts_with:
|
||||
for arg in new_values:
|
||||
for reserved_value in reserved:
|
||||
if arg.startswith(reserved_value):
|
||||
raise RenderError(f"Value [{reserved_value}] is reserved and cannot be set in [{key}]")
|
||||
return
|
||||
|
||||
for reserved_value in reserved:
|
||||
if reserved_value in new_values:
|
||||
raise RenderError(f"Value [{reserved_value}] is reserved and cannot be set in [{key}]")
|
||||
|
||||
def _url_encode(self, string):
|
||||
return urllib.parse.quote_plus(string)
|
||||
|
||||
def _temp_config(self, name):
|
||||
if not name:
|
||||
raise RenderError("Expected [name] to be set when calling [temp_config].")
|
||||
return {"type": "temporary", "volume_config": {"volume_name": name}}
|
||||
|
||||
def _get_host_path(self, storage):
|
||||
source_type = storage.get("type", "")
|
||||
if not source_type:
|
||||
raise RenderError("Expected [type] to be set for volume mounts.")
|
||||
|
||||
match source_type:
|
||||
case "host_path":
|
||||
mount_config = storage.get("host_path_config")
|
||||
if mount_config is None:
|
||||
raise RenderError("Expected [host_path_config] to be set for [host_path] type.")
|
||||
host_source = HostPathSource(self._render_instance, mount_config).get()
|
||||
return host_source
|
||||
case "ix_volume":
|
||||
mount_config = storage.get("ix_volume_config")
|
||||
if mount_config is None:
|
||||
raise RenderError("Expected [ix_volume_config] to be set for [ix_volume] type.")
|
||||
ix_source = IxVolumeSource(self._render_instance, mount_config).get()
|
||||
return ix_source
|
||||
case _:
|
||||
raise RenderError(f"Storage type [{source_type}] does not support host path.")
|
||||
|
||||
def func_map(self):
|
||||
return {
|
||||
"auto_cast": self._auto_cast,
|
||||
"basic_auth_header": self._basic_auth_header,
|
||||
"basic_auth": self._basic_auth,
|
||||
"bcrypt_hash": self._bcrypt_hash,
|
||||
"camel_case": self._camel_case,
|
||||
"copy_dict": self._copy_dict,
|
||||
"fail": self._fail,
|
||||
"htpasswd": self._htpasswd,
|
||||
"is_boolean": self._is_boolean,
|
||||
"is_number": self._is_number,
|
||||
"match_regex": self._match_regex,
|
||||
"merge_dicts": self._merge_dicts,
|
||||
"must_match_regex": self._must_match_regex,
|
||||
"secure_string": self._secure_string,
|
||||
"disallow_chars": self._disallow_chars,
|
||||
"get_host_path": self._get_host_path,
|
||||
"or_default": self._or_default,
|
||||
"temp_config": self._temp_config,
|
||||
"require_unique": self._require_unique,
|
||||
"require_no_reserved": self._require_no_reserved,
|
||||
"url_encode": self._url_encode,
|
||||
"url_to_dict": self._url_to_dict,
|
||||
"to_yaml": self._to_yaml,
|
||||
}
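A hedged sketch of how func_map() is presumably exposed to the templating layer; the exact wiring lives in the Render class, which is outside this diff, so the render object here is an assumption.

funcs = Functions(render).func_map()
assert funcs["url_encode"]("a b&c") == "a+b%26c"
assert funcs["is_number"]("3.14") is True
assert funcs["or_default"]("", "fallback") == "fallback"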
|
||||
@@ -0,0 +1,266 @@
|
||||
import json
|
||||
from typing import Any, TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .formatter import escape_dollar
|
||||
from .validations import valid_http_path_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from formatter import escape_dollar
|
||||
from validations import valid_http_path_or_raise
|
||||
|
||||
|
||||
class Healthcheck:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._test: str | list[str] = ""
|
||||
self._interval_sec: int = 30
|
||||
self._timeout_sec: int = 5
|
||||
self._retries: int = 5
|
||||
self._start_period_sec: int = 15
|
||||
self._start_interval_sec: int = 2
|
||||
self._disabled: bool = False
|
||||
self._use_built_in: bool = False
|
||||
|
||||
def _get_test(self):
|
||||
if isinstance(self._test, str):
|
||||
return escape_dollar(self._test)
|
||||
return [escape_dollar(t) for t in self._test]
|
||||
|
||||
def disable(self):
|
||||
self._disabled = True
|
||||
|
||||
def use_built_in(self):
|
||||
self._use_built_in = True
|
||||
|
||||
def set_custom_test(self, test: str | list[str]):
|
||||
if isinstance(test, list):
|
||||
if test[0] == "CMD" and any(t.startswith("$") for t in test):
|
||||
raise RenderError(f"Healthcheck with 'CMD' cannot contain shell variables '{test}'")
|
||||
if self._disabled:
|
||||
raise RenderError("Cannot set custom test when healthcheck is disabled")
|
||||
self._test = test
|
||||
|
||||
def set_test(self, variant: str, config: dict | None = None):
|
||||
config = config or {}
|
||||
self.set_custom_test(test_mapping(variant, config))
|
||||
|
||||
def set_interval(self, interval: int):
|
||||
self._interval_sec = interval
|
||||
|
||||
def set_timeout(self, timeout: int):
|
||||
self._timeout_sec = timeout
|
||||
|
||||
def set_retries(self, retries: int):
|
||||
self._retries = retries
|
||||
|
||||
def set_start_period(self, start_period: int):
|
||||
self._start_period_sec = start_period
|
||||
|
||||
def set_start_interval(self, start_interval: int):
|
||||
self._start_interval_sec = start_interval
|
||||
|
||||
def has_healthcheck(self):
|
||||
return not self._use_built_in
|
||||
|
||||
def render(self):
|
||||
if self._use_built_in:
|
||||
return RenderError("Should not be called when built in healthcheck is used")
|
||||
|
||||
if self._disabled:
|
||||
return {"disable": True}
|
||||
|
||||
if not self._test:
|
||||
raise RenderError("Healthcheck test is not set")
|
||||
|
||||
return {
|
||||
"test": self._get_test(),
|
||||
"retries": self._retries,
|
||||
"interval": f"{self._interval_sec}s",
|
||||
"timeout": f"{self._timeout_sec}s",
|
||||
"start_period": f"{self._start_period_sec}s",
|
||||
"start_interval": f"{self._start_interval_sec}s",
|
||||
}
|
||||
|
||||
|
||||
def test_mapping(variant: str, config: dict | None = None) -> list[str]:
|
||||
config = config or {}
|
||||
tests = {
|
||||
"curl": curl_test,
|
||||
"wget": wget_test,
|
||||
"http": http_test,
|
||||
"netcat": netcat_test,
|
||||
"tcp": tcp_test,
|
||||
"redis": redis_test,
|
||||
"postgres": postgres_test,
|
||||
"mariadb": mariadb_test,
|
||||
"mongodb": mongodb_test,
|
||||
}
|
||||
|
||||
if variant not in tests:
|
||||
raise RenderError(f"Test variant [{variant}] is not valid. Valid options are: [{', '.join(tests.keys())}]")
|
||||
|
||||
return tests[variant](config)
|
||||
|
||||
|
||||
def get_key(config: dict, key: str, default: Any, required: bool):
|
||||
if key not in config:
|
||||
if not required:
|
||||
return default
|
||||
raise RenderError(f"Expected [{key}] to be set")
|
||||
return config[key]
|
||||
|
||||
|
||||
def curl_test(config: dict) -> list[str]:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", None, True)
|
||||
path = valid_http_path_or_raise(get_key(config, "path", "/", False))
|
||||
scheme = get_key(config, "scheme", "http", False)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
headers = get_key(config, "headers", [], False)
|
||||
method = get_key(config, "method", "GET", False)
|
||||
data = get_key(config, "data", None, False)
|
||||
|
||||
cmd = ["CMD", "curl", "--request", method, "--silent", "--output", "/dev/null", "--show-error", "--fail"]
|
||||
|
||||
if scheme == "https":
|
||||
cmd.append("--insecure")
|
||||
|
||||
for header in headers:
|
||||
if not header[0] or not header[1]:
|
||||
raise RenderError("Expected [header] to be a list of two items for curl test")
|
||||
cmd.extend(["--header", f"{header[0]}: {header[1]}"])
|
||||
|
||||
if data is not None:
|
||||
cmd.extend(["--data", json.dumps(data)])
|
||||
|
||||
cmd.append(f"{scheme}://{host}:{port}{path}")
|
||||
return cmd
|
||||
|
||||
|
||||
def wget_test(config: dict) -> list[str]:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", None, True)
|
||||
path = valid_http_path_or_raise(get_key(config, "path", "/", False))
|
||||
scheme = get_key(config, "scheme", "http", False)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
headers = get_key(config, "headers", [], False)
|
||||
spider = get_key(config, "spider", True, False)
|
||||
|
||||
cmd = ["CMD", "wget", "--quiet"]
|
||||
|
||||
if spider:
|
||||
cmd.append("--spider")
|
||||
else:
|
||||
cmd.extend(["-O", "/dev/null"])
|
||||
|
||||
if scheme == "https":
|
||||
cmd.append("--no-check-certificate")
|
||||
|
||||
for header in headers:
|
||||
if not header[0] or not header[1]:
|
||||
raise RenderError("Expected [header] to be a list of two items for wget test")
|
||||
cmd.extend(["--header", f"{header[0]}: {header[1]}"])
|
||||
|
||||
cmd.append(f"{scheme}://{host}:{port}{path}")
|
||||
|
||||
return cmd
|
||||
|
||||
|
||||
def http_test(config: dict) -> list[str]:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", None, True)
|
||||
path = valid_http_path_or_raise(get_key(config, "path", "/", False))
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
|
||||
hc = f"""{{ printf "GET {path} HTTP/1.1\\r\\nHost: {host}\\r\\nConnection: close\\r\\n\\r\\n" >&0; grep "HTTP" | grep -q "200"; }} 0<>/dev/tcp/{host}/{port}""" # noqa
|
||||
return ["CMD-SHELL", f"/bin/bash -c '{hc}'"]
|
||||
|
||||
|
||||
def netcat_test(config: dict) -> list[str]:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", None, True)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
udp_mode = get_key(config, "udp", False, False)
|
||||
cmd = ["CMD", "nc", "-z", "-w", "1"]
|
||||
|
||||
if udp_mode:
|
||||
cmd.append("-u")
|
||||
|
||||
cmd.extend([host, str(port)])
|
||||
|
||||
return cmd
|
||||
|
||||
|
||||
def tcp_test(config: dict) -> list[str]:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", None, True)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
|
||||
return ["CMD", "timeout", "1", "bash", "-c", f"cat < /dev/null > /dev/tcp/{host}/{port}"]
|
||||
|
||||
|
||||
def redis_test(config: dict) -> list[str]:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", 6379, False)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
password = get_key(config, "password", None, False)
|
||||
cmd = ["CMD", "redis-cli", "-h", host, "-p", str(port)]
|
||||
|
||||
if password:
|
||||
cmd.extend(["-a", password])
|
||||
|
||||
cmd.append("ping")
|
||||
|
||||
return cmd
|
||||
|
||||
|
||||
def postgres_test(config: dict) -> list[str]:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", 5432, False)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
user = get_key(config, "user", None, True)
|
||||
db = get_key(config, "db", None, True)
|
||||
|
||||
return ["CMD", "pg_isready", "-h", host, "-p", str(port), "-U", user, "-d", db]
|
||||
|
||||
|
||||
def mariadb_test(config: dict) -> list[str]:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", 3306, False)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
password = get_key(config, "password", None, True)
|
||||
|
||||
return [
|
||||
"CMD",
|
||||
"mariadb-admin",
|
||||
"--user=root",
|
||||
f"--host={host}",
|
||||
f"--port={port}",
|
||||
f"--password={password}",
|
||||
"ping",
|
||||
]
|
||||
|
||||
|
||||
def mongodb_test(config: dict) -> list[str]:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", 27017, False)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
db = get_key(config, "db", None, True)
|
||||
|
||||
return [
|
||||
"CMD",
|
||||
"mongosh",
|
||||
"--host",
|
||||
host,
|
||||
"--port",
|
||||
str(port),
|
||||
db,
|
||||
"--eval",
|
||||
'db.adminCommand("ping")',
|
||||
"--quiet",
|
||||
]
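# Illustrative sketch (not part of the library): test_mapping above turns a
# variant name plus a small config dict into a Docker Compose healthcheck
# "test" value. Assuming the module is importable as in the test suite and
# that valid_http_path_or_raise returns an already-valid path unchanged:
#
#     test_mapping("tcp", {"port": 5432})
#     # -> ["CMD", "timeout", "1", "bash", "-c",
#     #     "cat < /dev/null > /dev/tcp/127.0.0.1/5432"]
#
#     test_mapping("curl", {"port": 8080, "path": "/health"})
#     # -> ["CMD", "curl", "--request", "GET", "--silent", "--output", "/dev/null",
#     #     "--show-error", "--fail", "http://127.0.0.1:8080/health"]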
|
||||
@@ -0,0 +1,37 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .formatter import escape_dollar
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from formatter import escape_dollar
|
||||
|
||||
|
||||
class Labels:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._labels: dict[str, str] = {}
|
||||
|
||||
def add_label(self, key: str, value: str):
|
||||
if not key:
|
||||
raise RenderError("Labels must have a key")
|
||||
|
||||
if key.startswith("com.docker.compose"):
|
||||
raise RenderError(f"Label [{key}] cannot start with [com.docker.compose] as it is reserved")
|
||||
|
||||
if key in self._labels.keys():
|
||||
raise RenderError(f"Label [{key}] already added")
|
||||
|
||||
self._labels[key] = escape_dollar(str(value))
|
||||
|
||||
def has_labels(self) -> bool:
|
||||
return bool(self._labels)
|
||||
|
||||
def render(self) -> dict[str, str]:
|
||||
if not self.has_labels():
|
||||
return {}
|
||||
return {label: value for label, value in sorted(self._labels.items())}
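# Usage sketch (illustrative only; assumes escape_dollar leaves values without
# "$" characters unchanged):
#
#     labels = Labels(render_instance)
#     labels.add_label("org.example.role", "cache")
#     labels.render()
#     # -> {"org.example.role": "cache"}
#
# Keys under "com.docker.compose" and duplicate keys raise RenderError, and
# render() returns the labels sorted by key.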
|
||||
@@ -0,0 +1,283 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
SHORT_LIVED = "short-lived"
|
||||
|
||||
|
||||
@dataclass
|
||||
class Security:
|
||||
header: str
|
||||
items: list[str]
|
||||
|
||||
|
||||
class Notes:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._app_name: str = ""
|
||||
self._app_train: str = ""
|
||||
self._info: list[str] = []
|
||||
self._warnings: list[str] = []
|
||||
self._deprecations: list[str] = []
|
||||
self._security: dict[str, list[Security]] = {}
|
||||
self._header: str = ""
|
||||
self._body: str = ""
|
||||
self._footer: str = ""
|
||||
|
||||
self._auto_set_app_name()
|
||||
self._auto_set_app_train()
|
||||
self._auto_set_header()
|
||||
self._auto_set_footer()
|
||||
|
||||
def _is_enterprise_train(self):
|
||||
if self._app_train == "enterprise":
|
||||
return True
|
||||
|
||||
def _auto_set_app_name(self):
|
||||
app_name = self._render_instance.values.get("ix_context", {}).get("app_metadata", {}).get("title", "")
|
||||
self._app_name = app_name or "<app_name>"
|
||||
|
||||
def _auto_set_app_train(self):
|
||||
app_train = self._render_instance.values.get("ix_context", {}).get("app_metadata", {}).get("train", "")
|
||||
self._app_train = app_train or "<app_train>"
|
||||
|
||||
def _auto_set_header(self):
|
||||
self._header = f"# {self._app_name}\n\n"
|
||||
|
||||
def _auto_set_footer(self):
|
||||
url = "https://github.com/truenas/apps"
|
||||
if self._is_enterprise_train():
|
||||
url = "https://ixsystems.atlassian.net"
|
||||
footer = "## Bug Reports and Feature Requests\n\n"
|
||||
footer += "If you find a bug in this app or have an idea for a new feature, please file an issue at\n"
|
||||
footer += f"{url}\n"
|
||||
self._footer = footer
|
||||
|
||||
def add_info(self, info: str):
|
||||
self._info.append(info)
|
||||
|
||||
def add_warning(self, warning: str):
|
||||
self._warnings.append(warning)
|
||||
|
||||
def _prepend_warning(self, warning: str):
|
||||
self._warnings.insert(0, warning)
|
||||
|
||||
def add_deprecation(self, deprecation: str):
|
||||
self._deprecations.append(deprecation)
|
||||
|
||||
def set_body(self, body: str):
|
||||
self._body = body
|
||||
|
||||
def get_pretty_host_mount(self, hm: str) -> tuple[str, bool]:
|
||||
hm = hm.rstrip("/")
|
||||
mapping = {
|
||||
"/dev/bus/usb": "USB Devices",
|
||||
"/dev/net/tun": "TUN Device",
|
||||
"/dev/snd": "Sound Device",
|
||||
"/dev/fuse": "Fuse Device",
|
||||
"/dev/uinput": "UInput Device",
|
||||
"/dev/dvb": "DVB Devices",
|
||||
"/dev/dri": "DRI Device",
|
||||
"/dev/kfd": "AMD GPU Device",
|
||||
"/etc/os-release": "OS Release File",
|
||||
"/etc/group": "Group File",
|
||||
"/etc/passwd": "Password File",
|
||||
"/etc/hostname": "Hostname File",
|
||||
"/var/run/docker.sock": "Docker Socket",
|
||||
"/var/run/utmp": "UTMP",
|
||||
"/var/run/dbus": "DBus Socket",
|
||||
"/run/udev": "Udev Socket",
|
||||
}
|
||||
if hm in mapping:
|
||||
return f"{mapping[hm]} ({hm})", True
|
||||
|
||||
hm = hm + "/"
|
||||
starters = ("/dev/", "/proc/", "/sys/", "/etc/", "/lib/")
|
||||
if any(hm.startswith(s) for s in starters):
|
||||
return hm.rstrip("/"), True
|
||||
|
||||
return "", False
|
||||
|
||||
def get_group_name_from_id(self, group_id: int | str) -> str:
|
||||
mapping = {
|
||||
0: "root",
|
||||
20: "dialout",
|
||||
24: "cdrom",
|
||||
29: "audio",
|
||||
568: "apps",
|
||||
999: "docker",
|
||||
}
|
||||
if group_id in mapping:
|
||||
return mapping[group_id]
|
||||
return str(group_id)
|
||||
|
||||
def scan_containers(self):
|
||||
for name, c in self._render_instance._containers.items():
|
||||
if self._security.get(name) is None:
|
||||
self._security[name] = []
|
||||
|
||||
if c.restart._policy == "on-failure":
|
||||
self._security[name].append(Security(header=SHORT_LIVED, items=[]))
|
||||
|
||||
if c._privileged:
|
||||
self._security[name].append(
|
||||
Security(
|
||||
header="Privileged mode is enabled",
|
||||
items=[
|
||||
"Has the same level of control as a system administrator",
|
||||
"Can access and modify any part of your TrueNAS system",
|
||||
],
|
||||
)
|
||||
)
|
||||
|
||||
run_as_sec_items = []
|
||||
user, group = c._user.split(":") if c._user else [-1, -1]
|
||||
if user in ["0", -1]:
|
||||
user = "root" if user == "0" else "unknown"
|
||||
if group in ["0", -1]:
|
||||
group = "root" if group == "0" else "unknown"
|
||||
run_as_sec_items.append(f"User: {user}")
|
||||
run_as_sec_items.append(f"Group: {group}")
|
||||
groups = [self.get_group_name_from_id(g) for g in c._group_add]
|
||||
if groups:
|
||||
groups_str = ", ".join(sorted(groups))
|
||||
run_as_sec_items.append(f"Supplementary Groups: {groups_str}")
|
||||
self._security[name].append(Security("Running user/group(s)", run_as_sec_items))
|
||||
|
||||
if c._ipc_mode == "host":
|
||||
self._security[name].append(
|
||||
Security(
|
||||
header="Host IPC namespace is enabled",
|
||||
items=[
|
||||
"Container can access the inter-process communication mechanisms of the host",
|
||||
"Allows communication with other processes on the host under particular circumstances",
|
||||
],
|
||||
)
|
||||
)
|
||||
if c._pid_mode == "host":
|
||||
self._security[name].append(
|
||||
Security(
|
||||
header="Host PID namespace is enabled",
|
||||
items=[
|
||||
"Container can see and interact with all host processes",
|
||||
"Potential for privilege escalation or process manipulation",
|
||||
],
|
||||
)
|
||||
)
|
||||
if c._cgroup == "host":
|
||||
self._security[name].append(
|
||||
Security(
|
||||
header="Host cgroup namespace is enabled",
|
||||
items=[
|
||||
"Container shares control groups with the host system",
|
||||
"Can bypass resource limits and isolation boundaries",
|
||||
],
|
||||
)
|
||||
)
|
||||
if "no-new-privileges=true" not in c._security_opt.render():
|
||||
self._security[name].append(
|
||||
Security(
|
||||
header="Security option [no-new-privileges] is not set",
|
||||
items=[
|
||||
"Processes can gain additional privileges through setuid/setgid binaries",
|
||||
"Can potentially allow privilege escalation attacks within the container",
|
||||
],
|
||||
)
|
||||
)
|
||||
|
||||
host_mounts = []
|
||||
for dev in c.devices._devices:
|
||||
pretty, _ = self.get_pretty_host_mount(dev.host_device)
|
||||
host_mounts.append(f"{pretty} - ({dev.cgroup_perm or 'Read/Write'})")
|
||||
|
||||
for vm in c.storage._volume_mounts:
|
||||
if vm.volume_mount_spec.get("type", "") == "bind":
|
||||
source = vm.volume_mount_spec.get("source", "")
|
||||
read_only = vm.volume_mount_spec.get("read_only", False)
|
||||
pretty, is_host_mount = self.get_pretty_host_mount(source)
|
||||
if is_host_mount:
|
||||
host_mounts.append(f"{pretty} - ({'Read Only' if read_only else 'Read/Write'})")
|
||||
|
||||
if host_mounts:
|
||||
self._security[name].append(
|
||||
Security(
|
||||
header="Passing Host Files, Devices, or Sockets into the Container", items=sorted(host_mounts)
|
||||
)
|
||||
)
|
||||
if c._tty:
|
||||
self._prepend_warning(
|
||||
f"Container [{name}] is running with a TTY, "
|
||||
"Logs do not appear correctly in the UI due to an [upstream bug]"
|
||||
"(https://github.com/docker/docker-py/issues/1394)"
|
||||
)
|
||||
self._security = {k: v for k, v in self._security.items() if v}
|
||||
|
||||
def render(self):
|
||||
self.scan_containers()
|
||||
|
||||
result = self._header
|
||||
|
||||
if self._warnings:
|
||||
result += "## Warnings\n\n"
|
||||
for warning in self._warnings:
|
||||
result += f"- {warning}\n"
|
||||
result += "\n"
|
||||
|
||||
if self._deprecations:
|
||||
result += "## Deprecations\n\n"
|
||||
for deprecation in self._deprecations:
|
||||
result += f"- {deprecation}\n"
|
||||
result += "\n"
|
||||
|
||||
if self._info:
|
||||
result += "## Info\n\n"
|
||||
for info in self._info:
|
||||
result += f"- {info}\n"
|
||||
result += "\n"
|
||||
|
||||
if self._security:
|
||||
result += "## Security\n\n"
|
||||
result += "**Read the following security precautions to ensure"
|
||||
result += " that you wish to continue using this application.**\n\n"
|
||||
|
||||
def render_security(container_name: str, security: list[Security]) -> str:
|
||||
output = "---\n\n"
|
||||
output += f"### Container: [{container_name}]"
|
||||
if any(sec.header == SHORT_LIVED for sec in security):
|
||||
output += "\n\n**This container is short-lived.**"
|
||||
output += "\n\n"
|
||||
for sec in [s for s in security if s.header != SHORT_LIVED]:
|
||||
output += f"#### {sec.header}\n\n"
|
||||
for item in sec.items:
|
||||
output += f"- {item}\n"
|
||||
if sec.items:
|
||||
output += "\n"
|
||||
return output
|
||||
|
||||
sec_list = []
|
||||
sec_short_lived_list = []
|
||||
for container_name, security in self._security.items():
|
||||
if any(sec.header == SHORT_LIVED for sec in security):
|
||||
sec_short_lived_list.append((container_name, security))
|
||||
continue
|
||||
sec_list.append((container_name, security))
|
||||
|
||||
sec_list = sorted(sec_list, key=lambda x: x[0])
|
||||
sec_short_lived_list = sorted(sec_short_lived_list, key=lambda x: x[0])
|
||||
|
||||
joined_sec_list = [*sec_list, *sec_short_lived_list]
|
||||
for idx, item in enumerate(joined_sec_list):
|
||||
container, sec = item
|
||||
result += render_security(container, sec)
|
||||
# If it's the last container, add a final ---
|
||||
if idx == len(joined_sec_list) - 1:
|
||||
result += "---\n\n"
|
||||
|
||||
if self._body:
|
||||
result += self._body.strip() + "\n\n"
|
||||
|
||||
result += self._footer
|
||||
|
||||
return result
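# Rendering sketch (illustrative): with a single warning added, render()
# produces a markdown document shaped roughly like the following, with the
# header and footer filled in automatically from the app metadata:
#
#     # <App Title>
#
#     ## Warnings
#
#     - Some warning text
#
#     ## Bug Reports and Feature Requests
#
#     If you find a bug in this app or have an idea for a new feature, please file an issue at
#     https://github.com/truenas/apps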
|
||||
@@ -0,0 +1,73 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import copy
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_portal_scheme_or_raise, valid_http_path_or_raise, valid_port_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_portal_scheme_or_raise, valid_http_path_or_raise, valid_port_or_raise
|
||||
|
||||
|
||||
class Portals:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._portals: set[Portal] = set()
|
||||
|
||||
def add(self, port: dict, config: dict | None = None):
|
||||
config = copy.deepcopy((config or {}))
|
||||
port = copy.deepcopy((port or {}))
|
||||
# If it's not published, a portal does not make sense
|
||||
if port.get("bind_mode", "") != "published":
|
||||
return
|
||||
|
||||
name = config.get("name", "Web UI")
|
||||
|
||||
if name in [p._name for p in self._portals]:
|
||||
raise RenderError(f"Portal [{name}] already added")
|
||||
|
||||
host = config.get("host", None)
|
||||
host_ips = port.get("host_ips", [])
|
||||
if not isinstance(host_ips, list):
|
||||
raise RenderError("Expected [host_ips] to be a list of strings")
|
||||
|
||||
# Remove wildcard IPs
|
||||
if "::" in host_ips:
|
||||
host_ips.remove("::")
|
||||
if "0.0.0.0" in host_ips:
|
||||
host_ips.remove("0.0.0.0")
|
||||
|
||||
# If host is not set, use the first host_ip (if it exists)
|
||||
if not host and len(host_ips) >= 1:
|
||||
host = host_ips[0]
|
||||
|
||||
config["host"] = host
|
||||
if not config.get("port"):
|
||||
config["port"] = port.get("port_number", 0)
|
||||
|
||||
self._portals.add(Portal(name, config))
|
||||
|
||||
def render(self):
|
||||
return [p.render() for _, p in sorted([(p._name, p) for p in self._portals])]
|
||||
|
||||
|
||||
class Portal:
|
||||
def __init__(self, name: str, config: dict):
|
||||
self._name = name
|
||||
self._scheme = valid_portal_scheme_or_raise(config.get("scheme", "http"))
|
||||
self._host = config.get("host", "0.0.0.0") or "0.0.0.0"
|
||||
self._port = valid_port_or_raise(config.get("port", 0))
|
||||
self._path = valid_http_path_or_raise(config.get("path", "/"))
|
||||
|
||||
def render(self):
|
||||
return {
|
||||
"name": self._name,
|
||||
"scheme": self._scheme,
|
||||
"host": self._host,
|
||||
"port": self._port,
|
||||
"path": self._path,
|
||||
}
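# Usage sketch (illustrative; assumes the validators pass through
# already-valid values unchanged):
#
#     Portal("Web UI", {"port": 30080}).render()
#     # -> {"name": "Web UI", "scheme": "http", "host": "0.0.0.0",
#     #     "port": 30080, "path": "/"}
#
# Portals.add() skips any port whose bind_mode is not "published", so only
# published ports ever appear in the rendered x-portals list.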
|
||||
@@ -0,0 +1,147 @@
|
||||
import ipaddress
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import (
|
||||
valid_ip_or_raise,
|
||||
valid_port_mode_or_raise,
|
||||
valid_port_or_raise,
|
||||
valid_port_protocol_or_raise,
|
||||
)
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import (
|
||||
valid_ip_or_raise,
|
||||
valid_port_mode_or_raise,
|
||||
valid_port_or_raise,
|
||||
valid_port_protocol_or_raise,
|
||||
)
|
||||
|
||||
|
||||
class Ports:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._ports: dict[str, dict] = {}
|
||||
|
||||
def _gen_port_key(self, host_port: int, host_ip: str, proto: str, ip_family: int) -> str:
|
||||
return f"{host_port}_{host_ip}_{proto}_{ip_family}"
|
||||
|
||||
def _is_wildcard_ip(self, ip: str) -> bool:
|
||||
return ip in ["0.0.0.0", "::"]
|
||||
|
||||
def _get_opposite_wildcard(self, ip: str) -> str:
|
||||
return "0.0.0.0" if ip == "::" else "::"
|
||||
|
||||
def _get_sort_key(self, p: dict) -> str:
|
||||
return f"{p['published']}_{p['target']}_{p['protocol']}_{p.get('host_ip', '_')}"
|
||||
|
||||
def _is_ports_same(self, port1: dict, port2: dict) -> bool:
|
||||
return (
|
||||
port1["published"] == port2["published"]
|
||||
and port1["target"] == port2["target"]
|
||||
and port1["protocol"] == port2["protocol"]
|
||||
and port1.get("host_ip", "_") == port2.get("host_ip", "_")
|
||||
)
|
||||
|
||||
def _has_opposite_family_port(self, port_config: dict, wildcard_ports: dict) -> bool:
|
||||
comparison_port = port_config.copy()
|
||||
comparison_port["host_ip"] = self._get_opposite_wildcard(port_config["host_ip"])
|
||||
for p in wildcard_ports.values():
|
||||
if self._is_ports_same(comparison_port, p):
|
||||
return True
|
||||
return False
|
||||
|
||||
def _check_port_conflicts(self, port_config: dict, ip_family: int) -> None:
|
||||
host_port = port_config["published"]
|
||||
host_ip = port_config["host_ip"]
|
||||
proto = port_config["protocol"]
|
||||
|
||||
key = self._gen_port_key(host_port, host_ip, proto, ip_family)
|
||||
|
||||
if key in self._ports.keys():
|
||||
raise RenderError(f"Port [{host_port}/{proto}/ipv{ip_family}] already added for [{host_ip}]")
|
||||
|
||||
wildcard_ip = "0.0.0.0" if ip_family == 4 else "::"
|
||||
if host_ip != wildcard_ip:
|
||||
# Check if there is a port with the same details but a wildcard IP of the same family
|
||||
wildcard_key = self._gen_port_key(host_port, wildcard_ip, proto, ip_family)
|
||||
if wildcard_key in self._ports.keys():
|
||||
raise RenderError(
|
||||
f"Cannot bind port [{host_port}/{proto}/ipv{ip_family}] to [{host_ip}], "
|
||||
f"already bound to [{wildcard_ip}]"
|
||||
)
|
||||
else:
|
||||
# We are adding a port with a wildcard IP
# Check if there is a port with the same details but a specific IP of the same family
|
||||
for p in self._ports.values():
|
||||
# Skip if the port is not for the same family
|
||||
if ip_family != ipaddress.ip_address(p["host_ip"]).version:
|
||||
continue
|
||||
|
||||
# Make a copy of the port config
|
||||
search_port = p.copy()
|
||||
# Replace the host IP with wildcard IP
|
||||
search_port["host_ip"] = wildcard_ip
|
||||
# If the ports match, it means a port for a specific IP is already added
# and we are trying to add it again with a wildcard IP. Raise an error
|
||||
if self._is_ports_same(search_port, port_config):
|
||||
raise RenderError(
|
||||
f"Cannot bind port [{host_port}/{proto}/ipv{ip_family}] to [{host_ip}], "
|
||||
f"already bound to [{p['host_ip']}]"
|
||||
)
|
||||
|
||||
def _add_port(self, host_port: int, container_port: int, config: dict | None = None):
|
||||
config = config or {}
|
||||
host_port = valid_port_or_raise(host_port)
|
||||
container_port = valid_port_or_raise(container_port)
|
||||
proto = valid_port_protocol_or_raise(config.get("protocol", "tcp"))
|
||||
mode = valid_port_mode_or_raise(config.get("mode", "ingress"))
|
||||
|
||||
host_ip = valid_ip_or_raise(config.get("host_ip", ""))
|
||||
ip = ipaddress.ip_address(host_ip)
|
||||
|
||||
port_config = {
|
||||
"published": host_port,
|
||||
"target": container_port,
|
||||
"protocol": proto,
|
||||
"mode": mode,
|
||||
"host_ip": host_ip,
|
||||
}
|
||||
self._check_port_conflicts(port_config, ip.version)
|
||||
|
||||
key = self._gen_port_key(host_port, host_ip, proto, ip.version)
|
||||
self._ports[key] = port_config
|
||||
# After all the local validations, let's validate the port with the TrueNAS API
|
||||
self._render_instance.client.validate_ip_port_combo(host_ip, host_port)
|
||||
|
||||
def has_ports(self):
|
||||
return len(self._ports) > 0
|
||||
|
||||
def render(self):
|
||||
specific_ports = []
|
||||
wildcard_ports = {}
|
||||
|
||||
for port_config in self._ports.values():
|
||||
if self._is_wildcard_ip(port_config["host_ip"]):
|
||||
wildcard_ports[id(port_config)] = port_config.copy()
|
||||
else:
|
||||
specific_ports.append(port_config.copy())
|
||||
|
||||
processed_ports = specific_ports.copy()
|
||||
for wild_port in wildcard_ports.values():
|
||||
processed_port = wild_port.copy()
|
||||
|
||||
# Check if there's a matching wildcard port for the opposite IP family
|
||||
has_opposite_family = self._has_opposite_family_port(wild_port, wildcard_ports)
|
||||
|
||||
if has_opposite_family:
|
||||
processed_port.pop("host_ip")
|
||||
|
||||
if processed_port not in processed_ports:
|
||||
processed_ports.append(processed_port)
|
||||
|
||||
return sorted(processed_ports, key=self._get_sort_key)
|
||||
@@ -0,0 +1,99 @@
|
||||
import copy
|
||||
|
||||
try:
|
||||
from .client import Client
|
||||
from .configs import Configs
|
||||
from .container import Container
|
||||
from .deps import Deps
|
||||
from .error import RenderError
|
||||
from .functions import Functions
|
||||
from .notes import Notes
|
||||
from .portals import Portals
|
||||
from .volumes import Volumes
|
||||
except ImportError:
|
||||
from client import Client
|
||||
from configs import Configs
|
||||
from container import Container
|
||||
from deps import Deps
|
||||
from error import RenderError
|
||||
from functions import Functions
|
||||
from notes import Notes
|
||||
from portals import Portals
|
||||
from volumes import Volumes
|
||||
|
||||
|
||||
class Render(object):
|
||||
def __init__(self, values):
|
||||
self._containers: dict[str, Container] = {}
|
||||
self.values = values
|
||||
self._add_images_internal_use()
|
||||
# Make a copy after we inject the images
|
||||
self._original_values: dict = copy.deepcopy(self.values)
|
||||
|
||||
self.deps: Deps = Deps(self)
|
||||
|
||||
self.client: Client = Client(render_instance=self)
|
||||
|
||||
self.configs = Configs(render_instance=self)
|
||||
self.funcs = Functions(render_instance=self).func_map()
|
||||
self.portals: Portals = Portals(render_instance=self)
|
||||
self.notes: Notes = Notes(render_instance=self)
|
||||
self.volumes = Volumes(render_instance=self)
|
||||
|
||||
def _add_images_internal_use(self):
|
||||
if not self.values.get("images"):
|
||||
self.values["images"] = {}
|
||||
|
||||
if "python_permissions_image" not in self.values["images"]:
|
||||
self.values["images"]["python_permissions_image"] = {"repository": "python", "tag": "3.13.0-slim-bookworm"}
|
||||
|
||||
if "postgres_upgrade_image" not in self.values["images"]:
|
||||
self.values["images"]["postgres_upgrade_image"] = {
|
||||
"repository": "ixsystems/postgres-upgrade",
|
||||
"tag": "1.0.1",
|
||||
}
|
||||
|
||||
def container_names(self):
|
||||
return list(self._containers.keys())
|
||||
|
||||
def add_container(self, name: str, image: str):
|
||||
name = name.strip()
|
||||
if not name:
|
||||
raise RenderError("Container name cannot be empty")
|
||||
container = Container(self, name, image)
|
||||
if name in self._containers:
|
||||
raise RenderError(f"Container {name} already exists.")
|
||||
self._containers[name] = container
|
||||
return container
|
||||
|
||||
def render(self):
|
||||
if self.values != self._original_values:
|
||||
raise RenderError("Values have been modified since the renderer was created.")
|
||||
|
||||
if not self._containers:
|
||||
raise RenderError("No containers added.")
|
||||
|
||||
result: dict = {
|
||||
"x-notes": self.notes.render(),
|
||||
"x-portals": self.portals.render(),
|
||||
"services": {c._name: c.render() for c in self._containers.values()},
|
||||
}
|
||||
|
||||
# Make sure that after services are rendered
|
||||
# there are no labels that target a non-existent container
|
||||
# This is to prevent typos
|
||||
for label in self.values.get("labels", []):
|
||||
for c in label.get("containers", []):
|
||||
if c not in self.container_names():
|
||||
raise RenderError(f"Label [{label['key']}] references container [{c}] which does not exist")
|
||||
|
||||
if self.volumes.has_volumes():
|
||||
result["volumes"] = self.volumes.render()
|
||||
|
||||
if self.configs.has_configs():
|
||||
result["configs"] = self.configs.render()
|
||||
|
||||
# if self.networks:
|
||||
# result["networks"] = {...}
|
||||
|
||||
return result
|
||||
@@ -0,0 +1,115 @@
|
||||
import re
|
||||
from typing import Any, TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
|
||||
DEFAULT_CPUS = 2.0
|
||||
DEFAULT_MEMORY = 4096
|
||||
|
||||
|
||||
class Resources:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._limits: dict = {}
|
||||
self._reservations: dict = {}
|
||||
self._nvidia_ids: set[str] = set()
|
||||
self._auto_add_cpu_from_values()
|
||||
self._auto_add_memory_from_values()
|
||||
self._auto_add_gpus_from_values()
|
||||
|
||||
def _set_cpu(self, cpus: Any):
|
||||
c = str(cpus)
|
||||
if not re.match(r"^[1-9][0-9]*(\.[0-9]+)?$", c):
|
||||
raise RenderError(f"Expected cpus to be a number or a float (minimum 1.0), got [{cpus}]")
|
||||
self._limits.update({"cpus": c})
|
||||
|
||||
def _set_memory(self, memory: Any):
|
||||
m = str(memory)
|
||||
if not re.match(r"^[1-9][0-9]*$", m):
|
||||
raise RenderError(f"Expected memory to be a number, got [{memory}]")
|
||||
self._limits.update({"memory": f"{m}M"})
|
||||
|
||||
def _auto_add_cpu_from_values(self):
|
||||
resources = self._render_instance.values.get("resources", {})
|
||||
self._set_cpu(resources.get("limits", {}).get("cpus", DEFAULT_CPUS))
|
||||
|
||||
def _auto_add_memory_from_values(self):
|
||||
resources = self._render_instance.values.get("resources", {})
|
||||
self._set_memory(resources.get("limits", {}).get("memory", DEFAULT_MEMORY))
|
||||
|
||||
def _auto_add_gpus_from_values(self):
|
||||
resources = self._render_instance.values.get("resources", {})
|
||||
gpus = resources.get("gpus", {}).get("nvidia_gpu_selection", {})
|
||||
if not gpus:
|
||||
return
|
||||
|
||||
for pci, gpu in gpus.items():
|
||||
if gpu.get("use_gpu", False):
|
||||
if not gpu.get("uuid"):
|
||||
raise RenderError(f"Expected [uuid] to be set for GPU in slot [{pci}] in [nvidia_gpu_selection]")
|
||||
self._nvidia_ids.add(gpu["uuid"])
|
||||
|
||||
if self._nvidia_ids:
|
||||
if not self._reservations:
|
||||
self._reservations["devices"] = []
|
||||
self._reservations["devices"].append(
|
||||
{
|
||||
"capabilities": ["gpu"],
|
||||
"driver": "nvidia",
|
||||
"device_ids": sorted(self._nvidia_ids),
|
||||
}
|
||||
)
|
||||
|
||||
# This is only used by ix-app, where we allow
# disabling the cpus and memory limits. GPUs are only added
# if the user has requested them.
|
||||
def remove_cpus_and_memory(self):
|
||||
self._limits.pop("cpus", None)
|
||||
self._limits.pop("memory", None)
|
||||
|
||||
# Mainly used by dependencies, since there is no reason
# to pass devices to redis or postgres, for example
|
||||
def remove_devices(self):
|
||||
self._reservations.pop("devices", None)
|
||||
|
||||
def set_profile(self, profile: str):
|
||||
cpu, memory = profile_mapping(profile)
|
||||
self._set_cpu(cpu)
|
||||
self._set_memory(memory)
|
||||
|
||||
def has_resources(self):
|
||||
return len(self._limits) > 0 or len(self._reservations) > 0
|
||||
|
||||
def has_gpus(self):
|
||||
gpu_devices = [d for d in self._reservations.get("devices", []) if "gpu" in d["capabilities"]]
|
||||
return len(gpu_devices) > 0
|
||||
|
||||
def render(self):
|
||||
result = {}
|
||||
if self._limits:
|
||||
result["limits"] = self._limits
|
||||
if self._reservations:
|
||||
result["reservations"] = self._reservations
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def profile_mapping(profile: str):
|
||||
profiles = {
|
||||
"low": (1, 512),
|
||||
"medium": (2, 1024),
|
||||
}
|
||||
|
||||
if profile not in profiles:
|
||||
raise RenderError(
|
||||
f"Resource profile [{profile}] is not valid. Valid options are: [{', '.join(profiles.keys())}]"
|
||||
)
|
||||
|
||||
return profiles[profile]
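# Usage sketch (illustrative): profiles map to (cpus, memory-in-MB) tuples, so
# Resources.set_profile("low") limits a container to 1 CPU and 512M, matching
# the helper defaults asserted in the container tests later in this changeset:
#
#     profile_mapping("low")     # -> (1, 512)
#     profile_mapping("medium")  # -> (2, 1024)
#     profile_mapping("high")    # raises RenderError (not a defined profile)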
|
||||
@@ -0,0 +1,25 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .validations import valid_restart_policy_or_raise
|
||||
except ImportError:
|
||||
from validations import valid_restart_policy_or_raise
|
||||
|
||||
|
||||
class RestartPolicy:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._policy: str = "unless-stopped"
|
||||
self._maximum_retry_count: int = 0
|
||||
|
||||
def set_policy(self, policy: str, maximum_retry_count: int = 0):
|
||||
self._policy = valid_restart_policy_or_raise(policy, maximum_retry_count)
|
||||
self._maximum_retry_count = maximum_retry_count
|
||||
|
||||
def render(self):
|
||||
if self._policy == "on-failure" and self._maximum_retry_count > 0:
|
||||
return f"{self._policy}:{self._maximum_retry_count}"
|
||||
return self._policy
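# Usage sketch (illustrative; c1 is a container created as in the tests later
# in this changeset, e.g. render.add_container("test_container", "test_image")):
#
#     c1.restart.set_policy("on-failure", maximum_retry_count=1)
#     render.render()["services"]["test_container"]["restart"]
#     # -> "on-failure:1"
#
# The retry count is only appended for the "on-failure" policy; every other
# policy renders as the bare policy string.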
|
||||
@@ -0,0 +1,52 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_security_opt_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_security_opt_or_raise
|
||||
|
||||
|
||||
class SecurityOpt:
|
||||
def __init__(self, opt: str, value: str | bool | None = None, arg: str | None = None):
|
||||
self._opt: str = valid_security_opt_or_raise(opt)
|
||||
self._value = str(value).lower() if isinstance(value, bool) else value
|
||||
self._arg: str | None = arg
|
||||
|
||||
def render(self):
|
||||
result = self._opt
|
||||
if self._value is not None:
|
||||
result = f"{result}={self._value}"
|
||||
if self._arg is not None:
|
||||
result = f"{result}:{self._arg}"
|
||||
return result
|
||||
|
||||
|
||||
class SecurityOpts:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._opts: dict[str, SecurityOpt] = dict()
|
||||
self.add_opt("no-new-privileges", True)
|
||||
|
||||
def add_opt(self, key: str, value: str | bool | None, arg: str | None = None):
|
||||
if key in self._opts:
|
||||
raise RenderError(f"Security Option [{key}] already added")
|
||||
self._opts[key] = SecurityOpt(key, value, arg)
|
||||
|
||||
def remove_opt(self, key: str):
|
||||
if key not in self._opts:
|
||||
raise RenderError(f"Security Option [{key}] not found")
|
||||
del self._opts[key]
|
||||
|
||||
def has_opts(self):
|
||||
return len(self._opts) > 0
|
||||
|
||||
def render(self):
|
||||
result = []
|
||||
for opt in sorted(self._opts.values(), key=lambda o: o._opt):
|
||||
result.append(opt.render())
|
||||
return result
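# Usage sketch (illustrative): every SecurityOpts instance starts with
# no-new-privileges enabled, which is exactly the string the Notes security
# scan above looks for:
#
#     opts = SecurityOpts(render_instance)
#     opts.render()
#     # -> ["no-new-privileges=true"]
#
# Removing it with opts.remove_opt("no-new-privileges") makes the rendered
# notes include the "Security option [no-new-privileges] is not set" section.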
|
||||
@@ -0,0 +1,125 @@
|
||||
from typing import TYPE_CHECKING, TypedDict, Literal, NotRequired, Union
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from container import Container
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_fs_path_or_raise
|
||||
from .volume_mount import VolumeMount
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_fs_path_or_raise
|
||||
from volume_mount import VolumeMount
|
||||
|
||||
|
||||
class IxStorageTmpfsConfig(TypedDict):
|
||||
size: NotRequired[int]
|
||||
mode: NotRequired[str]
|
||||
uid: NotRequired[int]
|
||||
gid: NotRequired[int]
|
||||
|
||||
|
||||
class AclConfig(TypedDict, total=False):
|
||||
path: str
|
||||
|
||||
|
||||
class IxStorageHostPathConfig(TypedDict):
|
||||
path: NotRequired[str] # Either this or acl.path must be set
|
||||
acl_enable: NotRequired[bool]
|
||||
acl: NotRequired[AclConfig]
|
||||
create_host_path: NotRequired[bool]
|
||||
propagation: NotRequired[Literal["shared", "slave", "private", "rshared", "rslave", "rprivate"]]
|
||||
auto_permissions: NotRequired[bool] # Only when acl_enable is false
|
||||
|
||||
|
||||
class IxStorageIxVolumeConfig(TypedDict):
|
||||
dataset_name: str
|
||||
acl_enable: NotRequired[bool]
|
||||
acl_entries: NotRequired[AclConfig]
|
||||
create_host_path: NotRequired[bool]
|
||||
propagation: NotRequired[Literal["shared", "slave", "private", "rshared", "rslave", "rprivate"]]
|
||||
auto_permissions: NotRequired[bool] # Only when acl_enable is false
|
||||
|
||||
|
||||
class IxStorageVolumeConfig(TypedDict):
|
||||
volume_name: NotRequired[str]
|
||||
nocopy: NotRequired[bool]
|
||||
auto_permissions: NotRequired[bool]
|
||||
|
||||
|
||||
class IxStorageNfsConfig(TypedDict):
|
||||
server: str
|
||||
path: str
|
||||
options: NotRequired[list[str]]
|
||||
|
||||
|
||||
class IxStorageCifsConfig(TypedDict):
|
||||
server: str
|
||||
path: str
|
||||
username: str
|
||||
password: str
|
||||
domain: NotRequired[str]
|
||||
options: NotRequired[list[str]]
|
||||
|
||||
|
||||
IxStorageVolumeLikeConfigs = Union[IxStorageVolumeConfig, IxStorageNfsConfig, IxStorageCifsConfig, IxStorageTmpfsConfig]
|
||||
IxStorageBindLikeConfigs = Union[IxStorageHostPathConfig, IxStorageIxVolumeConfig]
|
||||
IxStorageLikeConfigs = Union[IxStorageBindLikeConfigs, IxStorageVolumeLikeConfigs]
|
||||
|
||||
|
||||
class IxStorage(TypedDict):
|
||||
type: Literal["ix_volume", "host_path", "tmpfs", "volume", "anonymous", "temporary"]
|
||||
read_only: NotRequired[bool]
|
||||
|
||||
ix_volume_config: NotRequired[IxStorageIxVolumeConfig]
|
||||
host_path_config: NotRequired[IxStorageHostPathConfig]
|
||||
tmpfs_config: NotRequired[IxStorageTmpfsConfig]
|
||||
volume_config: NotRequired[IxStorageVolumeConfig]
|
||||
nfs_config: NotRequired[IxStorageNfsConfig]
|
||||
cifs_config: NotRequired[IxStorageCifsConfig]
|
||||
|
||||
|
||||
class Storage:
|
||||
def __init__(self, render_instance: "Render", container_instance: "Container"):
|
||||
self._container_instance = container_instance
|
||||
self._render_instance = render_instance
|
||||
self._volume_mounts: set[VolumeMount] = set()
|
||||
|
||||
def add(self, mount_path: str, config: "IxStorage"):
|
||||
mount_path = valid_fs_path_or_raise(mount_path)
|
||||
if self.is_defined(mount_path):
|
||||
raise RenderError(f"Mount path [{mount_path}] already used for another volume mount")
|
||||
if self._container_instance._tmpfs.is_defined(mount_path):
|
||||
raise RenderError(f"Mount path [{mount_path}] already used for another volume mount")
|
||||
|
||||
volume_mount = VolumeMount(self._render_instance, mount_path, config)
|
||||
self._volume_mounts.add(volume_mount)
|
||||
|
||||
def is_defined(self, mount_path: str):
|
||||
return mount_path in [m.mount_path for m in self._volume_mounts]
|
||||
|
||||
def _add_docker_socket(self, read_only: bool = True, mount_path: str = ""):
|
||||
mount_path = valid_fs_path_or_raise(mount_path)
|
||||
cfg: "IxStorage" = {
|
||||
"type": "host_path",
|
||||
"read_only": read_only,
|
||||
"host_path_config": {"path": "/var/run/docker.sock", "create_host_path": False},
|
||||
}
|
||||
self.add(mount_path, cfg)
|
||||
|
||||
def _add_udev(self, read_only: bool = True, mount_path: str = ""):
|
||||
mount_path = valid_fs_path_or_raise(mount_path)
|
||||
cfg: "IxStorage" = {
|
||||
"type": "host_path",
|
||||
"read_only": read_only,
|
||||
"host_path_config": {"path": "/run/udev", "create_host_path": False},
|
||||
}
|
||||
self.add(mount_path, cfg)
|
||||
|
||||
def has_mounts(self) -> bool:
|
||||
return bool(self._volume_mounts)
|
||||
|
||||
def render(self):
|
||||
return [vm.render() for vm in sorted(self._volume_mounts, key=lambda vm: vm.mount_path)]
|
||||
@@ -0,0 +1,38 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from container import Container
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_sysctl_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_sysctl_or_raise
|
||||
|
||||
|
||||
class Sysctls:
|
||||
def __init__(self, render_instance: "Render", container_instance: "Container"):
|
||||
self._render_instance = render_instance
|
||||
self._container_instance = container_instance
|
||||
self._sysctls: dict = {}
|
||||
|
||||
def add(self, key: str, value):
|
||||
key = key.strip()
|
||||
if not key:
|
||||
raise RenderError("Sysctls key cannot be empty")
|
||||
if value is None:
|
||||
raise RenderError(f"Sysctl [{key}] requires a value")
|
||||
if key in self._sysctls:
|
||||
raise RenderError(f"Sysctl [{key}] already added")
|
||||
self._sysctls[key] = str(value)
|
||||
|
||||
def has_sysctls(self):
|
||||
return bool(self._sysctls)
|
||||
|
||||
def render(self):
|
||||
if not self.has_sysctls():
|
||||
return {}
|
||||
host_net = self._container_instance._network_mode == "host"
|
||||
return {valid_sysctl_or_raise(k, host_net): v for k, v in self._sysctls.items()}
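# Usage sketch (illustrative; how a Container exposes this class is assumed,
# the class itself only needs the render instance and its owning container):
#
#     sysctls = Sysctls(render_instance, container_instance)
#     sysctls.add("net.core.somaxconn", 1024)
#     sysctls.render()
#     # -> {"net.core.somaxconn": "1024"}  # assuming the key passes validation
#
# Empty keys, missing values, and duplicate keys all raise RenderError, and
# each key is validated against whether the container uses host networking.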
|
||||
@@ -0,0 +1,57 @@
|
||||
import pytest
|
||||
|
||||
from render import Render
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_values():
|
||||
return {
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def test_build_image_with_from(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.build_image(["FROM test_image"])
|
||||
|
||||
|
||||
def test_build_image_with_from_with_whitespace(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.build_image([" FROM test_image"])
|
||||
|
||||
|
||||
def test_build_image(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.build_image(
|
||||
[
|
||||
"RUN echo hello",
|
||||
None,
|
||||
"",
|
||||
"RUN echo world",
|
||||
]
|
||||
)
|
||||
output = render.render()
|
||||
assert (
|
||||
output["services"]["test_container"]["image"]
|
||||
== "ix-nginx:latest_4a127145ea4c25511707e57005dd0ed457fe2f4932082c8f9faa339a450b6a99"
|
||||
)
|
||||
assert output["services"]["test_container"]["build"] == {
|
||||
"tags": ["ix-nginx:latest_4a127145ea4c25511707e57005dd0ed457fe2f4932082c8f9faa339a450b6a99"],
|
||||
"dockerfile_inline": """FROM nginx:latest
|
||||
RUN echo hello
|
||||
RUN echo world
|
||||
""",
|
||||
}
|
||||
@@ -0,0 +1,63 @@
|
||||
import pytest
|
||||
|
||||
from render import Render
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_values():
|
||||
return {
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def test_add_duplicate_config_with_different_data(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.configs.add("test_config", "test_data", "/some/path")
|
||||
with pytest.raises(Exception):
|
||||
c1.configs.add("test_config", "test_data2", "/some/path")
|
||||
|
||||
|
||||
def test_add_config_with_empty_target(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.configs.add("test_config", "test_data", "")
|
||||
|
||||
|
||||
def test_add_duplicate_target(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.configs.add("test_config", "test_data", "/some/path")
|
||||
with pytest.raises(Exception):
|
||||
c1.configs.add("test_config2", "test_data2", "/some/path")
|
||||
|
||||
|
||||
def test_add_config(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.configs.add("test_config", "$test_data", "/some/path")
|
||||
output = render.render()
|
||||
assert output["configs"]["test_config"]["content"] == "$$test_data"
|
||||
assert output["services"]["test_container"]["configs"] == [{"source": "test_config", "target": "/some/path"}]
|
||||
|
||||
|
||||
def test_add_config_with_mode(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.configs.add("test_config", "test_data", "/some/path", "0777")
|
||||
output = render.render()
|
||||
assert output["configs"]["test_config"]["content"] == "test_data"
|
||||
assert output["services"]["test_container"]["configs"] == [
|
||||
{"source": "test_config", "target": "/some/path", "mode": 511}
|
||||
]
|
||||
@@ -0,0 +1,519 @@
|
||||
import pytest
|
||||
|
||||
|
||||
from render import Render
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_values():
|
||||
return {
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def test_empty_container_name(mock_values):
|
||||
render = Render(mock_values)
|
||||
with pytest.raises(Exception):
|
||||
render.add_container(" ", "test_image")
|
||||
|
||||
|
||||
def test_resolve_image(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["image"] == "nginx:latest"
|
||||
|
||||
|
||||
def test_missing_repo(mock_values):
|
||||
mock_values["images"]["test_image"]["repository"] = ""
|
||||
render = Render(mock_values)
|
||||
with pytest.raises(Exception):
|
||||
render.add_container("test_container", "test_image")
|
||||
|
||||
|
||||
def test_missing_tag(mock_values):
|
||||
mock_values["images"]["test_image"]["tag"] = ""
|
||||
render = Render(mock_values)
|
||||
with pytest.raises(Exception):
|
||||
render.add_container("test_container", "test_image")
|
||||
|
||||
|
||||
def test_non_existing_image(mock_values):
|
||||
render = Render(mock_values)
|
||||
with pytest.raises(Exception):
|
||||
render.add_container("test_container", "non_existing_image")
|
||||
|
||||
|
||||
def test_pull_policy(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.set_pull_policy("always")
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["pull_policy"] == "always"
|
||||
|
||||
|
||||
def test_invalid_pull_policy(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
with pytest.raises(Exception):
|
||||
c1.set_pull_policy("invalid_policy")
|
||||
|
||||
|
||||
def test_clear_caps(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.add_caps(["NET_ADMIN"])
|
||||
c1.clear_caps()
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert "cap_drop" not in output["services"]["test_container"]
|
||||
assert "cap_add" not in output["services"]["test_container"]
|
||||
|
||||
|
||||
def test_privileged(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.set_privileged(True)
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["privileged"] is True
|
||||
|
||||
|
||||
def test_tty(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.set_tty(True)
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["tty"] is True
|
||||
|
||||
|
||||
def test_init(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.set_init(True)
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["init"] is True
|
||||
|
||||
|
||||
def test_read_only(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.set_read_only(True)
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["read_only"] is True
|
||||
|
||||
|
||||
def test_stdin(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.set_stdin(True)
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["stdin_open"] is True
|
||||
|
||||
|
||||
def test_hostname(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.set_hostname("test_hostname")
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["hostname"] == "test_hostname"
|
||||
|
||||
|
||||
def test_grace_period(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.set_grace_period(10)
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["stop_grace_period"] == "10s"
|
||||
|
||||
|
||||
def test_user(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.set_user(1000, 1000)
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["user"] == "1000:1000"
|
||||
|
||||
|
||||
def test_invalid_user(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.set_user(-100, 1000)
|
||||
|
||||
|
||||
def test_add_group(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.add_group(1000)
|
||||
c1.add_group("video")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["group_add"] == [568, 1000, "video"]
|
||||
|
||||
|
||||
def test_add_duplicate_group(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.add_group(1000)
|
||||
with pytest.raises(Exception):
|
||||
c1.add_group(1000)
|
||||
|
||||
|
||||
def test_add_group_as_string(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.add_group("1000")
|
||||
|
||||
|
||||
def test_add_docker_socket(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.add_docker_socket()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["group_add"] == [568, 999]
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"type": "bind",
|
||||
"source": "/var/run/docker.sock",
|
||||
"target": "/var/run/docker.sock",
|
||||
"read_only": True,
|
||||
"bind": {
|
||||
"propagation": "rprivate",
|
||||
"create_host_path": False,
|
||||
},
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def test_snd_device(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.add_snd_device()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["devices"] == ["/dev/snd:/dev/snd"]
|
||||
assert output["services"]["test_container"]["group_add"] == [29, 568]
|
||||
|
||||
|
||||
def test_shm_size(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.set_shm_size_mb(10)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["shm_size"] == "10M"
|
||||
|
||||
|
||||
def test_valid_caps(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.add_caps(["ALL", "NET_ADMIN"])
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["cap_add"] == ["ALL", "NET_ADMIN"]
|
||||
assert output["services"]["test_container"]["cap_drop"] == ["ALL"]
|
||||
|
||||
|
||||
def test_add_duplicate_caps(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.add_caps(["ALL", "NET_ADMIN", "NET_ADMIN"])
|
||||
|
||||
|
||||
def test_invalid_caps(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.add_caps(["invalid_cap"])
|
||||
|
||||
|
||||
def test_network_mode(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.set_network_mode("host")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["network_mode"] == "host"
|
||||
|
||||
|
||||
def test_auto_network_mode_with_host_network(mock_values):
|
||||
mock_values["network"] = {"host_network": True}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["network_mode"] == "host"
|
||||
|
||||
|
||||
def test_network_mode_with_container(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.set_network_mode("service:test_container")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["network_mode"] == "service:test_container"
|
||||
|
||||
|
||||
def test_network_mode_with_container_missing(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.set_network_mode("service:missing_container")
|
||||
|
||||
|
||||
def test_invalid_network_mode(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.set_network_mode("invalid_mode")
|
||||
|
||||
|
||||
def test_entrypoint(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.set_entrypoint(["/bin/bash", "-c", "echo hello $MY_ENV"])
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["entrypoint"] == ["/bin/bash", "-c", "echo hello $$MY_ENV"]
|
||||
|
||||
|
||||
def test_command(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.set_command(["echo", "hello $MY_ENV"])
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["command"] == ["echo", "hello $$MY_ENV"]
|
||||
|
||||
|
||||
def test_add_ports(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.add_port({"port_number": 8081, "container_port": 8080, "bind_mode": "published"})
|
||||
c1.add_port({"port_number": 8082, "container_port": 8080, "bind_mode": "published", "protocol": "udp"})
|
||||
c1.add_port({"port_number": 8083, "container_port": 8080, "bind_mode": "exposed"})
|
||||
c1.add_port({"port_number": 8084, "container_port": 8080, "bind_mode": ""})
|
||||
c1.add_port(
|
||||
{"port_number": 9091, "container_port": 9091, "bind_mode": "published"},
|
||||
{"container_port": 9092, "protocol": "udp"},
|
||||
)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["ports"] == [
|
||||
{"published": 8081, "target": 8080, "protocol": "tcp", "mode": "ingress"},
|
||||
{"published": 8082, "target": 8080, "protocol": "udp", "mode": "ingress"},
|
||||
{"published": 9091, "target": 9092, "protocol": "udp", "mode": "ingress"},
|
||||
]
|
||||
assert output["services"]["test_container"]["expose"] == ["8080/tcp"]
|
||||
|
||||
|
||||
def test_add_ports_with_invalid_host_ips(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.add_port({"port_number": 8081, "container_port": 8080, "bind_mode": "published", "host_ips": "invalid"})
|
||||
|
||||
|
||||
def test_add_ports_with_empty_host_ips(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.add_port({"port_number": 8081, "container_port": 8080, "bind_mode": "published", "host_ips": []})
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["ports"] == [
|
||||
{"published": 8081, "target": 8080, "protocol": "tcp", "mode": "ingress"}
|
||||
]
|
||||
|
||||
|
||||
def test_set_ipc_mode(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.set_ipc_mode("host")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["ipc"] == "host"
|
||||
|
||||
|
||||
def test_set_ipc_empty_mode(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.set_ipc_mode("")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["ipc"] == ""
|
||||
|
||||
|
||||
def test_set_ipc_mode_with_invalid_ipc_mode(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.set_ipc_mode("invalid")
|
||||
|
||||
|
||||
def test_set_ipc_mode_with_container_ipc_mode(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c2 = render.add_container("test_container2", "test_image")
|
||||
c2.healthcheck.disable()
|
||||
c1.set_ipc_mode("container:test_container2")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["ipc"] == "container:test_container2"
|
||||
|
||||
|
||||
def test_set_ipc_mode_with_container_ipc_mode_and_invalid_container(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.set_ipc_mode("container:invalid")
|
||||
|
||||
|
||||
def test_set_pid_mode(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.set_pid_mode("host")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["pid"] == "host"
|
||||
|
||||
|
||||
def test_set_pid_empty_mode(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.set_pid_mode("")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["pid"] == ""
|
||||
|
||||
|
||||
def test_set_pid_mode_with_invalid_pid_mode(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.set_pid_mode("invalid")
|
||||
|
||||
|
||||
def test_set_pid_mode_with_container_pid_mode(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c2 = render.add_container("test_container2", "test_image")
|
||||
c2.healthcheck.disable()
|
||||
c1.set_pid_mode("container:test_container2")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["pid"] == "container:test_container2"
|
||||
|
||||
|
||||
def test_set_pid_mode_with_container_pid_mode_and_invalid_container(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.set_pid_mode("container:invalid")
|
||||
|
||||
|
||||
def test_set_cgroup(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.set_cgroup("host")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["cgroup"] == "host"
|
||||
|
||||
|
||||
def test_set_cgroup_invalid(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.set_cgroup("invalid")
|
||||
|
||||
|
||||
def test_setup_as_helper(mock_values):
|
||||
mock_values["resources"] = {"gpus": {"use_all_gpus": True}}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.setup_as_helper()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["restart"] == "on-failure:1"
|
||||
assert output["services"]["test_container"]["network_mode"] == "none"
|
||||
assert output["services"]["test_container"]["healthcheck"]["disable"] is True
|
||||
assert output["services"]["test_container"]["deploy"]["resources"]["limits"]["cpus"] == "1"
|
||||
assert output["services"]["test_container"]["deploy"]["resources"]["limits"]["memory"] == "512M"
|
||||
assert "devices" not in output["services"]["test_container"]
|
||||
|
||||
|
||||
def test_setup_as_helper_med_profile(mock_values):
|
||||
mock_values["resources"] = {"gpus": {"use_all_gpus": True}}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.setup_as_helper(profile="medium")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["restart"] == "on-failure:1"
|
||||
assert output["services"]["test_container"]["network_mode"] == "none"
|
||||
assert output["services"]["test_container"]["healthcheck"]["disable"] is True
|
||||
assert output["services"]["test_container"]["deploy"]["resources"]["limits"]["cpus"] == "2"
|
||||
assert output["services"]["test_container"]["deploy"]["resources"]["limits"]["memory"] == "1024M"
|
||||
assert "devices" not in output["services"]["test_container"]
|
||||
|
||||
|
||||
def test_setup_as_helper_no_profile(mock_values):
|
||||
mock_values["resources"] = {"gpus": {"use_all_gpus": True}}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.setup_as_helper(profile="")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["restart"] == "on-failure:1"
|
||||
assert output["services"]["test_container"]["network_mode"] == "none"
|
||||
assert output["services"]["test_container"]["healthcheck"]["disable"] is True
|
||||
assert output["services"]["test_container"]["deploy"]["resources"]["limits"]["cpus"] == "2.0"
|
||||
assert output["services"]["test_container"]["deploy"]["resources"]["limits"]["memory"] == "4096M"
|
||||
assert "devices" not in output["services"]["test_container"]
|
||||
|
||||
|
||||
def test_setup_as_helper_with_net(mock_values):
|
||||
mock_values["resources"] = {"gpus": {"use_all_gpus": True}}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.setup_as_helper(disable_network=False)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["restart"] == "on-failure:1"
|
||||
assert output["services"]["test_container"]["healthcheck"]["disable"] is True
|
||||
assert output["services"]["test_container"]["deploy"]["resources"]["limits"]["cpus"] == "1"
|
||||
assert output["services"]["test_container"]["deploy"]["resources"]["limits"]["memory"] == "512M"
|
||||
assert "devices" not in output["services"]["test_container"]
|
||||
|
||||
|
||||
def test_container_name(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
assert c1.name() == "test_container"
|
||||
@@ -0,0 +1,54 @@
import pytest


from render import Render


@pytest.fixture
def mock_values():
    return {
        "images": {
            "test_image": {
                "repository": "nginx",
                "tag": "latest",
            }
        },
    }


def test_add_dependency(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c2 = render.add_container("test_container2", "test_image")
    c1.healthcheck.disable()
    c2.healthcheck.disable()
    c1.depends.add_dependency("test_container2", "service_started")
    output = render.render()
    assert output["services"]["test_container"]["depends_on"]["test_container2"] == {"condition": "service_started"}


def test_add_dependency_invalid_condition(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    render.add_container("test_container2", "test_image")
    with pytest.raises(Exception):
        c1.depends.add_dependency("test_container2", "invalid_condition")


def test_add_dependency_missing_container(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        c1.depends.add_dependency("test_container2", "service_started")


def test_add_dependency_duplicate(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    render.add_container("test_container2", "test_image")
    c1.depends.add_dependency("test_container2", "service_started")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        c1.depends.add_dependency("test_container2", "service_started")
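As a quick reference for the dependency API above, a minimal usage sketch built only from calls these tests already exercise (the container names "app" and "db" are illustrative):

render = Render(mock_values)
db = render.add_container("db", "test_image")
db.healthcheck.disable()
app = render.add_container("app", "test_image")
app.healthcheck.disable()
app.depends.add_dependency("db", "service_started")
# services.app.depends_on renders to {"db": {"condition": "service_started"}}
assert render.render()["services"]["app"]["depends_on"] == {"db": {"condition": "service_started"}}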
File diff suppressed because it is too large
@@ -0,0 +1,150 @@
import pytest


from render import Render


@pytest.fixture
def mock_values():
    return {
        "images": {
            "test_image": {
                "repository": "nginx",
                "tag": "latest",
            }
        },
    }


def test_add_device(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.devices.add_device("/h/dev/sda", "/c/dev/sda")
    c1.devices.add_device("/h/dev/sdb", "/c/dev/sdb", "rwm")
    output = render.render()
    assert output["services"]["test_container"]["devices"] == ["/h/dev/sda:/c/dev/sda", "/h/dev/sdb:/c/dev/sdb:rwm"]


def test_devices_without_host(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        c1.devices.add_device("", "/c/dev/sda")


def test_devices_without_container(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        c1.devices.add_device("/h/dev/sda", "")


def test_add_duplicate_device(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.devices.add_device("/h/dev/sda", "/c/dev/sda")
    with pytest.raises(Exception):
        c1.devices.add_device("/h/dev/sda", "/c/dev/sda")


def test_add_device_with_invalid_container_path(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        c1.devices.add_device("/h/dev/sda", "c/dev/sda")


def test_add_device_with_invalid_host_path(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        c1.devices.add_device("h/dev/sda", "/c/dev/sda")


def test_add_disallowed_device(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        c1.devices.add_device("/dev/dri", "/c/dev/sda")


def test_add_device_with_invalid_cgroup_perm(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        c1.devices.add_device("/h/dev/sda", "/c/dev/sda", "invalid")


def test_automatically_add_gpu_devices(mock_values):
    mock_values["resources"] = {"gpus": {"use_all_gpus": True}}
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    output = render.render()
    assert output["services"]["test_container"]["devices"] == ["/dev/dri:/dev/dri"]
    assert output["services"]["test_container"]["group_add"] == [44, 107, 568]


def test_automatically_add_gpu_devices_and_kfd(mock_values):
    mock_values["resources"] = {"gpus": {"use_all_gpus": True, "kfd_device_exists": True}}
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    output = render.render()
    assert output["services"]["test_container"]["devices"] == ["/dev/dri:/dev/dri", "/dev/kfd:/dev/kfd"]
    assert output["services"]["test_container"]["group_add"] == [44, 107, 568]


def test_remove_gpu_devices(mock_values):
    mock_values["resources"] = {"gpus": {"use_all_gpus": True}}
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.devices.remove_devices()
    output = render.render()
    assert "devices" not in output["services"]["test_container"]
    assert output["services"]["test_container"]["group_add"] == [568]


def test_add_usb_bus(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.devices.add_usb_bus()
    output = render.render()
    assert output["services"]["test_container"]["devices"] == ["/dev/bus/usb:/dev/bus/usb"]


def test_add_usb_bus_disallowed(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        c1.devices.add_device("/dev/bus/usb", "/dev/bus/usb")


def test_add_snd_device(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.add_snd_device()
    output = render.render()
    assert output["services"]["test_container"]["devices"] == ["/dev/snd:/dev/snd"]
    assert output["services"]["test_container"]["group_add"] == [29, 568]


def test_add_tun_device(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.add_tun_device()
    output = render.render()
    assert output["services"]["test_container"]["devices"] == ["/dev/net/tun:/dev/net/tun"]
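The GPU tests above assert fixed group_add IDs; the mapping below is an assumption about the TrueNAS SCALE host groups those IDs usually correspond to, not something stated in this diff:

# Assumed host-group meaning of the auto-added IDs (verify on the target system).
ASSUMED_GPU_GROUP_IDS = {44: "video", 107: "render", 568: "apps"}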
@@ -0,0 +1,79 @@
import pytest


from render import Render


@pytest.fixture
def mock_values():
    return {
        "images": {
            "test_image": {
                "repository": "nginx",
                "tag": "latest",
            }
        },
    }


def test_device_cgroup_rule(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.add_device_cgroup_rule("c 13:* rwm")
    c1.add_device_cgroup_rule("b 10:20 rwm")
    output = render.render()
    assert output["services"]["test_container"]["device_cgroup_rules"] == [
        "b 10:20 rwm",
        "c 13:* rwm",
    ]


def test_device_cgroup_rule_duplicate(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.add_device_cgroup_rule("c 13:* rwm")
    with pytest.raises(Exception):
        c1.add_device_cgroup_rule("c 13:* rwm")


def test_device_cgroup_rule_duplicate_group(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.add_device_cgroup_rule("c 13:* rwm")
    with pytest.raises(Exception):
        c1.add_device_cgroup_rule("c 13:* rm")


def test_device_cgroup_rule_invalid_device(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        c1.add_device_cgroup_rule("d 10:20 rwm")


def test_device_cgroup_rule_invalid_perm(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        c1.add_device_cgroup_rule("a 10:20 rwd")


def test_device_cgroup_rule_invalid_format(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        c1.add_device_cgroup_rule("a 10 20 rwd")


def test_device_cgroup_rule_invalid_format_missing_major(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        c1.add_device_cgroup_rule("a 10 rwd")
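The accepted and rejected rules above imply a "<type> <major>:<minor> <perms>" shape; the regex below is inferred from those cases only and may be stricter or looser than the library's real validator:

import re

# type in {a, b, c}; major/minor are integers or "*"; perms drawn from "rwm".
DEVICE_CGROUP_RULE_RE = re.compile(r"^[abc] (\*|\d+):(\*|\d+) [rwm]{1,3}$")

assert DEVICE_CGROUP_RULE_RE.match("c 13:* rwm")
assert not DEVICE_CGROUP_RULE_RE.match("d 10:20 rwm")
assert not DEVICE_CGROUP_RULE_RE.match("a 10 rwd")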
@@ -0,0 +1,64 @@
import pytest


from render import Render


@pytest.fixture
def mock_values():
    return {
        "images": {
            "test_image": {
                "repository": "nginx",
                "tag": "latest",
            }
        },
    }


def test_auto_add_dns_opts(mock_values):
    mock_values["network"] = {"dns_opts": ["attempts:3", "opt1", "opt2"]}
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    output = render.render()
    assert output["services"]["test_container"]["dns_opt"] == ["attempts:3", "opt1", "opt2"]


def test_auto_add_dns_searches(mock_values):
    mock_values["network"] = {"dns_searches": ["search1", "search2"]}
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    output = render.render()
    assert output["services"]["test_container"]["dns_search"] == ["search1", "search2"]


def test_auto_add_dns_nameservers(mock_values):
    mock_values["network"] = {"dns_nameservers": ["nameserver1", "nameserver2"]}
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    output = render.render()
    assert output["services"]["test_container"]["dns"] == ["nameserver1", "nameserver2"]


def test_add_duplicate_dns_nameservers(mock_values):
    mock_values["network"] = {"dns_nameservers": ["nameserver1", "nameserver1"]}
    render = Render(mock_values)
    with pytest.raises(Exception):
        render.add_container("test_container", "test_image")


def test_add_duplicate_dns_searches(mock_values):
    mock_values["network"] = {"dns_searches": ["search1", "search1"]}
    render = Render(mock_values)
    with pytest.raises(Exception):
        render.add_container("test_container", "test_image")


def test_add_duplicate_dns_opts(mock_values):
    mock_values["network"] = {"dns_opts": ["attempts:3", "attempts:5"]}
    render = Render(mock_values)
    with pytest.raises(Exception):
        render.add_container("test_container", "test_image")
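The three positive DNS tests pin a slightly asymmetric key mapping between the values side and the rendered compose service:

# Values key -> compose key, exactly as asserted above.
DNS_KEY_MAP = {
    "dns_opts": "dns_opt",
    "dns_searches": "dns_search",
    "dns_nameservers": "dns",
}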
@@ -0,0 +1,219 @@
|
||||
import pytest
|
||||
|
||||
|
||||
from render import Render
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_values():
|
||||
return {
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def test_auto_add_vars(mock_values):
|
||||
mock_values["TZ"] = "Etc/UTC"
|
||||
mock_values["run_as"] = {"user": "1000", "group": "1000"}
|
||||
mock_values["resources"] = {
|
||||
"gpus": {
|
||||
"nvidia_gpu_selection": {
|
||||
"pci_slot_0": {"uuid": "uuid_0", "use_gpu": True},
|
||||
"pci_slot_1": {"uuid": "uuid_1", "use_gpu": True},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
envs = output["services"]["test_container"]["environment"]
|
||||
assert len(envs) == 11
|
||||
assert envs["TZ"] == "Etc/UTC"
|
||||
assert envs["PUID"] == "1000"
|
||||
assert envs["UID"] == "1000"
|
||||
assert envs["USER_ID"] == "1000"
|
||||
assert envs["PGID"] == "1000"
|
||||
assert envs["GID"] == "1000"
|
||||
assert envs["GROUP_ID"] == "1000"
|
||||
assert envs["UMASK"] == "002"
|
||||
assert envs["UMASK_SET"] == "002"
|
||||
assert envs["NVIDIA_DRIVER_CAPABILITIES"] == "all"
|
||||
assert envs["NVIDIA_VISIBLE_DEVICES"] == "uuid_0,uuid_1"
|
||||
|
||||
|
||||
def test_skip_generic_variables(mock_values):
|
||||
mock_values["skip_generic_variables"] = True
|
||||
mock_values["run_as"] = {"user": "1000", "group": "1000"}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
envs = output["services"]["test_container"]["environment"]
|
||||
|
||||
assert len(envs) == 1
|
||||
assert envs == {
|
||||
"NVIDIA_VISIBLE_DEVICES": "void",
|
||||
}
|
||||
|
||||
|
||||
def test_remove_auto_env(mock_values):
|
||||
mock_values["run_as"] = {"user": "1000", "group": "1000"}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.environment.remove_auto_env("UID")
|
||||
|
||||
output = render.render()
|
||||
envs = output["services"]["test_container"]["environment"]
|
||||
assert "UID" not in envs
|
||||
|
||||
|
||||
def test_remove_env_not_defined(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.environment.remove_auto_env("NOT_DEFINED")
|
||||
|
||||
|
||||
def test_add_from_all_sources(mock_values):
|
||||
mock_values["TZ"] = "Etc/UTC"
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.environment.add_env("APP_ENV", "test_value")
|
||||
c1.environment.add_user_envs(
|
||||
[
|
||||
{"name": "USER_ENV", "value": "test_value2"},
|
||||
]
|
||||
)
|
||||
output = render.render()
|
||||
envs = output["services"]["test_container"]["environment"]
|
||||
assert envs["APP_ENV"] == "test_value"
|
||||
assert envs["USER_ENV"] == "test_value2"
|
||||
assert envs["TZ"] == "Etc/UTC"
|
||||
|
||||
|
||||
def test_user_add_vars(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.environment.add_user_envs(
|
||||
[
|
||||
{"name": "MY_ENV", "value": "test_value"},
|
||||
{"name": "MY_ENV2", "value": "test_value2"},
|
||||
]
|
||||
)
|
||||
output = render.render()
|
||||
envs = output["services"]["test_container"]["environment"]
|
||||
assert envs["MY_ENV"] == "test_value"
|
||||
assert envs["MY_ENV2"] == "test_value2"
|
||||
|
||||
|
||||
def test_user_add_duplicate_vars(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.environment.add_user_envs(
|
||||
[
|
||||
{"name": "MY_ENV", "value": "test_value"},
|
||||
{"name": "MY_ENV", "value": "test_value2"},
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def test_user_env_without_name(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.environment.add_user_envs(
|
||||
[
|
||||
{"name": "", "value": "test_value"},
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def test_user_env_try_to_overwrite_auto_vars(mock_values):
|
||||
mock_values["TZ"] = "Etc/UTC"
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.environment.add_user_envs(
|
||||
[
|
||||
{"name": "TZ", "value": "test_value"},
|
||||
]
|
||||
)
|
||||
with pytest.raises(Exception):
|
||||
render.render()
|
||||
|
||||
|
||||
def test_user_env_try_to_overwrite_app_dev_vars(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.environment.add_user_envs(
|
||||
[
|
||||
{"name": "PORT", "value": "test_value"},
|
||||
]
|
||||
)
|
||||
c1.environment.add_env("PORT", "test_value2")
|
||||
with pytest.raises(Exception):
|
||||
render.render()
|
||||
|
||||
|
||||
def test_app_dev_vars_try_to_overwrite_auto_vars(mock_values):
|
||||
mock_values["TZ"] = "Etc/UTC"
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.environment.add_env("TZ", "test_value")
|
||||
with pytest.raises(Exception):
|
||||
render.render()
|
||||
|
||||
|
||||
def test_app_dev_no_name(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.environment.add_env("", "test_value")
|
||||
|
||||
|
||||
def test_app_dev_duplicate_vars(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.environment.add_env("PORT", "test_value")
|
||||
with pytest.raises(Exception):
|
||||
c1.environment.add_env("PORT", "test_value2")
|
||||
|
||||
|
||||
def test_format_vars(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.environment.add_env("APP_ENV", "test_$value")
|
||||
c1.environment.add_env("APP_ENV_BOOL", True)
|
||||
c1.environment.add_env("APP_ENV_INT", 10)
|
||||
c1.environment.add_env("APP_ENV_FLOAT", 10.5)
|
||||
c1.environment.add_user_envs(
|
||||
[
|
||||
{"name": "USER_ENV", "value": "test_$value2"},
|
||||
]
|
||||
)
|
||||
|
||||
output = render.render()
|
||||
envs = output["services"]["test_container"]["environment"]
|
||||
assert envs["APP_ENV"] == "test_$$value"
|
||||
assert envs["USER_ENV"] == "test_$$value2"
|
||||
assert envs["APP_ENV_BOOL"] == "true"
|
||||
assert envs["APP_ENV_INT"] == "10"
|
||||
assert envs["APP_ENV_FLOAT"] == "10.5"
|
||||
@@ -0,0 +1,46 @@
import pytest


from render import Render


@pytest.fixture
def mock_values():
    return {
        "images": {
            "test_image": {
                "repository": "nginx",
                "tag": "latest",
            }
        },
    }


def test_add_expose_ports(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.expose.add_port(8081)
    c1.expose.add_port(8081, "udp")
    c1.expose.add_port(8082, "udp")
    output = render.render()
    assert output["services"]["test_container"]["expose"] == ["8081/tcp", "8081/udp", "8082/udp"]


def test_add_duplicate_expose_ports(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.expose.add_port(8081)
    with pytest.raises(Exception):
        c1.expose.add_port(8081)


def test_add_expose_ports_with_host_network(mock_values):
    mock_values["network"] = {"host_network": True}
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.expose.add_port(8081)
    output = render.render()
    assert "expose" not in output["services"]["test_container"]
@@ -0,0 +1,57 @@
import pytest


from render import Render


@pytest.fixture
def mock_values():
    return {
        "images": {
            "test_image": {
                "repository": "nginx",
                "tag": "latest",
            }
        },
    }


def test_add_extra_host(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.add_extra_host("test_host", "127.0.0.1")
    c1.add_extra_host("test_host2", "127.0.0.2")
    c1.add_extra_host("host.docker.internal", "host-gateway")
    output = render.render()
    assert output["services"]["test_container"]["extra_hosts"] == {
        "host.docker.internal": "host-gateway",
        "test_host": "127.0.0.1",
        "test_host2": "127.0.0.2",
    }


def test_add_duplicate_extra_host(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.add_extra_host("test_host", "127.0.0.1")
    with pytest.raises(Exception):
        c1.add_extra_host("test_host", "127.0.0.2")


def test_add_extra_host_with_ipv6(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.add_extra_host("test_host", "::1")
    output = render.render()
    assert output["services"]["test_container"]["extra_hosts"] == {"test_host": "::1"}


def test_add_extra_host_with_invalid_ip(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        c1.add_extra_host("test_host", "invalid_ip")
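The extra-host tests accept IPv4/IPv6 literals plus the Docker special value host-gateway and reject arbitrary strings; the helper below is a plausible check with the same behavior, offered as an assumption rather than the library's actual code:

import ipaddress


def is_valid_extra_host_target(value: str) -> bool:
    # "host-gateway" is the special value exercised in the tests above.
    if value == "host-gateway":
        return True
    try:
        ipaddress.ip_address(value)
        return True
    except ValueError:
        return False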
@@ -0,0 +1,13 @@
from formatter import escape_dollar


def test_escape_dollar():
    cases = [
        {"input": "test", "expected": "test"},
        {"input": "$test", "expected": "$$test"},
        {"input": "$$test", "expected": "$$$$test"},
        {"input": "$$$test", "expected": "$$$$$$test"},
        {"input": "$test$", "expected": "$$test$$"},
    ]
    for case in cases:
        assert escape_dollar(case["input"]) == case["expected"]
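Every case in the table above is satisfied by a single replace; the real formatter.escape_dollar may differ in detail, but this is the behavior the test pins down:

def escape_dollar(text: str) -> str:
    # Double each "$" so docker compose does not treat it as variable interpolation.
    return text.replace("$", "$$")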
@@ -0,0 +1,151 @@
|
||||
import re
|
||||
import pytest
|
||||
|
||||
|
||||
from render import Render
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_values():
|
||||
return {
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def test_funcs(mock_values):
|
||||
mock_values["ix_volumes"] = {"test": "/mnt/test123"}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
|
||||
tests = [
|
||||
{"func": "auto_cast", "values": ["1"], "expected": 1},
|
||||
{"func": "auto_cast", "values": ["TrUe"], "expected": True},
|
||||
{"func": "auto_cast", "values": ["FaLsE"], "expected": False},
|
||||
{"func": "auto_cast", "values": ["0.2"], "expected": 0.2},
|
||||
{"func": "auto_cast", "values": [True], "expected": True},
|
||||
{"func": "basic_auth_header", "values": ["my_user", "my_pass"], "expected": "Basic bXlfdXNlcjpteV9wYXNz"},
|
||||
{"func": "basic_auth", "values": ["my_user", "my_pass"], "expected": "bXlfdXNlcjpteV9wYXNz"},
|
||||
{
|
||||
"func": "bcrypt_hash",
|
||||
"values": ["my_pass"],
|
||||
"expect_regex": r"^\$2b\$12\$[a-zA-Z0-9-_\.\/]+$",
|
||||
},
|
||||
{"func": "camel_case", "values": ["my_user"], "expected": "My_User"},
|
||||
{"func": "copy_dict", "values": [{"a": 1}], "expected": {"a": 1}},
|
||||
{"func": "fail", "values": ["my_message"], "expect_raise": True},
|
||||
{
|
||||
"func": "htpasswd",
|
||||
"values": ["my_user", "my_pass"],
|
||||
"expect_regex": r"^my_user:\$2b\$12\$[a-zA-Z0-9-_\.\/]+$",
|
||||
},
|
||||
{"func": "is_boolean", "values": ["true"], "expected": True},
|
||||
{"func": "is_boolean", "values": ["false"], "expected": True},
|
||||
{"func": "is_number", "values": ["1"], "expected": True},
|
||||
{"func": "is_number", "values": ["1.1"], "expected": True},
|
||||
{"func": "match_regex", "values": ["value", "^[a-zA-Z0-9]+$"], "expected": True},
|
||||
{"func": "match_regex", "values": ["value", "^[0-9]+$"], "expected": False},
|
||||
{"func": "merge_dicts", "values": [{"a": 1}, {"b": 2}], "expected": {"a": 1, "b": 2}},
|
||||
{"func": "must_match_regex", "values": ["my_user", "^[0-9]$"], "expect_raise": True},
|
||||
{"func": "must_match_regex", "values": ["1", "^[0-9]$"], "expected": "1"},
|
||||
{"func": "secure_string", "values": [10], "expect_regex": r"^[a-zA-Z0-9-_]+$"},
|
||||
{"func": "disallow_chars", "values": ["my_user", ["$", "@"], "my_key"], "expected": "my_user"},
|
||||
{"func": "disallow_chars", "values": ["my_user$", ["$", "@"], "my_key"], "expect_raise": True},
|
||||
{
|
||||
"func": "get_host_path",
|
||||
"values": [{"type": "host_path", "host_path_config": {"path": "/mnt/test"}}],
|
||||
"expected": "/mnt/test",
|
||||
},
|
||||
{
|
||||
"func": "get_host_path",
|
||||
"values": [{"type": "ix_volume", "ix_volume_config": {"dataset_name": "test"}}],
|
||||
"expected": "/mnt/test123",
|
||||
},
|
||||
{"func": "or_default", "values": [None, 1], "expected": 1},
|
||||
{"func": "or_default", "values": [1, None], "expected": 1},
|
||||
{"func": "or_default", "values": [False, 1], "expected": 1},
|
||||
{"func": "or_default", "values": [True, 1], "expected": True},
|
||||
{"func": "temp_config", "values": [""], "expect_raise": True},
|
||||
{
|
||||
"func": "temp_config",
|
||||
"values": ["test"],
|
||||
"expected": {"type": "temporary", "volume_config": {"volume_name": "test"}},
|
||||
},
|
||||
{"func": "require_unique", "values": [["a=1", "b=2", "c"], "values.key", "="], "expected": None},
|
||||
{
|
||||
"func": "require_unique",
|
||||
"values": [["a=1", "b=2", "b=3"], "values.key", "="],
|
||||
"expect_raise": True,
|
||||
},
|
||||
{
|
||||
"func": "require_no_reserved",
|
||||
"values": [["a=1", "b=2", "c"], "values.key", ["d"], "="],
|
||||
"expected": None,
|
||||
},
|
||||
{
|
||||
"func": "require_no_reserved",
|
||||
"values": [["a=1", "b=2", "c"], "values.key", ["a"], "="],
|
||||
"expect_raise": True,
|
||||
},
|
||||
{
|
||||
"func": "require_no_reserved",
|
||||
"values": [["a=1", "b=2", "c"], "values.key", ["b"], "=", True],
|
||||
"expect_raise": True,
|
||||
},
|
||||
{
|
||||
"func": "url_encode",
|
||||
"values": ["7V!@@%%63r@a5#e!2X9!68g4b"],
|
||||
"expected": "7V%21%40%40%25%2563r%40a5%23e%212X9%2168g4b",
|
||||
},
|
||||
{
|
||||
"func": "url_to_dict",
|
||||
"values": ["192.168.1.1:8080"],
|
||||
"expected": {
|
||||
"host": "192.168.1.1",
|
||||
"port": 8080,
|
||||
"scheme": "http",
|
||||
"netloc": "192.168.1.1:8080",
|
||||
"path": "",
|
||||
},
|
||||
},
|
||||
{
|
||||
"func": "url_to_dict",
|
||||
"values": ["[::]:8080"],
|
||||
"expected": {"host": "::", "port": 8080, "scheme": "http", "netloc": "[::]:8080", "path": ""},
|
||||
},
|
||||
{
|
||||
"func": "url_to_dict",
|
||||
"values": ["[::]:8080/abc/", True],
|
||||
"expected": {
|
||||
"host": "[::]",
|
||||
"port": 8080,
|
||||
"host_no_brackets": "::",
|
||||
"scheme": "http",
|
||||
"netloc": "[::]:8080",
|
||||
"path": "/abc/",
|
||||
},
|
||||
},
|
||||
{
|
||||
"func": "to_yaml",
|
||||
"values": [{"a": 1, "b": 2}],
|
||||
"expected": "a: 1\nb: 2\n",
|
||||
},
|
||||
]
|
||||
|
||||
for test in tests:
|
||||
print(test["func"], test)
|
||||
func = render.funcs[test["func"]]
|
||||
if test.get("expect_raise", False):
|
||||
with pytest.raises(Exception):
|
||||
func(*test["values"])
|
||||
elif test.get("expect_regex"):
|
||||
r = func(*test["values"])
|
||||
assert re.match(test["expect_regex"], r) is not None
|
||||
else:
|
||||
r = func(*test["values"])
|
||||
assert r == test["expected"]
|
||||
@@ -0,0 +1,353 @@
|
||||
import pytest
|
||||
|
||||
from render import Render
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_values():
|
||||
return {
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def test_disable_healthcheck(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"] == {"disable": True}
|
||||
|
||||
|
||||
def test_use_built_in_healthcheck(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.use_built_in()
|
||||
output = render.render()
|
||||
assert "healthcheck" not in output["services"]["test_container"]
|
||||
|
||||
|
||||
def test_set_custom_test(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_custom_test("echo $1")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"] == {
|
||||
"test": "echo $$1",
|
||||
"interval": "30s",
|
||||
"timeout": "5s",
|
||||
"retries": 5,
|
||||
"start_period": "15s",
|
||||
"start_interval": "2s",
|
||||
}
|
||||
|
||||
|
||||
def test_set_custom_test_array(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_custom_test(["CMD", "echo", "1"])
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"] == {
|
||||
"test": ["CMD", "echo", "1"],
|
||||
"interval": "30s",
|
||||
"timeout": "5s",
|
||||
"retries": 5,
|
||||
"start_period": "15s",
|
||||
"start_interval": "2s",
|
||||
}
|
||||
|
||||
|
||||
def test_CMD_with_var_should_fail(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
with pytest.raises(Exception):
|
||||
c1.healthcheck.set_custom_test(["CMD", "echo", "$1"])
|
||||
|
||||
|
||||
def test_set_options(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_custom_test(["CMD", "echo", "123$567"])
|
||||
c1.healthcheck.set_interval(9)
|
||||
c1.healthcheck.set_timeout(8)
|
||||
c1.healthcheck.set_retries(7)
|
||||
c1.healthcheck.set_start_period(6)
|
||||
c1.healthcheck.set_start_interval(5)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"] == {
|
||||
"test": ["CMD", "echo", "123$$567"],
|
||||
"interval": "9s",
|
||||
"timeout": "8s",
|
||||
"retries": 7,
|
||||
"start_period": "6s",
|
||||
"start_interval": "5s",
|
||||
}
|
||||
|
||||
|
||||
def test_adding_test_when_disabled(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.healthcheck.set_custom_test("echo $1")
|
||||
|
||||
|
||||
def test_not_adding_test(mock_values):
|
||||
render = Render(mock_values)
|
||||
render.add_container("test_container", "test_image")
|
||||
with pytest.raises(Exception):
|
||||
render.render()
|
||||
|
||||
|
||||
def test_invalid_path(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
with pytest.raises(Exception):
|
||||
c1.healthcheck.set_test("http", {"port": 8080, "path": "invalid"})
|
||||
|
||||
|
||||
def test_http_healthcheck(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_test("http", {"port": 8080})
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"]["test"] == [
|
||||
"CMD-SHELL",
|
||||
f"""/bin/bash -c '{{ printf "GET / HTTP/1.1\\r\\nHost: 127.0.0.1\\r\\nConnection: close\\r\\n\\r\\n" >&0; grep "HTTP" | grep -q "200"; }} 0<>/dev/tcp/127.0.0.1/8080'""", # noqa
|
||||
]
|
||||
|
||||
|
||||
def test_curl_healthcheck_as_CMD(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_test("curl", {"port": 8080, "path": "/health", "data": {"test": "val"}, "exec_type": "CMD"})
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"]["test"] == [
|
||||
"CMD",
|
||||
"curl",
|
||||
"--request",
|
||||
"GET",
|
||||
"--silent",
|
||||
"--output",
|
||||
"/dev/null",
|
||||
"--show-error",
|
||||
"--fail",
|
||||
"--data",
|
||||
'{"test": "val"}',
|
||||
"http://127.0.0.1:8080/health",
|
||||
]
|
||||
|
||||
|
||||
def test_curl_healthcheck(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_test("curl", {"port": 8080, "path": "/health", "data": {"test": "val"}})
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"]["test"] == [
|
||||
"CMD",
|
||||
"curl",
|
||||
"--request",
|
||||
"GET",
|
||||
"--silent",
|
||||
"--output",
|
||||
"/dev/null",
|
||||
"--show-error",
|
||||
"--fail",
|
||||
"--data",
|
||||
'{"test": "val"}',
|
||||
"http://127.0.0.1:8080/health",
|
||||
]
|
||||
|
||||
|
||||
def test_curl_healthcheck_with_headers_and_method_and_data(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_test(
|
||||
"curl", {"port": 8080, "path": "/health", "method": "POST", "headers": [("X-Test", "some-value")], "data": {}}
|
||||
)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"]["test"] == [
|
||||
"CMD",
|
||||
"curl",
|
||||
"--request",
|
||||
"POST",
|
||||
"--silent",
|
||||
"--output",
|
||||
"/dev/null",
|
||||
"--show-error",
|
||||
"--fail",
|
||||
"--header",
|
||||
"X-Test: some-value",
|
||||
"--data",
|
||||
"{}",
|
||||
"http://127.0.0.1:8080/health",
|
||||
]
|
||||
|
||||
|
||||
def test_wget_healthcheck(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_test("wget", {"port": 8080, "path": "/health"})
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"]["test"] == [
|
||||
"CMD",
|
||||
"wget",
|
||||
"--quiet",
|
||||
"--spider",
|
||||
"http://127.0.0.1:8080/health",
|
||||
]
|
||||
|
||||
|
||||
def test_wget_healthcheck_no_spider(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_test("wget", {"port": 8080, "path": "/health", "spider": False})
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"]["test"] == [
|
||||
"CMD",
|
||||
"wget",
|
||||
"--quiet",
|
||||
"-O",
|
||||
"/dev/null",
|
||||
"http://127.0.0.1:8080/health",
|
||||
]
|
||||
|
||||
|
||||
def test_netcat_healthcheck(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_test("netcat", {"port": 8080})
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"]["test"] == [
|
||||
"CMD",
|
||||
"nc",
|
||||
"-z",
|
||||
"-w",
|
||||
"1",
|
||||
"127.0.0.1",
|
||||
"8080",
|
||||
]
|
||||
|
||||
|
||||
def test_netcat_udp_healthcheck(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_test("netcat", {"port": 8080, "udp": True})
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"]["test"] == [
|
||||
"CMD",
|
||||
"nc",
|
||||
"-z",
|
||||
"-w",
|
||||
"1",
|
||||
"-u",
|
||||
"127.0.0.1",
|
||||
"8080",
|
||||
]
|
||||
|
||||
|
||||
def test_tcp_healthcheck(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_test("tcp", {"port": 8080})
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"]["test"] == [
|
||||
"CMD",
|
||||
"timeout",
|
||||
"1",
|
||||
"bash",
|
||||
"-c",
|
||||
"cat < /dev/null > /dev/tcp/127.0.0.1/8080",
|
||||
]
|
||||
|
||||
|
||||
def test_redis_healthcheck(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_test("redis", {"password": "test"})
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"]["test"] == [
|
||||
"CMD",
|
||||
"redis-cli",
|
||||
"-h",
|
||||
"127.0.0.1",
|
||||
"-p",
|
||||
"6379",
|
||||
"-a",
|
||||
"test",
|
||||
"ping",
|
||||
]
|
||||
|
||||
|
||||
def test_redis_healthcheck_no_password(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_test("redis", {"password": ""})
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"]["test"] == [
|
||||
"CMD",
|
||||
"redis-cli",
|
||||
"-h",
|
||||
"127.0.0.1",
|
||||
"-p",
|
||||
"6379",
|
||||
"ping",
|
||||
]
|
||||
|
||||
|
||||
def test_postgres_healthcheck(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_test("postgres", {"user": "test-user", "db": "test-db"})
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"]["test"] == [
|
||||
"CMD",
|
||||
"pg_isready",
|
||||
"-h",
|
||||
"127.0.0.1",
|
||||
"-p",
|
||||
"5432",
|
||||
"-U",
|
||||
"test-user",
|
||||
"-d",
|
||||
"test-db",
|
||||
]
|
||||
|
||||
|
||||
def test_mariadb_healthcheck(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_test("mariadb", {"password": "test-pass"})
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"]["test"] == [
|
||||
"CMD",
|
||||
"mariadb-admin",
|
||||
"--user=root",
|
||||
"--host=127.0.0.1",
|
||||
"--port=3306",
|
||||
"--password=test-pass",
|
||||
"ping",
|
||||
]
|
||||
|
||||
|
||||
def test_mongodb_healthcheck(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.set_test("mongodb", {"db": "test-db"})
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["healthcheck"]["test"] == [
|
||||
"CMD",
|
||||
"mongosh",
|
||||
"--host",
|
||||
"127.0.0.1",
|
||||
"--port",
|
||||
"27017",
|
||||
"test-db",
|
||||
"--eval",
|
||||
'db.adminCommand("ping")',
|
||||
"--quiet",
|
||||
]
|
||||
@@ -0,0 +1,88 @@
import pytest

from render import Render


@pytest.fixture
def mock_values():
    return {
        "images": {
            "test_image": {
                "repository": "nginx",
                "tag": "latest",
            }
        },
    }


def test_add_disallowed_label(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        c1.labels.add_label("com.docker.compose.service", "test_service")


def test_add_duplicate_label(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.labels.add_label("my.custom.label", "test_value")
    with pytest.raises(Exception):
        c1.labels.add_label("my.custom.label", "test_value1")


def test_add_label_on_non_existing_container(mock_values):
    mock_values["labels"] = [
        {
            "key": "my.custom.label1",
            "value": "test_value1",
            "containers": ["test_container", "test_container2"],
        },
    ]
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    with pytest.raises(Exception):
        render.render()


def test_add_label(mock_values):
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c1.healthcheck.disable()
    c1.labels.add_label("my.custom.label1", "test_value1")
    c1.labels.add_label("my.custom.label2", "test_value2")
    output = render.render()
    assert output["services"]["test_container"]["labels"] == {
        "my.custom.label1": "test_value1",
        "my.custom.label2": "test_value2",
    }


def test_auto_add_labels(mock_values):
    mock_values["labels"] = [
        {
            "key": "my.custom.label1",
            "value": "test_value1",
            "containers": ["test_container", "test_container2"],
        },
        {
            "key": "my.custom.label2",
            "value": "test_value2",
            "containers": ["test_container"],
        },
    ]
    render = Render(mock_values)
    c1 = render.add_container("test_container", "test_image")
    c2 = render.add_container("test_container2", "test_image")
    c1.healthcheck.disable()
    c2.healthcheck.disable()
    output = render.render()
    assert output["services"]["test_container"]["labels"] == {
        "my.custom.label1": "test_value1",
        "my.custom.label2": "test_value2",
    }
    assert output["services"]["test_container2"]["labels"] == {
        "my.custom.label1": "test_value1",
    }
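Taken together, the label tests describe the contract: each values-driven label is applied to every container it names, and rendering fails if a named container was never added. The sketch below mirrors that asserted behavior and is illustrative only, not the library's internals:

def apply_value_labels(values, containers_by_name):
    for label in values.get("labels", []):
        for name in label["containers"]:
            if name not in containers_by_name:
                raise ValueError(f"label [{label['key']}] references unknown container [{name}]")
            containers_by_name[name].labels.add_label(label["key"], label["value"])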
@@ -0,0 +1,369 @@
|
||||
import pytest
|
||||
|
||||
|
||||
from render import Render
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_values():
|
||||
return {
|
||||
"ix_context": {
|
||||
"app_metadata": {
|
||||
"name": "test_app",
|
||||
"title": "Test App",
|
||||
"train": "enterprise",
|
||||
}
|
||||
},
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def test_notes(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert (
|
||||
output["x-notes"]
|
||||
== """# Test App
|
||||
|
||||
## Security
|
||||
|
||||
**Read the following security precautions to ensure that you wish to continue using this application.**
|
||||
|
||||
---
|
||||
|
||||
### Container: [test_container]
|
||||
|
||||
#### Running user/group(s)
|
||||
|
||||
- User: unknown
|
||||
- Group: unknown
|
||||
- Supplementary Groups: apps
|
||||
|
||||
---
|
||||
|
||||
## Bug Reports and Feature Requests
|
||||
|
||||
If you find a bug in this app or have an idea for a new feature, please file an issue at
|
||||
https://ixsystems.atlassian.net
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
def test_notes_on_non_enterprise_train(mock_values):
|
||||
mock_values["ix_context"]["app_metadata"]["train"] = "community"
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.set_user(568, 568)
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert (
|
||||
output["x-notes"]
|
||||
== """# Test App
|
||||
|
||||
## Security
|
||||
|
||||
**Read the following security precautions to ensure that you wish to continue using this application.**
|
||||
|
||||
---
|
||||
|
||||
### Container: [test_container]
|
||||
|
||||
#### Running user/group(s)
|
||||
|
||||
- User: 568
|
||||
- Group: 568
|
||||
- Supplementary Groups: apps
|
||||
|
||||
---
|
||||
|
||||
## Bug Reports and Feature Requests
|
||||
|
||||
If you find a bug in this app or have an idea for a new feature, please file an issue at
|
||||
https://github.com/truenas/apps
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
def test_notes_with_warnings(mock_values):
|
||||
render = Render(mock_values)
|
||||
render.notes.add_warning("this is not properly configured. fix it now!")
|
||||
render.notes.add_warning("that is not properly configured. fix it later!")
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.set_user(568, 568)
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert (
|
||||
output["x-notes"]
|
||||
== """# Test App
|
||||
|
||||
## Warnings
|
||||
|
||||
- this is not properly configured. fix it now!
|
||||
- that is not properly configured. fix it later!
|
||||
|
||||
## Security
|
||||
|
||||
**Read the following security precautions to ensure that you wish to continue using this application.**
|
||||
|
||||
---
|
||||
|
||||
### Container: [test_container]
|
||||
|
||||
#### Running user/group(s)
|
||||
|
||||
- User: 568
|
||||
- Group: 568
|
||||
- Supplementary Groups: apps
|
||||
|
||||
---
|
||||
|
||||
## Bug Reports and Feature Requests
|
||||
|
||||
If you find a bug in this app or have an idea for a new feature, please file an issue at
|
||||
https://ixsystems.atlassian.net
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
def test_notes_with_deprecations(mock_values):
|
||||
render = Render(mock_values)
|
||||
render.notes.add_deprecation("this is will be removed later. fix it now!")
|
||||
render.notes.add_deprecation("that is will be removed later. fix it later!")
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.set_user(568, 568)
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert (
|
||||
output["x-notes"]
|
||||
== """# Test App
|
||||
|
||||
## Deprecations
|
||||
|
||||
- this is will be removed later. fix it now!
|
||||
- that is will be removed later. fix it later!
|
||||
|
||||
## Security
|
||||
|
||||
**Read the following security precautions to ensure that you wish to continue using this application.**
|
||||
|
||||
---
|
||||
|
||||
### Container: [test_container]
|
||||
|
||||
#### Running user/group(s)
|
||||
|
||||
- User: 568
|
||||
- Group: 568
|
||||
- Supplementary Groups: apps
|
||||
|
||||
---
|
||||
|
||||
## Bug Reports and Feature Requests
|
||||
|
||||
If you find a bug in this app or have an idea for a new feature, please file an issue at
|
||||
https://ixsystems.atlassian.net
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
def test_notes_with_body(mock_values):
|
||||
render = Render(mock_values)
|
||||
render.notes.set_body(
|
||||
"""## Additional info
|
||||
|
||||
Some info
|
||||
some other info.
|
||||
"""
|
||||
)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.set_user(568, 568)
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert (
|
||||
output["x-notes"]
|
||||
== """# Test App
|
||||
|
||||
## Security
|
||||
|
||||
**Read the following security precautions to ensure that you wish to continue using this application.**
|
||||
|
||||
---
|
||||
|
||||
### Container: [test_container]
|
||||
|
||||
#### Running user/group(s)
|
||||
|
||||
- User: 568
|
||||
- Group: 568
|
||||
- Supplementary Groups: apps
|
||||
|
||||
---
|
||||
|
||||
## Additional info
|
||||
|
||||
Some info
|
||||
some other info.
|
||||
|
||||
## Bug Reports and Feature Requests
|
||||
|
||||
If you find a bug in this app or have an idea for a new feature, please file an issue at
|
||||
https://ixsystems.atlassian.net
|
||||
"""
|
||||
)
|
||||
|
||||
|
||||
def test_notes_all(mock_values):
|
||||
render = Render(mock_values)
|
||||
render.notes.add_warning("this is not properly configured. fix it now!")
|
||||
render.notes.add_warning("that is not properly configured. fix it later!")
|
||||
render.notes.add_deprecation("this is will be removed later. fix it now!")
|
||||
render.notes.add_deprecation("that is will be removed later. fix it later!")
|
||||
render.notes.add_info("some info")
|
||||
render.notes.add_info("some other info")
|
||||
render.notes.set_body(
|
||||
"""## Additional info
|
||||
|
||||
Some info
|
||||
some other info.
|
||||
"""
|
||||
)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.set_privileged(True)
|
||||
c1.set_user(0, 0)
|
||||
c1.add_group(0)
|
||||
c1.set_ipc_mode("host")
|
||||
c1.set_pid_mode("host")
|
||||
c1.set_cgroup("host")
|
||||
c1.set_tty(True)
|
||||
c1.remove_security_opt("no-new-privileges")
|
||||
c1.add_docker_socket()
|
||||
c1.add_tun_device()
|
||||
c1.add_usb_bus()
|
||||
c1.add_snd_device()
|
||||
c1.devices.add_device("/dev/null", "/dev/null", "rwm")
|
||||
c1.add_storage("/etc/os-release", {"type": "host_path", "host_path_config": {"path": "/etc/os-release"}})
|
||||
c1.restart.set_policy("on-failure", 1)
|
||||
|
||||
c2 = render.add_container("test_container2", "test_image")
|
||||
c2.healthcheck.disable()
|
||||
c2.set_user(568, 568)
|
||||
|
||||
c3 = render.add_container("test_container3", "test_image")
|
||||
c3.healthcheck.disable()
|
||||
c3.restart.set_policy("on-failure", 1)
|
||||
c3.set_user(568, 568)
|
||||
|
||||
output = render.render()
|
||||
assert (
|
||||
output["x-notes"]
|
||||
== """# Test App
|
||||
|
||||
## Warnings
|
||||
|
||||
- Container [test_container] is running with a TTY, Logs do not appear correctly in the UI due to an [upstream bug](https://github.com/docker/docker-py/issues/1394)
|
||||
- this is not properly configured. fix it now!
|
||||
- that is not properly configured. fix it later!
|
||||
|
||||
## Deprecations
|
||||
|
||||
- this is will be removed later. fix it now!
|
||||
- that is will be removed later. fix it later!
|
||||
|
||||
## Info
|
||||
|
||||
- some info
|
||||
- some other info
|
||||
|
||||
## Security
|
||||
|
||||
**Read the following security precautions to ensure that you wish to continue using this application.**
|
||||
|
||||
---
|
||||
|
||||
### Container: [test_container2]
|
||||
|
||||
#### Running user/group(s)
|
||||
|
||||
- User: 568
|
||||
- Group: 568
|
||||
- Supplementary Groups: apps
|
||||
|
||||
---
|
||||
|
||||
### Container: [test_container]
|
||||
|
||||
**This container is short-lived.**
|
||||
|
||||
#### Privileged mode is enabled
|
||||
|
||||
- Has the same level of control as a system administrator
|
||||
- Can access and modify any part of your TrueNAS system
|
||||
|
||||
#### Running user/group(s)
|
||||
|
||||
- User: root
|
||||
- Group: root
|
||||
- Supplementary Groups: apps, audio, docker, root
|
||||
|
||||
#### Host IPC namespace is enabled
|
||||
|
||||
- Container can access the inter-process communication mechanisms of the host
|
||||
- Allows communication with other processes on the host under particular circumstances
|
||||
|
||||
#### Host PID namespace is enabled
|
||||
|
||||
- Container can see and interact with all host processes
|
||||
- Potential for privilege escalation or process manipulation
|
||||
|
||||
#### Host cgroup namespace is enabled
|
||||
|
||||
- Container shares control groups with the host system
|
||||
- Can bypass resource limits and isolation boundaries
|
||||
|
||||
#### Security option [no-new-privileges] is not set
|
||||
|
||||
- Processes can gain additional privileges through setuid/setgid binaries
|
||||
- Can potentially allow privilege escalation attacks within the container
|
||||
|
||||
#### Passing Host Files, Devices, or Sockets into the Container
|
||||
|
||||
- /dev/null - (rwm)
|
||||
- Docker Socket (/var/run/docker.sock) - (Read Only)
|
||||
- OS Release File (/etc/os-release) - (Read/Write)
|
||||
- Sound Device (/dev/snd) - (Read/Write)
|
||||
- TUN Device (/dev/net/tun) - (Read/Write)
|
||||
- USB Devices (/dev/bus/usb) - (Read/Write)
|
||||
|
||||
---
|
||||
|
||||
### Container: [test_container3]
|
||||
|
||||
**This container is short-lived.**
|
||||
|
||||
#### Running user/group(s)
|
||||
|
||||
- User: 568
|
||||
- Group: 568
|
||||
- Supplementary Groups: apps
|
||||
|
||||
---
|
||||
|
||||
## Additional info
|
||||
|
||||
Some info
|
||||
some other info.
|
||||
|
||||
## Bug Reports and Feature Requests
|
||||
|
||||
If you find a bug in this app or have an idea for a new feature, please file an issue at
|
||||
https://ixsystems.atlassian.net
|
||||
""" # noqa
|
||||
)
|
||||
@@ -0,0 +1,93 @@
|
||||
import pytest
|
||||
|
||||
|
||||
from render import Render
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_values():
|
||||
return {
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def test_no_portals(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["x-portals"] == []
|
||||
|
||||
|
||||
def test_add_portal_with_host_ips(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
port1 = {"bind_mode": "published", "port_number": 8080, "host_ips": ["1.2.3.4", "5.6.7.8"]}
|
||||
port2 = {"bind_mode": "published", "port_number": 8081, "host_ips": ["::", "0.0.0.0"]}
|
||||
port3 = {"bind_mode": "published", "port_number": 8081, "host_ips": ["1.2.3.4"]}
|
||||
port3 = {"bind_mode": "published", "port_number": 8081, "host_ips": ["1.2.3.4"]}
|
||||
port4 = {"bind_mode": "exposed", "port_number": 1234, "host_ips": ["1.2.3.4"]}
|
||||
render.portals.add(port1)
|
||||
render.portals.add(port1, {"name": "test1", "host": "my-host.com"})
|
||||
render.portals.add(port2, {"name": "test2"})
|
||||
render.portals.add(port3, {"name": "test3", "port": None})
|
||||
render.portals.add(port3, {"name": "test4", "port": 1234})
|
||||
render.portals.add(port4, {"name": "test5", "port": 1234})
|
||||
output = render.render()
|
||||
assert output["x-portals"] == [
|
||||
{"name": "Web UI", "scheme": "http", "host": "1.2.3.4", "port": 8080, "path": "/"},
|
||||
{"name": "test1", "scheme": "http", "host": "my-host.com", "port": 8080, "path": "/"},
|
||||
{"name": "test2", "scheme": "http", "host": "0.0.0.0", "port": 8081, "path": "/"},
|
||||
{"name": "test3", "scheme": "http", "host": "1.2.3.4", "port": 8081, "path": "/"},
|
||||
{"name": "test4", "scheme": "http", "host": "1.2.3.4", "port": 1234, "path": "/"},
|
||||
]
|
||||
|
||||
|
||||
def test_add_duplicate_portal(mock_values):
|
||||
render = Render(mock_values)
|
||||
port = {"bind_mode": "published", "port_number": 8080, "host_ips": ["1.2.3.4", "5.6.7.8"]}
|
||||
render.portals.add(port)
|
||||
with pytest.raises(Exception):
|
||||
render.portals.add(port)
|
||||
|
||||
|
||||
def test_add_duplicate_portal_with_explicit_name(mock_values):
|
||||
render = Render(mock_values)
|
||||
port = {"bind_mode": "published", "port_number": 8080, "host_ips": ["1.2.3.4", "5.6.7.8"]}
|
||||
render.portals.add(port, {"name": "Some Portal"})
|
||||
with pytest.raises(Exception):
|
||||
render.portals.add(port, {"name": "Some Portal"})
|
||||
|
||||
|
||||
def test_add_portal_with_invalid_scheme(mock_values):
|
||||
render = Render(mock_values)
|
||||
port = {"bind_mode": "published", "port_number": 8080, "host_ips": ["1.2.3.4", "5.6.7.8"]}
|
||||
with pytest.raises(Exception):
|
||||
render.portals.add(port, {"scheme": "invalid_scheme"})
|
||||
|
||||
|
||||
def test_add_portal_with_invalid_path(mock_values):
|
||||
render = Render(mock_values)
|
||||
port = {"bind_mode": "published", "port_number": 8080, "host_ips": ["1.2.3.4", "5.6.7.8"]}
|
||||
with pytest.raises(Exception):
|
||||
render.portals.add(port, {"path": "invalid_path"})
|
||||
|
||||
|
||||
def test_add_portal_with_invalid_path_double_slash(mock_values):
|
||||
render = Render(mock_values)
|
||||
port = {"bind_mode": "published", "port_number": 8080, "host_ips": ["1.2.3.4", "5.6.7.8"]}
|
||||
with pytest.raises(Exception):
|
||||
render.portals.add(port, {"path": "/some//path"})
|
||||
|
||||
|
||||
def test_add_portal_with_invalid_port(mock_values):
|
||||
render = Render(mock_values)
|
||||
port = {"bind_mode": "published", "port_number": 8080, "host_ips": ["1.2.3.4", "5.6.7.8"]}
|
||||
with pytest.raises(Exception):
|
||||
render.portals.add(port, {"port": -1})
|
||||
@@ -0,0 +1,383 @@
|
||||
import pytest
|
||||
|
||||
|
||||
from render import Render
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_values():
|
||||
return {
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
tests = [
|
||||
{
|
||||
"name": "add_ports_should_work",
|
||||
"inputs": [
|
||||
{
|
||||
"values": ({"bind_mode": "published", "port_number": 8081}, {"container_port": 8080}),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8082},
|
||||
{"container_port": 8080, "protocol": "udp"},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
],
|
||||
"expected": [
|
||||
{"published": 8081, "target": 8080, "protocol": "tcp", "mode": "ingress"},
|
||||
{"published": 8082, "target": 8080, "protocol": "udp", "mode": "ingress"},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "add_duplicate_ports_should_fail",
|
||||
"inputs": [
|
||||
{
|
||||
"values": ({"bind_mode": "published", "port_number": 8081}, {"container_port": 8080}),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": ({"bind_mode": "published", "port_number": 8081}, {"container_port": 8080}),
|
||||
"expect_error": True,
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "adding_duplicate_port_different_protocol_should_work",
|
||||
"inputs": [
|
||||
{
|
||||
"values": ({"bind_mode": "published", "port_number": 8081}, {"container_port": 8080}),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "protocol": "udp"},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
],
|
||||
"expected": [
|
||||
{"published": 8081, "target": 8080, "protocol": "tcp", "mode": "ingress"},
|
||||
{"published": 8081, "target": 8080, "protocol": "udp", "mode": "ingress"},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "adding_same_port_for_both_wildcard_families_should_work",
|
||||
"inputs": [
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["0.0.0.0"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["::"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
],
|
||||
"expected": [
|
||||
{"published": 8081, "target": 8080, "protocol": "tcp", "mode": "ingress"},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "adding_duplicate_port_for_v4_ip_and_v4_wildcard_should_fail",
|
||||
"inputs": [
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["192.168.1.10"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["0.0.0.0"]},
|
||||
),
|
||||
"expect_error": True,
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "adding_duplicate_port_for_v4_wildcard_and_v4_ip_should_fail",
|
||||
"inputs": [
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["0.0.0.0"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["192.168.1.10"]},
|
||||
),
|
||||
"expect_error": True,
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "adding_duplicate_port_for_v4_wildcard_and_v6_ip_should_work",
|
||||
"inputs": [
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["192.168.1.10"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["fd00:1234:5678:abcd::10"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
],
|
||||
"expected": [
|
||||
{"published": 8081, "target": 8080, "protocol": "tcp", "mode": "ingress", "host_ip": "192.168.1.10"},
|
||||
{
|
||||
"published": 8081,
|
||||
"target": 8080,
|
||||
"protocol": "tcp",
|
||||
"mode": "ingress",
|
||||
"host_ip": "fd00:1234:5678:abcd::10",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "adding_duplicate_port_for_v6_wildcard_and_v4_ip_should_work",
|
||||
"inputs": [
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["::"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["192.168.1.10"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
],
|
||||
"expected": [
|
||||
{"published": 8081, "target": 8080, "protocol": "tcp", "mode": "ingress", "host_ip": "192.168.1.10"},
|
||||
{"published": 8081, "target": 8080, "protocol": "tcp", "mode": "ingress", "host_ip": "::"},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "adding_duplicate_port_for_v6_wildcard_and_v6_ip_should_fail",
|
||||
"inputs": [
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["::"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["fd00:1234:5678:abcd::10"]},
|
||||
),
|
||||
"expect_error": True,
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "adding_duplicate_port_for_v6_ip_and_v6_wildcard_should_fail",
|
||||
"inputs": [
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["fd00:1234:5678:abcd::10"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["::"]},
|
||||
),
|
||||
"expect_error": True,
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "adding_duplicate_port_with_different_v4_ip_should_work",
|
||||
"inputs": [
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["192.168.1.10"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["192.168.1.11"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
],
|
||||
"expected": [
|
||||
{"published": 8081, "target": 8080, "protocol": "tcp", "mode": "ingress", "host_ip": "192.168.1.10"},
|
||||
{"published": 8081, "target": 8080, "protocol": "tcp", "mode": "ingress", "host_ip": "192.168.1.11"},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "adding_port_with_invalid_protocol_should_fail",
|
||||
"inputs": [
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "protocol": "invalid_protocol"},
|
||||
),
|
||||
"expect_error": True,
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "adding_port_with_invalid_mode_should_fail",
|
||||
"inputs": [
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "mode": "invalid_mode"},
|
||||
),
|
||||
"expect_error": True,
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "adding_port_with_invalid_ip_should_fail",
|
||||
"inputs": [
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["invalid_ip"]},
|
||||
),
|
||||
"expect_error": True,
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "adding_port_with_invalid_port_number_should_fail",
|
||||
"inputs": [
|
||||
{"values": ({"bind_mode": "published", "port_number": -1}, {"container_port": 8080}), "expect_error": True},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "adding_port_with_invalid_container_port_should_fail",
|
||||
"inputs": [
|
||||
{"values": ({"bind_mode": "published", "port_number": 8081}, {"container_port": -1}), "expect_error": True},
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "adding_duplicate_ports_with_different_host_ip_should_work",
|
||||
"inputs": [
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["192.168.1.10"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["192.168.1.10"], "protocol": "udp"},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["192.168.1.11"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["192.168.1.11"], "protocol": "udp"},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["fd00:1234:5678:abcd::10"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
{
|
||||
"values": (
|
||||
{"bind_mode": "published", "port_number": 8081},
|
||||
{"container_port": 8080, "host_ips": ["fd00:1234:5678:abcd::11"]},
|
||||
),
|
||||
"expect_error": False,
|
||||
},
|
||||
],
|
||||
# fmt: off
|
||||
"expected": [
|
||||
{"published": 8081, "target": 8080, "protocol": "tcp", "mode": "ingress", "host_ip": "192.168.1.10"},
|
||||
{"published": 8081, "target": 8080, "protocol": "tcp", "mode": "ingress", "host_ip": "192.168.1.11"},
|
||||
{"published": 8081, "target": 8080, "protocol": "tcp", "mode": "ingress", "host_ip": "fd00:1234:5678:abcd::10"}, # noqa
|
||||
{"published": 8081, "target": 8080, "protocol": "tcp", "mode": "ingress", "host_ip": "fd00:1234:5678:abcd::11"}, # noqa
|
||||
{"published": 8081, "target": 8080, "protocol": "udp", "mode": "ingress", "host_ip": "192.168.1.10"},
|
||||
{"published": 8081, "target": 8080, "protocol": "udp", "mode": "ingress", "host_ip": "192.168.1.11"},
|
||||
],
|
||||
# fmt: on
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.parametrize("test", tests)
|
||||
def test_ports(test):
|
||||
mock_values = {
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
|
||||
errored = False
|
||||
for input in test["inputs"]:
|
||||
if input["expect_error"]:
|
||||
with pytest.raises(Exception):
|
||||
c1.add_port(*input["values"])
|
||||
errored = True
|
||||
else:
|
||||
c1.add_port(*input["values"])
|
||||
|
||||
errored = True if [i["expect_error"] for i in test["inputs"]].count(True) > 0 else False
|
||||
if errored:
|
||||
return
|
||||
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["ports"] == test["expected"]
|
||||
@@ -0,0 +1,37 @@
|
||||
import pytest
|
||||
|
||||
|
||||
from render import Render
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_values():
|
||||
return {
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def test_values_cannot_be_modified(mock_values):
|
||||
render = Render(mock_values)
|
||||
render.values["test"] = "test"
|
||||
with pytest.raises(Exception):
|
||||
render.render()
|
||||
|
||||
|
||||
def test_duplicate_containers(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
render.add_container("test_container", "test_image")
|
||||
|
||||
|
||||
def test_no_containers(mock_values):
|
||||
render = Render(mock_values)
|
||||
with pytest.raises(Exception):
|
||||
render.render()
|
||||
@@ -0,0 +1,140 @@
|
||||
import pytest
|
||||
|
||||
|
||||
from render import Render
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_values():
|
||||
return {
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def test_automatically_add_cpu(mock_values):
|
||||
mock_values["resources"] = {"limits": {"cpus": 1.0}}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["deploy"]["resources"]["limits"]["cpus"] == "1.0"
|
||||
|
||||
|
||||
def test_invalid_cpu(mock_values):
|
||||
mock_values["resources"] = {"limits": {"cpus": "invalid"}}
|
||||
render = Render(mock_values)
|
||||
with pytest.raises(Exception):
|
||||
render.add_container("test_container", "test_image")
|
||||
|
||||
|
||||
def test_automatically_add_memory(mock_values):
|
||||
mock_values["resources"] = {"limits": {"memory": 1024}}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["deploy"]["resources"]["limits"]["memory"] == "1024M"
|
||||
|
||||
|
||||
def test_invalid_memory(mock_values):
|
||||
mock_values["resources"] = {"limits": {"memory": "invalid"}}
|
||||
render = Render(mock_values)
|
||||
with pytest.raises(Exception):
|
||||
render.add_container("test_container", "test_image")
|
||||
|
||||
|
||||
def test_automatically_add_gpus(mock_values):
|
||||
mock_values["resources"] = {
|
||||
"gpus": {
|
||||
"nvidia_gpu_selection": {
|
||||
"pci_slot_0": {"uuid": "uuid_0", "use_gpu": True},
|
||||
"pci_slot_1": {"uuid": "uuid_1", "use_gpu": True},
|
||||
},
|
||||
}
|
||||
}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
output = render.render()
|
||||
devices = output["services"]["test_container"]["deploy"]["resources"]["reservations"]["devices"]
|
||||
assert len(devices) == 1
|
||||
assert devices[0] == {
|
||||
"capabilities": ["gpu"],
|
||||
"driver": "nvidia",
|
||||
"device_ids": ["uuid_0", "uuid_1"],
|
||||
}
|
||||
assert output["services"]["test_container"]["group_add"] == [44, 107, 568]
|
||||
|
||||
|
||||
def test_gpu_without_uuid(mock_values):
|
||||
mock_values["resources"] = {
|
||||
"gpus": {
|
||||
"nvidia_gpu_selection": {
|
||||
"pci_slot_0": {"uuid": "", "use_gpu": True},
|
||||
"pci_slot_1": {"uuid": "uuid_1", "use_gpu": True},
|
||||
},
|
||||
}
|
||||
}
|
||||
render = Render(mock_values)
|
||||
with pytest.raises(Exception):
|
||||
render.add_container("test_container", "test_image")
|
||||
|
||||
|
||||
def test_remove_cpus_and_memory_with_gpus(mock_values):
|
||||
mock_values["resources"] = {"gpus": {"nvidia_gpu_selection": {"pci_slot_0": {"uuid": "uuid_1", "use_gpu": True}}}}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.deploy.resources.remove_cpus_and_memory()
|
||||
output = render.render()
|
||||
assert "limits" not in output["services"]["test_container"]["deploy"]["resources"]
|
||||
devices = output["services"]["test_container"]["deploy"]["resources"]["reservations"]["devices"]
|
||||
assert len(devices) == 1
|
||||
assert devices[0] == {
|
||||
"capabilities": ["gpu"],
|
||||
"driver": "nvidia",
|
||||
"device_ids": ["uuid_1"],
|
||||
}
|
||||
|
||||
|
||||
def test_remove_cpus_and_memory(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.deploy.resources.remove_cpus_and_memory()
|
||||
output = render.render()
|
||||
assert "deploy" not in output["services"]["test_container"]
|
||||
|
||||
|
||||
def test_remove_devices(mock_values):
|
||||
mock_values["resources"] = {"gpus": {"nvidia_gpu_selection": {"pci_slot_0": {"uuid": "uuid_0", "use_gpu": True}}}}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.deploy.resources.remove_devices()
|
||||
output = render.render()
|
||||
assert "reservations" not in output["services"]["test_container"]["deploy"]["resources"]
|
||||
assert output["services"]["test_container"]["group_add"] == [568]
|
||||
|
||||
|
||||
def test_set_profile(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.deploy.resources.set_profile("low")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["deploy"]["resources"]["limits"]["cpus"] == "1"
|
||||
assert output["services"]["test_container"]["deploy"]["resources"]["limits"]["memory"] == "512M"
|
||||
|
||||
|
||||
def test_set_profile_invalid_profile(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.deploy.resources.set_profile("invalid_profile")
|
||||
@@ -0,0 +1,57 @@
|
||||
import pytest
|
||||
|
||||
from render import Render
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_values():
|
||||
return {
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def test_invalid_restart_policy(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.restart.set_policy("invalid_policy")
|
||||
|
||||
|
||||
def test_valid_restart_policy(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.restart.set_policy("on-failure")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["restart"] == "on-failure"
|
||||
|
||||
|
||||
def test_valid_restart_policy_with_maximum_retry_count(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.restart.set_policy("on-failure", 10)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["restart"] == "on-failure:10"
|
||||
|
||||
|
||||
def test_invalid_restart_policy_with_maximum_retry_count(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.restart.set_policy("on-failure", maximum_retry_count=-1)
|
||||
|
||||
|
||||
def test_invalid_restart_policy_with_maximum_retry_count_and_policy(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.restart.set_policy("always", maximum_retry_count=10)
|
||||
@@ -0,0 +1,91 @@
|
||||
import pytest
|
||||
|
||||
|
||||
from render import Render
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_values():
|
||||
return {
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def test_add_security_opt(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.add_security_opt("apparmor", "unconfined")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["security_opt"] == ["apparmor=unconfined", "no-new-privileges=true"]
|
||||
|
||||
|
||||
def test_add_duplicate_security_opt(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.add_security_opt("no-new-privileges", True)
|
||||
|
||||
|
||||
def test_add_empty_security_opt(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.add_security_opt("", True)
|
||||
|
||||
|
||||
def test_remove_security_opt(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.remove_security_opt("no-new-privileges")
|
||||
output = render.render()
|
||||
assert "security_opt" not in output["services"]["test_container"]
|
||||
|
||||
|
||||
def test_add_security_opt_boolean(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.remove_security_opt("no-new-privileges")
|
||||
c1.add_security_opt("no-new-privileges", False)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["security_opt"] == ["no-new-privileges=false"]
|
||||
|
||||
|
||||
def test_add_security_opt_arg(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.add_security_opt("label", "type", "svirt_apache_t")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["security_opt"] == [
|
||||
"label=type:svirt_apache_t",
|
||||
"no-new-privileges=true",
|
||||
]
|
||||
|
||||
|
||||
def test_add_security_opt_with_invalid_opt(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.add_security_opt("invalid")
|
||||
|
||||
|
||||
def test_add_security_opt_with_opt_containing_value(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.remove_security_opt("no-new-privileges")
|
||||
with pytest.raises(Exception):
|
||||
c1.add_security_opt("no-new-privileges=true")
|
||||
with pytest.raises(Exception):
|
||||
c1.add_security_opt("apparmor:unconfined")
|
||||
@@ -0,0 +1,62 @@
|
||||
import pytest
|
||||
|
||||
|
||||
from render import Render
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_values():
|
||||
return {
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def test_add_sysctl(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.sysctls.add("net.ipv4.ip_forward", 1)
|
||||
c1.sysctls.add("fs.mqueue.msg_max", 100)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["sysctls"] == {"net.ipv4.ip_forward": "1", "fs.mqueue.msg_max": "100"}
|
||||
|
||||
|
||||
def test_add_net_sysctl_with_host_network(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.set_network_mode("host")
|
||||
c1.sysctls.add("net.ipv4.ip_forward", 1)
|
||||
with pytest.raises(Exception):
|
||||
render.render()
|
||||
|
||||
|
||||
def test_add_duplicate_sysctl(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.sysctls.add("net.ipv4.ip_forward", 1)
|
||||
with pytest.raises(Exception):
|
||||
c1.sysctls.add("net.ipv4.ip_forward", 0)
|
||||
|
||||
|
||||
def test_add_empty_sysctl(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.sysctls.add("", 1)
|
||||
|
||||
|
||||
def test_add_sysctl_with_invalid_key(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.sysctls.add("invalid.sysctl", 1)
|
||||
with pytest.raises(Exception):
|
||||
render.render()
|
||||
@@ -0,0 +1,132 @@
|
||||
import pytest
|
||||
from unittest.mock import patch
|
||||
|
||||
from pathlib import Path
|
||||
from validations import is_allowed_path, RESTRICTED, RESTRICTED_IN
|
||||
|
||||
|
||||
def mock_resolve(self):
|
||||
# Don't modify paths that are from RESTRICTED list initialization
|
||||
if str(self) in [str(p) for p in RESTRICTED]:
|
||||
return self
|
||||
|
||||
# For symlinks that point to restricted paths, return the target path
|
||||
# without stripping /private/
|
||||
if str(self).endswith("symlink_restricted"):
|
||||
return Path("/home") # Return the actual restricted target
|
||||
|
||||
# For other paths, strip /private/ if present
|
||||
return Path(str(self).removeprefix("/private/"))
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"test_path, expected",
|
||||
[
|
||||
# Non-restricted path (should be valid)
|
||||
("/tmp/somefile", True),
|
||||
# Exactly /mnt (restricted_in)
|
||||
("/mnt", False),
|
||||
# Exactly / (restricted_in)
|
||||
("/", False),
|
||||
# Subdirectory inside /mnt/.ix-apps (restricted)
|
||||
("/mnt/.ix-apps/something", False),
|
||||
# A path that is a restricted directory exactly
|
||||
("/home", False),
|
||||
("/var/log", False),
|
||||
("/mnt/.ix-apps", False),
|
||||
("/data", False),
|
||||
# Subdirectory inside e.g. /data
|
||||
("/data/subdir", False),
|
||||
# Not an obviously restricted path
|
||||
("/usr/local/share", True),
|
||||
# Another system path likely not in restricted list
|
||||
("/opt/myapp", True),
|
||||
],
|
||||
)
|
||||
@patch.object(Path, "resolve", mock_resolve)
|
||||
def test_is_allowed_path_direct(test_path, expected):
|
||||
"""Test direct paths against the is_allowed_path function."""
|
||||
assert is_allowed_path(test_path) == expected
|
||||
|
||||
|
||||
@patch.object(Path, "resolve", mock_resolve)
|
||||
def test_is_allowed_path_ix_volume():
|
||||
"""Test that IX volumes are not allowed"""
|
||||
assert is_allowed_path("/mnt/.ix-apps/something", True)
|
||||
|
||||
|
||||
@patch.object(Path, "resolve", mock_resolve)
|
||||
def test_is_allowed_path_symlink(tmp_path):
|
||||
"""
|
||||
Test that a symlink pointing to a restricted directory is detected as invalid,
|
||||
and a symlink pointing to an allowed directory is valid.
|
||||
"""
|
||||
# Create a real (allowed) directory and a restricted directory in a temp location
|
||||
allowed_dir = tmp_path / "allowed_dir"
|
||||
allowed_dir.mkdir()
|
||||
|
||||
restricted_dir = tmp_path / "restricted_dir"
|
||||
restricted_dir.mkdir()
|
||||
|
||||
# We will simulate that "restricted_dir" is actually a symlink pointing to e.g. "/var/log"
|
||||
# or we create a subdir to match the restricted pattern.
|
||||
# For demonstration, let's just patch it to a path in the restricted list.
|
||||
real_restricted_path = Path("/home") # This is one of the restricted directories
|
||||
|
||||
# Create symlinks to test
|
||||
symlink_allowed = tmp_path / "symlink_allowed"
|
||||
symlink_restricted = tmp_path / "symlink_restricted"
|
||||
|
||||
# Point the symlinks
|
||||
symlink_allowed.symlink_to(allowed_dir)
|
||||
symlink_restricted.symlink_to(real_restricted_path)
|
||||
|
||||
assert is_allowed_path(str(symlink_allowed)) is True
|
||||
assert is_allowed_path(str(symlink_restricted)) is False
|
||||
|
||||
|
||||
def test_is_allowed_path_nested_symlink(tmp_path):
|
||||
"""
|
||||
Test that even a nested symlink that eventually resolves into restricted
|
||||
directories is seen as invalid.
|
||||
"""
|
||||
# e.g., Create 2 symlinks that chain to /root
|
||||
link1 = tmp_path / "link1"
|
||||
link2 = tmp_path / "link2"
|
||||
|
||||
# link2 -> /root
|
||||
link2.symlink_to(Path("/root"))
|
||||
# link1 -> link2
|
||||
link1.symlink_to(link2)
|
||||
|
||||
assert is_allowed_path(str(link1)) is False
|
||||
|
||||
|
||||
def test_is_allowed_path_nonexistent(tmp_path):
|
||||
"""
|
||||
Test a path that does not exist at all. The code calls .resolve() which will
|
||||
give the absolute path, but if it's not restricted, it should still be valid.
|
||||
"""
|
||||
nonexistent = tmp_path / "this_does_not_exist"
|
||||
assert is_allowed_path(str(nonexistent)) is True
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"test_path",
|
||||
list(RESTRICTED),
|
||||
)
|
||||
@patch.object(Path, "resolve", mock_resolve)
|
||||
def test_is_allowed_path_restricted_list(test_path):
|
||||
"""Test that all items in the RESTRICTED list are invalid."""
|
||||
assert is_allowed_path(test_path) is False
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"test_path",
|
||||
list(RESTRICTED_IN),
|
||||
)
|
||||
def test_is_allowed_path_restricted_in_list(test_path):
|
||||
"""
|
||||
Test that items in RESTRICTED_IN are invalid.
|
||||
"""
|
||||
assert is_allowed_path(test_path) is False
|
||||
@@ -0,0 +1,746 @@
|
||||
import pytest
|
||||
|
||||
|
||||
from render import Render
|
||||
from formatter import get_hashed_name_for_volume
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_values():
|
||||
return {
|
||||
"images": {
|
||||
"test_image": {
|
||||
"repository": "nginx",
|
||||
"tag": "latest",
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def test_add_volume_invalid_type(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", {"type": "invalid_type"})
|
||||
|
||||
|
||||
def test_add_volume_empty_mount_path(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("", {"type": "tmpfs"})
|
||||
|
||||
|
||||
def test_add_volume_duplicate_mount_path(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.add_storage("/some/path", {"type": "tmpfs"})
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", {"type": "tmpfs"})
|
||||
|
||||
|
||||
def test_add_volume_host_path_invalid_propagation(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
host_path_config = {
|
||||
"type": "host_path",
|
||||
"host_path_config": {"path": "/mnt/test", "propagation": "invalid_propagation"},
|
||||
}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", host_path_config)
|
||||
|
||||
|
||||
def test_add_host_path_volume_no_host_path_config(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
host_path_config = {"type": "host_path"}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", host_path_config)
|
||||
|
||||
|
||||
def test_add_host_path_volume_no_path(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
host_path_config = {"type": "host_path", "host_path_config": {"path": ""}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", host_path_config)
|
||||
|
||||
|
||||
def test_add_host_path_with_acl_no_path(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
host_path_config = {"type": "host_path", "host_path_config": {"acl_enable": True, "acl": {"path": ""}}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", host_path_config)
|
||||
|
||||
|
||||
def test_add_host_path_volume_mount(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
host_path_config = {"type": "host_path", "host_path_config": {"path": "/mnt/test"}}
|
||||
c1.add_storage("/some/path", host_path_config)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"type": "bind",
|
||||
"source": "/mnt/test",
|
||||
"target": "/some/path",
|
||||
"read_only": False,
|
||||
"bind": {"create_host_path": False, "propagation": "rprivate"},
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def test_add_host_path_volume_mount_with_acl(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
host_path_config = {
|
||||
"type": "host_path",
|
||||
"host_path_config": {"path": "/mnt/test", "acl_enable": True, "acl": {"path": "/mnt/test/acl"}},
|
||||
}
|
||||
c1.add_storage("/some/path", host_path_config)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"type": "bind",
|
||||
"source": "/mnt/test/acl",
|
||||
"target": "/some/path",
|
||||
"read_only": False,
|
||||
"bind": {"create_host_path": False, "propagation": "rprivate"},
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def test_add_host_path_volume_mount_with_propagation(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
host_path_config = {"type": "host_path", "host_path_config": {"path": "/mnt/test", "propagation": "slave"}}
|
||||
c1.add_storage("/some/path", host_path_config)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"type": "bind",
|
||||
"source": "/mnt/test",
|
||||
"target": "/some/path",
|
||||
"read_only": False,
|
||||
"bind": {"create_host_path": False, "propagation": "slave"},
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def test_add_host_path_volume_mount_with_create_host_path(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
host_path_config = {"type": "host_path", "host_path_config": {"path": "/mnt/test", "create_host_path": True}}
|
||||
c1.add_storage("/some/path", host_path_config)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"type": "bind",
|
||||
"source": "/mnt/test",
|
||||
"target": "/some/path",
|
||||
"read_only": False,
|
||||
"bind": {"create_host_path": True, "propagation": "rprivate"},
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def test_add_host_path_volume_mount_with_read_only(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
host_path_config = {"type": "host_path", "read_only": True, "host_path_config": {"path": "/mnt/test"}}
|
||||
c1.add_storage("/some/path", host_path_config)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"type": "bind",
|
||||
"source": "/mnt/test",
|
||||
"target": "/some/path",
|
||||
"read_only": True,
|
||||
"bind": {"create_host_path": False, "propagation": "rprivate"},
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def test_add_ix_volume_invalid_dataset_name(mock_values):
|
||||
mock_values["ix_volumes"] = {"test_dataset": "/mnt/test"}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
ix_volume_config = {"type": "ix_volume", "ix_volume_config": {"dataset_name": "invalid_dataset"}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", ix_volume_config)
|
||||
|
||||
|
||||
def test_add_ix_volume_no_ix_volume_config(mock_values):
|
||||
mock_values["ix_volumes"] = {"test_dataset": "/mnt/test"}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
ix_volume_config = {"type": "ix_volume"}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", ix_volume_config)
|
||||
|
||||
|
||||
def test_add_ix_volume_volume_mount(mock_values):
|
||||
mock_values["ix_volumes"] = {"test_dataset": "/mnt/test"}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
ix_volume_config = {"type": "ix_volume", "ix_volume_config": {"dataset_name": "test_dataset"}}
|
||||
c1.add_storage("/some/path", ix_volume_config)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"type": "bind",
|
||||
"source": "/mnt/test",
|
||||
"target": "/some/path",
|
||||
"read_only": False,
|
||||
"bind": {"create_host_path": False, "propagation": "rprivate"},
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def test_add_ix_volume_volume_mount_with_options(mock_values):
|
||||
mock_values["ix_volumes"] = {"test_dataset": "/mnt/test"}
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
ix_volume_config = {
|
||||
"type": "ix_volume",
|
||||
"ix_volume_config": {"dataset_name": "test_dataset", "propagation": "rslave", "create_host_path": True},
|
||||
}
|
||||
c1.add_storage("/some/path", ix_volume_config)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"type": "bind",
|
||||
"source": "/mnt/test",
|
||||
"target": "/some/path",
|
||||
"read_only": False,
|
||||
"bind": {"create_host_path": True, "propagation": "rslave"},
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def test_cifs_volume_missing_server(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
cifs_config = {"type": "cifs", "cifs_config": {"path": "/path", "username": "user", "password": "password"}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", cifs_config)
|
||||
|
||||
|
||||
def test_cifs_volume_missing_path(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
cifs_config = {"type": "cifs", "cifs_config": {"server": "server", "username": "user", "password": "password"}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", cifs_config)
|
||||
|
||||
|
||||
def test_cifs_volume_missing_username(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
cifs_config = {"type": "cifs", "cifs_config": {"server": "server", "path": "/path", "password": "password"}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", cifs_config)
|
||||
|
||||
|
||||
def test_cifs_volume_missing_password(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
cifs_config = {"type": "cifs", "cifs_config": {"server": "server", "path": "/path", "username": "user"}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", cifs_config)
|
||||
|
||||
|
||||
def test_cifs_volume_without_cifs_config(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
cifs_config = {"type": "cifs"}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", cifs_config)
|
||||
|
||||
|
||||
def test_cifs_volume_duplicate_option(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
cifs_config = {
|
||||
"type": "cifs",
|
||||
"cifs_config": {
|
||||
"server": "server",
|
||||
"path": "/path",
|
||||
"username": "user",
|
||||
"password": "pas$word",
|
||||
"options": ["verbose=true", "verbose=true"],
|
||||
},
|
||||
}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", cifs_config)
|
||||
|
||||
|
||||
def test_cifs_volume_disallowed_option(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
cifs_config = {
|
||||
"type": "cifs",
|
||||
"cifs_config": {
|
||||
"server": "server",
|
||||
"path": "/path",
|
||||
"username": "user",
|
||||
"password": "pas$word",
|
||||
"options": ["user=username"],
|
||||
},
|
||||
}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", cifs_config)
|
||||
|
||||
|
||||
def test_cifs_volume_invalid_options(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
cifs_config = {
|
||||
"type": "cifs",
|
||||
"cifs_config": {
|
||||
"server": "server",
|
||||
"path": "/path",
|
||||
"username": "user",
|
||||
"password": "pas$word",
|
||||
"options": {"verbose": True},
|
||||
},
|
||||
}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", cifs_config)
|
||||
|
||||
|
||||
def test_cifs_volume_invalid_options2(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
cifs_config = {
|
||||
"type": "cifs",
|
||||
"cifs_config": {
|
||||
"server": "server",
|
||||
"path": "/path",
|
||||
"username": "user",
|
||||
"password": "pas$word",
|
||||
"options": [{"verbose": True}],
|
||||
},
|
||||
}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", cifs_config)
|
||||
|
||||
|
||||
def test_add_cifs_volume(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
cifs_inner_config = {"server": "server", "path": "/path", "username": "user", "password": "pas$word"}
|
||||
cifs_config = {"type": "cifs", "cifs_config": cifs_inner_config}
|
||||
c1.add_storage("/some/path", cifs_config)
|
||||
output = render.render()
|
||||
vol_name = get_hashed_name_for_volume("cifs", cifs_inner_config)
|
||||
assert output["volumes"] == {
|
||||
vol_name: {
|
||||
"driver_opts": {"type": "cifs", "device": "//server/path", "o": "noperm,password=pas$$word,user=user"}
|
||||
}
|
||||
}
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{"type": "volume", "source": vol_name, "target": "/some/path", "read_only": False, "volume": {"nocopy": False}}
|
||||
]
|
||||
|
||||
|
||||
def test_cifs_volume_with_options(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
cifs_inner_config = {
|
||||
"server": "server",
|
||||
"path": "/path",
|
||||
"username": "user",
|
||||
"password": "pas$word",
|
||||
"options": ["vers=3.0", "verbose=true"],
|
||||
}
|
||||
cifs_config = {"type": "cifs", "cifs_config": cifs_inner_config}
|
||||
c1.add_storage("/some/path", cifs_config)
|
||||
output = render.render()
|
||||
vol_name = get_hashed_name_for_volume("cifs", cifs_inner_config)
|
||||
assert output["volumes"] == {
|
||||
vol_name: {
|
||||
"driver_opts": {
|
||||
"type": "cifs",
|
||||
"device": "//server/path",
|
||||
"o": "noperm,password=pas$$word,user=user,verbose=true,vers=3.0",
|
||||
}
|
||||
}
|
||||
}
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{"type": "volume", "source": vol_name, "target": "/some/path", "read_only": False, "volume": {"nocopy": False}}
|
||||
]
|
||||
|
||||
|
||||
def test_nfs_volume_missing_server(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
nfs_config = {"type": "nfs", "nfs_config": {"path": "/path"}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", nfs_config)
|
||||
|
||||
|
||||
def test_nfs_volume_missing_path(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
nfs_config = {"type": "nfs", "nfs_config": {"server": "server"}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", nfs_config)
|
||||
|
||||
|
||||
def test_nfs_volume_without_nfs_config(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
nfs_config = {"type": "nfs"}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", nfs_config)
|
||||
|
||||
|
||||
def test_nfs_volume_duplicate_option(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
nfs_config = {
|
||||
"type": "nfs",
|
||||
"nfs_config": {"server": "server", "path": "/path", "options": ["verbose=true", "verbose=true"]},
|
||||
}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", nfs_config)
|
||||
|
||||
|
||||
def test_nfs_volume_disallowed_option(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
nfs_config = {"type": "nfs", "nfs_config": {"server": "server", "path": "/path", "options": ["addr=server"]}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", nfs_config)
|
||||
|
||||
|
||||
def test_nfs_volume_invalid_options(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
nfs_config = {"type": "nfs", "nfs_config": {"server": "server", "path": "/path", "options": {"verbose": True}}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", nfs_config)
|
||||
|
||||
|
||||
def test_nfs_volume_invalid_options2(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
nfs_config = {"type": "nfs", "nfs_config": {"server": "server", "path": "/path", "options": [{"verbose": True}]}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", nfs_config)
|
||||
|
||||
|
||||
def test_add_nfs_volume(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
nfs_inner_config = {"server": "server", "path": "/path"}
|
||||
nfs_config = {"type": "nfs", "nfs_config": nfs_inner_config}
|
||||
c1.add_storage("/some/path", nfs_config)
|
||||
output = render.render()
|
||||
vol_name = get_hashed_name_for_volume("nfs", nfs_inner_config)
|
||||
assert output["volumes"] == {vol_name: {"driver_opts": {"type": "nfs", "device": ":/path", "o": "addr=server"}}}
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{"type": "volume", "source": vol_name, "target": "/some/path", "read_only": False, "volume": {"nocopy": False}}
|
||||
]
|
||||
|
||||
|
||||
def test_nfs_volume_with_options(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
nfs_inner_config = {"server": "server", "path": "/path", "options": ["vers=3.0", "verbose=true"]}
|
||||
nfs_config = {"type": "nfs", "nfs_config": nfs_inner_config}
|
||||
c1.add_storage("/some/path", nfs_config)
|
||||
output = render.render()
|
||||
vol_name = get_hashed_name_for_volume("nfs", nfs_inner_config)
|
||||
assert output["volumes"] == {
|
||||
vol_name: {
|
||||
"driver_opts": {
|
||||
"type": "nfs",
|
||||
"device": ":/path",
|
||||
"o": "addr=server,verbose=true,vers=3.0",
|
||||
}
|
||||
}
|
||||
}
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{"type": "volume", "source": vol_name, "target": "/some/path", "read_only": False, "volume": {"nocopy": False}}
|
||||
]
|
||||
|
||||
|
||||
def test_tmpfs_invalid_size(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
vol_config = {"type": "tmpfs", "tmpfs_config": {"size": "2"}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", vol_config)
|
||||
|
||||
|
||||
def test_tmpfs_zero_size(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
vol_config = {"type": "tmpfs", "tmpfs_config": {"size": 0}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", vol_config)
|
||||
|
||||
|
||||
def test_tmpfs_invalid_mode(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
vol_config = {"type": "tmpfs", "tmpfs_config": {"mode": "invalid"}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", vol_config)
|
||||
|
||||
|
||||
def test_tmpfs_volume(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.add_storage("/some/path", {"type": "tmpfs"})
|
||||
c1.add_storage("/some/other/path", {"type": "tmpfs", "tmpfs_config": {"size": 100}})
|
||||
c1.add_storage(
|
||||
"/some/other/path2", {"type": "tmpfs", "tmpfs_config": {"size": 100, "mode": "0777", "uid": 1000, "gid": 1000}}
|
||||
)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["tmpfs"] == [
|
||||
"/some/other/path2:gid=1000,mode=0777,size=104857600,uid=1000",
|
||||
"/some/other/path:size=104857600",
|
||||
"/some/path",
|
||||
]
|
||||
|
||||
|
||||
def test_add_tmpfs_with_existing_volume(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.add_storage("/some/path", {"type": "volume", "volume_config": {"volume_name": "test_volume"}})
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", {"type": "tmpfs", "tmpfs_config": {"size": 100}})
|
||||
|
||||
|
||||
def test_add_volume_with_existing_tmpfs(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.add_storage("/some/path", {"type": "tmpfs", "tmpfs_config": {"size": 100}})
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", {"type": "volume", "volume_config": {"volume_name": "test_volume"}})
|
||||
|
||||
|
||||
def test_temporary_volume(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
vol_config = {"type": "temporary", "volume_config": {"volume_name": "test_temp_volume"}}
|
||||
c1.add_storage("/some/path", vol_config)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"source": "test_temp_volume",
|
||||
"type": "volume",
|
||||
"target": "/some/path",
|
||||
"read_only": False,
|
||||
"volume": {"nocopy": False},
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def test_docker_volume_missing_config(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
vol_config = {"type": "volume", "volume_config": {}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", vol_config)
|
||||
|
||||
|
||||
def test_docker_volume_missing_volume_name(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
vol_config = {"type": "volume", "volume_config": {"volume_name": ""}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", vol_config)
|
||||
|
||||
|
||||
def test_docker_volume(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
vol_config = {"type": "volume", "volume_config": {"volume_name": "test_volume"}}
|
||||
c1.add_storage("/some/path", vol_config)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"type": "volume",
|
||||
"source": "test_volume",
|
||||
"target": "/some/path",
|
||||
"read_only": False,
|
||||
"volume": {"nocopy": False},
|
||||
}
|
||||
]
|
||||
assert output["volumes"] == {"test_volume": {}}
|
||||
|
||||
|
||||
def test_anonymous_volume(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
vol_config = {"type": "anonymous", "volume_config": {"nocopy": True}}
|
||||
c1.add_storage("/some/path", vol_config)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{"type": "volume", "target": "/some/path", "read_only": False, "volume": {"nocopy": True}}
|
||||
]
|
||||
assert "volumes" not in output
|
||||
|
||||
|
||||
def test_add_udev(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.add_udev()
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"type": "bind",
|
||||
"source": "/run/udev",
|
||||
"target": "/run/udev",
|
||||
"read_only": True,
|
||||
"bind": {"create_host_path": False, "propagation": "rprivate"},
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def test_add_udev_not_read_only(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.add_udev(read_only=False)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"type": "bind",
|
||||
"source": "/run/udev",
|
||||
"target": "/run/udev",
|
||||
"read_only": False,
|
||||
"bind": {"create_host_path": False, "propagation": "rprivate"},
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def test_add_docker_socket(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.storage._add_docker_socket(mount_path="/var/run/docker.sock")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"type": "bind",
|
||||
"source": "/var/run/docker.sock",
|
||||
"target": "/var/run/docker.sock",
|
||||
"read_only": True,
|
||||
"bind": {"create_host_path": False, "propagation": "rprivate"},
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def test_add_docker_socket_not_read_only(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.storage._add_docker_socket(read_only=False, mount_path="/var/run/docker.sock")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"type": "bind",
|
||||
"source": "/var/run/docker.sock",
|
||||
"target": "/var/run/docker.sock",
|
||||
"read_only": False,
|
||||
"bind": {"create_host_path": False, "propagation": "rprivate"},
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def test_add_docker_socket_mount_path(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
c1.storage._add_docker_socket(mount_path="/some/path")
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"type": "bind",
|
||||
"source": "/var/run/docker.sock",
|
||||
"target": "/some/path",
|
||||
"read_only": True,
|
||||
"bind": {"create_host_path": False, "propagation": "rprivate"},
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
def test_host_path_with_disallowed_path(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
host_path_config = {"type": "host_path", "host_path_config": {"path": "/mnt"}}
|
||||
with pytest.raises(Exception):
|
||||
c1.add_storage("/some/path", host_path_config)
|
||||
|
||||
|
||||
def test_host_path_without_disallowed_path(mock_values):
|
||||
render = Render(mock_values)
|
||||
c1 = render.add_container("test_container", "test_image")
|
||||
c1.healthcheck.disable()
|
||||
host_path_config = {"type": "host_path", "host_path_config": {"path": "/mnt/test"}}
|
||||
c1.add_storage("/mnt", host_path_config)
|
||||
output = render.render()
|
||||
assert output["services"]["test_container"]["volumes"] == [
|
||||
{
|
||||
"type": "bind",
|
||||
"source": "/mnt/test",
|
||||
"target": "/mnt",
|
||||
"read_only": False,
|
||||
"bind": {"create_host_path": False, "propagation": "rprivate"},
|
||||
}
|
||||
]
|
||||
@@ -0,0 +1,75 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from container import Container
    from render import Render
    from storage import IxStorage

try:
    from .error import RenderError
    from .validations import valid_fs_path_or_raise, valid_octal_mode_or_raise
except ImportError:
    from error import RenderError
    from validations import valid_fs_path_or_raise, valid_octal_mode_or_raise


class Tmpfs:

    def __init__(self, render_instance: "Render", container_instance: "Container"):
        self._render_instance = render_instance
        self._container_instance = container_instance
        self._tmpfs: dict = {}

    def add(self, mount_path: str, config: "IxStorage"):
        mount_path = valid_fs_path_or_raise(mount_path)
        if self.is_defined(mount_path):
            raise RenderError(f"Tmpfs mount path [{mount_path}] already added")

        if self._container_instance.storage.is_defined(mount_path):
            raise RenderError(f"Tmpfs mount path [{mount_path}] already used for another volume mount")

        mount_config = config.get("tmpfs_config", {})
        size = mount_config.get("size", None)
        mode = mount_config.get("mode", None)
        uid = mount_config.get("uid", None)
        gid = mount_config.get("gid", None)

        if size is not None:
            if not isinstance(size, int):
                raise RenderError(f"Expected [size] to be an integer for [tmpfs] type, got [{size}]")
            if not size > 0:
                raise RenderError(f"Expected [size] to be greater than 0 for [tmpfs] type, got [{size}]")
            # Convert Mebibytes to Bytes
            size = size * 1024 * 1024

        if mode is not None:
            mode = valid_octal_mode_or_raise(mode)

        if uid is not None and not isinstance(uid, int):
            raise RenderError(f"Expected [uid] to be an integer for [tmpfs] type, got [{uid}]")

        if gid is not None and not isinstance(gid, int):
            raise RenderError(f"Expected [gid] to be an integer for [tmpfs] type, got [{gid}]")

        self._tmpfs[mount_path] = {}
        if size is not None:
            self._tmpfs[mount_path]["size"] = str(size)
        if mode is not None:
            self._tmpfs[mount_path]["mode"] = str(mode)
        if uid is not None:
            self._tmpfs[mount_path]["uid"] = str(uid)
        if gid is not None:
            self._tmpfs[mount_path]["gid"] = str(gid)

    def is_defined(self, mount_path: str):
        return mount_path in self._tmpfs

    def has_tmpfs(self):
        return bool(self._tmpfs)

    def render(self):
        result = []
        for mount_path, config in self._tmpfs.items():
            opts = sorted([f"{k}={v}" for k, v in config.items()])
            result.append(f"{mount_path}:{','.join(opts)}" if opts else mount_path)
        return sorted(result)
@@ -0,0 +1,344 @@
import re
import ipaddress
from pathlib import Path


try:
    from .error import RenderError
except ImportError:
    from error import RenderError

OCTAL_MODE_REGEX = re.compile(r"^0[0-7]{3}$")
RESTRICTED_IN: tuple[Path, ...] = (Path("/mnt"), Path("/"))
RESTRICTED: tuple[Path, ...] = (
    Path("/mnt/.ix-apps"),
    Path("/data"),
    Path("/var/db"),
    Path("/root"),
    Path("/conf"),
    Path("/audit"),
    Path("/var/run/middleware"),
    Path("/home"),
    Path("/boot"),
    Path("/var/log"),
)


def valid_security_opt_or_raise(opt: str):
    if ":" in opt or "=" in opt:
        raise RenderError(f"Security Option [{opt}] cannot contain [:] or [=]. Pass value as an argument")
    valid_opts = ["apparmor", "no-new-privileges", "seccomp", "systempaths", "label"]
    if opt not in valid_opts:
        raise RenderError(f"Security Option [{opt}] is not valid. Valid options are: [{', '.join(valid_opts)}]")

    return opt


def valid_port_bind_mode_or_raise(status: str):
    valid_statuses = ("published", "exposed", "")
    if status not in valid_statuses:
        raise RenderError(f"Invalid port status [{status}]")
    return status


def valid_pull_policy_or_raise(pull_policy: str):
    valid_policies = ("missing", "always", "never", "build")
    if pull_policy not in valid_policies:
        raise RenderError(f"Pull policy [{pull_policy}] is not valid. Valid options are: [{', '.join(valid_policies)}]")
    return pull_policy


def valid_ipc_mode_or_raise(ipc_mode: str, containers: list[str]):
    valid_modes = ("", "host", "private", "shareable", "none")
    if ipc_mode in valid_modes:
        return ipc_mode
    if ipc_mode.startswith("container:"):
        if ipc_mode[10:] not in containers:
            raise RenderError(f"IPC mode [{ipc_mode}] is not valid. Container [{ipc_mode[10:]}] does not exist")
        return ipc_mode
    raise RenderError(f"IPC mode [{ipc_mode}] is not valid. Valid options are: [{', '.join(valid_modes)}]")


def valid_pid_mode_or_raise(ipc_mode: str, containers: list[str]):
    valid_modes = ("", "host")
    if ipc_mode in valid_modes:
        return ipc_mode
    if ipc_mode.startswith("container:"):
        if ipc_mode[10:] not in containers:
            raise RenderError(f"PID mode [{ipc_mode}] is not valid. Container [{ipc_mode[10:]}] does not exist")
        return ipc_mode
    raise RenderError(f"PID mode [{ipc_mode}] is not valid. Valid options are: [{', '.join(valid_modes)}]")


def valid_sysctl_or_raise(sysctl: str, host_network: bool):
    if not sysctl:
        raise RenderError("Sysctl cannot be empty")
    if host_network and sysctl.startswith("net."):
        raise RenderError(f"Sysctl [{sysctl}] cannot start with [net.] when host network is enabled")

    valid_sysctls = [
        "kernel.msgmax",
        "kernel.msgmnb",
        "kernel.msgmni",
        "kernel.sem",
        "kernel.shmall",
        "kernel.shmmax",
        "kernel.shmmni",
        "kernel.shm_rmid_forced",
    ]
    # https://docs.docker.com/reference/cli/docker/container/run/#currently-supported-sysctls
    if not sysctl.startswith("fs.mqueue.") and not sysctl.startswith("net.") and sysctl not in valid_sysctls:
        raise RenderError(
            f"Sysctl [{sysctl}] is not valid. Valid options are: [{', '.join(valid_sysctls)}], [net.*], [fs.mqueue.*]"
        )
    return sysctl


def valid_redis_password_or_raise(password: str):
    forbidden_chars = [" ", "'", "#"]
    for char in forbidden_chars:
        if char in password:
            raise RenderError(f"Redis password cannot contain [{char}]")


def valid_octal_mode_or_raise(mode: str):
    mode = str(mode)
    if not OCTAL_MODE_REGEX.match(mode):
        raise RenderError(f"Expected [mode] to be an octal string, got [{mode}]")
    return mode


def valid_host_path_propagation(propagation: str):
    valid_propagations = ("shared", "slave", "private", "rshared", "rslave", "rprivate")
    if propagation not in valid_propagations:
        raise RenderError(f"Expected [propagation] to be one of [{', '.join(valid_propagations)}], got [{propagation}]")
    return propagation


def valid_portal_scheme_or_raise(scheme: str):
    schemes = ("http", "https")
    if scheme not in schemes:
        raise RenderError(f"Portal Scheme [{scheme}] is not valid. Valid options are: [{', '.join(schemes)}]")
    return scheme


def valid_port_or_raise(port: int):
    if port < 1 or port > 65535:
        raise RenderError(f"Invalid port [{port}]. Valid ports are between 1 and 65535")
    return port


def valid_ip_or_raise(ip: str):
    try:
        ipaddress.ip_address(ip)
    except ValueError:
        raise RenderError(f"Invalid IP address [{ip}]")
    return ip


def valid_port_mode_or_raise(mode: str):
    modes = ("ingress", "host")
    if mode not in modes:
        raise RenderError(f"Port Mode [{mode}] is not valid. Valid options are: [{', '.join(modes)}]")
    return mode


def valid_port_protocol_or_raise(protocol: str):
    protocols = ("tcp", "udp")
    if protocol not in protocols:
        raise RenderError(f"Port Protocol [{protocol}] is not valid. Valid options are: [{', '.join(protocols)}]")
    return protocol


def valid_depend_condition_or_raise(condition: str):
    valid_conditions = ("service_started", "service_healthy", "service_completed_successfully")
    if condition not in valid_conditions:
        raise RenderError(
            f"Depend Condition [{condition}] is not valid. Valid options are: [{', '.join(valid_conditions)}]"
        )
    return condition


def valid_cgroup_perm_or_raise(cgroup_perm: str):
    valid_cgroup_perms = ("r", "w", "m", "rw", "rm", "wm", "rwm", "")
    if cgroup_perm not in valid_cgroup_perms:
        raise RenderError(
            f"Cgroup Permission [{cgroup_perm}] is not valid. Valid options are: [{', '.join(valid_cgroup_perms)}]"
        )
    return cgroup_perm


def valid_cgroup_or_raise(cgroup: str):
    valid_cgroup = ("host", "private")
    if cgroup not in valid_cgroup:
        raise RenderError(f"Cgroup [{cgroup}] is not valid. Valid options are: [{', '.join(valid_cgroup)}]")
    return cgroup


def valid_device_cgroup_rule_or_raise(dev_grp_rule: str):
    parts = dev_grp_rule.split(" ")
    if len(parts) != 3:
        raise RenderError(
            f"Device Group Rule [{dev_grp_rule}] is not valid. Expected format is [<type> <major>:<minor> <permission>]"
        )

    valid_types = ("a", "b", "c")
    if parts[0] not in valid_types:
        raise RenderError(
            f"Device Group Rule [{dev_grp_rule}] is not valid. Expected type to be one of [{', '.join(valid_types)}]"
            f" but got [{parts[0]}]"
        )

    major, minor = parts[1].split(":")
    for part in (major, minor):
        if part != "*" and not part.isdigit():
            raise RenderError(
                f"Device Group Rule [{dev_grp_rule}] is not valid. Expected major and minor to be digits"
                f" or [*] but got [{major}] and [{minor}]"
            )

    valid_cgroup_perm_or_raise(parts[2])

    return dev_grp_rule


def allowed_dns_opt_or_raise(dns_opt: str):
    disallowed_dns_opts = []
    if dns_opt in disallowed_dns_opts:
        raise RenderError(f"DNS Option [{dns_opt}] is not allowed to be added.")
    return dns_opt


def valid_http_path_or_raise(path: str):
    path = _valid_path_or_raise(path)
    return path


def valid_fs_path_or_raise(path: str):
    # There is no reason to allow / as a path,
    # either on the host or on the container side.
    if path == "/":
        raise RenderError(f"Path [{path}] cannot be [/]")
    path = _valid_path_or_raise(path)
    return path


def is_allowed_path(input_path: str, is_ix_volume: bool = False) -> bool:
    """
    Validates that the given path (after resolving symlinks) is not
    one of the restricted paths or within those restricted directories.

    Returns True if the path is allowed, False otherwise.
    """
    # Resolve the path to avoid symlink bypasses
    real_path = Path(input_path).resolve()
    for restricted in RESTRICTED if not is_ix_volume else [r for r in RESTRICTED if r != Path("/mnt/.ix-apps")]:
        if real_path.is_relative_to(restricted):
            return False

    return real_path not in RESTRICTED_IN


def allowed_fs_host_path_or_raise(path: str, is_ix_volume: bool = False):
    if not is_allowed_path(path, is_ix_volume):
        raise RenderError(f"Path [{path}] is not allowed to be mounted.")
    return path


def _valid_path_or_raise(path: str):
    if path == "":
        raise RenderError(f"Path [{path}] cannot be empty")
    if not path.startswith("/"):
        raise RenderError(f"Path [{path}] must start with /")
    if "//" in path:
        raise RenderError(f"Path [{path}] cannot contain [//]")
    return path


def allowed_device_or_raise(path: str):
    disallowed_devices = ["/dev/dri", "/dev/kfd", "/dev/bus/usb", "/dev/snd", "/dev/net/tun"]
    if path in disallowed_devices:
        raise RenderError(f"Device [{path}] is not allowed to be manually added.")
    return path


def valid_network_mode_or_raise(mode: str, containers: list[str]):
    valid_modes = ("host", "none")
    if mode in valid_modes:
        return mode

    if mode.startswith("service:"):
        if mode[8:] not in containers:
            raise RenderError(f"Service [{mode[8:]}] not found")
        return mode

    raise RenderError(
        f"Invalid network mode [{mode}]. Valid options are: [{', '.join(valid_modes)}] or [service:<name>]"
    )


def valid_restart_policy_or_raise(policy: str, maximum_retry_count: int = 0):
    valid_restart_policies = ("always", "on-failure", "unless-stopped", "no")
    if policy not in valid_restart_policies:
        raise RenderError(
            f"Restart policy [{policy}] is not valid. Valid options are: [{', '.join(valid_restart_policies)}]"
        )
    if policy != "on-failure" and maximum_retry_count != 0:
        raise RenderError("Maximum retry count can only be set for [on-failure] restart policy")

    if maximum_retry_count < 0:
        raise RenderError("Maximum retry count must be a positive integer")

    return policy


def valid_cap_or_raise(cap: str):
    valid_policies = (
        "ALL",
        "AUDIT_CONTROL",
        "AUDIT_READ",
        "AUDIT_WRITE",
        "BLOCK_SUSPEND",
        "BPF",
        "CHECKPOINT_RESTORE",
        "CHOWN",
        "DAC_OVERRIDE",
        "DAC_READ_SEARCH",
        "FOWNER",
        "FSETID",
        "IPC_LOCK",
        "IPC_OWNER",
        "KILL",
        "LEASE",
        "LINUX_IMMUTABLE",
        "MAC_ADMIN",
        "MAC_OVERRIDE",
        "MKNOD",
        "NET_ADMIN",
        "NET_BIND_SERVICE",
        "NET_BROADCAST",
        "NET_RAW",
        "PERFMON",
        "SETFCAP",
        "SETGID",
        "SETPCAP",
        "SETUID",
        "SYS_ADMIN",
        "SYS_BOOT",
        "SYS_CHROOT",
        "SYS_MODULE",
        "SYS_NICE",
        "SYS_PACCT",
        "SYS_PTRACE",
        "SYS_RAWIO",
        "SYS_RESOURCE",
        "SYS_TIME",
        "SYS_TTY_CONFIG",
        "SYSLOG",
        "WAKE_ALARM",
    )

    if cap not in valid_policies:
        raise RenderError(f"Capability [{cap}] is not valid. " f"Valid options are: [{', '.join(valid_policies)}]")

    return cap
@@ -0,0 +1,87 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from render import Render
    from storage import IxStorage

try:
    from .error import RenderError
    from .formatter import merge_dicts_no_overwrite
    from .volume_mount_types import BindMountType, VolumeMountType
    from .volume_sources import HostPathSource, IxVolumeSource, CifsSource, NfsSource, VolumeSource
except ImportError:
    from error import RenderError
    from formatter import merge_dicts_no_overwrite
    from volume_mount_types import BindMountType, VolumeMountType
    from volume_sources import HostPathSource, IxVolumeSource, CifsSource, NfsSource, VolumeSource


class VolumeMount:
    def __init__(self, render_instance: "Render", mount_path: str, config: "IxStorage"):
        self._render_instance = render_instance
        self.mount_path: str = mount_path

        storage_type: str = config.get("type", "")
        if not storage_type:
            raise RenderError("Expected [type] to be set for volume mounts.")

        match storage_type:
            case "host_path":
                spec_type = "bind"
                mount_config = config.get("host_path_config")
                if mount_config is None:
                    raise RenderError("Expected [host_path_config] to be set for [host_path] type.")
                mount_type_specific_definition = BindMountType(self._render_instance, mount_config).render()
                source = HostPathSource(self._render_instance, mount_config).get()
            case "ix_volume":
                spec_type = "bind"
                mount_config = config.get("ix_volume_config")
                if mount_config is None:
                    raise RenderError("Expected [ix_volume_config] to be set for [ix_volume] type.")
                mount_type_specific_definition = BindMountType(self._render_instance, mount_config).render()
                source = IxVolumeSource(self._render_instance, mount_config).get()
            case "nfs":
                spec_type = "volume"
                mount_config = config.get("nfs_config")
                if mount_config is None:
                    raise RenderError("Expected [nfs_config] to be set for [nfs] type.")
                mount_type_specific_definition = VolumeMountType(self._render_instance, mount_config).render()
                source = NfsSource(self._render_instance, mount_config).get()
            case "cifs":
                spec_type = "volume"
                mount_config = config.get("cifs_config")
                if mount_config is None:
                    raise RenderError("Expected [cifs_config] to be set for [cifs] type.")
                mount_type_specific_definition = VolumeMountType(self._render_instance, mount_config).render()
                source = CifsSource(self._render_instance, mount_config).get()
            case "volume":
                spec_type = "volume"
                mount_config = config.get("volume_config")
                if mount_config is None:
                    raise RenderError("Expected [volume_config] to be set for [volume] type.")
                mount_type_specific_definition = VolumeMountType(self._render_instance, mount_config).render()
                source = VolumeSource(self._render_instance, mount_config).get()
            case "temporary":
                spec_type = "volume"
                mount_config = config.get("volume_config")
                if mount_config is None:
                    raise RenderError("Expected [volume_config] to be set for [temporary] type.")
                mount_type_specific_definition = VolumeMountType(self._render_instance, mount_config).render()
                source = VolumeSource(self._render_instance, mount_config).get()
            case "anonymous":
                spec_type = "volume"
                mount_config = config.get("volume_config") or {}
                mount_type_specific_definition = VolumeMountType(self._render_instance, mount_config).render()
                source = None
            case _:
                raise RenderError(f"Storage type [{storage_type}] is not supported for volume mounts.")

        common_spec = {"type": spec_type, "target": self.mount_path, "read_only": config.get("read_only", False)}
        if source is not None:
            common_spec["source"] = source
            self._render_instance.volumes.add_volume(source, storage_type, mount_config)  # type: ignore

        self.volume_mount_spec = merge_dicts_no_overwrite(common_spec, mount_type_specific_definition)

    def render(self) -> dict:
        return self.volume_mount_spec
@@ -0,0 +1,43 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from render import Render
    from storage import IxStorageVolumeConfig, IxStorageBindLikeConfigs


try:
    from .validations import valid_host_path_propagation
except ImportError:
    from validations import valid_host_path_propagation


class BindMountType:
    def __init__(self, render_instance: "Render", config: "IxStorageBindLikeConfigs"):
        self._render_instance = render_instance
        self.spec: dict = {}

        propagation = valid_host_path_propagation(config.get("propagation", "rprivate"))
        create_host_path = config.get("create_host_path", False)

        self.spec: dict = {
            "bind": {
                "create_host_path": create_host_path,
                "propagation": propagation,
            }
        }

    def render(self) -> dict:
        """Render the bind mount specification."""
        return self.spec


class VolumeMountType:
    def __init__(self, render_instance: "Render", config: "IxStorageVolumeConfig"):
        self._render_instance = render_instance
        self.spec: dict = {}

        self.spec: dict = {"volume": {"nocopy": config.get("nocopy", False)}}

    def render(self) -> dict:
        """Render the volume mount specification."""
        return self.spec
@@ -0,0 +1,108 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from render import Render
    from storage import IxStorageHostPathConfig, IxStorageIxVolumeConfig, IxStorageVolumeConfig

try:
    from .error import RenderError
    from .formatter import get_hashed_name_for_volume
    from .validations import valid_fs_path_or_raise, allowed_fs_host_path_or_raise
except ImportError:
    from error import RenderError
    from formatter import get_hashed_name_for_volume
    from validations import valid_fs_path_or_raise, allowed_fs_host_path_or_raise


class HostPathSource:
    def __init__(self, render_instance: "Render", config: "IxStorageHostPathConfig"):
        self._render_instance = render_instance
        self.source: str = ""

        if not config:
            raise RenderError("Expected [host_path_config] to be set for [host_path] type.")

        path = ""
        if config.get("acl_enable", False):
            acl_path = config.get("acl", {}).get("path")
            if not acl_path:
                raise RenderError("Expected [host_path_config.acl.path] to be set for [host_path] type.")
            path = valid_fs_path_or_raise(acl_path)
        else:
            path = valid_fs_path_or_raise(config.get("path", ""))

        path = path.rstrip("/")
        self.source = allowed_fs_host_path_or_raise(path)

    def get(self):
        return self.source


class IxVolumeSource:
    def __init__(self, render_instance: "Render", config: "IxStorageIxVolumeConfig"):
        self._render_instance = render_instance
        self.source: str = ""

        if not config:
            raise RenderError("Expected [ix_volume_config] to be set for [ix_volume] type.")
        dataset_name = config.get("dataset_name")
        if not dataset_name:
            raise RenderError("Expected [ix_volume_config.dataset_name] to be set for [ix_volume] type.")

        ix_volumes = self._render_instance.values.get("ix_volumes", {})
        if dataset_name not in ix_volumes:
            available = ", ".join(ix_volumes.keys())
            raise RenderError(
                f"Expected the key [{dataset_name}] to be set in [ix_volumes] for [ix_volume] type. "
                f"Available keys: [{available}]."
            )

        path = valid_fs_path_or_raise(ix_volumes[dataset_name].rstrip("/"))
        self.source = allowed_fs_host_path_or_raise(path, True)

    def get(self):
        return self.source


class CifsSource:
    def __init__(self, render_instance: "Render", config: dict):
        self._render_instance = render_instance
        self.source: str = ""

        if not config:
            raise RenderError("Expected [cifs_config] to be set for [cifs] type.")
        self.source = get_hashed_name_for_volume("cifs", config)

    def get(self):
        return self.source


class NfsSource:
    def __init__(self, render_instance: "Render", config: dict):
        self._render_instance = render_instance
        self.source: str = ""

        if not config:
            raise RenderError("Expected [nfs_config] to be set for [nfs] type.")
        self.source = get_hashed_name_for_volume("nfs", config)

    def get(self):
        return self.source


class VolumeSource:
    def __init__(self, render_instance: "Render", config: "IxStorageVolumeConfig"):
        self._render_instance = render_instance
        self.source: str = ""

        if not config:
            raise RenderError("Expected [volume_config] to be set for [volume] type.")

        volume_name: str = config.get("volume_name", "")
        if not volume_name:
            raise RenderError("Expected [volume_config.volume_name] to be set for [volume] type.")

        self.source = volume_name

    def get(self):
        return self.source
@@ -0,0 +1,130 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from render import Render
    from storage import IxStorageNfsConfig, IxStorageCifsConfig, IxStorageVolumeConfig


try:
    from .error import RenderError
    from .formatter import escape_dollar
    from .validations import valid_fs_path_or_raise
except ImportError:
    from error import RenderError
    from formatter import escape_dollar
    from validations import valid_fs_path_or_raise


class NfsVolume:
    def __init__(self, render_instance: "Render", config: "IxStorageNfsConfig"):
        self._render_instance = render_instance

        if not config:
            raise RenderError("Expected [nfs_config] to be set for [nfs] type")

        required_keys = ["server", "path"]
        for key in required_keys:
            if not config.get(key):
                raise RenderError(f"Expected [{key}] to be set for [nfs] type")

        opts = [f"addr={config['server']}"]
        cfg_options = config.get("options")
        if cfg_options:
            if not isinstance(cfg_options, list):
                raise RenderError("Expected [nfs_config.options] to be a list for [nfs] type")

            tracked_keys: set[str] = set()
            disallowed_opts = ["addr"]
            for opt in cfg_options:
                if not isinstance(opt, str):
                    raise RenderError("Options for [nfs] type must be a list of strings.")

                key = opt.split("=")[0]
                if key in tracked_keys:
                    raise RenderError(f"Option [{key}] already added for [nfs] type.")
                if key in disallowed_opts:
                    raise RenderError(f"Option [{key}] is not allowed for [nfs] type.")
                opts.append(opt)
                tracked_keys.add(key)

        opts.sort()

        path = valid_fs_path_or_raise(config["path"].rstrip("/"))
        self.volume_spec = {
            "driver_opts": {
                "type": "nfs",
                "device": f":{path}",
                "o": f"{','.join([escape_dollar(opt) for opt in opts])}",
            },
        }

    def get(self):
        return self.volume_spec


class CifsVolume:
    def __init__(self, render_instance: "Render", config: "IxStorageCifsConfig"):
        self._render_instance = render_instance
        self.volume_spec: dict = {}

        if not config:
            raise RenderError("Expected [cifs_config] to be set for [cifs] type")

        required_keys = ["server", "path", "username", "password"]
        for key in required_keys:
            if not config.get(key):
                raise RenderError(f"Expected [{key}] to be set for [cifs] type")

        opts = [
            "noperm",
            f"user={config['username']}",
            f"password={config['password']}",
        ]

        domain = config.get("domain")
        if domain:
            opts.append(f"domain={domain}")

        cfg_options = config.get("options")
        if cfg_options:
            if not isinstance(cfg_options, list):
                raise RenderError("Expected [cifs_config.options] to be a list for [cifs] type")

            tracked_keys: set[str] = set()
            disallowed_opts = ["user", "password", "domain", "noperm"]
            for opt in cfg_options:
                if not isinstance(opt, str):
                    raise RenderError("Options for [cifs] type must be a list of strings.")

                key = opt.split("=")[0]
                if key in tracked_keys:
                    raise RenderError(f"Option [{key}] already added for [cifs] type.")
                if key in disallowed_opts:
                    raise RenderError(f"Option [{key}] is not allowed for [cifs] type.")
                opts.append(opt)
                tracked_keys.add(key)
        opts.sort()

        server = config["server"].lstrip("/")
        path = config["path"].strip("/")
        path = valid_fs_path_or_raise("/" + path).lstrip("/")

        self.volume_spec = {
            "driver_opts": {
                "type": "cifs",
                "device": f"//{server}/{path}",
                "o": f"{','.join([escape_dollar(opt) for opt in opts])}",
            },
        }

    def get(self):
        return self.volume_spec


class DockerVolume:
    def __init__(self, render_instance: "Render", config: "IxStorageVolumeConfig"):
        self._render_instance = render_instance
        self.volume_spec: dict = {}

    def get(self):
        return self.volume_spec
@@ -0,0 +1,61 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from render import Render


try:
    from .error import RenderError
    from .storage import IxStorageVolumeLikeConfigs
    from .volume_types import NfsVolume, CifsVolume, DockerVolume
except ImportError:
    from error import RenderError
    from storage import IxStorageVolumeLikeConfigs
    from volume_types import NfsVolume, CifsVolume, DockerVolume


class Volumes:
    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self._volumes: dict[str, Volume] = {}

    def add_volume(self, source: str, storage_type: str, config: "IxStorageVolumeLikeConfigs"):
        # This method can be called many times from the volume mounts.
        # Only add the volume if it is not already added, but don't raise an error.
        if source == "":
            raise RenderError(f"Volume source [{source}] cannot be empty")

        if source in self._volumes:
            return

        self._volumes[source] = Volume(self._render_instance, storage_type, config)

    def has_volumes(self) -> bool:
        return bool(self._volumes)

    def render(self):
        return {name: v.render() for name, v in sorted(self._volumes.items()) if v.render() is not None}


class Volume:
    def __init__(
        self,
        render_instance: "Render",
        storage_type: str,
        config: "IxStorageVolumeLikeConfigs",
    ):
        self._render_instance = render_instance
        self.volume_spec: dict | None = {}

        match storage_type:
            case "nfs":
                self.volume_spec = NfsVolume(self._render_instance, config).get()  # type: ignore
            case "cifs":
                self.volume_spec = CifsVolume(self._render_instance, config).get()  # type: ignore
            case "volume" | "temporary":
                self.volume_spec = DockerVolume(self._render_instance, config).get()  # type: ignore
            case _:
                self.volume_spec = None

    def render(self):
        return self.volume_spec
@@ -0,0 +1,29 @@
resources:
  limits:
    cpus: 1.0
    memory: 1024

signal_cli_rest_api:
  mode: normal
  additional_envs: []

network:
  host_network: false
  rest_port:
    bind_mode: published
    port_number: 8081

run_as:
  user: 568
  group: 568

ix_volumes:
  config: /opt/tests/mnt/config

storage:
  config:
    type: ix_volume
    ix_volume_config:
      dataset_name: config
      create_host_path: true
  additional_storage: []