mirror of
https://github.com/MAGICGrants/truenas-apps.git
synced 2026-01-09 20:47:58 -05:00
Add mongodb (#2238)
* initial mongodb * add lib * update meta * update hashes * fix * logo * quiet
This commit is contained in:
3
ix-dev/community/mongodb/README.md
Normal file
3
ix-dev/community/mongodb/README.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# MongoDB
|
||||
|
||||
[MongoDB](https://www.mongodb.com/) is a document database designed for ease of application development and scaling.
|
||||
32
ix-dev/community/mongodb/app.yaml
Normal file
32
ix-dev/community/mongodb/app.yaml
Normal file
@@ -0,0 +1,32 @@
|
||||
app_version: 8.0.9
|
||||
capabilities: []
|
||||
categories:
|
||||
- database
|
||||
changelog_url: https://www.mongodb.com/docs/manual/release-notes/
|
||||
date_added: '2025-05-12'
|
||||
description: MongoDB is a document database designed for ease of application development
|
||||
and scaling.
|
||||
home: https://www.mongodb.com/
|
||||
host_mounts: []
|
||||
icon: https://media.sys.truenas.net/apps/mongodb/icons/icon.svg
|
||||
keywords:
|
||||
- database
|
||||
lib_version: 2.1.24
|
||||
lib_version_hash: 86fdabbf2e49acd315eca1b3731bf9aadbb48ad77f54697ad7b772543a0211ad
|
||||
maintainers:
|
||||
- email: dev@ixsystems.com
|
||||
name: truenas
|
||||
url: https://www.truenas.com/
|
||||
name: mongodb
|
||||
run_as_context:
|
||||
- description: MongoDB runs as any non-root user.
|
||||
gid: 568
|
||||
group_name: mongodb
|
||||
uid: 568
|
||||
user_name: mongodb
|
||||
screenshots: []
|
||||
sources:
|
||||
- https://hub.docker.com/_/mongo
|
||||
title: MongoDB
|
||||
train: community
|
||||
version: 1.0.0
|
||||
6
ix-dev/community/mongodb/item.yaml
Normal file
6
ix-dev/community/mongodb/item.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
categories:
|
||||
- database
|
||||
icon_url: https://media.sys.truenas.net/apps/mongodb/icons/icon.svg
|
||||
screenshots: []
|
||||
tags:
|
||||
- database
|
||||
11
ix-dev/community/mongodb/ix_values.yaml
Normal file
11
ix-dev/community/mongodb/ix_values.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
images:
|
||||
image:
|
||||
repository: mongo
|
||||
tag: 8.0.9
|
||||
v7_image:
|
||||
repository: mongo
|
||||
tag: 7.0.20
|
||||
|
||||
consts:
|
||||
mongodb_container_name: mongodb
|
||||
perms_container_name: permissions
|
||||
425
ix-dev/community/mongodb/questions.yaml
Normal file
425
ix-dev/community/mongodb/questions.yaml
Normal file
@@ -0,0 +1,425 @@
|
||||
groups:
|
||||
- name: MongoDB Configuration
|
||||
description: Configure MongoDB
|
||||
- name: Network Configuration
|
||||
description: Configure Network for MongoDB
|
||||
- name: Storage Configuration
|
||||
description: Configure Storage for MongoDB
|
||||
- name: Labels Configuration
|
||||
description: Configure Labels for MongoDB
|
||||
- name: Resources Configuration
|
||||
description: Configure Resources for MongoDB
|
||||
|
||||
questions:
|
||||
- variable: mongodb
|
||||
label: ""
|
||||
group: MongoDB Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: image_selector
|
||||
label: Image Selector
|
||||
description: |
|
||||
The image to use for MongoDB.</br>
|
||||
Selecting an image with pgvector will use a pgvector image (not the official MongoDB).
|
||||
schema:
|
||||
type: string
|
||||
default: "image"
|
||||
required: true
|
||||
enum:
|
||||
- value: "image"
|
||||
description: MongoDB 8 Image
|
||||
- value: "v7_image"
|
||||
description: MongoDB 7 Image
|
||||
- variable: user
|
||||
label: User
|
||||
description: The database user.
|
||||
schema:
|
||||
type: string
|
||||
default: ""
|
||||
required: true
|
||||
- variable: password
|
||||
label: Password
|
||||
description: The database password.
|
||||
schema:
|
||||
type: string
|
||||
default: ""
|
||||
required: true
|
||||
private: true
|
||||
- variable: database
|
||||
label: Database
|
||||
description: The database name.
|
||||
schema:
|
||||
type: string
|
||||
default: ""
|
||||
required: true
|
||||
- variable: additional_envs
|
||||
label: Additional Environment Variables
|
||||
description: Configure additional environment variables for MongoDB.
|
||||
schema:
|
||||
type: list
|
||||
default: []
|
||||
items:
|
||||
- variable: env
|
||||
label: Environment Variable
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: name
|
||||
label: Name
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: value
|
||||
label: Value
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
|
||||
- variable: network
|
||||
label: ""
|
||||
group: Network Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: tcp_port
|
||||
label: TCP Port
|
||||
description: The port for MongoDB TCP
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["host_network", "=", false]]
|
||||
attrs:
|
||||
- variable: bind_mode
|
||||
label: Port Bind Mode
|
||||
description: |
|
||||
The port bind mode.</br>
|
||||
- Publish: The port will be published on the host for external access.</br>
|
||||
- Expose: The port will be exposed for inter-container communication.</br>
|
||||
- None: The port will not be exposed or published.</br>
|
||||
Note: If the Dockerfile defines an EXPOSE directive,
|
||||
the port will still be exposed for inter-container communication regardless of this setting.
|
||||
schema:
|
||||
type: string
|
||||
default: "published"
|
||||
enum:
|
||||
- value: "published"
|
||||
description: Publish port on the host for external access
|
||||
- value: "exposed"
|
||||
description: Expose port for inter-container communication
|
||||
- value: ""
|
||||
description: None
|
||||
- variable: port_number
|
||||
label: Port Number
|
||||
schema:
|
||||
type: int
|
||||
default: 27017
|
||||
show_if: [["bind_mode", "=", "published"]]
|
||||
required: true
|
||||
$ref:
|
||||
- definitions/port
|
||||
- variable: host_ips
|
||||
label: Host IPs
|
||||
description: IPs on the host to bind this port
|
||||
schema:
|
||||
type: list
|
||||
show_if: [["bind_mode", "=", "published"]]
|
||||
default: []
|
||||
items:
|
||||
- variable: host_ip
|
||||
label: Host IP
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
$ref:
|
||||
- definitions/node_bind_ip
|
||||
- variable: host_network
|
||||
label: Host Network
|
||||
description: Bind to the host network. It's recommended to keep this disabled.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
- variable: storage
|
||||
label: ""
|
||||
group: Storage Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: data
|
||||
label: MongoDB Data Storage
|
||||
description: The path to store MongoDB Data.
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: type
|
||||
label: Type
|
||||
description: |
|
||||
ixVolume: Is dataset created automatically by the system.</br>
|
||||
Host Path: Is a path that already exists on the system.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
immutable: true
|
||||
default: "ix_volume"
|
||||
enum:
|
||||
- value: "host_path"
|
||||
description: Host Path (Path that already exists on the system)
|
||||
- value: "ix_volume"
|
||||
description: ixVolume (Dataset created automatically by the system)
|
||||
- variable: ix_volume_config
|
||||
label: ixVolume Configuration
|
||||
description: The configuration for the ixVolume dataset.
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "ix_volume"]]
|
||||
$ref:
|
||||
- "normalize/ix_volume"
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: dataset_name
|
||||
label: Dataset Name
|
||||
description: The name of the dataset to use for storage.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
immutable: true
|
||||
hidden: true
|
||||
default: "data"
|
||||
- variable: acl_entries
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
- variable: host_path_config
|
||||
label: Host Path Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "host_path"]]
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: acl
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
$ref:
|
||||
- "normalize/acl"
|
||||
- variable: path
|
||||
label: Host Path
|
||||
description: The host path to use for storage.
|
||||
schema:
|
||||
type: hostpath
|
||||
show_if: [["acl_enable", "=", false]]
|
||||
required: true
|
||||
- variable: additional_storage
|
||||
label: Additional Storage
|
||||
description: Additional storage for MongoDB.
|
||||
schema:
|
||||
type: list
|
||||
default: []
|
||||
items:
|
||||
- variable: storageEntry
|
||||
label: Storage Entry
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: type
|
||||
label: Type
|
||||
description: |
|
||||
ixVolume: Is dataset created automatically by the system.</br>
|
||||
Host Path: Is a path that already exists on the system.</br>
|
||||
SMB Share: Is a SMB share that is mounted to as a volume.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
default: "ix_volume"
|
||||
immutable: true
|
||||
enum:
|
||||
- value: "host_path"
|
||||
description: Host Path (Path that already exists on the system)
|
||||
- value: "ix_volume"
|
||||
description: ixVolume (Dataset created automatically by the system)
|
||||
- value: "cifs"
|
||||
description: SMB/CIFS Share (Mounts a volume to a SMB share)
|
||||
- variable: read_only
|
||||
label: Read Only
|
||||
description: Mount the volume as read only.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: mount_path
|
||||
label: Mount Path
|
||||
description: The path inside the container to mount the storage.
|
||||
schema:
|
||||
type: path
|
||||
required: true
|
||||
- variable: host_path_config
|
||||
label: Host Path Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "host_path"]]
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: acl
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
$ref:
|
||||
- "normalize/acl"
|
||||
- variable: path
|
||||
label: Host Path
|
||||
description: The host path to use for storage.
|
||||
schema:
|
||||
type: hostpath
|
||||
show_if: [["acl_enable", "=", false]]
|
||||
required: true
|
||||
- variable: ix_volume_config
|
||||
label: ixVolume Configuration
|
||||
description: The configuration for the ixVolume dataset.
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "ix_volume"]]
|
||||
$ref:
|
||||
- "normalize/ix_volume"
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: dataset_name
|
||||
label: Dataset Name
|
||||
description: The name of the dataset to use for storage.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
immutable: true
|
||||
default: "storage_entry"
|
||||
- variable: acl_entries
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
$ref:
|
||||
- "normalize/acl"
|
||||
- variable: cifs_config
|
||||
label: SMB Configuration
|
||||
description: The configuration for the SMB dataset.
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "cifs"]]
|
||||
attrs:
|
||||
- variable: server
|
||||
label: Server
|
||||
description: The server to mount the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: path
|
||||
label: Path
|
||||
description: The path to mount the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: username
|
||||
label: Username
|
||||
description: The username to use for the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: password
|
||||
label: Password
|
||||
description: The password to use for the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
private: true
|
||||
- variable: domain
|
||||
label: Domain
|
||||
description: The domain to use for the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
|
||||
- variable: labels
|
||||
label: ""
|
||||
group: Labels Configuration
|
||||
schema:
|
||||
type: list
|
||||
default: []
|
||||
items:
|
||||
- variable: label
|
||||
label: Label
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: key
|
||||
label: Key
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: value
|
||||
label: Value
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: containers
|
||||
label: Containers
|
||||
description: Containers where the label should be applied
|
||||
schema:
|
||||
type: list
|
||||
items:
|
||||
- variable: container
|
||||
label: Container
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
enum:
|
||||
- value: mongodb
|
||||
description: mongodb
|
||||
|
||||
- variable: resources
|
||||
label: ""
|
||||
group: Resources Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: limits
|
||||
label: Limits
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: cpus
|
||||
label: CPUs
|
||||
description: CPUs limit for MongoDB.
|
||||
schema:
|
||||
type: int
|
||||
default: 2
|
||||
required: true
|
||||
- variable: memory
|
||||
label: Memory (in MB)
|
||||
description: Memory limit for MongoDB.
|
||||
schema:
|
||||
type: int
|
||||
default: 4096
|
||||
required: true
|
||||
32
ix-dev/community/mongodb/templates/docker-compose.yaml
Normal file
32
ix-dev/community/mongodb/templates/docker-compose.yaml
Normal file
@@ -0,0 +1,32 @@
|
||||
{% set tpl = ix_lib.base.render.Render(values) %}
|
||||
|
||||
{% set c1 = tpl.add_container(values.consts.mongodb_container_name, values.mongodb.image_selector) %}
|
||||
{% set perm_container = tpl.deps.perms(values.consts.perms_container_name) %}
|
||||
|
||||
{% do c1.set_user(values.run_as.user, values.run_as.group) %}
|
||||
|
||||
{% set perms_config = {"uid": values.run_as.user, "gid": values.run_as.group, "mode": "check"} %}
|
||||
|
||||
{% do c1.add_storage("/data/db", values.storage.data) %}
|
||||
{% do perm_container.add_or_skip_action("data", values.storage.data, perms_config) %}
|
||||
|
||||
{% for store in values.storage.additional_storage %}
|
||||
{% do c1.add_storage(store.mount_path, store) %}
|
||||
{% do perm_container.add_or_skip_action(store.mount_path, store, perms_config) %}
|
||||
{% endfor %}
|
||||
|
||||
{% do c1.add_port(values.network.tcp_port, {"container_port": 27017}) %}
|
||||
|
||||
{% do c1.environment.add_env("MONGO_INITDB_ROOT_PASSWORD", values.mongodb.password) %}
|
||||
{% do c1.environment.add_env("MONGO_INITDB_ROOT_USERNAME", values.mongodb.user) %}
|
||||
{% do c1.environment.add_env("MONGO_INITDB_DATABASE", values.mongodb.database) %}
|
||||
{% do c1.environment.add_user_envs(values.mongodb.additional_envs) %}
|
||||
|
||||
{% do c1.healthcheck.set_test("mongodb", {"port": 27017}) %}
|
||||
|
||||
{% if perm_container.has_actions() %}
|
||||
{% do perm_container.activate() %}
|
||||
{% do c1.depends.add_dependency(values.consts.perms_container_name, "service_completed_successfully") %}
|
||||
{% endif %}
|
||||
|
||||
{{ tpl.render() | tojson }}
|
||||
@@ -95,6 +95,7 @@ def test_mapping(variant: str, config: dict | None = None) -> str:
|
||||
"redis": redis_test,
|
||||
"postgres": postgres_test,
|
||||
"mariadb": mariadb_test,
|
||||
"mongodb": mongodb_test,
|
||||
}
|
||||
|
||||
if variant not in tests:
|
||||
@@ -208,3 +209,11 @@ def mariadb_test(config: dict) -> str:
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
|
||||
return f"mariadb-admin --user=root --host={host} --port={port} --password=$MARIADB_ROOT_PASSWORD ping"
|
||||
|
||||
|
||||
def mongodb_test(config: dict) -> str:
    """Build the container healthcheck command for MongoDB.

    Reads optional "port" and "host" keys from *config* (neither is
    required) and returns a mongosh one-liner that pings the admin
    database. $MONGO_INITDB_DATABASE is expanded inside the container.
    """
    cfg = config or {}
    target_port = get_key(cfg, "port", 27017, False)
    target_host = get_key(cfg, "host", "127.0.0.1", False)

    return (
        f"mongosh --host {target_host} --port {target_port} "
        "$MONGO_INITDB_DATABASE --eval 'db.adminCommand(\"ping\")' --quiet"
    )
|
||||
@@ -208,3 +208,14 @@ def test_mariadb_healthcheck(mock_values):
|
||||
output["services"]["test_container"]["healthcheck"]["test"]
|
||||
== "mariadb-admin --user=root --host=127.0.0.1 --port=3306 --password=$$MARIADB_ROOT_PASSWORD ping"
|
||||
)
|
||||
|
||||
|
||||
def test_mongodb_healthcheck(mock_values):
    """Rendering a container with the "mongodb" healthcheck variant must
    emit the default mongosh ping command (dollar signs escaped for compose)."""
    render = Render(mock_values)
    container = render.add_container("test_container", "test_image")
    container.healthcheck.set_test("mongodb")

    rendered = render.render()
    expected = (
        "mongosh --host 127.0.0.1 --port 27017 $$MONGO_INITDB_DATABASE "
        "--eval 'db.adminCommand(\"ping\")' --quiet"
    )
    assert rendered["services"]["test_container"]["healthcheck"]["test"] == expected
|
||||
@@ -0,0 +1,28 @@
|
||||
TZ: Europe/Athens
|
||||
|
||||
mongodb:
|
||||
image_selector: image
|
||||
user: mongo
|
||||
password: mongo
|
||||
database: mongo
|
||||
|
||||
run_as:
|
||||
user: 568
|
||||
group: 568
|
||||
|
||||
network:
|
||||
tcp_port:
|
||||
bind_mode: published
|
||||
port_number: 8080
|
||||
host_network: false
|
||||
|
||||
ix_volumes:
|
||||
data: /opt/tests/mnt/data
|
||||
|
||||
storage:
|
||||
data:
|
||||
type: ix_volume
|
||||
ix_volume_config:
|
||||
dataset_name: data
|
||||
create_host_path: true
|
||||
additional_storage: []
|
||||
@@ -0,0 +1,28 @@
|
||||
TZ: Europe/Athens
|
||||
|
||||
mongodb:
|
||||
image_selector: v7_image
|
||||
user: mongo
|
||||
password: mongo
|
||||
database: mongo
|
||||
|
||||
run_as:
|
||||
user: 568
|
||||
group: 568
|
||||
|
||||
network:
|
||||
tcp_port:
|
||||
bind_mode: published
|
||||
port_number: 8080
|
||||
host_network: false
|
||||
|
||||
ix_volumes:
|
||||
data: /opt/tests/mnt/data
|
||||
|
||||
storage:
|
||||
data:
|
||||
type: ix_volume
|
||||
ix_volume_config:
|
||||
dataset_name: data
|
||||
create_host_path: true
|
||||
additional_storage: []
|
||||
0
library/2.1.24/__init__.py
Normal file
0
library/2.1.24/__init__.py
Normal file
86
library/2.1.24/configs.py
Normal file
86
library/2.1.24/configs.py
Normal file
@@ -0,0 +1,86 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .formatter import escape_dollar
|
||||
from .validations import valid_octal_mode_or_raise, valid_fs_path_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from formatter import escape_dollar
|
||||
from validations import valid_octal_mode_or_raise, valid_fs_path_or_raise
|
||||
|
||||
|
||||
class Configs:
    """Registry of top-level compose configs, keyed by name.

    Re-adding a name with identical data is a no-op; re-adding with
    different data is an error.
    """

    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        # name -> {"name": ..., "data": ...}
        self._configs: dict[str, dict] = {}

    def add(self, name: str, data: str):
        """Register config *name* carrying *data* (must be a string)."""
        if not isinstance(data, str):
            raise RenderError(f"Expected [data] to be a string, got [{type(data)}]")

        existing = self._configs.get(name)
        if existing is None:
            self._configs[name] = {"name": name, "data": data}
            return

        if existing["data"] == data:
            # Same payload registered twice: silently accept.
            return

        raise RenderError(f"Config [{name}] already added with different data")

    def has_configs(self):
        """Return True if at least one config has been registered."""
        return bool(self._configs)

    def render(self):
        """Render all configs sorted by name, with dollar signs escaped."""
        ordered = sorted(self._configs.values(), key=lambda cfg: cfg["name"])
        return {cfg["name"]: {"content": escape_dollar(cfg["data"])} for cfg in ordered}
|
||||
|
||||
|
||||
class ContainerConfigs:
    """Per-container config mounts, backed by the shared top-level registry.

    Each add() both registers the payload at the top level and records a
    (source, target, mode) mount for this container; target paths must be
    unique within the container.
    """

    def __init__(self, render_instance: "Render", configs: Configs):
        self._render_instance = render_instance
        self.top_level_configs: Configs = configs
        self.container_configs: set[ContainerConfig] = set()

    def add(self, name: str, data: str, target: str, mode: str = ""):
        """Register config *name* and mount it at *target* (optional octal *mode*)."""
        self.top_level_configs.add(name, data)

        if target == "":
            raise RenderError(f"Expected [target] to be set for config [{name}]")
        if mode != "":
            mode = valid_octal_mode_or_raise(mode)

        used_targets = {cfg.target for cfg in self.container_configs}
        if target in used_targets:
            raise RenderError(f"Target [{target}] already used for another config")
        target = valid_fs_path_or_raise(target)
        self.container_configs.add(ContainerConfig(self._render_instance, name, target, mode))

    def has_configs(self):
        """Return True if this container mounts any configs."""
        return bool(self.container_configs)

    def render(self):
        """Render this container's config mounts sorted by source name."""
        return [cfg.render() for cfg in sorted(self.container_configs, key=lambda cfg: cfg.source)]
|
||||
|
||||
|
||||
class ContainerConfig:
    """A single service-level config mount: source name, target path, optional mode."""

    def __init__(self, render_instance: "Render", source: str, target: str, mode: str):
        self._render_instance = render_instance
        self.source = source  # name of the top-level config
        self.target = target  # mount path inside the container
        self.mode = mode  # octal permission string; "" means unset

    def render(self):
        """Render the compose mapping; "mode" appears only when one was given."""
        rendered: dict[str, str | int] = {"source": self.source, "target": self.target}
        if self.mode:
            # Compose expects the mode as an integer; the string is octal.
            rendered["mode"] = int(self.mode, 8)
        return rendered
|
||||
437
library/2.1.24/container.py
Normal file
437
library/2.1.24/container.py
Normal file
@@ -0,0 +1,437 @@
|
||||
from typing import Any, TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
try:
|
||||
from .configs import ContainerConfigs
|
||||
from .depends import Depends
|
||||
from .deploy import Deploy
|
||||
from .device_cgroup_rules import DeviceCGroupRules
|
||||
from .devices import Devices
|
||||
from .dns import Dns
|
||||
from .environment import Environment
|
||||
from .error import RenderError
|
||||
from .expose import Expose
|
||||
from .extra_hosts import ExtraHosts
|
||||
from .formatter import escape_dollar, get_image_with_hashed_data
|
||||
from .healthcheck import Healthcheck
|
||||
from .labels import Labels
|
||||
from .ports import Ports
|
||||
from .restart import RestartPolicy
|
||||
from .tmpfs import Tmpfs
|
||||
from .validations import (
|
||||
valid_cap_or_raise,
|
||||
valid_cgroup_or_raise,
|
||||
valid_ipc_mode_or_raise,
|
||||
valid_network_mode_or_raise,
|
||||
valid_port_bind_mode_or_raise,
|
||||
valid_pull_policy_or_raise,
|
||||
)
|
||||
from .security_opts import SecurityOpts
|
||||
from .storage import Storage
|
||||
from .sysctls import Sysctls
|
||||
except ImportError:
|
||||
from configs import ContainerConfigs
|
||||
from depends import Depends
|
||||
from deploy import Deploy
|
||||
from device_cgroup_rules import DeviceCGroupRules
|
||||
from devices import Devices
|
||||
from dns import Dns
|
||||
from environment import Environment
|
||||
from error import RenderError
|
||||
from expose import Expose
|
||||
from extra_hosts import ExtraHosts
|
||||
from formatter import escape_dollar, get_image_with_hashed_data
|
||||
from healthcheck import Healthcheck
|
||||
from labels import Labels
|
||||
from ports import Ports
|
||||
from restart import RestartPolicy
|
||||
from tmpfs import Tmpfs
|
||||
from validations import (
|
||||
valid_cap_or_raise,
|
||||
valid_cgroup_or_raise,
|
||||
valid_ipc_mode_or_raise,
|
||||
valid_network_mode_or_raise,
|
||||
valid_port_bind_mode_or_raise,
|
||||
valid_pull_policy_or_raise,
|
||||
)
|
||||
from security_opts import SecurityOpts
|
||||
from storage import Storage
|
||||
from sysctls import Sysctls
|
||||
|
||||
|
||||
class Container:
|
||||
def __init__(self, render_instance: "Render", name: str, image: str):
|
||||
self._render_instance = render_instance
|
||||
|
||||
self._name: str = name
|
||||
self._image: str = self._resolve_image(image)
|
||||
self._build_image: str = ""
|
||||
self._pull_policy: str = ""
|
||||
self._user: str = ""
|
||||
self._tty: bool = False
|
||||
self._stdin_open: bool = False
|
||||
self._init: bool | None = None
|
||||
self._read_only: bool | None = None
|
||||
self._extra_hosts: ExtraHosts = ExtraHosts(self._render_instance)
|
||||
self._hostname: str = ""
|
||||
self._cap_drop: set[str] = set(["ALL"]) # Drop all capabilities by default and add caps granularly
|
||||
self._cap_add: set[str] = set()
|
||||
self._security_opt: SecurityOpts = SecurityOpts(self._render_instance)
|
||||
self._privileged: bool = False
|
||||
self._group_add: set[int | str] = set()
|
||||
self._network_mode: str = ""
|
||||
self._entrypoint: list[str] = []
|
||||
self._command: list[str] = []
|
||||
self._grace_period: int | None = None
|
||||
self._shm_size: int | None = None
|
||||
self._storage: Storage = Storage(self._render_instance, self)
|
||||
self._tmpfs: Tmpfs = Tmpfs(self._render_instance, self)
|
||||
self._ipc_mode: str | None = None
|
||||
self._cgroup: str | None = None
|
||||
self._device_cgroup_rules: DeviceCGroupRules = DeviceCGroupRules(self._render_instance)
|
||||
self.sysctls: Sysctls = Sysctls(self._render_instance, self)
|
||||
self.configs: ContainerConfigs = ContainerConfigs(self._render_instance, self._render_instance.configs)
|
||||
self.deploy: Deploy = Deploy(self._render_instance)
|
||||
self.networks: set[str] = set()
|
||||
self.devices: Devices = Devices(self._render_instance)
|
||||
self.environment: Environment = Environment(self._render_instance, self.deploy.resources)
|
||||
self.dns: Dns = Dns(self._render_instance)
|
||||
self.depends: Depends = Depends(self._render_instance)
|
||||
self.healthcheck: Healthcheck = Healthcheck(self._render_instance)
|
||||
self.labels: Labels = Labels(self._render_instance)
|
||||
self.restart: RestartPolicy = RestartPolicy(self._render_instance)
|
||||
self.ports: Ports = Ports(self._render_instance)
|
||||
self.expose: Expose = Expose(self._render_instance)
|
||||
|
||||
self._auto_set_network_mode()
|
||||
self._auto_add_labels()
|
||||
self._auto_add_groups()
|
||||
|
||||
def _auto_add_groups(self):
|
||||
self.add_group(568)
|
||||
|
||||
def _auto_set_network_mode(self):
|
||||
if self._render_instance.values.get("network", {}).get("host_network", False):
|
||||
self.set_network_mode("host")
|
||||
|
||||
def _auto_add_labels(self):
|
||||
labels = self._render_instance.values.get("labels", [])
|
||||
if not labels:
|
||||
return
|
||||
|
||||
for label in labels:
|
||||
containers = label.get("containers", [])
|
||||
if not containers:
|
||||
raise RenderError(f'Label [{label.get("key", "")}] must have at least one container')
|
||||
|
||||
if self._name in containers:
|
||||
self.labels.add_label(label["key"], label["value"])
|
||||
|
||||
def _resolve_image(self, image: str):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(
|
||||
f"Image [{image}] not found in values. " f"Available images: [{', '.join(images.keys())}]"
|
||||
)
|
||||
repo = images[image].get("repository", "")
|
||||
tag = images[image].get("tag", "")
|
||||
|
||||
if not repo:
|
||||
raise RenderError(f"Repository not found for image [{image}]")
|
||||
if not tag:
|
||||
raise RenderError(f"Tag not found for image [{image}]")
|
||||
|
||||
return f"{repo}:{tag}"
|
||||
|
||||
def build_image(self, content: list[str | None]):
|
||||
dockerfile = f"FROM {self._image}\n"
|
||||
for line in content:
|
||||
line = line.strip() if line else ""
|
||||
if not line:
|
||||
continue
|
||||
if line.startswith("FROM"):
|
||||
# TODO: This will also block multi-stage builds
|
||||
# We can revisit this later if we need it
|
||||
raise RenderError(
|
||||
"FROM cannot be used in build image. Define the base image when creating the container."
|
||||
)
|
||||
dockerfile += line + "\n"
|
||||
|
||||
self._build_image = dockerfile
|
||||
self._image = get_image_with_hashed_data(self._image, dockerfile)
|
||||
|
||||
def set_pull_policy(self, pull_policy: str):
|
||||
self._pull_policy = valid_pull_policy_or_raise(pull_policy)
|
||||
|
||||
def set_user(self, user: int, group: int):
|
||||
for i in (user, group):
|
||||
if not isinstance(i, int) or i < 0:
|
||||
raise RenderError(f"User/Group [{i}] is not valid")
|
||||
self._user = f"{user}:{group}"
|
||||
|
||||
def add_extra_host(self, host: str, ip: str):
|
||||
self._extra_hosts.add_host(host, ip)
|
||||
|
||||
def add_group(self, group: int | str):
|
||||
if isinstance(group, str):
|
||||
group = str(group).strip()
|
||||
if group.isdigit():
|
||||
raise RenderError(f"Group is a number [{group}] but passed as a string")
|
||||
|
||||
if group in self._group_add:
|
||||
raise RenderError(f"Group [{group}] already added")
|
||||
self._group_add.add(group)
|
||||
|
||||
def get_additional_groups(self) -> list[int | str]:
|
||||
result = []
|
||||
if self.deploy.resources.has_gpus() or self.devices.has_gpus():
|
||||
result.append(44) # video
|
||||
result.append(107) # render
|
||||
return result
|
||||
|
||||
def get_current_groups(self) -> list[str]:
|
||||
result = [str(g) for g in self._group_add]
|
||||
result.extend([str(g) for g in self.get_additional_groups()])
|
||||
return result
|
||||
|
||||
def set_tty(self, enabled: bool = False):
|
||||
self._tty = enabled
|
||||
|
||||
def set_stdin(self, enabled: bool = False):
|
||||
self._stdin_open = enabled
|
||||
|
||||
def set_ipc_mode(self, ipc_mode: str):
|
||||
self._ipc_mode = valid_ipc_mode_or_raise(ipc_mode, self._render_instance.container_names())
|
||||
|
||||
def add_device_cgroup_rule(self, dev_grp_rule: str):
|
||||
self._device_cgroup_rules.add_rule(dev_grp_rule)
|
||||
|
||||
def set_cgroup(self, cgroup: str):
|
||||
self._cgroup = valid_cgroup_or_raise(cgroup)
|
||||
|
||||
def set_init(self, enabled: bool = False):
|
||||
self._init = enabled
|
||||
|
||||
def set_read_only(self, enabled: bool = False):
|
||||
self._read_only = enabled
|
||||
|
||||
def set_hostname(self, hostname: str):
|
||||
self._hostname = hostname
|
||||
|
||||
def set_grace_period(self, grace_period: int):
|
||||
if grace_period < 0:
|
||||
raise RenderError(f"Grace period [{grace_period}] cannot be negative")
|
||||
self._grace_period = grace_period
|
||||
|
||||
def set_privileged(self, enabled: bool = False):
|
||||
self._privileged = enabled
|
||||
|
||||
def clear_caps(self):
|
||||
self._cap_add.clear()
|
||||
self._cap_drop.clear()
|
||||
|
||||
def add_caps(self, caps: list[str]):
|
||||
for c in caps:
|
||||
if c in self._cap_add:
|
||||
raise RenderError(f"Capability [{c}] already added")
|
||||
self._cap_add.add(valid_cap_or_raise(c))
|
||||
|
||||
def add_security_opt(self, key: str, value: str | bool | None = None, arg: str | None = None):
|
||||
self._security_opt.add_opt(key, value, arg)
|
||||
|
||||
def remove_security_opt(self, key: str):
|
||||
self._security_opt.remove_opt(key)
|
||||
|
||||
def set_network_mode(self, mode: str):
|
||||
self._network_mode = valid_network_mode_or_raise(mode, self._render_instance.container_names())
|
||||
|
||||
def add_port(self, port_config: dict | None = None, dev_config: dict | None = None):
|
||||
port_config = port_config or {}
|
||||
dev_config = dev_config or {}
|
||||
# Merge port_config and dev_config (dev_config has precedence)
|
||||
config = port_config | dev_config
|
||||
|
||||
bind_mode = valid_port_bind_mode_or_raise(config.get("bind_mode", ""))
|
||||
# Skip port if its neither published nor exposed
|
||||
if not bind_mode:
|
||||
return
|
||||
|
||||
# Collect port config
|
||||
host_port = config.get("port_number", 0)
|
||||
container_port = config.get("container_port", 0) or host_port
|
||||
protocol = config.get("protocol", "tcp")
|
||||
host_ips = config.get("host_ips") or ["0.0.0.0", "::"]
|
||||
if not isinstance(host_ips, list):
|
||||
raise RenderError(f"Expected [host_ips] to be a list, got [{host_ips}]")
|
||||
|
||||
if bind_mode == "published":
|
||||
for host_ip in host_ips:
|
||||
self.ports.add_port(host_port, container_port, {"protocol": protocol, "host_ip": host_ip})
|
||||
elif bind_mode == "exposed":
|
||||
self.expose.add_port(container_port, protocol)
|
||||
|
||||
def set_entrypoint(self, entrypoint: list[str]):
    """Set the container entrypoint; '$' is escaped so compose does not interpolate."""
    self._entrypoint = [escape_dollar(str(e)) for e in entrypoint]
|
||||
|
||||
def set_command(self, command: list[str]):
    """Set the container command; '$' is escaped so compose does not interpolate."""
    self._command = [escape_dollar(str(e)) for e in command]
|
||||
|
||||
def add_storage(self, mount_path: str, config: "IxStorage"):
    """Attach storage at mount_path; tmpfs configs are routed to the
    tmpfs collection, everything else becomes a regular volume mount."""
    is_tmpfs = config.get("type", "") == "tmpfs"
    target = self._tmpfs if is_tmpfs else self._storage
    target.add(mount_path, config)
|
||||
|
||||
def add_docker_socket(self, read_only: bool = True, mount_path: str = "/var/run/docker.sock"):
    """Mount the host Docker socket into the container.

    Also joins gid 999 so the process can access the socket.
    # NOTE(review): 999 is presumably the host's docker group gid — confirm.
    """
    self.add_group(999)
    self._storage._add_docker_socket(read_only, mount_path)
|
||||
|
||||
def add_udev(self, read_only: bool = True, mount_path: str = "/run/udev"):
    """Mount the host udev runtime directory into the container."""
    self._storage._add_udev(read_only, mount_path)
|
||||
|
||||
def add_tun_device(self):
    """Attach the TUN device to the container (delegated to Devices)."""
    self.devices._add_tun_device()
|
||||
|
||||
def add_snd_device(self):
    """Attach the sound device and join gid 29.
    # NOTE(review): 29 is presumably the host audio group gid — confirm.
    """
    self.add_group(29)
    self.devices._add_snd_device()
|
||||
|
||||
def set_shm_size_mb(self, size: int):
    """Set /dev/shm size in megabytes (rendered as "<size>M")."""
    self._shm_size = size
|
||||
|
||||
# Easily remove devices from the container
# Useful in dependencies like postgres and redis
# where there is no need to pass devices to them
def remove_devices(self):
    """Strip all device mappings and device resource reservations."""
    self.deploy.resources.remove_devices()
    self.devices.remove_devices()
|
||||
|
||||
@property
def storage(self):
    """The container's volume/mount collection (read-only accessor)."""
    return self._storage
|
||||
|
||||
def render(self) -> dict[str, Any]:
    """Produce the compose service mapping for this container.

    Only keys that were actually configured are emitted. Validates that
    [network_mode] and [networks] are not both set, and suppresses
    ports/expose under host networking.
    """
    if self._network_mode and self.networks:
        raise RenderError("Cannot set both [network_mode] and [networks]")

    # NOTE(review): platform is hard-coded to linux/amd64 — confirm this is
    # intended for non-amd64 deployments.
    result = {
        "image": self._image,
        "platform": "linux/amd64",
        "tty": self._tty,
        "stdin_open": self._stdin_open,
        "restart": self.restart.render(),
    }

    if self._pull_policy:
        result["pull_policy"] = self._pull_policy

    if self.healthcheck.has_healthcheck():
        result["healthcheck"] = self.healthcheck.render()

    if self._hostname:
        result["hostname"] = self._hostname

    if self._build_image:
        result["build"] = {"tags": [self._image], "dockerfile_inline": self._build_image}

    if self.configs.has_configs():
        result["configs"] = self.configs.render()

    if self._ipc_mode is not None:
        result["ipc"] = self._ipc_mode

    if self._device_cgroup_rules.has_rules():
        result["device_cgroup_rules"] = self._device_cgroup_rules.render()

    if self._cgroup is not None:
        result["cgroup"] = self._cgroup

    if self._extra_hosts.has_hosts():
        result["extra_hosts"] = self._extra_hosts.render()

    if self._init is not None:
        result["init"] = self._init

    if self._read_only is not None:
        result["read_only"] = self._read_only

    if self._grace_period is not None:
        result["stop_grace_period"] = f"{self._grace_period}s"

    if self._user:
        result["user"] = self._user

    # Pull in any groups contributed by other components before rendering
    for g in self.get_additional_groups():
        self.add_group(g)

    if self._group_add:
        # Sort ints before strings so output ordering is deterministic
        result["group_add"] = sorted(self._group_add, key=lambda g: (isinstance(g, str), g))

    if self._shm_size is not None:
        result["shm_size"] = f"{self._shm_size}M"

    if self._privileged is not None:
        result["privileged"] = self._privileged

    if self._cap_drop:
        result["cap_drop"] = sorted(self._cap_drop)

    if self._cap_add:
        result["cap_add"] = sorted(self._cap_add)

    if self._security_opt.has_opts():
        result["security_opt"] = self._security_opt.render()

    if self._network_mode:
        result["network_mode"] = self._network_mode

    if self.sysctls.has_sysctls():
        result["sysctls"] = self.sysctls.render()

    # Host networking owns all ports; publishing/exposing would be invalid
    if self._network_mode != "host":
        if self.ports.has_ports():
            result["ports"] = self.ports.render()

        if self.expose.has_ports():
            result["expose"] = self.expose.render()

    if self._entrypoint:
        result["entrypoint"] = self._entrypoint

    if self._command:
        result["command"] = self._command

    if self.devices.has_devices():
        result["devices"] = self.devices.render()

    if self.deploy.has_deploy():
        result["deploy"] = self.deploy.render()

    if self.environment.has_variables():
        result["environment"] = self.environment.render()

    if self.labels.has_labels():
        result["labels"] = self.labels.render()

    if self.dns.has_dns_nameservers():
        result["dns"] = self.dns.render_dns_nameservers()

    if self.dns.has_dns_searches():
        result["dns_search"] = self.dns.render_dns_searches()

    if self.dns.has_dns_opts():
        result["dns_opt"] = self.dns.render_dns_opts()

    if self.depends.has_dependencies():
        result["depends_on"] = self.depends.render()

    if self._storage.has_mounts():
        result["volumes"] = self._storage.render()

    if self._tmpfs.has_tmpfs():
        result["tmpfs"] = self._tmpfs.render()

    return result
|
||||
34
library/2.1.24/depends.py
Normal file
34
library/2.1.24/depends.py
Normal file
@@ -0,0 +1,34 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_depend_condition_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_depend_condition_or_raise
|
||||
|
||||
|
||||
class Depends:
    """Tracks depends_on relationships for a single container."""

    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        # Maps dependency container name -> depends_on condition
        self._dependencies: dict[str, str] = {}

    def add_dependency(self, name: str, condition: str):
        """Record that this container depends on [name] under [condition].

        Raises RenderError for duplicate dependencies or for names that are
        not defined containers in the current render.
        """
        condition = valid_depend_condition_or_raise(condition)
        if name in self._dependencies:
            raise RenderError(f"Dependency [{name}] already added")
        known = self._render_instance.container_names()
        if name not in known:
            raise RenderError(
                f"Dependency [{name}] not found in defined containers. "
                f"Available containers: [{', '.join(known)}]"
            )
        self._dependencies[name] = condition

    def has_dependencies(self):
        """Whether at least one dependency was registered."""
        return bool(self._dependencies)

    def render(self):
        """Render the compose depends_on mapping."""
        return {name: {"condition": cond} for name, cond in self._dependencies.items()}
|
||||
24
library/2.1.24/deploy.py
Normal file
24
library/2.1.24/deploy.py
Normal file
@@ -0,0 +1,24 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .resources import Resources
|
||||
except ImportError:
|
||||
from resources import Resources
|
||||
|
||||
|
||||
class Deploy:
    """Holds the compose deploy section (currently only resource limits)."""

    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self.resources: Resources = Resources(self._render_instance)

    def has_deploy(self):
        """True when there is anything to emit under deploy."""
        return self.resources.has_resources()

    def render(self):
        """Render {"resources": ...} when resources are set, else {}."""
        if not self.resources.has_resources():
            return {}
        return {"resources": self.resources.render()}
|
||||
32
library/2.1.24/deps.py
Normal file
32
library/2.1.24/deps.py
Normal file
@@ -0,0 +1,32 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .deps_postgres import PostgresContainer, PostgresConfig
|
||||
from .deps_redis import RedisContainer, RedisConfig
|
||||
from .deps_mariadb import MariadbContainer, MariadbConfig
|
||||
from .deps_perms import PermsContainer
|
||||
except ImportError:
|
||||
from deps_postgres import PostgresContainer, PostgresConfig
|
||||
from deps_redis import RedisContainer, RedisConfig
|
||||
from deps_mariadb import MariadbContainer, MariadbConfig
|
||||
from deps_perms import PermsContainer
|
||||
|
||||
|
||||
class Deps:
    """Factory for standard dependency containers (perms, postgres, redis, mariadb)."""

    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance

    def perms(self, name: str):
        """Create a permissions-fix container helper."""
        return PermsContainer(self._render_instance, name)

    def postgres(self, name: str, image: str, config: PostgresConfig, perms_instance: PermsContainer):
        """Create a PostgreSQL dependency container."""
        return PostgresContainer(self._render_instance, name, image, config, perms_instance)

    def redis(self, name: str, image: str, config: RedisConfig, perms_instance: PermsContainer):
        """Create a Redis dependency container."""
        return RedisContainer(self._render_instance, name, image, config, perms_instance)

    def mariadb(self, name: str, image: str, config: MariadbConfig, perms_instance: PermsContainer):
        """Create a MariaDB dependency container."""
        return MariadbContainer(self._render_instance, name, image, config, perms_instance)
|
||||
81
library/2.1.24/deps_mariadb.py
Normal file
81
library/2.1.24/deps_mariadb.py
Normal file
@@ -0,0 +1,81 @@
|
||||
from typing import TYPE_CHECKING, TypedDict, NotRequired
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .deps_perms import PermsContainer
|
||||
from .validations import valid_port_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from deps_perms import PermsContainer
|
||||
from validations import valid_port_or_raise
|
||||
|
||||
|
||||
class MariadbConfig(TypedDict):
    """Configuration for the MariaDB dependency container."""

    user: str  # database user to create
    password: str  # password for [user]
    database: str  # database name to create
    root_password: NotRequired[str]  # falls back to [password] when unset
    port: NotRequired[int]  # falls back to 3306 when unset
    auto_upgrade: NotRequired[bool]  # MARIADB_AUTO_UPGRADE; defaults to True
    volume: "IxStorage"  # storage mounted at /var/lib/mysql
|
||||
|
||||
|
||||
class MariadbContainer:
    """Defines a MariaDB dependency container and registers its data-volume
    permissions action. The container runs as uid/gid 999."""

    def __init__(
        self, render_instance: "Render", name: str, image: str, config: MariadbConfig, perms_instance: PermsContainer
    ):
        self._render_instance = render_instance
        self._name = name
        self._config = config

        for key in ("user", "password", "database", "volume"):
            if key not in config:
                raise RenderError(f"Expected [{key}] to be set for mariadb")

        port = valid_port_or_raise(self._get_port())
        root_password = config.get("root_password") or config["password"]
        auto_upgrade = config.get("auto_upgrade", True)

        # Fixed: ("mariadb") is just a parenthesized string, so the repo check
        # degraded into a substring test; a single-element tuple is required.
        self._get_repo(image, ("mariadb",))
        c = self._render_instance.add_container(name, image)
        c.set_user(999, 999)
        c.healthcheck.set_test("mariadb")
        c.remove_devices()

        c.add_storage("/var/lib/mysql", config["volume"])
        perms_instance.add_or_skip_action(
            f"{self._name}_mariadb_data", config["volume"], {"uid": 999, "gid": 999, "mode": "check"}
        )

        c.environment.add_env("MARIADB_USER", config["user"])
        c.environment.add_env("MARIADB_PASSWORD", config["password"])
        c.environment.add_env("MARIADB_ROOT_PASSWORD", root_password)
        c.environment.add_env("MARIADB_DATABASE", config["database"])
        c.environment.add_env("MARIADB_AUTO_UPGRADE", str(auto_upgrade).lower())
        c.set_command(["--port", str(port)])

        # Store container for further configuration
        # For example: c.depends.add_dependency("other_container", "service_started")
        self._container = c

    def _get_port(self):
        """Configured port, defaulting to 3306."""
        return self._config.get("port") or 3306

    def _get_repo(self, image, supported_repos):
        """Validate that [image] exists in values and comes from a supported repo."""
        images = self._render_instance.values["images"]
        if image not in images:
            raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
        repo = images[image].get("repository")
        if not repo:
            raise RenderError("Could not determine repo")
        if repo not in supported_repos:
            raise RenderError(f"Unsupported repo [{repo}] for mariadb. Supported repos: {', '.join(supported_repos)}")
        return repo

    @property
    def container(self):
        """The underlying container, for further configuration by callers."""
        return self._container
||||
252
library/2.1.24/deps_perms.py
Normal file
252
library/2.1.24/deps_perms.py
Normal file
@@ -0,0 +1,252 @@
|
||||
import json
|
||||
import pathlib
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_octal_mode_or_raise, valid_fs_path_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_octal_mode_or_raise, valid_fs_path_or_raise
|
||||
|
||||
|
||||
class PermsContainer:
    """Collects volume permission-fix actions and renders them into a
    one-shot container that runs before the app's containers."""

    def __init__(self, render_instance: "Render", name: str):
        self._render_instance = render_instance
        self._name = name
        # Identifiers already registered (prevents duplicate actions)
        self.actions: set[str] = set()
        # Parsed action dicts, one per volume that opted into auto permissions
        self.parsed_configs: list[dict] = []

    def add_or_skip_action(self, identifier: str, volume_config: "IxStorage", action_config: dict):
        """Register a permission action for a volume.

        Silently skipped when the volume's type/flags opt out of auto
        permissions; raises RenderError on duplicate identifiers.
        """
        identifier = self.normalize_identifier_for_path(identifier)
        if identifier in self.actions:
            raise RenderError(f"Action with id [{identifier}] already used for another permission action")

        parsed_action = self.parse_action(identifier, volume_config, action_config)
        if parsed_action:
            self.parsed_configs.append(parsed_action)
            self.actions.add(identifier)

    def parse_action(self, identifier: str, volume_config: "IxStorage", action_config: dict):
        """Validate one action; returns the parsed dict or None when the
        volume type/flags mean no permissions should be applied."""
        valid_modes = [
            "always",  # Always set permissions, without checking.
            "check",  # Checks if permissions are correct, and set them if not.
        ]
        mode = action_config.get("mode", "check")
        uid = action_config.get("uid", None)
        gid = action_config.get("gid", None)
        chmod = action_config.get("chmod", None)
        recursive = action_config.get("recursive", False)
        # Each volume gets its own mount point inside the perms container
        mount_path = pathlib.Path("/mnt/permission", identifier).as_posix()
        is_temporary = False

        vol_type = volume_config.get("type", "")
        match vol_type:
            case "temporary":
                # If it is a temporary volume, we force auto permissions
                # and set is_temporary to True, so it will be cleaned up
                is_temporary = True
                recursive = True
            case "volume":
                if not volume_config.get("volume_config", {}).get("auto_permissions", False):
                    return None
            case "host_path":
                host_path_config = volume_config.get("host_path_config", {})
                # Skip when ACL enabled
                if host_path_config.get("acl_enable", False):
                    return None
                if not host_path_config.get("auto_permissions", False):
                    return None
            case "ix_volume":
                ix_vol_config = volume_config.get("ix_volume_config", {})
                # Skip when ACL enabled
                if ix_vol_config.get("acl_enable", False):
                    return None
                # For ix_volumes, we default to auto_permissions = True
                if not ix_vol_config.get("auto_permissions", True):
                    return None
            case _:
                # Skip for other types
                return None

        if mode not in valid_modes:
            raise RenderError(f"Expected [mode] to be one of [{', '.join(valid_modes)}], got [{mode}]")
        if not isinstance(uid, int) or not isinstance(gid, int):
            raise RenderError("Expected [uid] and [gid] to be set when [auto_permissions] is enabled")
        if chmod is not None:
            chmod = valid_octal_mode_or_raise(chmod)

        mount_path = valid_fs_path_or_raise(mount_path)
        return {
            "mount_path": mount_path,
            "volume_config": volume_config,
            "action_data": {
                "mount_path": mount_path,
                "is_temporary": is_temporary,
                "identifier": identifier,
                "recursive": recursive,
                "mode": mode,
                "uid": uid,
                "gid": gid,
                "chmod": chmod,
            },
        }

    def normalize_identifier_for_path(self, identifier: str):
        """Turn an arbitrary identifier into a safe single path component."""
        return identifier.rstrip("/").lstrip("/").lower().replace("/", "_").replace(".", "-").replace(" ", "-")

    def has_actions(self):
        """Whether any permission action has been registered."""
        return bool(self.actions)

    def activate(self):
        """Materialize the permissions container with all registered actions.

        Raises RenderError when called with no actions registered.
        """
        if len(self.parsed_configs) != len(self.actions):
            raise RenderError("Number of actions and parsed configs does not match")

        if not self.has_actions():
            raise RenderError("No actions added. Check if there are actions before activating")

        # Add the container and set it up
        c = self._render_instance.add_container(self._name, "python_permissions_image")
        # Runs as root with only the caps needed to chown/chmod
        c.set_user(0, 0)
        c.add_caps(["CHOWN", "FOWNER", "DAC_OVERRIDE"])
        c.set_network_mode("none")

        # Don't attach any devices
        c.remove_devices()

        c.deploy.resources.set_profile("medium")
        c.restart.set_policy("on-failure", maximum_retry_count=1)
        c.healthcheck.disable()

        c.set_entrypoint(["python3", "/script/run.py"])
        script = "#!/usr/bin/env python3\n"
        script += get_script()
        c.configs.add("permissions_run_script", script, "/script/run.py", "0700")

        actions_data: list[dict] = []
        for parsed in self.parsed_configs:
            c.add_storage(parsed["mount_path"], parsed["volume_config"])
            actions_data.append(parsed["action_data"])

        actions_data_json = json.dumps(actions_data)
        c.configs.add("permissions_actions_data", actions_data_json, "/script/actions.json", "0500")
|
||||
|
||||
|
||||
def get_script():
    """Return the Python source executed inside the permissions container.

    The script reads /script/actions.json and applies ownership/permission
    fixes per action.
    # NOTE(review): the recursive walks only chmod/chown *files* — nested
    # directories are skipped; confirm that is intended.
    # NOTE(review): print_chown_stat/print_chmod_stat read the global
    # `action` loop variable rather than taking a path argument — fragile
    # but works because they are only called from inside the loop.
    """
    return """
import os
import json
import time
import shutil

with open("/script/actions.json", "r") as f:
    actions_data = json.load(f)

if not actions_data:
    # If this script is called, there should be actions data
    raise ValueError("No actions data found")


def fix_perms(path, chmod, recursive=False):
    print(f"Changing permissions{' recursively ' if recursive else ' '}to {chmod} on: [{path}]")
    os.chmod(path, int(chmod, 8))
    if recursive:
        for root, dirs, files in os.walk(path):
            for f in files:
                os.chmod(os.path.join(root, f), int(chmod, 8))
    print("Permissions after changes:")
    print_chmod_stat()


def fix_owner(path, uid, gid, recursive=False):
    print(f"Changing ownership{' recursively ' if recursive else ' '}to {uid}:{gid} on: [{path}]")
    os.chown(path, uid, gid)
    if recursive:
        for root, dirs, files in os.walk(path):
            for f in files:
                os.chown(os.path.join(root, f), uid, gid)
    print("Ownership after changes:")
    print_chown_stat()


def print_chown_stat():
    curr_stat = os.stat(action["mount_path"])
    print(f"Ownership: [{curr_stat.st_uid}:{curr_stat.st_gid}]")


def print_chmod_stat():
    curr_stat = os.stat(action["mount_path"])
    print(f"Permissions: [{oct(curr_stat.st_mode)[3:]}]")


def print_chown_diff(curr_stat, uid, gid):
    print(f"Ownership: wanted [{uid}:{gid}], got [{curr_stat.st_uid}:{curr_stat.st_gid}].")


def print_chmod_diff(curr_stat, mode):
    print(f"Permissions: wanted [{mode}], got [{oct(curr_stat.st_mode)[3:]}].")


def perform_action(action):
    start_time = time.time()
    print(f"=== Applying configuration on volume with identifier [{action['identifier']}] ===")

    if not os.path.isdir(action["mount_path"]):
        print(f"Path [{action['mount_path']}] is not a directory, skipping...")
        return

    if action["is_temporary"]:
        print(f"Path [{action['mount_path']}] is a temporary directory, ensuring it is empty...")
        for item in os.listdir(action["mount_path"]):
            item_path = os.path.join(action["mount_path"], item)

            # Exclude the safe directory, where we can use to mount files temporarily
            if os.path.basename(item_path) == "ix-safe":
                continue
            if os.path.isdir(item_path):
                shutil.rmtree(item_path)
            else:
                os.remove(item_path)

    if not action["is_temporary"] and os.listdir(action["mount_path"]):
        print(f"Path [{action['mount_path']}] is not empty, skipping...")
        return

    print(f"Current Ownership and Permissions on [{action['mount_path']}]:")
    curr_stat = os.stat(action["mount_path"])
    print_chown_diff(curr_stat, action["uid"], action["gid"])
    print_chmod_diff(curr_stat, action["chmod"])
    print("---")

    if action["mode"] == "always":
        fix_owner(action["mount_path"], action["uid"], action["gid"], action["recursive"])
        if not action["chmod"]:
            print("Skipping permissions check, chmod is falsy")
        else:
            fix_perms(action["mount_path"], action["chmod"], action["recursive"])
        return

    elif action["mode"] == "check":
        if curr_stat.st_uid != action["uid"] or curr_stat.st_gid != action["gid"]:
            print("Ownership is incorrect. Fixing...")
            fix_owner(action["mount_path"], action["uid"], action["gid"], action["recursive"])
        else:
            print("Ownership is correct. Skipping...")

        if not action["chmod"]:
            print("Skipping permissions check, chmod is falsy")
        else:
            if oct(curr_stat.st_mode)[3:] != action["chmod"]:
                print("Permissions are incorrect. Fixing...")
                fix_perms(action["mount_path"], action["chmod"], action["recursive"])
            else:
                print("Permissions are correct. Skipping...")

    print(f"Time taken: {(time.time() - start_time) * 1000:.2f}ms")
    print(f"=== Finished applying configuration on volume with identifier [{action['identifier']}] ==")
    print()


if __name__ == "__main__":
    start_time = time.time()
    for action in actions_data:
        perform_action(action)
    print(f"Total time taken: {(time.time() - start_time) * 1000:.2f}ms")
"""
|
||||
152
library/2.1.24/deps_postgres.py
Normal file
152
library/2.1.24/deps_postgres.py
Normal file
@@ -0,0 +1,152 @@
|
||||
import urllib.parse
|
||||
from typing import TYPE_CHECKING, TypedDict, NotRequired
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .deps_perms import PermsContainer
|
||||
from .validations import valid_port_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from deps_perms import PermsContainer
|
||||
from validations import valid_port_or_raise
|
||||
|
||||
|
||||
class PostgresConfig(TypedDict):
    """Configuration for the PostgreSQL dependency container."""

    user: str  # database user to create
    password: str  # password for [user]
    database: str  # database name to create
    port: NotRequired[int]  # falls back to 5432 when unset
    volume: "IxStorage"  # storage mounted at the postgres data dir
|
||||
|
||||
|
||||
# Highest Postgres major version the bundled upgrade tooling accepts.
MAX_POSTGRES_VERSION = 17
|
||||
|
||||
|
||||
class PostgresContainer:
|
||||
def __init__(
|
||||
self, render_instance: "Render", name: str, image: str, config: PostgresConfig, perms_instance: PermsContainer
|
||||
):
|
||||
self._render_instance = render_instance
|
||||
self._name = name
|
||||
self._config = config
|
||||
self._data_dir = "/var/lib/postgresql/data"
|
||||
self._upgrade_name = f"{self._name}_upgrade"
|
||||
self._upgrade_container = None
|
||||
|
||||
for key in ("user", "password", "database", "volume"):
|
||||
if key not in config:
|
||||
raise RenderError(f"Expected [{key}] to be set for postgres")
|
||||
|
||||
port = valid_port_or_raise(self._get_port())
|
||||
|
||||
c = self._render_instance.add_container(name, image)
|
||||
|
||||
c.set_user(999, 999)
|
||||
c.healthcheck.set_test("postgres")
|
||||
c.remove_devices()
|
||||
c.add_storage(self._data_dir, config["volume"])
|
||||
|
||||
common_variables = {
|
||||
"POSTGRES_USER": config["user"],
|
||||
"POSTGRES_PASSWORD": config["password"],
|
||||
"POSTGRES_DB": config["database"],
|
||||
"PGPORT": port,
|
||||
}
|
||||
|
||||
for k, v in common_variables.items():
|
||||
c.environment.add_env(k, v)
|
||||
|
||||
perms_instance.add_or_skip_action(
|
||||
f"{self._name}_postgres_data", config["volume"], {"uid": 999, "gid": 999, "mode": "check"}
|
||||
)
|
||||
|
||||
repo = self._get_repo(image, ("postgres", "tensorchord/pgvecto-rs"))
|
||||
# eg we don't want to handle upgrades of pg_vector at the moment
|
||||
if repo == "postgres":
|
||||
target_major_version = self._get_target_version(image)
|
||||
upg = self._render_instance.add_container(self._upgrade_name, "postgres_upgrade_image")
|
||||
upg.set_entrypoint(["/bin/bash", "-c", "/upgrade.sh"])
|
||||
upg.restart.set_policy("on-failure", 1)
|
||||
upg.set_user(999, 999)
|
||||
upg.healthcheck.disable()
|
||||
upg.remove_devices()
|
||||
upg.add_storage(self._data_dir, config["volume"])
|
||||
for k, v in common_variables.items():
|
||||
upg.environment.add_env(k, v)
|
||||
|
||||
upg.environment.add_env("TARGET_VERSION", target_major_version)
|
||||
upg.environment.add_env("DATA_DIR", self._data_dir)
|
||||
|
||||
self._upgrade_container = upg
|
||||
|
||||
c.depends.add_dependency(self._upgrade_name, "service_completed_successfully")
|
||||
|
||||
# Store container for further configuration
|
||||
# For example: c.depends.add_dependency("other_container", "service_started")
|
||||
self._container = c
|
||||
|
||||
@property
|
||||
def container(self):
|
||||
return self._container
|
||||
|
||||
def add_dependency(self, container_name: str, condition: str):
|
||||
self._container.depends.add_dependency(container_name, condition)
|
||||
if self._upgrade_container:
|
||||
self._upgrade_container.depends.add_dependency(container_name, condition)
|
||||
|
||||
def _get_port(self):
|
||||
return self._config.get("port") or 5432
|
||||
|
||||
def _get_repo(self, image, supported_repos):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
repo = images[image].get("repository")
|
||||
if not repo:
|
||||
raise RenderError("Could not determine repo")
|
||||
if repo not in supported_repos:
|
||||
raise RenderError(f"Unsupported repo [{repo}] for postgres. Supported repos: {', '.join(supported_repos)}")
|
||||
return repo
|
||||
|
||||
def _get_target_version(self, image):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
tag = images[image].get("tag", "")
|
||||
tag = str(tag) # Account for tags like 16.6
|
||||
target_major_version = tag.split(".")[0]
|
||||
|
||||
try:
|
||||
target_major_version = int(target_major_version)
|
||||
except ValueError:
|
||||
raise RenderError(f"Could not determine target major version from tag [{tag}]")
|
||||
|
||||
if target_major_version > MAX_POSTGRES_VERSION:
|
||||
raise RenderError(f"Postgres version [{target_major_version}] is not supported")
|
||||
|
||||
return target_major_version
|
||||
|
||||
def get_url(self, variant: str):
|
||||
user = urllib.parse.quote_plus(self._config["user"])
|
||||
password = urllib.parse.quote_plus(self._config["password"])
|
||||
creds = f"{user}:{password}"
|
||||
addr = f"{self._name}:{self._get_port()}"
|
||||
db = self._config["database"]
|
||||
|
||||
match variant:
|
||||
case "postgres":
|
||||
return f"postgres://{creds}@{addr}/{db}?sslmode=disable"
|
||||
case "postgresql":
|
||||
return f"postgresql://{creds}@{addr}/{db}?sslmode=disable"
|
||||
case "postgresql_no_creds":
|
||||
return f"postgresql://{addr}/{db}?sslmode=disable"
|
||||
case "host_port":
|
||||
return addr
|
||||
case _:
|
||||
raise RenderError(f"Expected [variant] to be one of [postgres, postgresql], got [{variant}]")
|
||||
83
library/2.1.24/deps_redis.py
Normal file
83
library/2.1.24/deps_redis.py
Normal file
@@ -0,0 +1,83 @@
|
||||
import urllib.parse
|
||||
from typing import TYPE_CHECKING, TypedDict, NotRequired
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .deps_perms import PermsContainer
|
||||
from .validations import valid_port_or_raise, valid_redis_password_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from deps_perms import PermsContainer
|
||||
from validations import valid_port_or_raise, valid_redis_password_or_raise
|
||||
|
||||
|
||||
class RedisConfig(TypedDict):
    """Configuration for the Redis dependency container."""

    password: str  # REDIS_PASSWORD (empty passwords are rejected)
    port: NotRequired[int]  # falls back to 6379 when unset
    volume: "IxStorage"  # storage mounted at /bitnami/redis/data
|
||||
|
||||
|
||||
class RedisContainer:
|
||||
def __init__(
|
||||
self, render_instance: "Render", name: str, image: str, config: RedisConfig, perms_instance: PermsContainer
|
||||
):
|
||||
self._render_instance = render_instance
|
||||
self._name = name
|
||||
self._config = config
|
||||
|
||||
for key in ("password", "volume"):
|
||||
if key not in config:
|
||||
raise RenderError(f"Expected [{key}] to be set for redis")
|
||||
|
||||
valid_redis_password_or_raise(config["password"])
|
||||
|
||||
port = valid_port_or_raise(self._get_port())
|
||||
self._get_repo(image, ("bitnami/redis"))
|
||||
|
||||
c = self._render_instance.add_container(name, image)
|
||||
c.set_user(1001, 0)
|
||||
c.healthcheck.set_test("redis")
|
||||
c.remove_devices()
|
||||
|
||||
c.add_storage("/bitnami/redis/data", config["volume"])
|
||||
perms_instance.add_or_skip_action(
|
||||
f"{self._name}_redis_data", config["volume"], {"uid": 1001, "gid": 0, "mode": "check"}
|
||||
)
|
||||
|
||||
c.environment.add_env("ALLOW_EMPTY_PASSWORD", "no")
|
||||
c.environment.add_env("REDIS_PASSWORD", config["password"])
|
||||
c.environment.add_env("REDIS_PORT_NUMBER", port)
|
||||
|
||||
# Store container for further configuration
|
||||
# For example: c.depends.add_dependency("other_container", "service_started")
|
||||
self._container = c
|
||||
|
||||
def _get_port(self):
|
||||
return self._config.get("port") or 6379
|
||||
|
||||
def _get_repo(self, image, supported_repos):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
repo = images[image].get("repository")
|
||||
if not repo:
|
||||
raise RenderError("Could not determine repo")
|
||||
if repo not in supported_repos:
|
||||
raise RenderError(f"Unsupported repo [{repo}] for redis. Supported repos: {', '.join(supported_repos)}")
|
||||
return repo
|
||||
|
||||
def get_url(self, variant: str):
|
||||
addr = f"{self._name}:{self._get_port()}"
|
||||
password = urllib.parse.quote_plus(self._config["password"])
|
||||
|
||||
match variant:
|
||||
case "redis":
|
||||
return f"redis://default:{password}@{addr}"
|
||||
|
||||
@property
|
||||
def container(self):
|
||||
return self._container
|
||||
31
library/2.1.24/device.py
Normal file
31
library/2.1.24/device.py
Normal file
@@ -0,0 +1,31 @@
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_fs_path_or_raise, allowed_device_or_raise, valid_cgroup_perm_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_fs_path_or_raise, allowed_device_or_raise, valid_cgroup_perm_or_raise
|
||||
|
||||
|
||||
class Device:
    """One host-to-container device mapping with an optional cgroup permission."""

    def __init__(self, host_device: str, container_device: str, cgroup_perm: str = "", allow_disallowed=False):
        host = valid_fs_path_or_raise(host_device.rstrip("/"))
        container = valid_fs_path_or_raise(container_device.rstrip("/"))
        if not (host and container):
            raise RenderError(
                "Expected [host_device] and [container_device] to be set. "
                f"Got host_device [{host_device}] and container_device [{container_device}]"
            )

        cgroup_perm = valid_cgroup_perm_or_raise(cgroup_perm)
        # Unless explicitly allowed, only whitelisted host devices may be mapped
        if not allow_disallowed:
            host = allowed_device_or_raise(host)

        self.cgroup_perm: str = cgroup_perm
        self.host_device: str = host
        self.container_device: str = container

    def render(self):
        """Render the compose device string: host:container[:perm]."""
        parts = [self.host_device, self.container_device]
        if self.cgroup_perm:
            parts.append(self.cgroup_perm)
        return ":".join(parts)
|
||||
54
library/2.1.24/device_cgroup_rules.py
Normal file
54
library/2.1.24/device_cgroup_rules.py
Normal file
@@ -0,0 +1,54 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_device_cgroup_rule_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_device_cgroup_rule_or_raise
|
||||
|
||||
|
||||
class DeviceCGroupRule:
    """One parsed device cgroup rule, e.g. ``c 13:* rwm``."""

    def __init__(self, rule: str):
        validated = valid_device_cgroup_rule_or_raise(rule)
        parts = validated.split(" ")
        # parts[1] is "major:minor"
        major, minor = parts[1].split(":")

        self._type = parts[0]
        self._permissions = parts[2]
        self._major = major
        self._minor = minor

    def get_key(self):
        """Identity of the device (type + major + minor), ignoring permissions."""
        return f"{self._type}_{self._major}_{self._minor}"

    def render(self):
        """Render back to the compose rule syntax."""
        return f"{self._type} {self._major}:{self._minor} {self._permissions}"
|
||||
|
||||
|
||||
class DeviceCGroupRules:
    """Set of unique device cgroup rules for a container.

    Uniqueness is enforced both on the full rule and on the device identity
    (type/major/minor), so a device cannot get two conflicting permission rules.
    """

    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self._rules: set[DeviceCGroupRule] = set()
        self._track_rule_combos: set[str] = set()

    def add_rule(self, rule: str):
        parsed = DeviceCGroupRule(rule)
        if parsed in self._rules:
            raise RenderError(f"Device Group Rule [{rule}] already added")

        combo = parsed.get_key()
        if combo in self._track_rule_combos:
            raise RenderError(f"Device Group Rule [{rule}] has already been added for this device group")

        self._rules.add(parsed)
        self._track_rule_combos.add(combo)

    def has_rules(self):
        return bool(self._rules)

    def render(self):
        return sorted(rule.render() for rule in self._rules)
|
||||
71
library/2.1.24/devices.py
Normal file
71
library/2.1.24/devices.py
Normal file
@@ -0,0 +1,71 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .device import Device
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from device import Device
|
||||
|
||||
|
||||
class Devices:
    """Device mappings for a container, auto-seeding GPU devices from values."""

    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self._devices: set[Device] = set()
        # Tracks all container device paths to make sure they are not duplicated.
        self._container_device_paths: set[str] = set()
        # Scan values for devices we should automatically add (eg /dev/dri for gpus).
        self._auto_add_devices_from_values()

    def _auto_add_devices_from_values(self):
        gpus = self._render_instance.values.get("resources", {}).get("gpus", {})
        if gpus.get("use_all_gpus", False):
            self.add_device("/dev/dri", "/dev/dri", allow_disallowed=True)
            if gpus.get("kfd_device_exists", False):
                # AMD ROCm
                self.add_device("/dev/kfd", "/dev/kfd", allow_disallowed=True)

    def add_device(self, host_device: str, container_device: str, cgroup_perm: str = "", allow_disallowed=False):
        # A host device can be mapped to multiple container devices,
        # so only container device paths are checked for duplicates.
        if container_device in self._container_device_paths:
            raise RenderError(f"Device with container path [{container_device}] already added")

        self._devices.add(Device(host_device, container_device, cgroup_perm, allow_disallowed))
        self._container_device_paths.add(container_device)

    def add_usb_bus(self):
        self.add_device("/dev/bus/usb", "/dev/bus/usb", allow_disallowed=True)

    def _add_snd_device(self):
        self.add_device("/dev/snd", "/dev/snd", allow_disallowed=True)

    def _add_tun_device(self):
        self.add_device("/dev/net/tun", "/dev/net/tun", allow_disallowed=True)

    def has_devices(self):
        return bool(self._devices)

    def remove_devices(self):
        """Drop every device.

        Mainly used by dependencies — there is no reason to pass devices
        to redis or postgres, for example.
        """
        self._devices.clear()
        self._container_device_paths.clear()

    def has_gpus(self):
        """True when any /dev/dri mapping exists.

        Used to decide whether groups like 'video' should be added to the container.
        """
        return any(dev.host_device == "/dev/dri" for dev in self._devices)

    def render(self) -> list[str]:
        return sorted(dev.render() for dev in self._devices)
|
||||
79
library/2.1.24/dns.py
Normal file
79
library/2.1.24/dns.py
Normal file
@@ -0,0 +1,79 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import allowed_dns_opt_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import allowed_dns_opt_or_raise
|
||||
|
||||
|
||||
class Dns:
    """DNS options, search domains and nameservers for a container.

    All three sets are auto-seeded from the ``network`` section of values;
    duplicates raise a RenderError.
    """

    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self._dns_options: set[str] = set()
        self._dns_searches: set[str] = set()
        self._dns_nameservers: set[str] = set()

        self._auto_add_dns_opts_from_values()
        self._auto_add_dns_searches_from_values()
        self._auto_add_dns_nameservers_from_values()

    def _get_dns_opt_keys(self):
        return [self._get_key_from_opt(option) for option in self._dns_options]

    def _get_key_from_opt(self, opt):
        # eg "attempts:3" -> "attempts"
        return opt.split(":")[0]

    def _auto_add_dns_opts_from_values(self):
        network = self._render_instance.values.get("network", {})
        for option in network.get("dns_opts", []):
            self.add_dns_opt(option)

    def _auto_add_dns_searches_from_values(self):
        network = self._render_instance.values.get("network", {})
        for search in network.get("dns_searches", []):
            self.add_dns_search(search)

    def _auto_add_dns_nameservers_from_values(self):
        network = self._render_instance.values.get("network", {})
        for nameserver in network.get("dns_nameservers", []):
            self.add_dns_nameserver(nameserver)

    def add_dns_search(self, dns_search):
        if dns_search in self._dns_searches:
            raise RenderError(f"DNS Search [{dns_search}] already added")
        self._dns_searches.add(dns_search)

    def add_dns_nameserver(self, dns_nameserver):
        if dns_nameserver in self._dns_nameservers:
            raise RenderError(f"DNS Nameserver [{dns_nameserver}] already added")
        self._dns_nameservers.add(dns_nameserver)

    def add_dns_opt(self, dns_opt):
        # Options look like "attempts:3"; the key part must be allow-listed
        # and unique across all stored options.
        key = allowed_dns_opt_or_raise(self._get_key_from_opt(dns_opt))
        if key in self._get_dns_opt_keys():
            raise RenderError(f"DNS Option [{key}] already added")
        self._dns_options.add(dns_opt)

    def has_dns_opts(self):
        return bool(self._dns_options)

    def has_dns_searches(self):
        return bool(self._dns_searches)

    def has_dns_nameservers(self):
        return bool(self._dns_nameservers)

    def render_dns_searches(self):
        return sorted(self._dns_searches)

    def render_dns_opts(self):
        return sorted(self._dns_options)

    def render_dns_nameservers(self):
        return sorted(self._dns_nameservers)
|
||||
112
library/2.1.24/environment.py
Normal file
112
library/2.1.24/environment.py
Normal file
@@ -0,0 +1,112 @@
|
||||
from typing import Any, TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .formatter import escape_dollar
|
||||
from .resources import Resources
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from formatter import escape_dollar
|
||||
from resources import Resources
|
||||
|
||||
|
||||
class Environment:
    """Environment variables for a container, merged from three sources.

    Precedence at render time is strict and duplicates across sources are
    errors: auto variables (derived from values) must not be overwritten by
    app-developer variables, which in turn must not be overwritten by
    user-supplied variables.
    """

    def __init__(self, render_instance: "Render", resources: Resources):
        self._render_instance = render_instance
        self._resources = resources
        # Stores variables that user defined
        self._user_vars: dict[str, Any] = {}
        # Stores variables that are automatically added (based on values)
        self._auto_variables: dict[str, Any] = {}
        # Stores variables that are added by the application developer
        self._app_dev_variables: dict[str, Any] = {}

        # When set, TZ/UMASK/PUID-style generics are not auto-added.
        self._skip_generic_variables: bool = render_instance.values.get("skip_generic_variables", False)

        self._auto_add_variables_from_values()

    def _auto_add_variables_from_values(self):
        # NOTE(review): reconstructed nesting — nvidia variables appear to be
        # added regardless of skip_generic_variables; confirm against upstream.
        if not self._skip_generic_variables:
            self._add_generic_variables()
        self._add_nvidia_variables()

    def _add_generic_variables(self):
        """Add TZ/UMASK and the common PUID/PGID-family variables."""
        self._auto_variables["TZ"] = self._render_instance.values.get("TZ", "Etc/UTC")
        self._auto_variables["UMASK"] = self._render_instance.values.get("UMASK", "002")
        self._auto_variables["UMASK_SET"] = self._render_instance.values.get("UMASK", "002")

        run_as = self._render_instance.values.get("run_as", {})
        user = run_as.get("user")
        group = run_as.get("group")
        # Several common aliases are set so images using different conventions all work.
        if user:
            self._auto_variables["PUID"] = user
            self._auto_variables["UID"] = user
            self._auto_variables["USER_ID"] = user
        if group:
            self._auto_variables["PGID"] = group
            self._auto_variables["GID"] = group
            self._auto_variables["GROUP_ID"] = group

    def _add_nvidia_variables(self):
        # "void" explicitly hides all GPUs from the nvidia runtime when none are assigned.
        if self._resources._nvidia_ids:
            self._auto_variables["NVIDIA_DRIVER_CAPABILITIES"] = "all"
            self._auto_variables["NVIDIA_VISIBLE_DEVICES"] = ",".join(sorted(self._resources._nvidia_ids))
        else:
            self._auto_variables["NVIDIA_VISIBLE_DEVICES"] = "void"

    def _format_value(self, v: Any) -> str:
        """Stringify a value for compose, lowercasing booleans."""
        value = str(v)

        # str(bool) returns "True" or "False",
        # but we want "true" or "false"
        if isinstance(v, bool):
            value = value.lower()
        return value

    def add_env(self, name: str, value: Any):
        """Add an application-developer variable; duplicate names raise."""
        if not name:
            raise RenderError(f"Environment variable name cannot be empty. [{name}]")
        if name in self._app_dev_variables.keys():
            raise RenderError(
                f"Found duplicate environment variable [{name}] in application developer environment variables."
            )
        self._app_dev_variables[name] = value

    def add_user_envs(self, user_env: list[dict]):
        """Add user variables from a list of {"name": ..., "value": ...} dicts."""
        for item in user_env:
            if not item.get("name"):
                raise RenderError(f"Environment variable name cannot be empty. [{item}]")
            if item["name"] in self._user_vars.keys():
                raise RenderError(
                    f"Found duplicate environment variable [{item['name']}] in user environment variables."
                )
            self._user_vars[item["name"]] = item.get("value")

    def has_variables(self):
        return len(self._auto_variables) > 0 or len(self._user_vars) > 0 or len(self._app_dev_variables) > 0

    def render(self):
        """Merge all three sources (auto < app-dev < user), rejecting collisions,
        and escape ``$`` so compose does not interpolate values."""
        result: dict[str, str] = {}

        # Add envs from auto variables
        result.update({k: self._format_value(v) for k, v in self._auto_variables.items()})

        # Track defined keys for faster lookup
        defined_keys = set(result.keys())

        # Add envs from application developer (prohibit overwriting auto variables)
        for k, v in self._app_dev_variables.items():
            if k in defined_keys:
                raise RenderError(f"Environment variable [{k}] is already defined automatically from the library.")
            result[k] = self._format_value(v)
            defined_keys.add(k)

        # Add envs from user (prohibit overwriting app developer envs and auto variables)
        for k, v in self._user_vars.items():
            if k in defined_keys:
                raise RenderError(f"Environment variable [{k}] is already defined from the application developer.")
            result[k] = self._format_value(v)

        return {k: escape_dollar(v) for k, v in result.items()}
|
||||
4
library/2.1.24/error.py
Normal file
4
library/2.1.24/error.py
Normal file
@@ -0,0 +1,4 @@
|
||||
class RenderError(Exception):
    """Base class for exceptions in this module."""
|
||||
31
library/2.1.24/expose.py
Normal file
31
library/2.1.24/expose.py
Normal file
@@ -0,0 +1,31 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_port_or_raise, valid_port_protocol_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_port_or_raise, valid_port_protocol_or_raise
|
||||
|
||||
|
||||
class Expose:
    """Ports exposed (but not published) by a container."""

    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self._ports: set[str] = set()

    def add_port(self, port: int, protocol: str = "tcp"):
        port = valid_port_or_raise(port)
        protocol = valid_port_protocol_or_raise(protocol)
        entry = f"{port}/{protocol}"
        if entry in self._ports:
            raise RenderError(f"Exposed port [{port}/{protocol}] already added")
        self._ports.add(entry)

    def has_ports(self):
        return bool(self._ports)

    def render(self):
        return sorted(self._ports)
|
||||
33
library/2.1.24/extra_hosts.py
Normal file
33
library/2.1.24/extra_hosts.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import ipaddress
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
|
||||
|
||||
class ExtraHosts:
    """Static hostname-to-IP entries added to a container (compose extra_hosts)."""

    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self._extra_hosts: dict[str, str] = {}

    def add_host(self, host: str, ip: str):
        # "host-gateway" is a docker keyword, not an IP address.
        if ip != "host-gateway":
            try:
                ipaddress.ip_address(ip)
            except ValueError:
                raise RenderError(f"Invalid IP address [{ip}] for host [{host}]")

        if host in self._extra_hosts:
            raise RenderError(f"Host [{host}] already added with [{self._extra_hosts[host]}]")
        self._extra_hosts[host] = ip

    def has_hosts(self):
        return bool(self._extra_hosts)

    def render(self):
        return dict(self._extra_hosts)
|
||||
26
library/2.1.24/formatter.py
Normal file
26
library/2.1.24/formatter.py
Normal file
@@ -0,0 +1,26 @@
|
||||
import json
|
||||
import hashlib
|
||||
|
||||
|
||||
def escape_dollar(text: str) -> str:
    """Double every ``$`` so docker compose does not treat it as interpolation."""
    return text.replace("$", "$$")
|
||||
|
||||
|
||||
def get_hashed_name_for_volume(prefix: str, config: dict):
    """Deterministic volume name: prefix + sha256 of the JSON-serialized config."""
    digest = hashlib.sha256(json.dumps(config).encode("utf-8")).hexdigest()
    return f"{prefix}_{digest}"
|
||||
|
||||
|
||||
def get_hash_with_prefix(prefix: str, data: str):
    """Return ``prefix_<sha256(data)>``."""
    digest = hashlib.sha256(data.encode("utf-8")).hexdigest()
    return f"{prefix}_{digest}"
|
||||
|
||||
|
||||
def merge_dicts_no_overwrite(dict1, dict2):
    """Merge two dicts, raising ValueError if any key exists in both."""
    overlapping_keys = dict1.keys() & dict2.keys()
    if overlapping_keys:
        raise ValueError(f"Merging of dicts failed. Overlapping keys: {overlapping_keys}")
    merged = dict(dict1)
    merged.update(dict2)
    return merged
|
||||
|
||||
|
||||
def get_image_with_hashed_data(image: str, data: str):
    """Derive a unique image tag name: ``ix-<image>_<sha256(data)>``."""
    return get_hash_with_prefix(f"ix-{image}", data)
|
||||
168
library/2.1.24/functions.py
Normal file
168
library/2.1.24/functions.py
Normal file
@@ -0,0 +1,168 @@
|
||||
import re
|
||||
import copy
|
||||
import bcrypt
|
||||
import secrets
|
||||
from base64 import b64encode
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .volume_sources import HostPathSource, IxVolumeSource
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from volume_sources import HostPathSource, IxVolumeSource
|
||||
|
||||
|
||||
class Functions:
    """Helper functions exposed to app templates via :meth:`func_map`."""

    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance

    def _bcrypt_hash(self, password):
        # Salted bcrypt hash of a plaintext password (salt is random per call).
        hashed = bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()).decode("utf-8")
        return hashed

    def _htpasswd(self, username, password):
        # "user:bcrypt-hash" line, as used by htpasswd-style auth files.
        hashed = self._bcrypt_hash(password)
        return username + ":" + hashed

    def _secure_string(self, length):
        # URL-safe random token, truncated to exactly `length` characters.
        return secrets.token_urlsafe(length)[:length]

    def _basic_auth(self, username, password):
        # base64("user:pass") — the credential part of an HTTP Basic header.
        return b64encode(f"{username}:{password}".encode("utf-8")).decode("utf-8")

    def _basic_auth_header(self, username, password):
        return f"Basic {self._basic_auth(username, password)}"

    def _fail(self, message):
        # Lets templates abort rendering with a custom message.
        raise RenderError(message)

    def _camel_case(self, string):
        # NOTE(review): str.title() capitalizes every word, which is title
        # case rather than camelCase — name kept for template compatibility.
        return string.title()

    def _auto_cast(self, value):
        """Best-effort cast of a string to bool/float/int, else return as-is.

        NOTE(review): float() is attempted before int(), so integer-looking
        strings come back as floats (int branch is effectively unreachable
        for them) — confirm this is intended.
        """
        lower_str_value = str(value).lower()
        if lower_str_value in ["true", "false"]:
            return lower_str_value == "true"

        try:
            return float(value)
        except ValueError:
            pass

        try:
            return int(value)
        except ValueError:
            pass

        return value

    def _match_regex(self, value, regex):
        # True when `value` matches `regex` at the start (re.match semantics).
        if not re.match(regex, value):
            return False
        return True

    def _must_match_regex(self, value, regex):
        # Like _match_regex but raising; returns the value for chaining.
        if not self._match_regex(value, regex):
            raise RenderError(f"Expected [{value}] to match [{regex}]")
        return value

    def _is_boolean(self, string):
        return string.lower() in ["true", "false"]

    def _is_number(self, string):
        try:
            float(string)
            return True
        except ValueError:
            return False

    def _copy_dict(self, dict):
        # Deep copy so templates can mutate without affecting the original.
        # (Parameter name shadows the builtin `dict`; kept for compatibility.)
        return copy.deepcopy(dict)

    def _merge_dicts(self, *dicts):
        # Later dicts win on key collisions (plain dict.update semantics).
        merged_dict = {}
        for dictionary in dicts:
            merged_dict.update(dictionary)
        return merged_dict

    def _disallow_chars(self, string: str, chars: list[str], key: str):
        # Raise if any of `chars` appears in `string`; returns it for chaining.
        for char in chars:
            if char in string:
                raise RenderError(f"Disallowed character [{char}] in [{key}]")
        return string

    def _or_default(self, value, default):
        # Falls back on ANY falsy value (0, "", [], None, False).
        if not value:
            return default
        return value

    def _require_unique(self, values, key, split_char=""):
        """Raise unless values (optionally their prefix before split_char) are unique."""
        new_values = []
        for value in values:
            new_values.append(value.split(split_char)[0] if split_char else value)

        if len(new_values) != len(set(new_values)):
            raise RenderError(f"Expected values in [{key}] to be unique, but got [{', '.join(values)}]")

    def _require_no_reserved(self, values, key, reserved, split_char=""):
        """Raise if any value (or its prefix before split_char) is in `reserved`."""
        new_values = []
        for value in values:
            new_values.append(value.split(split_char)[0] if split_char else value)

        for reserved_value in reserved:
            if reserved_value in new_values:
                raise RenderError(f"Value [{reserved_value}] is reserved and cannot be set in [{key}]")

    def _temp_config(self, name):
        # Shorthand storage config for a temporary (throwaway) volume.
        if not name:
            raise RenderError("Expected [name] to be set when calling [temp_config].")
        return {"type": "temporary", "volume_config": {"volume_name": name}}

    def _get_host_path(self, storage):
        """Resolve the host path behind a host_path or ix_volume storage config."""
        source_type = storage.get("type", "")
        if not source_type:
            raise RenderError("Expected [type] to be set for volume mounts.")

        match source_type:
            case "host_path":
                mount_config = storage.get("host_path_config")
                if mount_config is None:
                    raise RenderError("Expected [host_path_config] to be set for [host_path] type.")
                host_source = HostPathSource(self._render_instance, mount_config).get()
                return host_source
            case "ix_volume":
                mount_config = storage.get("ix_volume_config")
                if mount_config is None:
                    raise RenderError("Expected [ix_volume_config] to be set for [ix_volume] type.")
                ix_source = IxVolumeSource(self._render_instance, mount_config).get()
                return ix_source
            case _:
                raise RenderError(f"Storage type [{source_type}] does not support host path.")

    def func_map(self):
        """Mapping of template-visible names to their implementations."""
        return {
            "auto_cast": self._auto_cast,
            "basic_auth_header": self._basic_auth_header,
            "basic_auth": self._basic_auth,
            "bcrypt_hash": self._bcrypt_hash,
            "camel_case": self._camel_case,
            "copy_dict": self._copy_dict,
            "fail": self._fail,
            "htpasswd": self._htpasswd,
            "is_boolean": self._is_boolean,
            "is_number": self._is_number,
            "match_regex": self._match_regex,
            "merge_dicts": self._merge_dicts,
            "must_match_regex": self._must_match_regex,
            "secure_string": self._secure_string,
            "disallow_chars": self._disallow_chars,
            "get_host_path": self._get_host_path,
            "or_default": self._or_default,
            "temp_config": self._temp_config,
            "require_unique": self._require_unique,
            "require_no_reserved": self._require_no_reserved,
        }
|
||||
219
library/2.1.24/healthcheck.py
Normal file
219
library/2.1.24/healthcheck.py
Normal file
@@ -0,0 +1,219 @@
|
||||
from typing import Any, TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .formatter import escape_dollar
|
||||
from .validations import valid_http_path_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from formatter import escape_dollar
|
||||
from validations import valid_http_path_or_raise
|
||||
|
||||
|
||||
class Healthcheck:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._test: str | list[str] = ""
|
||||
self._interval_sec: int = 30
|
||||
self._timeout_sec: int = 5
|
||||
self._retries: int = 5
|
||||
self._start_period_sec: int = 15
|
||||
self._start_interval_sec: int = 2
|
||||
self._disabled: bool = False
|
||||
self._use_built_in: bool = False
|
||||
|
||||
def _get_test(self):
|
||||
if isinstance(self._test, str):
|
||||
return escape_dollar(self._test)
|
||||
|
||||
return [escape_dollar(t) for t in self._test]
|
||||
|
||||
def disable(self):
|
||||
self._disabled = True
|
||||
|
||||
def use_built_in(self):
|
||||
self._use_built_in = True
|
||||
|
||||
def set_custom_test(self, test: str | list[str]):
|
||||
if self._disabled:
|
||||
raise RenderError("Cannot set custom test when healthcheck is disabled")
|
||||
self._test = test
|
||||
|
||||
def set_test(self, variant: str, config: dict | None = None):
|
||||
config = config or {}
|
||||
self.set_custom_test(test_mapping(variant, config))
|
||||
|
||||
def set_interval(self, interval: int):
|
||||
self._interval_sec = interval
|
||||
|
||||
def set_timeout(self, timeout: int):
|
||||
self._timeout_sec = timeout
|
||||
|
||||
def set_retries(self, retries: int):
|
||||
self._retries = retries
|
||||
|
||||
def set_start_period(self, start_period: int):
|
||||
self._start_period_sec = start_period
|
||||
|
||||
def set_start_interval(self, start_interval: int):
|
||||
self._start_interval_sec = start_interval
|
||||
|
||||
def has_healthcheck(self):
|
||||
return not self._use_built_in
|
||||
|
||||
def render(self):
|
||||
if self._use_built_in:
|
||||
return RenderError("Should not be called when built in healthcheck is used")
|
||||
|
||||
if self._disabled:
|
||||
return {"disable": True}
|
||||
|
||||
if not self._test:
|
||||
raise RenderError("Healthcheck test is not set")
|
||||
|
||||
return {
|
||||
"test": self._get_test(),
|
||||
"retries": self._retries,
|
||||
"interval": f"{self._interval_sec}s",
|
||||
"timeout": f"{self._timeout_sec}s",
|
||||
"start_period": f"{self._start_period_sec}s",
|
||||
"start_interval": f"{self._start_interval_sec}s",
|
||||
}
|
||||
|
||||
|
||||
def test_mapping(variant: str, config: dict | None = None) -> str:
    """Resolve a healthcheck variant name to its builder and invoke it."""
    config = config or {}
    builders = {
        "curl": curl_test,
        "wget": wget_test,
        "http": http_test,
        "netcat": netcat_test,
        "tcp": tcp_test,
        "redis": redis_test,
        "postgres": postgres_test,
        "mariadb": mariadb_test,
        "mongodb": mongodb_test,
    }

    if variant not in builders:
        raise RenderError(f"Test variant [{variant}] is not valid. Valid options are: [{', '.join(builders.keys())}]")

    return builders[variant](config)
|
||||
|
||||
|
||||
def get_key(config: dict, key: str, default: Any, required: bool):
    """Fetch `key` from `config`, falling back to `default` unless required."""
    if key in config:
        return config[key]
    if required:
        raise RenderError(f"Expected [{key}] to be set")
    return default
|
||||
|
||||
|
||||
def curl_test(config: dict) -> str:
    """Build a curl-based HTTP healthcheck command from `config`."""
    config = config or {}
    port = get_key(config, "port", None, True)
    path = valid_http_path_or_raise(get_key(config, "path", "/", False))
    scheme = get_key(config, "scheme", "http", False)
    host = get_key(config, "host", "127.0.0.1", False)
    headers = get_key(config, "headers", [], False)

    extra_flags = []
    if scheme == "https":
        # Self-signed certs are common for in-container services.
        extra_flags.append("--insecure")

    for hdr in headers:
        if not hdr[0] or not hdr[1]:
            raise RenderError("Expected [header] to be a list of two items for curl test")
        extra_flags.append(f'--header "{hdr[0]}: {hdr[1]}"')

    command = "curl --silent --output /dev/null --show-error --fail"
    if extra_flags:
        command += f" {' '.join(extra_flags)}"
    command += f" {scheme}://{host}:{port}{path}"
    return command
|
||||
|
||||
|
||||
def wget_test(config: dict) -> str:
    """Build a wget-based HTTP healthcheck command from `config`."""
    config = config or {}
    port = get_key(config, "port", None, True)
    path = valid_http_path_or_raise(get_key(config, "path", "/", False))
    scheme = get_key(config, "scheme", "http", False)
    host = get_key(config, "host", "127.0.0.1", False)
    headers = get_key(config, "headers", [], False)
    spider = get_key(config, "spider", True, False)

    extra_flags = []
    if scheme == "https":
        extra_flags.append("--no-check-certificate")

    for hdr in headers:
        if not hdr[0] or not hdr[1]:
            raise RenderError("Expected [header] to be a list of two items for wget test")
        extra_flags.append(f'--header "{hdr[0]}: {hdr[1]}"')

    # --spider avoids downloading the body; otherwise discard it.
    command = f"wget --quiet {'--spider' if spider else '-O /dev/null'}"

    if extra_flags:
        command += f" {' '.join(extra_flags)}"
    command += f" {scheme}://{host}:{port}{path}"
    return command
|
||||
|
||||
|
||||
def http_test(config: dict) -> str:
    """Dependency-free HTTP check using bash's /dev/tcp (no curl/wget in image).

    Opens a TCP connection, sends a raw GET request, and passes when the
    response line contains "HTTP" and status "200".
    """
    config = config or {}
    port = get_key(config, "port", None, True)
    path = valid_http_path_or_raise(get_key(config, "path", "/", False))
    host = get_key(config, "host", "127.0.0.1", False)

    return f"""/bin/bash -c 'exec {{hc_fd}}<>/dev/tcp/{host}/{port} && echo -e "GET {path} HTTP/1.1\\r\\nHost: {host}\\r\\nConnection: close\\r\\n\\r\\n" >&${{hc_fd}} && cat <&${{hc_fd}} | grep "HTTP" | grep -q "200"'"""  # noqa
|
||||
|
||||
|
||||
def netcat_test(config: dict) -> str:
    """TCP reachability check using netcat with a 1s timeout."""
    config = config or {}
    port = get_key(config, "port", None, True)
    target_host = get_key(config, "host", "127.0.0.1", False)
    return f"nc -z -w 1 {target_host} {port}"
|
||||
|
||||
|
||||
def tcp_test(config: dict) -> str:
    """TCP reachability check using bash's /dev/tcp (no netcat required)."""
    config = config or {}
    port = get_key(config, "port", None, True)
    target_host = get_key(config, "host", "127.0.0.1", False)
    return f"timeout 1 bash -c 'cat < /dev/null > /dev/tcp/{target_host}/{port}'"
|
||||
|
||||
|
||||
def redis_test(config: dict) -> str:
    """Redis healthcheck; expects $REDIS_PASSWORD in the container environment."""
    config = config or {}
    redis_port = get_key(config, "port", 6379, False)
    redis_host = get_key(config, "host", "127.0.0.1", False)
    return f"redis-cli -h {redis_host} -p {redis_port} -a $REDIS_PASSWORD ping | grep -q PONG"
|
||||
|
||||
|
||||
def postgres_test(config: dict) -> str:
    """Postgres healthcheck; expects $POSTGRES_USER and $POSTGRES_DB env vars."""
    config = config or {}
    pg_port = get_key(config, "port", 5432, False)
    pg_host = get_key(config, "host", "127.0.0.1", False)
    return f"pg_isready -h {pg_host} -p {pg_port} -U $POSTGRES_USER -d $POSTGRES_DB"
|
||||
|
||||
|
||||
def mariadb_test(config: dict) -> str:
    """MariaDB healthcheck; expects $MARIADB_ROOT_PASSWORD in the environment."""
    config = config or {}
    db_port = get_key(config, "port", 3306, False)
    db_host = get_key(config, "host", "127.0.0.1", False)
    return f"mariadb-admin --user=root --host={db_host} --port={db_port} --password=$MARIADB_ROOT_PASSWORD ping"
|
||||
|
||||
|
||||
def mongodb_test(config: dict) -> str:
    """MongoDB healthcheck via mongosh ping; expects $MONGO_INITDB_DATABASE."""
    config = config or {}
    db_port = get_key(config, "port", 27017, False)
    db_host = get_key(config, "host", "127.0.0.1", False)
    return f"mongosh --host {db_host} --port {db_port} $MONGO_INITDB_DATABASE --eval 'db.adminCommand(\"ping\")' --quiet"
|
||||
37
library/2.1.24/labels.py
Normal file
37
library/2.1.24/labels.py
Normal file
@@ -0,0 +1,37 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .formatter import escape_dollar
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from formatter import escape_dollar
|
||||
|
||||
|
||||
class Labels:
    """Labels attached to a container; compose-reserved keys are rejected."""

    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self._labels: dict[str, str] = {}

    def add_label(self, key: str, value: str):
        if not key:
            raise RenderError("Labels must have a key")

        # Docker compose reserves this namespace for its own bookkeeping.
        if key.startswith("com.docker.compose"):
            raise RenderError(f"Label [{key}] cannot start with [com.docker.compose] as it is reserved")

        if key in self._labels:
            raise RenderError(f"Label [{key}] already added")

        self._labels[key] = escape_dollar(str(value))

    def has_labels(self) -> bool:
        return bool(self._labels)

    def render(self) -> dict[str, str]:
        if not self.has_labels():
            return {}
        return dict(sorted(self._labels.items()))
|
||||
125
library/2.1.24/notes.py
Normal file
125
library/2.1.24/notes.py
Normal file
@@ -0,0 +1,125 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
|
||||
class Notes:
    """Builds the markdown notes document (``x-notes``) shown to the user.

    The document is assembled in :meth:`render` from, in order: a header,
    warnings, deprecations, per-container security findings, an optional
    free-form body, and a footer pointing at the bug tracker.
    """

    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self._app_name: str = ""
        self._app_train: str = ""
        self._warnings: list[str] = []
        self._deprecations: list[str] = []
        # container name -> list of security findings for that container
        self._security: dict[str, list[str]] = {}
        self._header: str = ""
        self._body: str = ""
        self._footer: str = ""

        self._auto_set_app_name()
        self._auto_set_app_train()
        self._auto_set_header()
        self._auto_set_footer()

    def _is_enterprise_train(self) -> bool:
        # Fixed: previously returned True or None; now always returns a bool.
        return self._app_train == "enterprise"

    def _auto_set_app_name(self):
        """Resolve the app title from ix_context metadata, with a placeholder fallback."""
        app_name = self._render_instance.values.get("ix_context", {}).get("app_metadata", {}).get("title", "")
        self._app_name = app_name or "<app_name>"

    def _auto_set_app_train(self):
        """Resolve the app train from ix_context metadata, with a placeholder fallback."""
        app_train = self._render_instance.values.get("ix_context", {}).get("app_metadata", {}).get("train", "")
        self._app_train = app_train or "<app_train>"

    def _auto_set_header(self):
        """Set the markdown H1 header from the app name."""
        self._header = f"# {self._app_name}\n\n"

    def _auto_set_footer(self):
        """Set the bug-report footer; enterprise apps point at Jira, others at GitHub."""
        url = "https://github.com/truenas/apps"
        if self._is_enterprise_train():
            url = "https://ixsystems.atlassian.net"
        footer = "## Bug Reports and Feature Requests\n\n"
        footer += "If you find a bug in this app or have an idea for a new feature, please file an issue at\n"
        footer += f"{url}\n\n"
        self._footer = footer

    def add_warning(self, warning: str):
        """Append a warning shown in the Warnings section."""
        self._warnings.append(warning)

    def _prepend_warning(self, warning: str):
        """Insert a warning at the top of the Warnings section."""
        self._warnings.insert(0, warning)

    def add_deprecation(self, deprecation: str):
        """Append a note shown in the Deprecations section."""
        self._deprecations.append(deprecation)

    def set_body(self, body: str):
        """Set the free-form markdown body placed between Security and the footer."""
        self._body = body

    def scan_containers(self):
        """Inspect all registered containers and collect security-relevant findings."""
        for name, c in self._render_instance._containers.items():
            if self._security.get(name) is None:
                self._security[name] = []

            # "short-lived" is a marker (not a finding) rendered as a bold note.
            if c.restart._policy == "on-failure":
                self._security[name].append("short-lived")

            if c._privileged:
                self._security[name].append("Is running with privileged mode enabled")

            # _user is expected as "uid:gid"; -1 marks an unknown/unset part.
            # Fixed: a value without ":" (e.g. "568") no longer raises IndexError.
            parts = c._user.split(":") if c._user else []
            run_as = (parts + [-1, -1])[:2]
            if run_as[0] in ["0", -1]:
                self._security[name].append(f"Is running as {'root' if run_as[0] == '0' else 'unknown'} user")
            if run_as[1] in ["0", -1]:
                self._security[name].append(f"Is running as {'root' if run_as[1] == '0' else 'unknown'} group")

            if c._ipc_mode == "host":
                self._security[name].append("Is running with host IPC namespace")
            if c._cgroup == "host":
                self._security[name].append("Is running with host cgroup namespace")
            if "no-new-privileges=true" not in c._security_opt.render():
                self._security[name].append("Is running without [no-new-privileges] security option")
            if c._tty:
                self._prepend_warning(
                    f"Container [{name}] is running with a TTY, "
                    "Logs will not appear correctly in the UI due to an [upstream bug]"
                    "(https://github.com/docker/docker-py/issues/1394)"
                )

        # Drop containers without findings so they don't render an empty section.
        self._security = {k: v for k, v in self._security.items() if v}

    def render(self) -> str:
        """Assemble and return the full markdown notes document."""
        self.scan_containers()

        result = self._header

        if self._warnings:
            result += "## Warnings\n\n"
            for warning in self._warnings:
                result += f"- {warning}\n"
            result += "\n"

        if self._deprecations:
            result += "## Deprecations\n\n"
            for deprecation in self._deprecations:
                result += f"- {deprecation}\n"
            result += "\n"

        if self._security:
            result += "## Security\n\n"
            for c_name, security in self._security.items():
                result += f"### Container: [{c_name}]"
                if "short-lived" in security:
                    result += "\n\n**This container is short-lived.**"
                result += "\n\n"
                for s in [s for s in security if s != "short-lived"]:
                    result += f"- {s}\n"
                result += "\n"

        if self._body:
            result += self._body.strip() + "\n\n"

        result += self._footer

        return result
|
||||
22
library/2.1.24/portal.py
Normal file
22
library/2.1.24/portal.py
Normal file
@@ -0,0 +1,22 @@
|
||||
try:
|
||||
from .validations import valid_portal_scheme_or_raise, valid_http_path_or_raise, valid_port_or_raise
|
||||
except ImportError:
|
||||
from validations import valid_portal_scheme_or_raise, valid_http_path_or_raise, valid_port_or_raise
|
||||
|
||||
|
||||
class Portal:
    """A single UI portal entry; scheme, port and path are validated on construction."""

    def __init__(self, name: str, config: dict):
        # Pull raw values out of the config first, then validate where needed.
        scheme = config.get("scheme", "http")
        # An explicit empty/None host falls back to the wildcard address.
        host = config.get("host", "0.0.0.0") or "0.0.0.0"
        port = config.get("port", 0)
        path = config.get("path", "/")

        self._name = name
        self._scheme = valid_portal_scheme_or_raise(scheme)
        self._host = host
        self._port = valid_port_or_raise(port)
        self._path = valid_http_path_or_raise(path)

    def render(self):
        """Return the portal as a plain dict for the x-portals section."""
        return dict(
            name=self._name,
            scheme=self._scheme,
            host=self._host,
            port=self._port,
            path=self._path,
        )
|
||||
28
library/2.1.24/portals.py
Normal file
28
library/2.1.24/portals.py
Normal file
@@ -0,0 +1,28 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .portal import Portal
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from portal import Portal
|
||||
|
||||
|
||||
class Portals:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._portals: set[Portal] = set()
|
||||
|
||||
def add_portal(self, config: dict):
|
||||
name = config.get("name", "Web UI")
|
||||
|
||||
if name in [p._name for p in self._portals]:
|
||||
raise RenderError(f"Portal [{name}] already added")
|
||||
|
||||
self._portals.add(Portal(name, config))
|
||||
|
||||
def render(self):
|
||||
return [p.render() for _, p in sorted([(p._name, p) for p in self._portals])]
|
||||
153
library/2.1.24/ports.py
Normal file
153
library/2.1.24/ports.py
Normal file
@@ -0,0 +1,153 @@
|
||||
import ipaddress
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import (
|
||||
valid_ip_or_raise,
|
||||
valid_port_mode_or_raise,
|
||||
valid_port_or_raise,
|
||||
valid_port_protocol_or_raise,
|
||||
)
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import (
|
||||
valid_ip_or_raise,
|
||||
valid_port_mode_or_raise,
|
||||
valid_port_or_raise,
|
||||
valid_port_protocol_or_raise,
|
||||
)
|
||||
|
||||
|
||||
class Ports:
    """Collects host port bindings for a container and validates binding conflicts.

    Bindings are tracked per (host_port, host_ip, protocol, ip_family), so the
    same host port may be bound separately for IPv4 and IPv6, or on distinct
    specific IPs, as long as the combinations do not overlap a wildcard bind.
    """

    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        # key from _gen_port_key -> port config dict
        # (keys: published, target, protocol, mode, host_ip)
        self._ports: dict[str, dict] = {}

    def _gen_port_key(self, host_port: int, host_ip: str, proto: str, ip_family: int) -> str:
        """Build the uniqueness key for a binding (container-side port is deliberately excluded)."""
        return f"{host_port}_{host_ip}_{proto}_{ip_family}"

    def _is_wildcard_ip(self, ip: str) -> bool:
        """True for the IPv4 ("0.0.0.0") or IPv6 ("::") wildcard address."""
        return ip in ["0.0.0.0", "::"]

    def _get_opposite_wildcard(self, ip: str) -> str:
        """Return the wildcard address of the other IP family."""
        return "0.0.0.0" if ip == "::" else "::"

    def _get_sort_key(self, p: dict) -> str:
        """Stable sort key for rendered ports; "_" stands in for a missing host_ip."""
        return f"{p['published']}_{p['target']}_{p['protocol']}_{p.get('host_ip', '_')}"

    def _is_ports_same(self, port1: dict, port2: dict) -> bool:
        """Compare two port configs on published/target/protocol/host_ip (mode is ignored)."""
        return (
            port1["published"] == port2["published"]
            and port1["target"] == port2["target"]
            and port1["protocol"] == port2["protocol"]
            and port1.get("host_ip", "_") == port2.get("host_ip", "_")
        )

    def _has_opposite_family_port(self, port_config: dict, wildcard_ports: dict) -> bool:
        """True if *wildcard_ports* contains the same binding on the other family's wildcard."""
        comparison_port = port_config.copy()
        comparison_port["host_ip"] = self._get_opposite_wildcard(port_config["host_ip"])
        for p in wildcard_ports.values():
            if self._is_ports_same(comparison_port, p):
                return True
        return False

    def _check_port_conflicts(self, port_config: dict, ip_family: int) -> None:
        """Raise RenderError if *port_config* conflicts with an already-added binding.

        Conflicts: exact duplicate; a specific-IP bind when the same-family
        wildcard is taken; a wildcard bind when any same-family specific IP
        already has the same published/target/protocol combination.
        """
        host_port = port_config["published"]
        host_ip = port_config["host_ip"]
        proto = port_config["protocol"]

        key = self._gen_port_key(host_port, host_ip, proto, ip_family)

        if key in self._ports.keys():
            raise RenderError(f"Port [{host_port}/{proto}/ipv{ip_family}] already added for [{host_ip}]")

        wildcard_ip = "0.0.0.0" if ip_family == 4 else "::"
        if host_ip != wildcard_ip:
            # Check if there is a port with same details but with wildcard IP of the same family
            wildcard_key = self._gen_port_key(host_port, wildcard_ip, proto, ip_family)
            if wildcard_key in self._ports.keys():
                raise RenderError(
                    f"Cannot bind port [{host_port}/{proto}/ipv{ip_family}] to [{host_ip}], "
                    f"already bound to [{wildcard_ip}]"
                )
        else:
            # We are adding a port with wildcard IP
            # Check if there is a port with same details but with specific IP of the same family
            for p in self._ports.values():
                # Skip if the port is not for the same family
                if ip_family != ipaddress.ip_address(p["host_ip"]).version:
                    continue

                # Make a copy of the port config
                search_port = p.copy()
                # Replace the host IP with wildcard IP
                search_port["host_ip"] = wildcard_ip
                # If the ports match, means that a port for specific IP is already added
                # and we are trying to add it again with wildcard IP. Raise an error
                if self._is_ports_same(search_port, port_config):
                    raise RenderError(
                        f"Cannot bind port [{host_port}/{proto}/ipv{ip_family}] to [{host_ip}], "
                        f"already bound to [{p['host_ip']}]"
                    )

    def add_port(self, host_port: int, container_port: int, config: dict | None = None):
        """Validate and register a host->container port binding.

        When config has no "host_ip", the binding is added for both wildcard
        addresses ("0.0.0.0" and "::") via two recursive calls.
        Raises RenderError on invalid values or conflicting bindings.
        """
        config = config or {}
        host_port = valid_port_or_raise(host_port)
        container_port = valid_port_or_raise(container_port)
        proto = valid_port_protocol_or_raise(config.get("protocol", "tcp"))
        mode = valid_port_mode_or_raise(config.get("mode", "ingress"))

        # TODO: Once all apps stop using this function directly, (ie using the container.add_port function)
        # Remove this, and let container.add_port call this for each host_ip
        host_ip = config.get("host_ip", None)
        if host_ip is None:
            self.add_port(host_port, container_port, config | {"host_ip": "0.0.0.0"})
            self.add_port(host_port, container_port, config | {"host_ip": "::"})
            return

        host_ip = valid_ip_or_raise(config.get("host_ip", None))
        ip = ipaddress.ip_address(host_ip)

        port_config = {
            "published": host_port,
            "target": container_port,
            "protocol": proto,
            "mode": mode,
            "host_ip": host_ip,
        }
        self._check_port_conflicts(port_config, ip.version)

        key = self._gen_port_key(host_port, host_ip, proto, ip.version)
        self._ports[key] = port_config

    def has_ports(self):
        """Return True when at least one binding has been added."""
        return len(self._ports) > 0

    def render(self):
        """Return the compose-ready list of port mappings, sorted deterministically.

        When the same binding exists on both family wildcards, it is collapsed
        into a single entry with no host_ip (compose then binds both families).
        """
        specific_ports = []
        wildcard_ports = {}

        for port_config in self._ports.values():
            if self._is_wildcard_ip(port_config["host_ip"]):
                wildcard_ports[id(port_config)] = port_config.copy()
            else:
                specific_ports.append(port_config.copy())

        processed_ports = specific_ports.copy()
        for wild_port in wildcard_ports.values():
            processed_port = wild_port.copy()

            # Check if there's a matching wildcard port for the opposite IP family
            has_opposite_family = self._has_opposite_family_port(wild_port, wildcard_ports)

            if has_opposite_family:
                processed_port.pop("host_ip")

            if processed_port not in processed_ports:
                processed_ports.append(processed_port)

        return sorted(processed_ports, key=self._get_sort_key)
|
||||
95
library/2.1.24/render.py
Normal file
95
library/2.1.24/render.py
Normal file
@@ -0,0 +1,95 @@
|
||||
import copy
|
||||
|
||||
try:
|
||||
from .configs import Configs
|
||||
from .container import Container
|
||||
from .deps import Deps
|
||||
from .error import RenderError
|
||||
from .functions import Functions
|
||||
from .notes import Notes
|
||||
from .portals import Portals
|
||||
from .volumes import Volumes
|
||||
except ImportError:
|
||||
from configs import Configs
|
||||
from container import Container
|
||||
from deps import Deps
|
||||
from error import RenderError
|
||||
from functions import Functions
|
||||
from notes import Notes
|
||||
from portals import Portals
|
||||
from volumes import Volumes
|
||||
|
||||
|
||||
class Render(object):
    """Top-level renderer: holds the user values and all sub-renderers, and
    produces the final compose-style dict via :meth:`render`."""

    def __init__(self, values):
        self._containers: dict[str, Container] = {}
        self.values = values
        self._add_images_internal_use()
        # Snapshot AFTER the internal images are injected, so render() can
        # detect any later mutation of the values.
        self._original_values: dict = copy.deepcopy(self.values)

        self.deps: Deps = Deps(self)

        self.configs = Configs(render_instance=self)
        self.funcs = Functions(render_instance=self).func_map()
        self.portals: Portals = Portals(render_instance=self)
        self.notes: Notes = Notes(render_instance=self)
        self.volumes = Volumes(render_instance=self)

    def _add_images_internal_use(self):
        """Inject images that the library itself may need, without overriding user-provided ones."""
        if not self.values.get("images"):
            self.values["images"] = {}
        images = self.values["images"]

        images.setdefault("python_permissions_image", {"repository": "python", "tag": "3.13.0-slim-bookworm"})
        images.setdefault(
            "postgres_upgrade_image",
            {"repository": "ixsystems/postgres-upgrade", "tag": "1.0.1"},
        )

    def container_names(self):
        """Return the names of all registered containers."""
        return list(self._containers.keys())

    def add_container(self, name: str, image: str):
        """Create, register and return a new Container; name must be non-empty and unique."""
        cleaned = name.strip()
        if not cleaned:
            raise RenderError("Container name cannot be empty")
        new_container = Container(self, cleaned, image)
        if cleaned in self._containers:
            raise RenderError(f"Container {cleaned} already exists.")
        self._containers[cleaned] = new_container
        return new_container

    def render(self):
        """Assemble and return the full compose dict (notes, portals, services, volumes, configs)."""
        if self.values != self._original_values:
            raise RenderError("Values have been modified since the renderer was created.")

        if not self._containers:
            raise RenderError("No containers added.")

        result: dict = {
            "x-notes": self.notes.render(),
            "x-portals": self.portals.render(),
            "services": {c._name: c.render() for c in self._containers.values()},
        }

        # Make sure that after services are rendered
        # there are no labels that target a non-existent container
        # This is to prevent typos
        known_names = self.container_names()
        for label in self.values.get("labels", []):
            for target in label.get("containers", []):
                if target not in known_names:
                    raise RenderError(f"Label [{label['key']}] references container [{target}] which does not exist")

        if self.volumes.has_volumes():
            result["volumes"] = self.volumes.render()

        if self.configs.has_configs():
            result["configs"] = self.configs.render()

        # if self.networks:
        #     result["networks"] = {...}

        return result
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user