Chatwoot (#2636)
* allow pgvector images
* add lib
* add chatwoot
* allow selecting image
@@ -35,6 +35,7 @@ words:
- castopod
- changedetection
- changeip
- chatwoot
- chrislusf
- chunksize
- cifs
4
ix-dev/community/chatwoot/README.md
Normal file
@@ -0,0 +1,4 @@
# Chatwoot

[Chatwoot](https://www.chatwoot.com/) is an open-source live-chat, email support, omni-channel desk.
An alternative to Intercom, Zendesk, Salesforce Service Cloud etc.
44
ix-dev/community/chatwoot/app.yaml
Normal file
@@ -0,0 +1,44 @@
app_version: v4.3.0
capabilities: []
categories:
- productivity
changelog_url: https://github.com/chatwoot/chatwoot/releases
date_added: '2025-06-24'
description: Open-source live-chat, email support, omni-channel desk. An alternative
  to Intercom, Zendesk, Salesforce Service Cloud etc.
home: https://www.chatwoot.com/
host_mounts: []
icon: https://media.sys.truenas.net/apps/chatwoot/icons/icon.svg
keywords:
- support
- live chat
lib_version: 2.1.38
lib_version_hash: 6ae5d9fa6d100b2d34ca45d5ec8d5e27fdf31f3c49ce831a6c876b2c7625d882
maintainers:
- email: dev@ixsystems.com
  name: truenas
  url: https://www.truenas.com/
name: chatwoot
run_as_context:
- description: Chatwoot runs as root user.
  gid: 0
  group_name: root
  uid: 0
  user_name: root
- description: Postgres runs as non-root user.
  gid: 999
  group_name: postgres
  uid: 999
  user_name: postgres
- description: Redis runs as a non-root user and root group.
  gid: 0
  group_name: root
  uid: 1001
  user_name: redis
screenshots: []
sources:
- https://github.com/chatwoot/chatwoot
- https://hub.docker.com/r/chatwoot/chatwoot
title: Chatwoot
train: community
version: 1.0.0
7
ix-dev/community/chatwoot/item.yaml
Normal file
@@ -0,0 +1,7 @@
categories:
- productivity
icon_url: https://media.sys.truenas.net/apps/chatwoot/icons/icon.svg
screenshots: []
tags:
- support
- live chat
26
ix-dev/community/chatwoot/ix_values.yaml
Normal file
@@ -0,0 +1,26 @@
images:
  image:
    repository: chatwoot/chatwoot
    tag: v4.3.0
  ce_image:
    repository: chatwoot/chatwoot
    tag: v4.3.0-ce
  redis_image:
    repository: bitnami/redis
    tag: 8.0.2
  postgres_17_image:
    repository: pgvector/pgvector
    tag: 0.8.0-pg17
  postgres_upgrade_image:
    repository: ixsystems/postgres-upgrade
    tag: 1.0.2

consts:
  chatwoot_container_name: chatwoot
  sidekiq_container_name: sidekiq
  migrations_container_name: migrations
  redis_container_name: redis
  postgres_container_name: postgres
  perms_container_name: permissions
  db_user: chatwoot
  db_name: chatwoot
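For orientation only (not part of this commit): a minimal sketch of how an image selector key from ix_values.yaml resolves to a repository:tag string, mirroring the Container._resolve_image() helper in library/2.1.38/container.py further down. The image entries are copied from the file above; the resolve_image function name is illustrative.

# Illustrative sketch of image selection, mirroring Container._resolve_image().
images = {
    "image": {"repository": "chatwoot/chatwoot", "tag": "v4.3.0"},
    "ce_image": {"repository": "chatwoot/chatwoot", "tag": "v4.3.0-ce"},
    "postgres_17_image": {"repository": "pgvector/pgvector", "tag": "0.8.0-pg17"},
}

def resolve_image(selector: str) -> str:
    if selector not in images:
        raise ValueError(f"Image [{selector}] not found in values")
    entry = images[selector]
    return f"{entry['repository']}:{entry['tag']}"

print(resolve_image("ce_image"))  # -> chatwoot/chatwoot:v4.3.0-ce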
581
ix-dev/community/chatwoot/questions.yaml
Normal file
@@ -0,0 +1,581 @@
|
||||
groups:
|
||||
- name: Chatwoot Configuration
|
||||
description: Configure Chatwoot
|
||||
- name: User and Group Configuration
|
||||
description: Configure User and Group for Chatwoot
|
||||
- name: Network Configuration
|
||||
description: Configure Network for Chatwoot
|
||||
- name: Storage Configuration
|
||||
description: Configure Storage for Chatwoot
|
||||
- name: Labels Configuration
|
||||
description: Configure Labels for Chatwoot
|
||||
- name: Resources Configuration
|
||||
description: Configure Resources for Chatwoot
|
||||
|
||||
questions:
|
||||
- variable: TZ
|
||||
group: Chatwoot Configuration
|
||||
label: Timezone
|
||||
schema:
|
||||
type: string
|
||||
default: Etc/UTC
|
||||
required: true
|
||||
$ref:
|
||||
- definitions/timezone
|
||||
- variable: chatwoot
|
||||
label: ""
|
||||
group: Chatwoot Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: image_selector
|
||||
label: Chatwoot Image
|
||||
schema:
|
||||
type: string
|
||||
default: ce_image
|
||||
required: true
|
||||
enum:
|
||||
- value: image
|
||||
description: Chatwoot Enterprise Edition
|
||||
- value: ce_image
|
||||
description: Chatwoot Community Edition
|
||||
- variable: postgres_image_selector
|
||||
label: Postgres Image (CAUTION)
|
||||
description: |
|
||||
If you are changing this after the postgres directory has been initialized,</br>
|
||||
STOP! and make sure you have a backup of your data.</br>
|
||||
Changing this will trigger a one-way database upgrade.</br>
|
||||
You can only select newer versions of postgres.</br>
|
||||
Selecting an older version will cause Postgres to refuse to start.</br>
|
||||
If something goes wrong, you will have to restore from backup.
|
||||
schema:
|
||||
type: string
|
||||
default: postgres_17_image
|
||||
required: true
|
||||
enum:
|
||||
- value: postgres_17_image
|
||||
description: Postgres 17
|
||||
- variable: db_password
|
||||
label: Database Password
|
||||
description: The password for the Chatwoot database user.
|
||||
schema:
|
||||
type: string
|
||||
default: ""
|
||||
required: true
|
||||
private: true
|
||||
- variable: redis_password
|
||||
label: Redis Password
|
||||
schema:
|
||||
type: string
|
||||
default: ""
|
||||
required: true
|
||||
private: true
|
||||
- variable: secret_key_base
|
||||
label: Secret Key Base
|
||||
description: |
|
||||
The secret key base is used to encrypt sensitive data.</br>
|
||||
It should be a long, random string.</br>
|
||||
If you change this, all existing sessions will be invalidated.
|
||||
schema:
|
||||
type: string
|
||||
default: ""
|
||||
valid_chars: "^[a-zA-Z0-9]+$"
|
||||
valid_chars_error: Please use only alphanumeric characters for the secret key base.
|
||||
required: true
|
||||
private: true
|
||||
- variable: enable_account_signup
|
||||
label: Enable Account Signup
|
||||
schema:
|
||||
type: string
|
||||
default: "true"
|
||||
required: true
|
||||
enum:
|
||||
- value: "true"
|
||||
description: Enable account signup
|
||||
- value: "false"
|
||||
description: Disable account signup
|
||||
- value: "api_only"
|
||||
description: Disable UI account signup and only allow API account creation
|
||||
- variable: frontend_url
|
||||
label: Frontend URL
|
||||
description: |
|
||||
The URL that Chatwoot will be accessible at.</br>
|
||||
Examples:</br>
|
||||
- http://192.168.1.100:30208</br>
|
||||
- https://chatwoot.example.com</br>
|
||||
schema:
|
||||
type: uri
|
||||
default: ""
|
||||
required: true
|
||||
- variable: additional_envs
|
||||
label: Additional Environment Variables
|
||||
schema:
|
||||
type: list
|
||||
default: []
|
||||
items:
|
||||
- variable: env
|
||||
label: Environment Variable
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: name
|
||||
label: Name
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: value
|
||||
label: Value
|
||||
schema:
|
||||
type: string
|
||||
- variable: run_as
|
||||
label: ""
|
||||
group: User and Group Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: user
|
||||
label: User ID
|
||||
description: The user id that Chatwoot files will be owned by.
|
||||
schema:
|
||||
type: int
|
||||
min: 568
|
||||
default: 568
|
||||
required: true
|
||||
- variable: group
|
||||
label: Group ID
|
||||
description: The group id that Chatwoot files will be owned by.
|
||||
schema:
|
||||
type: int
|
||||
min: 568
|
||||
default: 568
|
||||
required: true
|
||||
- variable: network
|
||||
label: ""
|
||||
group: Network Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: web_port
|
||||
label: WebUI Port
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: bind_mode
|
||||
label: Port Bind Mode
|
||||
description: |
|
||||
The port bind mode.</br>
|
||||
- Publish: The port will be published on the host for external access.</br>
|
||||
- Expose: The port will be exposed for inter-container communication.</br>
|
||||
- None: The port will not be exposed or published.</br>
|
||||
Note: If the Dockerfile defines an EXPOSE directive,
|
||||
the port will still be exposed for inter-container communication regardless of this setting.
|
||||
schema:
|
||||
type: string
|
||||
default: "published"
|
||||
enum:
|
||||
- value: "published"
|
||||
description: Publish port on the host for external access
|
||||
- value: "exposed"
|
||||
description: Expose port for inter-container communication
|
||||
- value: ""
|
||||
description: None
|
||||
- variable: port_number
|
||||
label: Port Number
|
||||
schema:
|
||||
type: int
|
||||
default: 30208
|
||||
min: 1
|
||||
max: 65535
|
||||
required: true
|
||||
- variable: host_ips
|
||||
label: Host IPs
|
||||
description: IPs on the host to bind this port
|
||||
schema:
|
||||
type: list
|
||||
show_if: [["bind_mode", "=", "published"]]
|
||||
default: []
|
||||
items:
|
||||
- variable: host_ip
|
||||
label: Host IP
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
$ref:
|
||||
- definitions/node_bind_ip
|
||||
- variable: storage
|
||||
label: ""
|
||||
group: Storage Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: data
|
||||
label: Data Storage
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: type
|
||||
label: Type
|
||||
description: |
|
||||
ixVolume: A dataset created automatically by the system.</br>
Host Path: A path that already exists on the system.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
immutable: true
|
||||
default: "ix_volume"
|
||||
enum:
|
||||
- value: "host_path"
|
||||
description: Host Path (Path that already exists on the system)
|
||||
- value: "ix_volume"
|
||||
description: ixVolume (Dataset created automatically by the system)
|
||||
- variable: ix_volume_config
|
||||
label: ixVolume Configuration
|
||||
description: The configuration for the ixVolume dataset.
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "ix_volume"]]
|
||||
$ref:
|
||||
- "normalize/ix_volume"
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: dataset_name
|
||||
label: Dataset Name
|
||||
description: The name of the dataset to use for storage.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
immutable: true
|
||||
hidden: true
|
||||
default: "data"
|
||||
- variable: acl_entries
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
- variable: host_path_config
|
||||
label: Host Path Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "host_path"]]
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: acl
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
$ref:
|
||||
- "normalize/acl"
|
||||
- variable: path
|
||||
label: Host Path
|
||||
description: The host path to use for storage.
|
||||
schema:
|
||||
type: hostpath
|
||||
show_if: [["acl_enable", "=", false]]
|
||||
required: true
|
||||
- variable: postgres_data
|
||||
label: Postgres Data Storage
|
||||
description: The path to store Postgres Data.
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: type
|
||||
label: Type
|
||||
description: |
|
||||
ixVolume: A dataset created automatically by the system.</br>
Host Path: A path that already exists on the system.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
immutable: true
|
||||
default: "ix_volume"
|
||||
enum:
|
||||
- value: "host_path"
|
||||
description: Host Path (Path that already exists on the system)
|
||||
- value: "ix_volume"
|
||||
description: ixVolume (Dataset created automatically by the system)
|
||||
- variable: ix_volume_config
|
||||
label: ixVolume Configuration
|
||||
description: The configuration for the ixVolume dataset.
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "ix_volume"]]
|
||||
$ref:
|
||||
- "normalize/ix_volume"
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: dataset_name
|
||||
label: Dataset Name
|
||||
description: The name of the dataset to use for storage.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
immutable: true
|
||||
hidden: true
|
||||
default: "pg_data"
|
||||
- variable: acl_entries
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
- variable: host_path_config
|
||||
label: Host Path Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "host_path"]]
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: acl
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
$ref:
|
||||
- "normalize/acl"
|
||||
- variable: path
|
||||
label: Host Path
|
||||
description: The host path to use for storage.
|
||||
schema:
|
||||
type: hostpath
|
||||
show_if: [["acl_enable", "=", false]]
|
||||
required: true
|
||||
- variable: auto_permissions
|
||||
label: Automatic Permissions
|
||||
description: |
|
||||
Automatically set permissions for the host path.
Enabling this will check the top-level directory.</br>
If it finds incorrect permissions, it will `chown` the
host path to the user and group required for the
postgres container.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
show_if: [["acl_enable", "=", false]]
|
||||
- variable: additional_storage
|
||||
label: Additional Storage
|
||||
description: Additional storage for Chatwoot.
|
||||
schema:
|
||||
type: list
|
||||
default: []
|
||||
items:
|
||||
- variable: storageEntry
|
||||
label: Storage Entry
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: type
|
||||
label: Type
|
||||
description: |
|
||||
ixVolume: A dataset created automatically by the system.</br>
Host Path: A path that already exists on the system.</br>
SMB Share: An SMB share that is mounted as a volume.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
default: "ix_volume"
|
||||
immutable: true
|
||||
enum:
|
||||
- value: "host_path"
|
||||
description: Host Path (Path that already exists on the system)
|
||||
- value: "ix_volume"
|
||||
description: ixVolume (Dataset created automatically by the system)
|
||||
- value: "cifs"
|
||||
description: SMB/CIFS Share (Mounts a volume to a SMB share)
|
||||
- variable: read_only
|
||||
label: Read Only
|
||||
description: Mount the volume as read only.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: mount_path
|
||||
label: Mount Path
|
||||
description: The path inside the container to mount the storage.
|
||||
schema:
|
||||
type: path
|
||||
required: true
|
||||
- variable: host_path_config
|
||||
label: Host Path Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "host_path"]]
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: acl
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
$ref:
|
||||
- "normalize/acl"
|
||||
- variable: path
|
||||
label: Host Path
|
||||
description: The host path to use for storage.
|
||||
schema:
|
||||
type: hostpath
|
||||
show_if: [["acl_enable", "=", false]]
|
||||
required: true
|
||||
- variable: ix_volume_config
|
||||
label: ixVolume Configuration
|
||||
description: The configuration for the ixVolume dataset.
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "ix_volume"]]
|
||||
$ref:
|
||||
- "normalize/ix_volume"
|
||||
attrs:
|
||||
- variable: acl_enable
|
||||
label: Enable ACL
|
||||
description: Enable ACL for the storage.
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
- variable: dataset_name
|
||||
label: Dataset Name
|
||||
description: The name of the dataset to use for storage.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
immutable: true
|
||||
default: "storage_entry"
|
||||
- variable: acl_entries
|
||||
label: ACL Configuration
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["acl_enable", "=", true]]
|
||||
attrs: []
|
||||
$ref:
|
||||
- "normalize/acl"
|
||||
- variable: cifs_config
|
||||
label: SMB Configuration
|
||||
description: The configuration for the SMB dataset.
|
||||
schema:
|
||||
type: dict
|
||||
show_if: [["type", "=", "cifs"]]
|
||||
attrs:
|
||||
- variable: server
|
||||
label: Server
|
||||
description: The server to mount the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: path
|
||||
label: Path
|
||||
description: The path to mount the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: username
|
||||
label: Username
|
||||
description: The username to use for the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: password
|
||||
label: Password
|
||||
description: The password to use for the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
private: true
|
||||
- variable: domain
|
||||
label: Domain
|
||||
description: The domain to use for the SMB share.
|
||||
schema:
|
||||
type: string
|
||||
- variable: labels
|
||||
label: ""
|
||||
group: Labels Configuration
|
||||
schema:
|
||||
type: list
|
||||
default: []
|
||||
items:
|
||||
- variable: label
|
||||
label: Label
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: key
|
||||
label: Key
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: value
|
||||
label: Value
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
- variable: containers
|
||||
label: Containers
|
||||
description: Containers where the label should be applied
|
||||
schema:
|
||||
type: list
|
||||
items:
|
||||
- variable: container
|
||||
label: Container
|
||||
schema:
|
||||
type: string
|
||||
required: true
|
||||
enum:
|
||||
- value: chatwoot
|
||||
description: chatwoot
|
||||
- value: sidekiq
|
||||
description: sidekiq
|
||||
- value: postgres
|
||||
description: postgres
|
||||
- value: redis
|
||||
description: redis
|
||||
- variable: resources
|
||||
label: ""
|
||||
group: Resources Configuration
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: limits
|
||||
label: Limits
|
||||
schema:
|
||||
type: dict
|
||||
attrs:
|
||||
- variable: cpus
|
||||
label: CPUs
|
||||
description: CPUs limit for Chatwoot.
|
||||
schema:
|
||||
type: int
|
||||
default: 2
|
||||
required: true
|
||||
- variable: memory
|
||||
label: Memory (in MB)
|
||||
description: Memory limit for Chatwoot.
|
||||
schema:
|
||||
type: int
|
||||
default: 4096
|
||||
required: true
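The Secret Key Base question above asks for a long, random value restricted to alphanumeric characters (the schema enforces ^[a-zA-Z0-9]+$). A minimal sketch, not part of the commit, of one way to generate such a value with Python's standard library; the helper name is hypothetical.

# Hypothetical helper: generate a SECRET_KEY_BASE that satisfies the
# alphanumeric-only constraint from questions.yaml.
import secrets
import string

def generate_secret_key_base(length: int = 64) -> str:
    alphabet = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(length))

print(generate_secret_key_base())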
90
ix-dev/community/chatwoot/templates/docker-compose.yaml
Normal file
@@ -0,0 +1,90 @@
|
||||
{% set tpl = ix_lib.base.render.Render(values) %}
|
||||
|
||||
{% set app = tpl.add_container(values.consts.chatwoot_container_name, "image") %}
|
||||
{% set sidekiq = tpl.add_container(values.consts.sidekiq_container_name, "image") %}
|
||||
{% set migrations = tpl.add_container(values.consts.migrations_container_name, "image") %}
|
||||
{% set perm_container = tpl.deps.perms(values.consts.perms_container_name) %}
|
||||
{% set perms_config = {"uid": values.run_as.user, "gid": values.run_as.group, "mode": "check"} %}
|
||||
|
||||
{% set pg_config = {
|
||||
"user": values.consts.db_user,
|
||||
"password": values.chatwoot.db_password,
|
||||
"database": values.consts.db_name,
|
||||
"volume": values.storage.postgres_data,
|
||||
} %}
|
||||
{% set postgres = tpl.deps.postgres(
|
||||
values.consts.postgres_container_name,
|
||||
values.chatwoot.postgres_image_selector,
|
||||
pg_config, perm_container
|
||||
) %}
|
||||
|
||||
{% set redis_config = {
|
||||
"password": values.chatwoot.redis_password,
|
||||
"volume": {"type": "temporary", "volume_config": {"volume_name": "redis-data"}},
|
||||
} %}
|
||||
{% set redis = tpl.deps.redis(values.consts.redis_container_name, "redis_image", redis_config, perm_container) %}
|
||||
|
||||
{% set containers = [app, sidekiq, migrations] %}
|
||||
{% for c in containers %}
|
||||
{% do c.environment.add_env("NODE_ENV", "production") %}
|
||||
{% do c.environment.add_env("RAILS_ENV", "production") %}
|
||||
{% do c.environment.add_env("INSTALLATION_ENV", "docker") %}
|
||||
{% do c.environment.add_env("SECRET_KEY_BASE", values.chatwoot.secret_key_base) %}
|
||||
{% do c.environment.add_env("REDIS_URL", redis.get_url("redis")) %}
|
||||
{% do c.environment.add_env("REDIS_PASSWORD", values.chatwoot.redis_password) %}
|
||||
{% do c.environment.add_env("POSTGRES_HOST", values.consts.postgres_container_name) %}
|
||||
{% do c.environment.add_env("POSTGRES_USERNAME", values.consts.db_user) %}
|
||||
{% do c.environment.add_env("POSTGRES_PASSWORD", values.chatwoot.db_password) %}
|
||||
{% do c.environment.add_env("POSTGRES_DATABASE", values.consts.db_name) %}
|
||||
{% do c.environment.add_env("ACTIVE_STORAGE_SERVICE", "local") %}
|
||||
{% do c.environment.add_env("ENABLE_ACCOUNT_SIGNUP", values.chatwoot.enable_account_signup) %}
|
||||
{% do c.environment.add_env("FRONTEND_URL", values.chatwoot.frontend_url) %}
|
||||
|
||||
{% do c.environment.add_user_envs(values.chatwoot.additional_envs) %}
|
||||
{% do c.add_storage("/app/storage", values.storage.data) %}
|
||||
|
||||
{% for store in values.storage.additional_storage %}
|
||||
{% do c.add_storage(store.mount_path, store) %}
|
||||
{% do perm_container.add_or_skip_action(store.mount_path, store, perms_config) %}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
|
||||
{% do perm_container.add_or_skip_action("data", values.storage.data, perms_config) %}
|
||||
|
||||
{# Migrations #}
|
||||
{% do migrations.depends.add_dependency(values.consts.postgres_container_name, "service_healthy") %}
|
||||
{% do migrations.depends.add_dependency(values.consts.redis_container_name, "service_healthy") %}
|
||||
{% do migrations.restart.set_policy("on-failure", 1) %}
|
||||
{% do migrations.healthcheck.disable() %}
|
||||
{% do migrations.deploy.resources.set_profile("low") %}
|
||||
{% do migrations.remove_devices() %}
|
||||
{% do migrations.set_command(["bundle", "exec", "rails", "db:chatwoot_prepare"]) %}
|
||||
|
||||
{# Sidekiq #}
|
||||
{% do sidekiq.healthcheck.set_custom_test("pgrep -f sidekiq") %}
|
||||
{% do sidekiq.set_command(["bundle", "exec", "sidekiq", "-C", "config/sidekiq.yml"]) %}
|
||||
{% do sidekiq.depends.add_dependency(values.consts.postgres_container_name, "service_healthy") %}
|
||||
{% do sidekiq.depends.add_dependency(values.consts.redis_container_name, "service_healthy") %}
|
||||
{% do sidekiq.depends.add_dependency(values.consts.migrations_container_name, "service_completed_successfully") %}
|
||||
|
||||
{# Application #}
|
||||
{% do app.healthcheck.set_test("wget", {"port": values.network.web_port.port_number, "path": "/api"}) %}
|
||||
{% do app.set_command(["bundle", "exec", "rails", "s", "-p", values.network.web_port.port_number, "-b", "::"]) %}
|
||||
{% do app.depends.add_dependency(values.consts.postgres_container_name, "service_healthy") %}
|
||||
{% do app.depends.add_dependency(values.consts.redis_container_name, "service_healthy") %}
|
||||
{% do app.depends.add_dependency(values.consts.migrations_container_name, "service_completed_successfully") %}
|
||||
|
||||
{% do app.add_port(values.network.web_port) %}
|
||||
|
||||
{% if perm_container.has_actions() %}
|
||||
{% do perm_container.activate() %}
|
||||
{% do postgres.add_dependency(values.consts.perms_container_name, "service_completed_successfully") %}
|
||||
{% do redis.container.depends.add_dependency(values.consts.perms_container_name, "service_completed_successfully") %}
|
||||
{% for c in containers %}
|
||||
{% do c.depends.add_dependency(values.consts.perms_container_name, "service_completed_successfully") %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
{% do tpl.portals.add(values.network.web_port) %}
|
||||
|
||||
{{ tpl.render() | tojson }}
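As an illustration of the dependency wiring above (an assumption about the rendered output, not text from the commit): the three add_dependency() calls on the application container translate, via Depends.render() in library/2.1.38/depends.py below, into a Compose depends_on mapping shaped roughly like this. The permissions container is appended only when perm_container.has_actions() is true.

# Assumed shape of Depends.render() output for the chatwoot app container,
# given the add_dependency() calls in the template above.
depends_on = {
    "postgres": {"condition": "service_healthy"},
    "redis": {"condition": "service_healthy"},
    "migrations": {"condition": "service_completed_successfully"},
}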
@@ -67,7 +67,14 @@ class PostgresContainer:
         )

         repo = self._get_repo(
-            image, ("postgres", "tensorchord/pgvecto-rs", "postgis/postgis", "ghcr.io/immich-app/postgres")
+            image,
+            (
+                "postgres",
+                "postgis/postgis",
+                "pgvector/pgvector",
+                "tensorchord/pgvecto-rs",
+                "ghcr.io/immich-app/postgres",
+            ),
         )
         # eg we don't want to handle upgrades of pg_vector at the moment
         if repo == "postgres":
@@ -0,0 +1,41 @@
|
||||
resources:
|
||||
limits:
|
||||
cpus: 2.0
|
||||
memory: 4096
|
||||
|
||||
chatwoot:
|
||||
image_selector: image
|
||||
postgres_image_selector: postgres_17_image
|
||||
db_password: secret
|
||||
redis_password: secret
|
||||
secret_key_base: secret
|
||||
# true/false/api_only
|
||||
enable_account_signup: "true"
|
||||
frontend_url: http://localhost:8080
|
||||
additional_envs: []
|
||||
|
||||
run_as:
|
||||
user: 568
|
||||
group: 568
|
||||
|
||||
network:
|
||||
web_port:
|
||||
bind_mode: published
|
||||
port_number: 8080
|
||||
|
||||
ix_volumes:
|
||||
postgres_data: /opt/tests/mnt/chatwoot/postgres_data
|
||||
data: /opt/tests/mnt/chatwoot/data
|
||||
|
||||
storage:
|
||||
data:
|
||||
type: ix_volume
|
||||
ix_volume_config:
|
||||
dataset_name: data
|
||||
create_host_path: true
|
||||
postgres_data:
|
||||
type: ix_volume
|
||||
ix_volume_config:
|
||||
dataset_name: postgres_data
|
||||
create_host_path: true
|
||||
additional_storage: []
|
||||
@@ -0,0 +1,41 @@
|
||||
resources:
|
||||
limits:
|
||||
cpus: 2.0
|
||||
memory: 4096
|
||||
|
||||
chatwoot:
|
||||
image_selector: ce_image
|
||||
postgres_image_selector: postgres_17_image
|
||||
db_password: secret
|
||||
redis_password: secret
|
||||
secret_key_base: secret
|
||||
# true/false/api_only
|
||||
enable_account_signup: "true"
|
||||
frontend_url: http://localhost:8080
|
||||
additional_envs: []
|
||||
|
||||
run_as:
|
||||
user: 568
|
||||
group: 568
|
||||
|
||||
network:
|
||||
web_port:
|
||||
bind_mode: published
|
||||
port_number: 8080
|
||||
|
||||
ix_volumes:
|
||||
postgres_data: /opt/tests/mnt/chatwoot/postgres_data
|
||||
data: /opt/tests/mnt/chatwoot/data
|
||||
|
||||
storage:
|
||||
data:
|
||||
type: ix_volume
|
||||
ix_volume_config:
|
||||
dataset_name: data
|
||||
create_host_path: true
|
||||
postgres_data:
|
||||
type: ix_volume
|
||||
ix_volume_config:
|
||||
dataset_name: postgres_data
|
||||
create_host_path: true
|
||||
additional_storage: []
|
||||
0
library/2.1.38/__init__.py
Normal file
70
library/2.1.38/client.py
Normal file
@@ -0,0 +1,70 @@
|
||||
import os
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
|
||||
|
||||
def is_truenas_system():
|
||||
"""Check if we're running on a TrueNAS system"""
|
||||
return "truenas" in os.uname().release
|
||||
|
||||
|
||||
# Import based on system detection
|
||||
if is_truenas_system():
|
||||
from truenas_api_client import Client as TrueNASClient
|
||||
|
||||
try:
|
||||
# 25.04 and later
|
||||
from truenas_api_client.exc import ValidationErrors
|
||||
except ImportError:
|
||||
# 24.10 and earlier
|
||||
from truenas_api_client import ValidationErrors
|
||||
else:
|
||||
# Mock classes for non-TrueNAS systems
|
||||
class TrueNASClient:
|
||||
def call(self, *args, **kwargs):
|
||||
return None
|
||||
|
||||
class ValidationErrors(Exception):
|
||||
def __init__(self, errors):
|
||||
self.errors = errors
|
||||
|
||||
|
||||
class Client:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self.client = TrueNASClient()
|
||||
self._render_instance = render_instance
|
||||
self._app_name: str = self._render_instance.values.get("ix_context", {}).get("app_name", "") or "unknown"
|
||||
|
||||
def validate_ip_port_combo(self, ip: str, port: int) -> None:
|
||||
# Examples of error messages:
|
||||
# The port is being used by following services: 1) "0.0.0.0:80" used by WebUI Service
|
||||
# The port is being used by following services: 1) "0.0.0.0:9998" used by Applications ('$app_name' application)
|
||||
try:
|
||||
self.client.call("port.validate_port", f"render.{self._app_name}.schema", port, ip, None, True)
|
||||
except ValidationErrors as e:
|
||||
err_str = str(e)
|
||||
# If the IP:port combo appears more than once in the error message,
# it means that the port is used by more than one service/app.
# This shouldn't happen in a well-configured system.
# Note that the IP portion is not included in the check,
# because the input might be a specific IP, while another service or app
# might be using the same port on a wildcard IP.
|
||||
if err_str.count(f':{port}" used by') > 1:
|
||||
raise RenderError(err_str) from None
|
||||
|
||||
# If the error complains about the current app, we ignore it
|
||||
# This is to handle cases where the app is being updated or edited
|
||||
if f"Applications ('{self._app_name}' application)" in err_str:
|
||||
# During upgrade, we want to ignore the error if it is related to the current app
|
||||
return
|
||||
|
||||
raise RenderError(err_str) from None
|
||||
except Exception:
|
||||
pass
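To make the string checks in validate_ip_port_combo() concrete, here is a small standalone sketch based on the example error messages quoted in the comments above; the port number and app name are assumed values.

# Standalone illustration of the two checks in Client.validate_ip_port_combo();
# the error string and app name below are assumed examples.
port = 9998
app_name = "chatwoot"
err_str = (
    'The port is being used by following services: '
    '1) "0.0.0.0:9998" used by Applications (\'chatwoot\' application)'
)

# More than one occurrence would mean the port is claimed by several services/apps.
assert err_str.count(f':{port}" used by') == 1

# A single match naming the current app is ignored (app update/edit case).
assert f"Applications ('{app_name}' application)" in err_str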
86
library/2.1.38/configs.py
Normal file
@@ -0,0 +1,86 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .formatter import escape_dollar
|
||||
from .validations import valid_octal_mode_or_raise, valid_fs_path_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from formatter import escape_dollar
|
||||
from validations import valid_octal_mode_or_raise, valid_fs_path_or_raise
|
||||
|
||||
|
||||
class Configs:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._configs: dict[str, dict] = {}
|
||||
|
||||
def add(self, name: str, data: str):
|
||||
if not isinstance(data, str):
|
||||
raise RenderError(f"Expected [data] to be a string, got [{type(data)}]")
|
||||
|
||||
if name not in self._configs:
|
||||
self._configs[name] = {"name": name, "data": data}
|
||||
return
|
||||
|
||||
if data == self._configs[name]["data"]:
|
||||
return
|
||||
|
||||
raise RenderError(f"Config [{name}] already added with different data")
|
||||
|
||||
def has_configs(self):
|
||||
return bool(self._configs)
|
||||
|
||||
def render(self):
|
||||
return {
|
||||
c["name"]: {"content": escape_dollar(c["data"])}
|
||||
for c in sorted(self._configs.values(), key=lambda c: c["name"])
|
||||
}
|
||||
|
||||
|
||||
class ContainerConfigs:
|
||||
def __init__(self, render_instance: "Render", configs: Configs):
|
||||
self._render_instance = render_instance
|
||||
self.top_level_configs: Configs = configs
|
||||
self.container_configs: set[ContainerConfig] = set()
|
||||
|
||||
def add(self, name: str, data: str, target: str, mode: str = ""):
|
||||
self.top_level_configs.add(name, data)
|
||||
|
||||
if target == "":
|
||||
raise RenderError(f"Expected [target] to be set for config [{name}]")
|
||||
if mode != "":
|
||||
mode = valid_octal_mode_or_raise(mode)
|
||||
|
||||
if target in [c.target for c in self.container_configs]:
|
||||
raise RenderError(f"Target [{target}] already used for another config")
|
||||
target = valid_fs_path_or_raise(target)
|
||||
self.container_configs.add(ContainerConfig(self._render_instance, name, target, mode))
|
||||
|
||||
def has_configs(self):
|
||||
return bool(self.container_configs)
|
||||
|
||||
def render(self):
|
||||
return [c.render() for c in sorted(self.container_configs, key=lambda c: c.source)]
|
||||
|
||||
|
||||
class ContainerConfig:
|
||||
def __init__(self, render_instance: "Render", source: str, target: str, mode: str):
|
||||
self._render_instance = render_instance
|
||||
self.source = source
|
||||
self.target = target
|
||||
self.mode = mode
|
||||
|
||||
def render(self):
|
||||
result: dict[str, str | int] = {
|
||||
"source": self.source,
|
||||
"target": self.target,
|
||||
}
|
||||
|
||||
if self.mode:
|
||||
result["mode"] = int(self.mode, 8)
|
||||
|
||||
return result
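A hypothetical usage sketch (not part of the commit) of the config plumbing above: ContainerConfigs.add() registers the payload once at the top level and records the mount target, and the two render() methods then emit structures shaped roughly like the dictionaries below. The nginx.conf name and target path are made up for illustration.

# Illustrative only: after something like
#   c.configs.add("nginx.conf", "<file contents>", "/etc/nginx/nginx.conf", "0440")
# the top-level Configs.render() and the container-level render() would produce
# structures shaped like these (names and paths are made up):
top_level_configs = {"nginx.conf": {"content": "<file contents>"}}
container_configs = [{"source": "nginx.conf", "target": "/etc/nginx/nginx.conf", "mode": 0o440}]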
441
library/2.1.38/container.py
Normal file
@@ -0,0 +1,441 @@
|
||||
from typing import Any, TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
try:
|
||||
from .configs import ContainerConfigs
|
||||
from .depends import Depends
|
||||
from .deploy import Deploy
|
||||
from .device_cgroup_rules import DeviceCGroupRules
|
||||
from .devices import Devices
|
||||
from .dns import Dns
|
||||
from .environment import Environment
|
||||
from .error import RenderError
|
||||
from .expose import Expose
|
||||
from .extra_hosts import ExtraHosts
|
||||
from .formatter import escape_dollar, get_image_with_hashed_data
|
||||
from .healthcheck import Healthcheck
|
||||
from .labels import Labels
|
||||
from .ports import Ports
|
||||
from .restart import RestartPolicy
|
||||
from .tmpfs import Tmpfs
|
||||
from .validations import (
|
||||
valid_cap_or_raise,
|
||||
valid_cgroup_or_raise,
|
||||
valid_ipc_mode_or_raise,
|
||||
valid_network_mode_or_raise,
|
||||
valid_port_bind_mode_or_raise,
|
||||
valid_port_mode_or_raise,
|
||||
valid_pull_policy_or_raise,
|
||||
)
|
||||
from .security_opts import SecurityOpts
|
||||
from .storage import Storage
|
||||
from .sysctls import Sysctls
|
||||
except ImportError:
|
||||
from configs import ContainerConfigs
|
||||
from depends import Depends
|
||||
from deploy import Deploy
|
||||
from device_cgroup_rules import DeviceCGroupRules
|
||||
from devices import Devices
|
||||
from dns import Dns
|
||||
from environment import Environment
|
||||
from error import RenderError
|
||||
from expose import Expose
|
||||
from extra_hosts import ExtraHosts
|
||||
from formatter import escape_dollar, get_image_with_hashed_data
|
||||
from healthcheck import Healthcheck
|
||||
from labels import Labels
|
||||
from ports import Ports
|
||||
from restart import RestartPolicy
|
||||
from tmpfs import Tmpfs
|
||||
from validations import (
|
||||
valid_cap_or_raise,
|
||||
valid_cgroup_or_raise,
|
||||
valid_ipc_mode_or_raise,
|
||||
valid_network_mode_or_raise,
|
||||
valid_port_bind_mode_or_raise,
|
||||
valid_port_mode_or_raise,
|
||||
valid_pull_policy_or_raise,
|
||||
)
|
||||
from security_opts import SecurityOpts
|
||||
from storage import Storage
|
||||
from sysctls import Sysctls
|
||||
|
||||
|
||||
class Container:
|
||||
def __init__(self, render_instance: "Render", name: str, image: str):
|
||||
self._render_instance = render_instance
|
||||
|
||||
self._name: str = name
|
||||
self._image: str = self._resolve_image(image)
|
||||
self._build_image: str = ""
|
||||
self._pull_policy: str = ""
|
||||
self._user: str = ""
|
||||
self._tty: bool = False
|
||||
self._stdin_open: bool = False
|
||||
self._init: bool | None = None
|
||||
self._read_only: bool | None = None
|
||||
self._extra_hosts: ExtraHosts = ExtraHosts(self._render_instance)
|
||||
self._hostname: str = ""
|
||||
self._cap_drop: set[str] = set(["ALL"]) # Drop all capabilities by default and add caps granularly
|
||||
self._cap_add: set[str] = set()
|
||||
self._security_opt: SecurityOpts = SecurityOpts(self._render_instance)
|
||||
self._privileged: bool = False
|
||||
self._group_add: set[int | str] = set()
|
||||
self._network_mode: str = ""
|
||||
self._entrypoint: list[str] = []
|
||||
self._command: list[str] = []
|
||||
self._grace_period: int | None = None
|
||||
self._shm_size: int | None = None
|
||||
self._storage: Storage = Storage(self._render_instance, self)
|
||||
self._tmpfs: Tmpfs = Tmpfs(self._render_instance, self)
|
||||
self._ipc_mode: str | None = None
|
||||
self._cgroup: str | None = None
|
||||
self._device_cgroup_rules: DeviceCGroupRules = DeviceCGroupRules(self._render_instance)
|
||||
self.sysctls: Sysctls = Sysctls(self._render_instance, self)
|
||||
self.configs: ContainerConfigs = ContainerConfigs(self._render_instance, self._render_instance.configs)
|
||||
self.deploy: Deploy = Deploy(self._render_instance)
|
||||
self.networks: set[str] = set()
|
||||
self.devices: Devices = Devices(self._render_instance)
|
||||
self.environment: Environment = Environment(self._render_instance, self.deploy.resources)
|
||||
self.dns: Dns = Dns(self._render_instance)
|
||||
self.depends: Depends = Depends(self._render_instance)
|
||||
self.healthcheck: Healthcheck = Healthcheck(self._render_instance)
|
||||
self.labels: Labels = Labels(self._render_instance)
|
||||
self.restart: RestartPolicy = RestartPolicy(self._render_instance)
|
||||
self.ports: Ports = Ports(self._render_instance)
|
||||
self.expose: Expose = Expose(self._render_instance)
|
||||
|
||||
self._auto_set_network_mode()
|
||||
self._auto_add_labels()
|
||||
self._auto_add_groups()
|
||||
|
||||
def _auto_add_groups(self):
|
||||
self.add_group(568)
|
||||
|
||||
def _auto_set_network_mode(self):
|
||||
if self._render_instance.values.get("network", {}).get("host_network", False):
|
||||
self.set_network_mode("host")
|
||||
|
||||
def _auto_add_labels(self):
|
||||
labels = self._render_instance.values.get("labels", [])
|
||||
if not labels:
|
||||
return
|
||||
|
||||
for label in labels:
|
||||
containers = label.get("containers", [])
|
||||
if not containers:
|
||||
raise RenderError(f'Label [{label.get("key", "")}] must have at least one container')
|
||||
|
||||
if self._name in containers:
|
||||
self.labels.add_label(label["key"], label["value"])
|
||||
|
||||
def _resolve_image(self, image: str):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(
|
||||
f"Image [{image}] not found in values. " f"Available images: [{', '.join(images.keys())}]"
|
||||
)
|
||||
repo = images[image].get("repository", "")
|
||||
tag = images[image].get("tag", "")
|
||||
|
||||
if not repo:
|
||||
raise RenderError(f"Repository not found for image [{image}]")
|
||||
if not tag:
|
||||
raise RenderError(f"Tag not found for image [{image}]")
|
||||
|
||||
return f"{repo}:{tag}"
|
||||
|
||||
def build_image(self, content: list[str | None]):
|
||||
dockerfile = f"FROM {self._image}\n"
|
||||
for line in content:
|
||||
line = line.strip() if line else ""
|
||||
if not line:
|
||||
continue
|
||||
if line.startswith("FROM"):
|
||||
# TODO: This will also block multi-stage builds
|
||||
# We can revisit this later if we need it
|
||||
raise RenderError(
|
||||
"FROM cannot be used in build image. Define the base image when creating the container."
|
||||
)
|
||||
dockerfile += line + "\n"
|
||||
|
||||
self._build_image = dockerfile
|
||||
self._image = get_image_with_hashed_data(self._image, dockerfile)
|
||||
|
||||
def set_pull_policy(self, pull_policy: str):
|
||||
self._pull_policy = valid_pull_policy_or_raise(pull_policy)
|
||||
|
||||
def set_user(self, user: int, group: int):
|
||||
for i in (user, group):
|
||||
if not isinstance(i, int) or i < 0:
|
||||
raise RenderError(f"User/Group [{i}] is not valid")
|
||||
self._user = f"{user}:{group}"
|
||||
|
||||
def add_extra_host(self, host: str, ip: str):
|
||||
self._extra_hosts.add_host(host, ip)
|
||||
|
||||
def add_group(self, group: int | str):
|
||||
if isinstance(group, str):
|
||||
group = str(group).strip()
|
||||
if group.isdigit():
|
||||
raise RenderError(f"Group is a number [{group}] but passed as a string")
|
||||
|
||||
if group in self._group_add:
|
||||
raise RenderError(f"Group [{group}] already added")
|
||||
self._group_add.add(group)
|
||||
|
||||
def get_additional_groups(self) -> list[int | str]:
|
||||
result = []
|
||||
if self.deploy.resources.has_gpus() or self.devices.has_gpus():
|
||||
result.append(44) # video
|
||||
result.append(107) # render
|
||||
return result
|
||||
|
||||
def get_current_groups(self) -> list[str]:
|
||||
result = [str(g) for g in self._group_add]
|
||||
result.extend([str(g) for g in self.get_additional_groups()])
|
||||
return result
|
||||
|
||||
def set_tty(self, enabled: bool = False):
|
||||
self._tty = enabled
|
||||
|
||||
def set_stdin(self, enabled: bool = False):
|
||||
self._stdin_open = enabled
|
||||
|
||||
def set_ipc_mode(self, ipc_mode: str):
|
||||
self._ipc_mode = valid_ipc_mode_or_raise(ipc_mode, self._render_instance.container_names())
|
||||
|
||||
def add_device_cgroup_rule(self, dev_grp_rule: str):
|
||||
self._device_cgroup_rules.add_rule(dev_grp_rule)
|
||||
|
||||
def set_cgroup(self, cgroup: str):
|
||||
self._cgroup = valid_cgroup_or_raise(cgroup)
|
||||
|
||||
def set_init(self, enabled: bool = False):
|
||||
self._init = enabled
|
||||
|
||||
def set_read_only(self, enabled: bool = False):
|
||||
self._read_only = enabled
|
||||
|
||||
def set_hostname(self, hostname: str):
|
||||
self._hostname = hostname
|
||||
|
||||
def set_grace_period(self, grace_period: int):
|
||||
if grace_period < 0:
|
||||
raise RenderError(f"Grace period [{grace_period}] cannot be negative")
|
||||
self._grace_period = grace_period
|
||||
|
||||
def set_privileged(self, enabled: bool = False):
|
||||
self._privileged = enabled
|
||||
|
||||
def clear_caps(self):
|
||||
self._cap_add.clear()
|
||||
self._cap_drop.clear()
|
||||
|
||||
def add_caps(self, caps: list[str]):
|
||||
for c in caps:
|
||||
if c in self._cap_add:
|
||||
raise RenderError(f"Capability [{c}] already added")
|
||||
self._cap_add.add(valid_cap_or_raise(c))
|
||||
|
||||
def add_security_opt(self, key: str, value: str | bool | None = None, arg: str | None = None):
|
||||
self._security_opt.add_opt(key, value, arg)
|
||||
|
||||
def remove_security_opt(self, key: str):
|
||||
self._security_opt.remove_opt(key)
|
||||
|
||||
def set_network_mode(self, mode: str):
|
||||
self._network_mode = valid_network_mode_or_raise(mode, self._render_instance.container_names())
|
||||
|
||||
def add_port(self, port_config: dict | None = None, dev_config: dict | None = None):
|
||||
port_config = port_config or {}
|
||||
dev_config = dev_config or {}
|
||||
# Merge port_config and dev_config (dev_config has precedence)
|
||||
config = port_config | dev_config
|
||||
bind_mode = valid_port_bind_mode_or_raise(config.get("bind_mode", ""))
|
||||
# Skip port if it's neither published nor exposed
|
||||
if not bind_mode:
|
||||
return
|
||||
|
||||
# Collect port config
|
||||
mode = valid_port_mode_or_raise(config.get("mode", "ingress"))
|
||||
host_port = config.get("port_number", 0)
|
||||
container_port = config.get("container_port", 0) or host_port
|
||||
protocol = config.get("protocol", "tcp")
|
||||
host_ips = config.get("host_ips") or ["0.0.0.0", "::"]
|
||||
if not isinstance(host_ips, list):
|
||||
raise RenderError(f"Expected [host_ips] to be a list, got [{host_ips}]")
|
||||
|
||||
if bind_mode == "published":
|
||||
for host_ip in host_ips:
|
||||
self.ports._add_port(
|
||||
host_port, container_port, {"protocol": protocol, "host_ip": host_ip, "mode": mode}
|
||||
)
|
||||
elif bind_mode == "exposed":
|
||||
self.expose.add_port(container_port, protocol)
|
||||
|
||||
def set_entrypoint(self, entrypoint: list[str]):
|
||||
self._entrypoint = [escape_dollar(str(e)) for e in entrypoint]
|
||||
|
||||
def set_command(self, command: list[str]):
|
||||
self._command = [escape_dollar(str(e)) for e in command]
|
||||
|
||||
def add_storage(self, mount_path: str, config: "IxStorage"):
|
||||
if config.get("type", "") == "tmpfs":
|
||||
self._tmpfs.add(mount_path, config)
|
||||
else:
|
||||
self._storage.add(mount_path, config)
|
||||
|
||||
def add_docker_socket(self, read_only: bool = True, mount_path: str = "/var/run/docker.sock"):
|
||||
self.add_group(999)
|
||||
self._storage._add_docker_socket(read_only, mount_path)
|
||||
|
||||
def add_udev(self, read_only: bool = True, mount_path: str = "/run/udev"):
|
||||
self._storage._add_udev(read_only, mount_path)
|
||||
|
||||
def add_tun_device(self):
|
||||
self.devices._add_tun_device()
|
||||
|
||||
def add_snd_device(self):
|
||||
self.add_group(29)
|
||||
self.devices._add_snd_device()
|
||||
|
||||
def set_shm_size_mb(self, size: int):
|
||||
self._shm_size = size
|
||||
|
||||
# Easily remove devices from the container
|
||||
# Useful in dependencies like postgres and redis
|
||||
# where there is no need to pass devices to them
|
||||
def remove_devices(self):
|
||||
self.deploy.resources.remove_devices()
|
||||
self.devices.remove_devices()
|
||||
|
||||
@property
|
||||
def storage(self):
|
||||
return self._storage
|
||||
|
||||
def render(self) -> dict[str, Any]:
|
||||
if self._network_mode and self.networks:
|
||||
raise RenderError("Cannot set both [network_mode] and [networks]")
|
||||
|
||||
result = {
|
||||
"image": self._image,
|
||||
"platform": "linux/amd64",
|
||||
"tty": self._tty,
|
||||
"stdin_open": self._stdin_open,
|
||||
"restart": self.restart.render(),
|
||||
}
|
||||
|
||||
if self._pull_policy:
|
||||
result["pull_policy"] = self._pull_policy
|
||||
|
||||
if self.healthcheck.has_healthcheck():
|
||||
result["healthcheck"] = self.healthcheck.render()
|
||||
|
||||
if self._hostname:
|
||||
result["hostname"] = self._hostname
|
||||
|
||||
if self._build_image:
|
||||
result["build"] = {"tags": [self._image], "dockerfile_inline": self._build_image}
|
||||
|
||||
if self.configs.has_configs():
|
||||
result["configs"] = self.configs.render()
|
||||
|
||||
if self._ipc_mode is not None:
|
||||
result["ipc"] = self._ipc_mode
|
||||
|
||||
if self._device_cgroup_rules.has_rules():
|
||||
result["device_cgroup_rules"] = self._device_cgroup_rules.render()
|
||||
|
||||
if self._cgroup is not None:
|
||||
result["cgroup"] = self._cgroup
|
||||
|
||||
if self._extra_hosts.has_hosts():
|
||||
result["extra_hosts"] = self._extra_hosts.render()
|
||||
|
||||
if self._init is not None:
|
||||
result["init"] = self._init
|
||||
|
||||
if self._read_only is not None:
|
||||
result["read_only"] = self._read_only
|
||||
|
||||
if self._grace_period is not None:
|
||||
result["stop_grace_period"] = f"{self._grace_period}s"
|
||||
|
||||
if self._user:
|
||||
result["user"] = self._user
|
||||
|
||||
for g in self.get_additional_groups():
|
||||
self.add_group(g)
|
||||
|
||||
if self._group_add:
|
||||
result["group_add"] = sorted(self._group_add, key=lambda g: (isinstance(g, str), g))
|
||||
|
||||
if self._shm_size is not None:
|
||||
result["shm_size"] = f"{self._shm_size}M"
|
||||
|
||||
if self._privileged is not None:
|
||||
result["privileged"] = self._privileged
|
||||
|
||||
if self._cap_drop:
|
||||
result["cap_drop"] = sorted(self._cap_drop)
|
||||
|
||||
if self._cap_add:
|
||||
result["cap_add"] = sorted(self._cap_add)
|
||||
|
||||
if self._security_opt.has_opts():
|
||||
result["security_opt"] = self._security_opt.render()
|
||||
|
||||
if self._network_mode:
|
||||
result["network_mode"] = self._network_mode
|
||||
|
||||
if self.sysctls.has_sysctls():
|
||||
result["sysctls"] = self.sysctls.render()
|
||||
|
||||
if self._network_mode != "host":
|
||||
if self.ports.has_ports():
|
||||
result["ports"] = self.ports.render()
|
||||
|
||||
if self.expose.has_ports():
|
||||
result["expose"] = self.expose.render()
|
||||
|
||||
if self._entrypoint:
|
||||
result["entrypoint"] = self._entrypoint
|
||||
|
||||
if self._command:
|
||||
result["command"] = self._command
|
||||
|
||||
if self.devices.has_devices():
|
||||
result["devices"] = self.devices.render()
|
||||
|
||||
if self.deploy.has_deploy():
|
||||
result["deploy"] = self.deploy.render()
|
||||
|
||||
if self.environment.has_variables():
|
||||
result["environment"] = self.environment.render()
|
||||
|
||||
if self.labels.has_labels():
|
||||
result["labels"] = self.labels.render()
|
||||
|
||||
if self.dns.has_dns_nameservers():
|
||||
result["dns"] = self.dns.render_dns_nameservers()
|
||||
|
||||
if self.dns.has_dns_searches():
|
||||
result["dns_search"] = self.dns.render_dns_searches()
|
||||
|
||||
if self.dns.has_dns_opts():
|
||||
result["dns_opt"] = self.dns.render_dns_opts()
|
||||
|
||||
if self.depends.has_dependencies():
|
||||
result["depends_on"] = self.depends.render()
|
||||
|
||||
if self._storage.has_mounts():
|
||||
result["volumes"] = self._storage.render()
|
||||
|
||||
if self._tmpfs.has_tmpfs():
|
||||
result["tmpfs"] = self._tmpfs.render()
|
||||
|
||||
return result
|
||||
34
library/2.1.38/depends.py
Normal file
@@ -0,0 +1,34 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_depend_condition_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_depend_condition_or_raise
|
||||
|
||||
|
||||
class Depends:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._dependencies: dict[str, str] = {}
|
||||
|
||||
def add_dependency(self, name: str, condition: str):
|
||||
condition = valid_depend_condition_or_raise(condition)
|
||||
if name in self._dependencies.keys():
|
||||
raise RenderError(f"Dependency [{name}] already added")
|
||||
if name not in self._render_instance.container_names():
|
||||
raise RenderError(
|
||||
f"Dependency [{name}] not found in defined containers. "
|
||||
f"Available containers: [{', '.join(self._render_instance.container_names())}]"
|
||||
)
|
||||
self._dependencies[name] = condition
|
||||
|
||||
def has_dependencies(self):
|
||||
return len(self._dependencies) > 0
|
||||
|
||||
def render(self):
|
||||
return {d: {"condition": c} for d, c in self._dependencies.items()}
|
||||
24
library/2.1.38/deploy.py
Normal file
@@ -0,0 +1,24 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .resources import Resources
|
||||
except ImportError:
|
||||
from resources import Resources
|
||||
|
||||
|
||||
class Deploy:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self.resources: Resources = Resources(self._render_instance)
|
||||
|
||||
def has_deploy(self):
|
||||
return self.resources.has_resources()
|
||||
|
||||
def render(self):
|
||||
if self.resources.has_resources():
|
||||
return {"resources": self.resources.render()}
|
||||
|
||||
return {}
|
||||
37
library/2.1.38/deps.py
Normal file
@@ -0,0 +1,37 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .deps_postgres import PostgresContainer, PostgresConfig
|
||||
from .deps_redis import RedisContainer, RedisConfig
|
||||
from .deps_mariadb import MariadbContainer, MariadbConfig
|
||||
from .deps_mongodb import MongoDBContainer, MongoDBConfig
|
||||
from .deps_perms import PermsContainer
|
||||
except ImportError:
|
||||
from deps_postgres import PostgresContainer, PostgresConfig
|
||||
from deps_redis import RedisContainer, RedisConfig
|
||||
from deps_mariadb import MariadbContainer, MariadbConfig
|
||||
from deps_mongodb import MongoDBContainer, MongoDBConfig
|
||||
from deps_perms import PermsContainer
|
||||
|
||||
|
||||
class Deps:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
|
||||
def perms(self, name: str):
|
||||
return PermsContainer(self._render_instance, name)
|
||||
|
||||
def postgres(self, name: str, image: str, config: PostgresConfig, perms_instance: PermsContainer):
|
||||
return PostgresContainer(self._render_instance, name, image, config, perms_instance)
|
||||
|
||||
def redis(self, name: str, image: str, config: RedisConfig, perms_instance: PermsContainer):
|
||||
return RedisContainer(self._render_instance, name, image, config, perms_instance)
|
||||
|
||||
def mariadb(self, name: str, image: str, config: MariadbConfig, perms_instance: PermsContainer):
|
||||
return MariadbContainer(self._render_instance, name, image, config, perms_instance)
|
||||
|
||||
def mongodb(self, name: str, image: str, config: MongoDBConfig, perms_instance: PermsContainer):
|
||||
return MongoDBContainer(self._render_instance, name, image, config, perms_instance)
|
||||
81
library/2.1.38/deps_mariadb.py
Normal file
@@ -0,0 +1,81 @@
|
||||
from typing import TYPE_CHECKING, TypedDict, NotRequired
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .deps_perms import PermsContainer
|
||||
from .validations import valid_port_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from deps_perms import PermsContainer
|
||||
from validations import valid_port_or_raise
|
||||
|
||||
|
||||
class MariadbConfig(TypedDict):
|
||||
user: str
|
||||
password: str
|
||||
database: str
|
||||
root_password: NotRequired[str]
|
||||
port: NotRequired[int]
|
||||
auto_upgrade: NotRequired[bool]
|
||||
volume: "IxStorage"
|
||||
|
||||
|
||||
class MariadbContainer:
|
||||
def __init__(
|
||||
self, render_instance: "Render", name: str, image: str, config: MariadbConfig, perms_instance: PermsContainer
|
||||
):
|
||||
self._render_instance = render_instance
|
||||
self._name = name
|
||||
self._config = config
|
||||
|
||||
for key in ("user", "password", "database", "volume"):
|
||||
if key not in config:
|
||||
raise RenderError(f"Expected [{key}] to be set for mariadb")
|
||||
|
||||
port = valid_port_or_raise(self._get_port())
|
||||
root_password = config.get("root_password") or config["password"]
|
||||
auto_upgrade = config.get("auto_upgrade", True)
|
||||
|
||||
self._get_repo(image, ("mariadb"))
|
||||
c = self._render_instance.add_container(name, image)
|
||||
c.set_user(999, 999)
|
||||
c.healthcheck.set_test("mariadb")
|
||||
c.remove_devices()
|
||||
|
||||
c.add_storage("/var/lib/mysql", config["volume"])
|
||||
perms_instance.add_or_skip_action(
|
||||
f"{self._name}_mariadb_data", config["volume"], {"uid": 999, "gid": 999, "mode": "check"}
|
||||
)
|
||||
|
||||
c.environment.add_env("MARIADB_USER", config["user"])
|
||||
c.environment.add_env("MARIADB_PASSWORD", config["password"])
|
||||
c.environment.add_env("MARIADB_ROOT_PASSWORD", root_password)
|
||||
c.environment.add_env("MARIADB_DATABASE", config["database"])
|
||||
c.environment.add_env("MARIADB_AUTO_UPGRADE", str(auto_upgrade).lower())
|
||||
c.set_command(["--port", str(port)])
|
||||
|
||||
# Store container for further configuration
|
||||
# For example: c.depends.add_dependency("other_container", "service_started")
|
||||
self._container = c
|
||||
|
||||
def _get_port(self):
|
||||
return self._config.get("port") or 3306
|
||||
|
||||
def _get_repo(self, image, supported_repos):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
repo = images[image].get("repository")
|
||||
if not repo:
|
||||
raise RenderError("Could not determine repo")
|
||||
if repo not in supported_repos:
|
||||
raise RenderError(f"Unsupported repo [{repo}] for mariadb. Supported repos: {', '.join(supported_repos)}")
|
||||
return repo
|
||||
|
||||
@property
|
||||
def container(self):
|
||||
return self._container
|
||||
91
library/2.1.38/deps_mongodb.py
Normal file
@@ -0,0 +1,91 @@
|
||||
import urllib.parse
|
||||
from typing import TYPE_CHECKING, TypedDict
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .deps_perms import PermsContainer
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from deps_perms import PermsContainer
|
||||
|
||||
|
||||
class MongoDBConfig(TypedDict):
|
||||
user: str
|
||||
password: str
|
||||
database: str
|
||||
volume: "IxStorage"
|
||||
|
||||
|
||||
class MongoDBContainer:
|
||||
def __init__(
|
||||
self, render_instance: "Render", name: str, image: str, config: MongoDBConfig, perms_instance: PermsContainer
|
||||
):
|
||||
self._render_instance = render_instance
|
||||
self._name = name
|
||||
self._config = config
|
||||
self._data_dir = "/data/db"
|
||||
|
||||
for key in ("user", "password", "database", "volume"):
|
||||
if key not in config:
|
||||
raise RenderError(f"Expected [{key}] to be set for mongodb")
|
||||
|
||||
c = self._render_instance.add_container(name, image)
|
||||
|
||||
c.set_user(999, 999)
|
||||
c.healthcheck.set_test("mongodb")
|
||||
c.remove_devices()
|
||||
c.add_storage(self._data_dir, config["volume"])
|
||||
|
||||
c.environment.add_env("MONGO_INITDB_ROOT_USERNAME", config["user"])
|
||||
c.environment.add_env("MONGO_INITDB_ROOT_PASSWORD", config["password"])
|
||||
c.environment.add_env("MONGO_INITDB_DATABASE", config["database"])
|
||||
|
||||
perms_instance.add_or_skip_action(
|
||||
f"{self._name}_mongodb_data", config["volume"], {"uid": 999, "gid": 999, "mode": "check"}
|
||||
)
|
||||
|
||||
self._get_repo(image, ("mongodb"))
|
||||
|
||||
# Store container for further configuration
|
||||
# For example: c.depends.add_dependency("other_container", "service_started")
|
||||
self._container = c
|
||||
|
||||
@property
|
||||
def container(self):
|
||||
return self._container
|
||||
|
||||
def _get_port(self):
|
||||
return self._config.get("port") or 27017
|
||||
|
||||
def _get_repo(self, image, supported_repos):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
repo = images[image].get("repository")
|
||||
if not repo:
|
||||
raise RenderError("Could not determine repo")
|
||||
if repo not in supported_repos:
|
||||
raise RenderError(f"Unsupported repo [{repo}] for mongodb. Supported repos: {', '.join(supported_repos)}")
|
||||
return repo
|
||||
|
||||
def get_url(self, variant: str):
|
||||
user = urllib.parse.quote_plus(self._config["user"])
|
||||
password = urllib.parse.quote_plus(self._config["password"])
|
||||
creds = f"{user}:{password}"
|
||||
addr = f"{self._name}:{self._get_port()}"
|
||||
db = self._config["database"]
|
||||
|
||||
urls = {
|
||||
"mongodb": f"mongodb://{creds}@{addr}/{db}",
|
||||
"host_port": addr,
|
||||
}
|
||||
|
||||
if variant not in urls:
|
||||
raise RenderError(f"Expected [variant] to be one of [{', '.join(urls.keys())}], got [{variant}]")
|
||||
return urls[variant]
|
||||
252
library/2.1.38/deps_perms.py
Normal file
@@ -0,0 +1,252 @@
|
||||
import json
|
||||
import pathlib
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .validations import valid_octal_mode_or_raise, valid_fs_path_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from validations import valid_octal_mode_or_raise, valid_fs_path_or_raise
|
||||
|
||||
|
||||
class PermsContainer:
|
||||
def __init__(self, render_instance: "Render", name: str):
|
||||
self._render_instance = render_instance
|
||||
self._name = name
|
||||
self.actions: set[str] = set()
|
||||
self.parsed_configs: list[dict] = []
|
||||
|
||||
def add_or_skip_action(self, identifier: str, volume_config: "IxStorage", action_config: dict):
|
||||
identifier = self.normalize_identifier_for_path(identifier)
|
||||
if identifier in self.actions:
|
||||
raise RenderError(f"Action with id [{identifier}] already used for another permission action")
|
||||
|
||||
parsed_action = self.parse_action(identifier, volume_config, action_config)
|
||||
if parsed_action:
|
||||
self.parsed_configs.append(parsed_action)
|
||||
self.actions.add(identifier)
|
||||
|
||||
def parse_action(self, identifier: str, volume_config: "IxStorage", action_config: dict):
|
||||
valid_modes = [
|
||||
"always", # Always set permissions, without checking.
|
||||
"check", # Checks if permissions are correct, and set them if not.
|
||||
]
|
||||
mode = action_config.get("mode", "check")
|
||||
uid = action_config.get("uid", None)
|
||||
gid = action_config.get("gid", None)
|
||||
chmod = action_config.get("chmod", None)
|
||||
recursive = action_config.get("recursive", False)
|
||||
mount_path = pathlib.Path("/mnt/permission", identifier).as_posix()
|
||||
is_temporary = False
|
||||
|
||||
vol_type = volume_config.get("type", "")
|
||||
match vol_type:
|
||||
case "temporary":
|
||||
# If it is a temporary volume, we force auto permissions
|
||||
# and set is_temporary to True, so it will be cleaned up
|
||||
is_temporary = True
|
||||
recursive = True
|
||||
case "volume":
|
||||
if not volume_config.get("volume_config", {}).get("auto_permissions", False):
|
||||
return None
|
||||
case "host_path":
|
||||
host_path_config = volume_config.get("host_path_config", {})
|
||||
# Skip when ACL enabled
|
||||
if host_path_config.get("acl_enable", False):
|
||||
return None
|
||||
if not host_path_config.get("auto_permissions", False):
|
||||
return None
|
||||
case "ix_volume":
|
||||
ix_vol_config = volume_config.get("ix_volume_config", {})
|
||||
# Skip when ACL enabled
|
||||
if ix_vol_config.get("acl_enable", False):
|
||||
return None
|
||||
# For ix_volumes, we default to auto_permissions = True
|
||||
if not ix_vol_config.get("auto_permissions", True):
|
||||
return None
|
||||
case _:
|
||||
# Skip for other types
|
||||
return None
|
||||
|
||||
if mode not in valid_modes:
|
||||
raise RenderError(f"Expected [mode] to be one of [{', '.join(valid_modes)}], got [{mode}]")
|
||||
if not isinstance(uid, int) or not isinstance(gid, int):
|
||||
raise RenderError("Expected [uid] and [gid] to be set when [auto_permissions] is enabled")
|
||||
if chmod is not None:
|
||||
chmod = valid_octal_mode_or_raise(chmod)
|
||||
|
||||
mount_path = valid_fs_path_or_raise(mount_path)
|
||||
return {
|
||||
"mount_path": mount_path,
|
||||
"volume_config": volume_config,
|
||||
"action_data": {
|
||||
"mount_path": mount_path,
|
||||
"is_temporary": is_temporary,
|
||||
"identifier": identifier,
|
||||
"recursive": recursive,
|
||||
"mode": mode,
|
||||
"uid": uid,
|
||||
"gid": gid,
|
||||
"chmod": chmod,
|
||||
},
|
||||
}
|
||||
|
||||
def normalize_identifier_for_path(self, identifier: str):
|
||||
return identifier.rstrip("/").lstrip("/").lower().replace("/", "_").replace(".", "-").replace(" ", "-")
|
||||
|
||||
def has_actions(self):
|
||||
return bool(self.actions)
|
||||
|
||||
def activate(self):
|
||||
if len(self.parsed_configs) != len(self.actions):
|
||||
raise RenderError("Number of actions and parsed configs does not match")
|
||||
|
||||
if not self.has_actions():
|
||||
raise RenderError("No actions added. Check if there are actions before activating")
|
||||
|
||||
# Add the container and set it up
|
||||
c = self._render_instance.add_container(self._name, "python_permissions_image")
|
||||
c.set_user(0, 0)
|
||||
c.add_caps(["CHOWN", "FOWNER", "DAC_OVERRIDE"])
|
||||
c.set_network_mode("none")
|
||||
|
||||
# Don't attach any devices
|
||||
c.remove_devices()
|
||||
|
||||
c.deploy.resources.set_profile("medium")
|
||||
c.restart.set_policy("on-failure", maximum_retry_count=1)
|
||||
c.healthcheck.disable()
|
||||
|
||||
c.set_entrypoint(["python3", "/script/run.py"])
|
||||
script = "#!/usr/bin/env python3\n"
|
||||
script += get_script()
|
||||
c.configs.add("permissions_run_script", script, "/script/run.py", "0700")
|
||||
|
||||
actions_data: list[dict] = []
|
||||
for parsed in self.parsed_configs:
|
||||
c.add_storage(parsed["mount_path"], parsed["volume_config"])
|
||||
actions_data.append(parsed["action_data"])
|
||||
|
||||
actions_data_json = json.dumps(actions_data)
|
||||
c.configs.add("permissions_actions_data", actions_data_json, "/script/actions.json", "0500")
|
||||
|
||||
|
||||
def get_script():
|
||||
return """
|
||||
import os
|
||||
import json
|
||||
import time
|
||||
import shutil
|
||||
|
||||
with open("/script/actions.json", "r") as f:
|
||||
actions_data = json.load(f)
|
||||
|
||||
if not actions_data:
|
||||
# If this script is called, there should be actions data
|
||||
raise ValueError("No actions data found")
|
||||
|
||||
def fix_perms(path, chmod, recursive=False):
|
||||
print(f"Changing permissions{' recursively ' if recursive else ' '}to {chmod} on: [{path}]")
|
||||
os.chmod(path, int(chmod, 8))
|
||||
if recursive:
|
||||
for root, dirs, files in os.walk(path):
|
||||
for f in files:
|
||||
os.chmod(os.path.join(root, f), int(chmod, 8))
|
||||
print("Permissions after changes:")
|
||||
print_chmod_stat()
|
||||
|
||||
def fix_owner(path, uid, gid, recursive=False):
|
||||
print(f"Changing ownership{' recursively ' if recursive else ' '}to {uid}:{gid} on: [{path}]")
|
||||
os.chown(path, uid, gid)
|
||||
if recursive:
|
||||
for root, dirs, files in os.walk(path):
|
||||
for f in files:
|
||||
os.chown(os.path.join(root, f), uid, gid)
|
||||
print("Ownership after changes:")
|
||||
print_chown_stat()
|
||||
|
||||
def print_chown_stat():
|
||||
curr_stat = os.stat(action["mount_path"])
|
||||
print(f"Ownership: [{curr_stat.st_uid}:{curr_stat.st_gid}]")
|
||||
|
||||
def print_chmod_stat():
|
||||
curr_stat = os.stat(action["mount_path"])
|
||||
print(f"Permissions: [{oct(curr_stat.st_mode)[3:]}]")
|
||||
|
||||
def print_chown_diff(curr_stat, uid, gid):
|
||||
print(f"Ownership: wanted [{uid}:{gid}], got [{curr_stat.st_uid}:{curr_stat.st_gid}].")
|
||||
|
||||
def print_chmod_diff(curr_stat, mode):
|
||||
print(f"Permissions: wanted [{mode}], got [{oct(curr_stat.st_mode)[3:]}].")
|
||||
|
||||
def perform_action(action):
|
||||
start_time = time.time()
|
||||
print(f"=== Applying configuration on volume with identifier [{action['identifier']}] ===")
|
||||
|
||||
if not os.path.isdir(action["mount_path"]):
|
||||
print(f"Path [{action['mount_path']}] is not a directory, skipping...")
|
||||
return
|
||||
|
||||
if action["is_temporary"]:
|
||||
print(f"Path [{action['mount_path']}] is a temporary directory, ensuring it is empty...")
|
||||
for item in os.listdir(action["mount_path"]):
|
||||
item_path = os.path.join(action["mount_path"], item)
|
||||
|
||||
# Exclude the safe directory, where we can use to mount files temporarily
|
||||
if os.path.basename(item_path) == "ix-safe":
|
||||
continue
|
||||
if os.path.isdir(item_path):
|
||||
shutil.rmtree(item_path)
|
||||
else:
|
||||
os.remove(item_path)
|
||||
|
||||
if not action["is_temporary"] and os.listdir(action["mount_path"]):
|
||||
print(f"Path [{action['mount_path']}] is not empty, skipping...")
|
||||
return
|
||||
|
||||
print(f"Current Ownership and Permissions on [{action['mount_path']}]:")
|
||||
curr_stat = os.stat(action["mount_path"])
|
||||
print_chown_diff(curr_stat, action["uid"], action["gid"])
|
||||
print_chmod_diff(curr_stat, action["chmod"])
|
||||
print("---")
|
||||
|
||||
if action["mode"] == "always":
|
||||
fix_owner(action["mount_path"], action["uid"], action["gid"], action["recursive"])
|
||||
if not action["chmod"]:
|
||||
print("Skipping permissions check, chmod is falsy")
|
||||
else:
|
||||
fix_perms(action["mount_path"], action["chmod"], action["recursive"])
|
||||
return
|
||||
|
||||
elif action["mode"] == "check":
|
||||
if curr_stat.st_uid != action["uid"] or curr_stat.st_gid != action["gid"]:
|
||||
print("Ownership is incorrect. Fixing...")
|
||||
fix_owner(action["mount_path"], action["uid"], action["gid"], action["recursive"])
|
||||
else:
|
||||
print("Ownership is correct. Skipping...")
|
||||
|
||||
if not action["chmod"]:
|
||||
print("Skipping permissions check, chmod is falsy")
|
||||
else:
|
||||
if oct(curr_stat.st_mode)[3:] != action["chmod"]:
|
||||
print("Permissions are incorrect. Fixing...")
|
||||
fix_perms(action["mount_path"], action["chmod"], action["recursive"])
|
||||
else:
|
||||
print("Permissions are correct. Skipping...")
|
||||
|
||||
print(f"Time taken: {(time.time() - start_time) * 1000:.2f}ms")
|
||||
print(f"=== Finished applying configuration on volume with identifier [{action['identifier']}] ==")
|
||||
print()
|
||||
|
||||
if __name__ == "__main__":
|
||||
start_time = time.time()
|
||||
for action in actions_data:
|
||||
perform_action(action)
|
||||
print(f"Total time taken: {(time.time() - start_time) * 1000:.2f}ms")
|
||||
"""
|
||||
160
library/2.1.38/deps_postgres.py
Normal file
@@ -0,0 +1,160 @@
|
||||
import urllib.parse
|
||||
from typing import TYPE_CHECKING, TypedDict, NotRequired
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
from storage import IxStorage
|
||||
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .deps_perms import PermsContainer
|
||||
from .validations import valid_port_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from deps_perms import PermsContainer
|
||||
from validations import valid_port_or_raise
|
||||
|
||||
|
||||
class PostgresConfig(TypedDict):
|
||||
user: str
|
||||
password: str
|
||||
database: str
|
||||
port: NotRequired[int]
|
||||
volume: "IxStorage"
|
||||
|
||||
|
||||
MAX_POSTGRES_VERSION = 17
|
||||
|
||||
|
||||
class PostgresContainer:
|
||||
def __init__(
|
||||
self, render_instance: "Render", name: str, image: str, config: PostgresConfig, perms_instance: PermsContainer
|
||||
):
|
||||
self._render_instance = render_instance
|
||||
self._name = name
|
||||
self._config = config
|
||||
self._data_dir = "/var/lib/postgresql/data"
|
||||
self._upgrade_name = f"{self._name}_upgrade"
|
||||
self._upgrade_container = None
|
||||
|
||||
for key in ("user", "password", "database", "volume"):
|
||||
if key not in config:
|
||||
raise RenderError(f"Expected [{key}] to be set for postgres")
|
||||
|
||||
port = valid_port_or_raise(self._get_port())
|
||||
|
||||
c = self._render_instance.add_container(name, image)
|
||||
|
||||
c.set_user(999, 999)
|
||||
c.healthcheck.set_test("postgres")
|
||||
c.remove_devices()
|
||||
c.add_storage(self._data_dir, config["volume"])
|
||||
|
||||
common_variables = {
|
||||
"POSTGRES_USER": config["user"],
|
||||
"POSTGRES_PASSWORD": config["password"],
|
||||
"POSTGRES_DB": config["database"],
|
||||
"PGPORT": port,
|
||||
}
|
||||
|
||||
for k, v in common_variables.items():
|
||||
c.environment.add_env(k, v)
|
||||
|
||||
perms_instance.add_or_skip_action(
|
||||
f"{self._name}_postgres_data", config["volume"], {"uid": 999, "gid": 999, "mode": "check"}
|
||||
)
|
||||
|
||||
repo = self._get_repo(
|
||||
image,
|
||||
(
|
||||
"postgres",
|
||||
"postgis/postgis",
|
||||
"pgvector/pgvector",
|
||||
"tensorchord/pgvecto-rs",
|
||||
"ghcr.io/immich-app/postgres",
|
||||
),
|
||||
)
|
||||
# eg we don't want to handle upgrades of pg_vector at the moment
|
||||
if repo == "postgres":
|
||||
target_major_version = self._get_target_version(image)
|
||||
upg = self._render_instance.add_container(self._upgrade_name, "postgres_upgrade_image")
|
||||
upg.set_entrypoint(["/bin/bash", "-c", "/upgrade.sh"])
|
||||
upg.restart.set_policy("on-failure", 1)
|
||||
upg.set_user(999, 999)
|
||||
upg.healthcheck.disable()
|
||||
upg.remove_devices()
|
||||
upg.add_storage(self._data_dir, config["volume"])
|
||||
for k, v in common_variables.items():
|
||||
upg.environment.add_env(k, v)
|
||||
|
||||
upg.environment.add_env("TARGET_VERSION", target_major_version)
|
||||
upg.environment.add_env("DATA_DIR", self._data_dir)
|
||||
|
||||
self._upgrade_container = upg
|
||||
|
||||
c.depends.add_dependency(self._upgrade_name, "service_completed_successfully")
|
||||
|
||||
# Store container for further configuration
|
||||
# For example: c.depends.add_dependency("other_container", "service_started")
|
||||
self._container = c
|
||||
|
||||
@property
|
||||
def container(self):
|
||||
return self._container
|
||||
|
||||
def add_dependency(self, container_name: str, condition: str):
|
||||
self._container.depends.add_dependency(container_name, condition)
|
||||
if self._upgrade_container:
|
||||
self._upgrade_container.depends.add_dependency(container_name, condition)
|
||||
|
||||
def _get_port(self):
|
||||
return self._config.get("port") or 5432
|
||||
|
||||
def _get_repo(self, image, supported_repos):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
repo = images[image].get("repository")
|
||||
if not repo:
|
||||
raise RenderError("Could not determine repo")
|
||||
if repo not in supported_repos:
|
||||
raise RenderError(f"Unsupported repo [{repo}] for postgres. Supported repos: {', '.join(supported_repos)}")
|
||||
return repo
|
||||
|
||||
def _get_target_version(self, image):
|
||||
images = self._render_instance.values["images"]
|
||||
if image not in images:
|
||||
raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
|
||||
tag = images[image].get("tag", "")
|
||||
tag = str(tag) # Account for tags like 16.6
|
||||
target_major_version = tag.split(".")[0]
|
||||
|
||||
try:
|
||||
target_major_version = int(target_major_version)
|
||||
except ValueError:
|
||||
raise RenderError(f"Could not determine target major version from tag [{tag}]")
|
||||
|
||||
if target_major_version > MAX_POSTGRES_VERSION:
|
||||
raise RenderError(f"Postgres version [{target_major_version}] is not supported")
|
||||
|
||||
return target_major_version
|
||||
|
||||
def get_url(self, variant: str):
|
||||
user = urllib.parse.quote_plus(self._config["user"])
|
||||
password = urllib.parse.quote_plus(self._config["password"])
|
||||
creds = f"{user}:{password}"
|
||||
addr = f"{self._name}:{self._get_port()}"
|
||||
db = self._config["database"]
|
||||
|
||||
urls = {
|
||||
"postgres": f"postgres://{creds}@{addr}/{db}?sslmode=disable",
|
||||
"postgresql": f"postgresql://{creds}@{addr}/{db}?sslmode=disable",
|
||||
"postgresql_no_creds": f"postgresql://{addr}/{db}?sslmode=disable",
|
||||
"host_port": addr,
|
||||
}
|
||||
|
||||
if variant not in urls:
|
||||
raise RenderError(f"Expected [variant] to be one of [{', '.join(urls.keys())}], got [{variant}]")
|
||||
return urls[variant]
|
||||
83
library/2.1.38/deps_redis.py
Normal file
@@ -0,0 +1,83 @@
import urllib.parse
from typing import TYPE_CHECKING, TypedDict, NotRequired

if TYPE_CHECKING:
    from render import Render
    from storage import IxStorage

try:
    from .error import RenderError
    from .deps_perms import PermsContainer
    from .validations import valid_port_or_raise, valid_redis_password_or_raise
except ImportError:
    from error import RenderError
    from deps_perms import PermsContainer
    from validations import valid_port_or_raise, valid_redis_password_or_raise


class RedisConfig(TypedDict):
    password: str
    port: NotRequired[int]
    volume: "IxStorage"


class RedisContainer:
    def __init__(
        self, render_instance: "Render", name: str, image: str, config: RedisConfig, perms_instance: PermsContainer
    ):
        self._render_instance = render_instance
        self._name = name
        self._config = config

        for key in ("password", "volume"):
            if key not in config:
                raise RenderError(f"Expected [{key}] to be set for redis")

        valid_redis_password_or_raise(config["password"])

        port = valid_port_or_raise(self._get_port())
        self._get_repo(image, ("bitnami/redis"))

        c = self._render_instance.add_container(name, image)
        c.set_user(1001, 0)
        c.healthcheck.set_test("redis")
        c.remove_devices()

        c.add_storage("/bitnami/redis/data", config["volume"])
        perms_instance.add_or_skip_action(
            f"{self._name}_redis_data", config["volume"], {"uid": 1001, "gid": 0, "mode": "check"}
        )

        c.environment.add_env("ALLOW_EMPTY_PASSWORD", "no")
        c.environment.add_env("REDIS_PASSWORD", config["password"])
        c.environment.add_env("REDIS_PORT_NUMBER", port)

        # Store container for further configuration
        # For example: c.depends.add_dependency("other_container", "service_started")
        self._container = c

    def _get_port(self):
        return self._config.get("port") or 6379

    def _get_repo(self, image, supported_repos):
        images = self._render_instance.values["images"]
        if image not in images:
            raise RenderError(f"Image [{image}] not found in values. Available images: [{', '.join(images.keys())}]")
        repo = images[image].get("repository")
        if not repo:
            raise RenderError("Could not determine repo")
        if repo not in supported_repos:
            raise RenderError(f"Unsupported repo [{repo}] for redis. Supported repos: {', '.join(supported_repos)}")
        return repo

    def get_url(self, variant: str):
        addr = f"{self._name}:{self._get_port()}"
        password = urllib.parse.quote_plus(self._config["password"])

        match variant:
            case "redis":
                return f"redis://default:{password}@{addr}"

    @property
    def container(self):
        return self._container
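A short, hedged example of consuming the Redis helper above from an app container (the `cache` and `app` names are assumptions):

# e.g. building a Chatwoot-style REDIS_URL from the dependency container
redis_url = cache.get_url("redis")  # -> "redis://default:<password>@redis:6379"
app.environment.add_env("REDIS_URL", redis_url)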
31
library/2.1.38/device.py
Normal file
@@ -0,0 +1,31 @@
try:
    from .error import RenderError
    from .validations import valid_fs_path_or_raise, allowed_device_or_raise, valid_cgroup_perm_or_raise
except ImportError:
    from error import RenderError
    from validations import valid_fs_path_or_raise, allowed_device_or_raise, valid_cgroup_perm_or_raise


class Device:
    def __init__(self, host_device: str, container_device: str, cgroup_perm: str = "", allow_disallowed=False):
        hd = valid_fs_path_or_raise(host_device.rstrip("/"))
        cd = valid_fs_path_or_raise(container_device.rstrip("/"))
        if not hd or not cd:
            raise RenderError(
                "Expected [host_device] and [container_device] to be set. "
                f"Got host_device [{host_device}] and container_device [{container_device}]"
            )

        cgroup_perm = valid_cgroup_perm_or_raise(cgroup_perm)
        if not allow_disallowed:
            hd = allowed_device_or_raise(hd)

        self.cgroup_perm: str = cgroup_perm
        self.host_device: str = hd
        self.container_device: str = cd

    def render(self):
        result = f"{self.host_device}:{self.container_device}"
        if self.cgroup_perm:
            result += f":{self.cgroup_perm}"
        return result
54
library/2.1.38/device_cgroup_rules.py
Normal file
@@ -0,0 +1,54 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from render import Render

try:
    from .error import RenderError
    from .validations import valid_device_cgroup_rule_or_raise
except ImportError:
    from error import RenderError
    from validations import valid_device_cgroup_rule_or_raise


class DeviceCGroupRule:
    def __init__(self, rule: str):
        rule = valid_device_cgroup_rule_or_raise(rule)
        parts = rule.split(" ")
        major, minor = parts[1].split(":")

        self._type = parts[0]
        self._major = major
        self._minor = minor
        self._permissions = parts[2]

    def get_key(self):
        return f"{self._type}_{self._major}_{self._minor}"

    def render(self):
        return f"{self._type} {self._major}:{self._minor} {self._permissions}"


class DeviceCGroupRules:
    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self._rules: set[DeviceCGroupRule] = set()
        self._track_rule_combos: set[str] = set()

    def add_rule(self, rule: str):
        dev_group_rule = DeviceCGroupRule(rule)
        if dev_group_rule in self._rules:
            raise RenderError(f"Device Group Rule [{rule}] already added")

        rule_key = dev_group_rule.get_key()
        if rule_key in self._track_rule_combos:
            raise RenderError(f"Device Group Rule [{rule}] has already been added for this device group")

        self._rules.add(dev_group_rule)
        self._track_rule_combos.add(rule_key)

    def has_rules(self):
        return len(self._rules) > 0

    def render(self):
        return sorted([rule.render() for rule in self._rules])
71
library/2.1.38/devices.py
Normal file
@@ -0,0 +1,71 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from render import Render

try:
    from .error import RenderError
    from .device import Device
except ImportError:
    from error import RenderError
    from device import Device


class Devices:
    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self._devices: set[Device] = set()

        # Tracks all container device paths to make sure they are not duplicated
        self._container_device_paths: set[str] = set()
        # Scan values for devices we should automatically add
        # for example /dev/dri for gpus
        self._auto_add_devices_from_values()

    def _auto_add_devices_from_values(self):
        resources = self._render_instance.values.get("resources", {})

        if resources.get("gpus", {}).get("use_all_gpus", False):
            self.add_device("/dev/dri", "/dev/dri", allow_disallowed=True)
            if resources["gpus"].get("kfd_device_exists", False):
                self.add_device("/dev/kfd", "/dev/kfd", allow_disallowed=True)  # AMD ROCm

    def add_device(self, host_device: str, container_device: str, cgroup_perm: str = "", allow_disallowed=False):
        # Host device can be mapped to multiple container devices,
        # so we only make sure container devices are not duplicated
        if container_device in self._container_device_paths:
            raise RenderError(f"Device with container path [{container_device}] already added")

        self._devices.add(Device(host_device, container_device, cgroup_perm, allow_disallowed))
        self._container_device_paths.add(container_device)

    def add_usb_bus(self):
        self.add_device("/dev/bus/usb", "/dev/bus/usb", allow_disallowed=True)

    def _add_snd_device(self):
        self.add_device("/dev/snd", "/dev/snd", allow_disallowed=True)

    def _add_tun_device(self):
        self.add_device("/dev/net/tun", "/dev/net/tun", allow_disallowed=True)

    def has_devices(self):
        return len(self._devices) > 0

    # Mainly will be used from dependencies
    # There is no reason to pass devices to
    # redis or postgres for example
    def remove_devices(self):
        self._devices.clear()
        self._container_device_paths.clear()

    # Check if there are any gpu devices
    # Used to determine if we should add groups
    # like 'video' to the container
    def has_gpus(self):
        for d in self._devices:
            if d.host_device == "/dev/dri":
                return True
        return False

    def render(self) -> list[str]:
        return sorted([d.render() for d in self._devices])
79
library/2.1.38/dns.py
Normal file
@@ -0,0 +1,79 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from render import Render

try:
    from .error import RenderError
    from .validations import allowed_dns_opt_or_raise
except ImportError:
    from error import RenderError
    from validations import allowed_dns_opt_or_raise


class Dns:
    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self._dns_options: set[str] = set()
        self._dns_searches: set[str] = set()
        self._dns_nameservers: set[str] = set()

        self._auto_add_dns_opts_from_values()
        self._auto_add_dns_searches_from_values()
        self._auto_add_dns_nameservers_from_values()

    def _get_dns_opt_keys(self):
        return [self._get_key_from_opt(opt) for opt in self._dns_options]

    def _get_key_from_opt(self, opt):
        return opt.split(":")[0]

    def _auto_add_dns_opts_from_values(self):
        values = self._render_instance.values
        for dns_opt in values.get("network", {}).get("dns_opts", []):
            self.add_dns_opt(dns_opt)

    def _auto_add_dns_searches_from_values(self):
        values = self._render_instance.values
        for dns_search in values.get("network", {}).get("dns_searches", []):
            self.add_dns_search(dns_search)

    def _auto_add_dns_nameservers_from_values(self):
        values = self._render_instance.values
        for dns_nameserver in values.get("network", {}).get("dns_nameservers", []):
            self.add_dns_nameserver(dns_nameserver)

    def add_dns_search(self, dns_search):
        if dns_search in self._dns_searches:
            raise RenderError(f"DNS Search [{dns_search}] already added")
        self._dns_searches.add(dns_search)

    def add_dns_nameserver(self, dns_nameserver):
        if dns_nameserver in self._dns_nameservers:
            raise RenderError(f"DNS Nameserver [{dns_nameserver}] already added")
        self._dns_nameservers.add(dns_nameserver)

    def add_dns_opt(self, dns_opt):
        # eg attempts:3
        key = allowed_dns_opt_or_raise(self._get_key_from_opt(dns_opt))
        if key in self._get_dns_opt_keys():
            raise RenderError(f"DNS Option [{key}] already added")
        self._dns_options.add(dns_opt)

    def has_dns_opts(self):
        return len(self._dns_options) > 0

    def has_dns_searches(self):
        return len(self._dns_searches) > 0

    def has_dns_nameservers(self):
        return len(self._dns_nameservers) > 0

    def render_dns_searches(self):
        return sorted(self._dns_searches)

    def render_dns_opts(self):
        return sorted(self._dns_options)

    def render_dns_nameservers(self):
        return sorted(self._dns_nameservers)
112
library/2.1.38/environment.py
Normal file
@@ -0,0 +1,112 @@
|
||||
from typing import Any, TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .formatter import escape_dollar
|
||||
from .resources import Resources
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from formatter import escape_dollar
|
||||
from resources import Resources
|
||||
|
||||
|
||||
class Environment:
|
||||
def __init__(self, render_instance: "Render", resources: Resources):
|
||||
self._render_instance = render_instance
|
||||
self._resources = resources
|
||||
# Stores variables that user defined
|
||||
self._user_vars: dict[str, Any] = {}
|
||||
# Stores variables that are automatically added (based on values)
|
||||
self._auto_variables: dict[str, Any] = {}
|
||||
# Stores variables that are added by the application developer
|
||||
self._app_dev_variables: dict[str, Any] = {}
|
||||
|
||||
self._skip_generic_variables: bool = render_instance.values.get("skip_generic_variables", False)
|
||||
|
||||
self._auto_add_variables_from_values()
|
||||
|
||||
def _auto_add_variables_from_values(self):
|
||||
if not self._skip_generic_variables:
|
||||
self._add_generic_variables()
|
||||
self._add_nvidia_variables()
|
||||
|
||||
def _add_generic_variables(self):
|
||||
self._auto_variables["TZ"] = self._render_instance.values.get("TZ", "Etc/UTC")
|
||||
self._auto_variables["UMASK"] = self._render_instance.values.get("UMASK", "002")
|
||||
self._auto_variables["UMASK_SET"] = self._render_instance.values.get("UMASK", "002")
|
||||
|
||||
run_as = self._render_instance.values.get("run_as", {})
|
||||
user = run_as.get("user")
|
||||
group = run_as.get("group")
|
||||
if user:
|
||||
self._auto_variables["PUID"] = user
|
||||
self._auto_variables["UID"] = user
|
||||
self._auto_variables["USER_ID"] = user
|
||||
if group:
|
||||
self._auto_variables["PGID"] = group
|
||||
self._auto_variables["GID"] = group
|
||||
self._auto_variables["GROUP_ID"] = group
|
||||
|
||||
def _add_nvidia_variables(self):
|
||||
if self._resources._nvidia_ids:
|
||||
self._auto_variables["NVIDIA_DRIVER_CAPABILITIES"] = "all"
|
||||
self._auto_variables["NVIDIA_VISIBLE_DEVICES"] = ",".join(sorted(self._resources._nvidia_ids))
|
||||
else:
|
||||
self._auto_variables["NVIDIA_VISIBLE_DEVICES"] = "void"
|
||||
|
||||
def _format_value(self, v: Any) -> str:
|
||||
value = str(v)
|
||||
|
||||
# str(bool) returns "True" or "False",
|
||||
# but we want "true" or "false"
|
||||
if isinstance(v, bool):
|
||||
value = value.lower()
|
||||
return value
|
||||
|
||||
def add_env(self, name: str, value: Any):
|
||||
if not name:
|
||||
raise RenderError(f"Environment variable name cannot be empty. [{name}]")
|
||||
if name in self._app_dev_variables.keys():
|
||||
raise RenderError(
|
||||
f"Found duplicate environment variable [{name}] in application developer environment variables."
|
||||
)
|
||||
self._app_dev_variables[name] = value
|
||||
|
||||
def add_user_envs(self, user_env: list[dict]):
|
||||
for item in user_env:
|
||||
if not item.get("name"):
|
||||
raise RenderError(f"Environment variable name cannot be empty. [{item}]")
|
||||
if item["name"] in self._user_vars.keys():
|
||||
raise RenderError(
|
||||
f"Found duplicate environment variable [{item['name']}] in user environment variables."
|
||||
)
|
||||
self._user_vars[item["name"]] = item.get("value")
|
||||
|
||||
def has_variables(self):
|
||||
return len(self._auto_variables) > 0 or len(self._user_vars) > 0 or len(self._app_dev_variables) > 0
|
||||
|
||||
def render(self):
|
||||
result: dict[str, str] = {}
|
||||
|
||||
# Add envs from auto variables
|
||||
result.update({k: self._format_value(v) for k, v in self._auto_variables.items()})
|
||||
|
||||
# Track defined keys for faster lookup
|
||||
defined_keys = set(result.keys())
|
||||
|
||||
# Add envs from application developer (prohibit overwriting auto variables)
|
||||
for k, v in self._app_dev_variables.items():
|
||||
if k in defined_keys:
|
||||
raise RenderError(f"Environment variable [{k}] is already defined automatically from the library.")
|
||||
result[k] = self._format_value(v)
|
||||
defined_keys.add(k)
|
||||
|
||||
# Add envs from user (prohibit overwriting app developer envs and auto variables)
|
||||
for k, v in self._user_vars.items():
|
||||
if k in defined_keys:
|
||||
raise RenderError(f"Environment variable [{k}] is already defined from the application developer.")
|
||||
result[k] = self._format_value(v)
|
||||
|
||||
return {k: escape_dollar(v) for k, v in result.items()}
|
||||
4
library/2.1.38/error.py
Normal file
@@ -0,0 +1,4 @@
class RenderError(Exception):
    """Base class for exceptions in this module."""

    pass
31
library/2.1.38/expose.py
Normal file
@@ -0,0 +1,31 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from render import Render

try:
    from .error import RenderError
    from .validations import valid_port_or_raise, valid_port_protocol_or_raise
except ImportError:
    from error import RenderError
    from validations import valid_port_or_raise, valid_port_protocol_or_raise


class Expose:
    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self._ports: set[str] = set()

    def add_port(self, port: int, protocol: str = "tcp"):
        port = valid_port_or_raise(port)
        protocol = valid_port_protocol_or_raise(protocol)
        key = f"{port}/{protocol}"
        if key in self._ports:
            raise RenderError(f"Exposed port [{port}/{protocol}] already added")
        self._ports.add(key)

    def has_ports(self):
        return len(self._ports) > 0

    def render(self):
        return sorted(self._ports)
33
library/2.1.38/extra_hosts.py
Normal file
@@ -0,0 +1,33 @@
import ipaddress
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from render import Render

try:
    from .error import RenderError
except ImportError:
    from error import RenderError


class ExtraHosts:
    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self._extra_hosts: dict[str, str] = {}

    def add_host(self, host: str, ip: str):
        if not ip == "host-gateway":
            try:
                ipaddress.ip_address(ip)
            except ValueError:
                raise RenderError(f"Invalid IP address [{ip}] for host [{host}]")

        if host in self._extra_hosts:
            raise RenderError(f"Host [{host}] already added with [{self._extra_hosts[host]}]")
        self._extra_hosts[host] = ip

    def has_hosts(self):
        return len(self._extra_hosts) > 0

    def render(self):
        return {host: ip for host, ip in self._extra_hosts.items()}
26
library/2.1.38/formatter.py
Normal file
@@ -0,0 +1,26 @@
import json
import hashlib


def escape_dollar(text: str) -> str:
    return text.replace("$", "$$")


def get_hashed_name_for_volume(prefix: str, config: dict):
    config_hash = hashlib.sha256(json.dumps(config).encode("utf-8")).hexdigest()
    return f"{prefix}_{config_hash}"


def get_hash_with_prefix(prefix: str, data: str):
    return f"{prefix}_{hashlib.sha256(data.encode('utf-8')).hexdigest()}"


def merge_dicts_no_overwrite(dict1, dict2):
    overlapping_keys = dict1.keys() & dict2.keys()
    if overlapping_keys:
        raise ValueError(f"Merging of dicts failed. Overlapping keys: {overlapping_keys}")
    return {**dict1, **dict2}


def get_image_with_hashed_data(image: str, data: str):
    return get_hash_with_prefix(f"ix-{image}", data)
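Illustrative results for the helpers above (the sample values are assumptions for clarity, not tests from this commit):

escape_dollar("pa$$word")                      # -> "pa$$$$word", safe for compose interpolation
merge_dicts_no_overwrite({"a": 1}, {"b": 2})   # -> {"a": 1, "b": 2}
merge_dicts_no_overwrite({"a": 1}, {"a": 2})   # raises ValueError because of the overlapping key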
179
library/2.1.38/functions.py
Normal file
@@ -0,0 +1,179 @@
|
||||
import re
|
||||
import copy
|
||||
import bcrypt
|
||||
import secrets
|
||||
import urllib.parse
|
||||
from base64 import b64encode
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .volume_sources import HostPathSource, IxVolumeSource
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from volume_sources import HostPathSource, IxVolumeSource
|
||||
|
||||
|
||||
class Functions:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
|
||||
def _bcrypt_hash(self, password):
|
||||
hashed = bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt()).decode("utf-8")
|
||||
return hashed
|
||||
|
||||
def _htpasswd(self, username, password):
|
||||
hashed = self._bcrypt_hash(password)
|
||||
return username + ":" + hashed
|
||||
|
||||
def _secure_string(self, length):
|
||||
return secrets.token_urlsafe(length)[:length]
|
||||
|
||||
def _basic_auth(self, username, password):
|
||||
return b64encode(f"{username}:{password}".encode("utf-8")).decode("utf-8")
|
||||
|
||||
def _basic_auth_header(self, username, password):
|
||||
return f"Basic {self._basic_auth(username, password)}"
|
||||
|
||||
def _fail(self, message):
|
||||
raise RenderError(message)
|
||||
|
||||
def _camel_case(self, string):
|
||||
return string.title()
|
||||
|
||||
def _auto_cast(self, value):
|
||||
lower_str_value = str(value).lower()
|
||||
if lower_str_value in ["true", "false"]:
|
||||
return lower_str_value == "true"
|
||||
|
||||
try:
|
||||
float_value = float(value)
|
||||
if float_value.is_integer():
|
||||
return int(float_value)
|
||||
else:
|
||||
return float(value)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
return value
|
||||
|
||||
def _match_regex(self, value, regex):
|
||||
if not re.match(regex, value):
|
||||
return False
|
||||
return True
|
||||
|
||||
def _must_match_regex(self, value, regex):
|
||||
if not self._match_regex(value, regex):
|
||||
raise RenderError(f"Expected [{value}] to match [{regex}]")
|
||||
return value
|
||||
|
||||
def _is_boolean(self, string):
|
||||
return string.lower() in ["true", "false"]
|
||||
|
||||
def _is_number(self, string):
|
||||
try:
|
||||
float(string)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
def _copy_dict(self, dict):
|
||||
return copy.deepcopy(dict)
|
||||
|
||||
def _merge_dicts(self, *dicts):
|
||||
merged_dict = {}
|
||||
for dictionary in dicts:
|
||||
merged_dict.update(dictionary)
|
||||
return merged_dict
|
||||
|
||||
def _disallow_chars(self, string: str, chars: list[str], key: str):
|
||||
for char in chars:
|
||||
if char in string:
|
||||
raise RenderError(f"Disallowed character [{char}] in [{key}]")
|
||||
return string
|
||||
|
||||
def _or_default(self, value, default):
|
||||
if not value:
|
||||
return default
|
||||
return value
|
||||
|
||||
def _require_unique(self, values, key, split_char=""):
|
||||
new_values = []
|
||||
for value in values:
|
||||
new_values.append(value.split(split_char)[0] if split_char else value)
|
||||
|
||||
if len(new_values) != len(set(new_values)):
|
||||
raise RenderError(f"Expected values in [{key}] to be unique, but got [{', '.join(values)}]")
|
||||
|
||||
def _require_no_reserved(self, values, key, reserved, split_char="", starts_with=False):
|
||||
new_values = []
|
||||
for value in values:
|
||||
new_values.append(value.split(split_char)[0] if split_char else value)
|
||||
|
||||
if starts_with:
|
||||
for arg in new_values:
|
||||
for reserved_value in reserved:
|
||||
if arg.startswith(reserved_value):
|
||||
raise RenderError(f"Value [{reserved_value}] is reserved and cannot be set in [{key}]")
|
||||
return
|
||||
|
||||
for reserved_value in reserved:
|
||||
if reserved_value in new_values:
|
||||
raise RenderError(f"Value [{reserved_value}] is reserved and cannot be set in [{key}]")
|
||||
|
||||
def _url_encode(self, string):
|
||||
return urllib.parse.quote_plus(string)
|
||||
|
||||
def _temp_config(self, name):
|
||||
if not name:
|
||||
raise RenderError("Expected [name] to be set when calling [temp_config].")
|
||||
return {"type": "temporary", "volume_config": {"volume_name": name}}
|
||||
|
||||
def _get_host_path(self, storage):
|
||||
source_type = storage.get("type", "")
|
||||
if not source_type:
|
||||
raise RenderError("Expected [type] to be set for volume mounts.")
|
||||
|
||||
match source_type:
|
||||
case "host_path":
|
||||
mount_config = storage.get("host_path_config")
|
||||
if mount_config is None:
|
||||
raise RenderError("Expected [host_path_config] to be set for [host_path] type.")
|
||||
host_source = HostPathSource(self._render_instance, mount_config).get()
|
||||
return host_source
|
||||
case "ix_volume":
|
||||
mount_config = storage.get("ix_volume_config")
|
||||
if mount_config is None:
|
||||
raise RenderError("Expected [ix_volume_config] to be set for [ix_volume] type.")
|
||||
ix_source = IxVolumeSource(self._render_instance, mount_config).get()
|
||||
return ix_source
|
||||
case _:
|
||||
raise RenderError(f"Storage type [{source_type}] does not support host path.")
|
||||
|
||||
def func_map(self):
|
||||
return {
|
||||
"auto_cast": self._auto_cast,
|
||||
"basic_auth_header": self._basic_auth_header,
|
||||
"basic_auth": self._basic_auth,
|
||||
"bcrypt_hash": self._bcrypt_hash,
|
||||
"camel_case": self._camel_case,
|
||||
"copy_dict": self._copy_dict,
|
||||
"fail": self._fail,
|
||||
"htpasswd": self._htpasswd,
|
||||
"is_boolean": self._is_boolean,
|
||||
"is_number": self._is_number,
|
||||
"match_regex": self._match_regex,
|
||||
"merge_dicts": self._merge_dicts,
|
||||
"must_match_regex": self._must_match_regex,
|
||||
"secure_string": self._secure_string,
|
||||
"disallow_chars": self._disallow_chars,
|
||||
"get_host_path": self._get_host_path,
|
||||
"or_default": self._or_default,
|
||||
"temp_config": self._temp_config,
|
||||
"require_unique": self._require_unique,
|
||||
"require_no_reserved": self._require_no_reserved,
|
||||
"url_encode": self._url_encode,
|
||||
}
|
||||
228
library/2.1.38/healthcheck.py
Normal file
@@ -0,0 +1,228 @@
|
||||
import json
|
||||
from typing import Any, TYPE_CHECKING
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from render import Render
|
||||
|
||||
try:
|
||||
from .error import RenderError
|
||||
from .formatter import escape_dollar
|
||||
from .validations import valid_http_path_or_raise
|
||||
except ImportError:
|
||||
from error import RenderError
|
||||
from formatter import escape_dollar
|
||||
from validations import valid_http_path_or_raise
|
||||
|
||||
|
||||
class Healthcheck:
|
||||
def __init__(self, render_instance: "Render"):
|
||||
self._render_instance = render_instance
|
||||
self._test: str | list[str] = ""
|
||||
self._interval_sec: int = 30
|
||||
self._timeout_sec: int = 5
|
||||
self._retries: int = 5
|
||||
self._start_period_sec: int = 15
|
||||
self._start_interval_sec: int = 2
|
||||
self._disabled: bool = False
|
||||
self._use_built_in: bool = False
|
||||
|
||||
def _get_test(self):
|
||||
if isinstance(self._test, str):
|
||||
return escape_dollar(self._test)
|
||||
|
||||
return [escape_dollar(t) for t in self._test]
|
||||
|
||||
def disable(self):
|
||||
self._disabled = True
|
||||
|
||||
def use_built_in(self):
|
||||
self._use_built_in = True
|
||||
|
||||
def set_custom_test(self, test: str | list[str]):
|
||||
if self._disabled:
|
||||
raise RenderError("Cannot set custom test when healthcheck is disabled")
|
||||
self._test = test
|
||||
|
||||
def set_test(self, variant: str, config: dict | None = None):
|
||||
config = config or {}
|
||||
self.set_custom_test(test_mapping(variant, config))
|
||||
|
||||
def set_interval(self, interval: int):
|
||||
self._interval_sec = interval
|
||||
|
||||
def set_timeout(self, timeout: int):
|
||||
self._timeout_sec = timeout
|
||||
|
||||
def set_retries(self, retries: int):
|
||||
self._retries = retries
|
||||
|
||||
def set_start_period(self, start_period: int):
|
||||
self._start_period_sec = start_period
|
||||
|
||||
def set_start_interval(self, start_interval: int):
|
||||
self._start_interval_sec = start_interval
|
||||
|
||||
def has_healthcheck(self):
|
||||
return not self._use_built_in
|
||||
|
||||
def render(self):
|
||||
if self._use_built_in:
|
||||
return RenderError("Should not be called when built in healthcheck is used")
|
||||
|
||||
if self._disabled:
|
||||
return {"disable": True}
|
||||
|
||||
if not self._test:
|
||||
raise RenderError("Healthcheck test is not set")
|
||||
|
||||
return {
|
||||
"test": self._get_test(),
|
||||
"retries": self._retries,
|
||||
"interval": f"{self._interval_sec}s",
|
||||
"timeout": f"{self._timeout_sec}s",
|
||||
"start_period": f"{self._start_period_sec}s",
|
||||
"start_interval": f"{self._start_interval_sec}s",
|
||||
}
|
||||
|
||||
|
||||
def test_mapping(variant: str, config: dict | None = None) -> str:
|
||||
config = config or {}
|
||||
tests = {
|
||||
"curl": curl_test,
|
||||
"wget": wget_test,
|
||||
"http": http_test,
|
||||
"netcat": netcat_test,
|
||||
"tcp": tcp_test,
|
||||
"redis": redis_test,
|
||||
"postgres": postgres_test,
|
||||
"mariadb": mariadb_test,
|
||||
"mongodb": mongodb_test,
|
||||
}
|
||||
|
||||
if variant not in tests:
|
||||
raise RenderError(f"Test variant [{variant}] is not valid. Valid options are: [{', '.join(tests.keys())}]")
|
||||
|
||||
return tests[variant](config)
|
||||
|
||||
|
||||
def get_key(config: dict, key: str, default: Any, required: bool):
|
||||
if key not in config:
|
||||
if not required:
|
||||
return default
|
||||
raise RenderError(f"Expected [{key}] to be set")
|
||||
return config[key]
|
||||
|
||||
|
||||
def curl_test(config: dict) -> str:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", None, True)
|
||||
path = valid_http_path_or_raise(get_key(config, "path", "/", False))
|
||||
scheme = get_key(config, "scheme", "http", False)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
headers = get_key(config, "headers", [], False)
|
||||
method = get_key(config, "method", "GET", False)
|
||||
data = get_key(config, "data", None, False)
|
||||
|
||||
opts = []
|
||||
if scheme == "https":
|
||||
opts.append("--insecure")
|
||||
|
||||
for header in headers:
|
||||
if not header[0] or not header[1]:
|
||||
raise RenderError("Expected [header] to be a list of two items for curl test")
|
||||
opts.append(f'--header "{header[0]}: {header[1]}"')
|
||||
if data is not None:
|
||||
opts.append(f"--data '{json.dumps(data)}'")
|
||||
|
||||
cmd = f"curl --request {method} --silent --output /dev/null --show-error --fail"
|
||||
if opts:
|
||||
cmd += f" {' '.join(opts)}"
|
||||
cmd += f" {scheme}://{host}:{port}{path}"
|
||||
return cmd
|
||||
|
||||
|
||||
def wget_test(config: dict) -> str:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", None, True)
|
||||
path = valid_http_path_or_raise(get_key(config, "path", "/", False))
|
||||
scheme = get_key(config, "scheme", "http", False)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
headers = get_key(config, "headers", [], False)
|
||||
spider = get_key(config, "spider", True, False)
|
||||
|
||||
opts = []
|
||||
if scheme == "https":
|
||||
opts.append("--no-check-certificate")
|
||||
|
||||
for header in headers:
|
||||
if not header[0] or not header[1]:
|
||||
raise RenderError("Expected [header] to be a list of two items for wget test")
|
||||
opts.append(f'--header "{header[0]}: {header[1]}"')
|
||||
|
||||
cmd = f"wget --quiet {'--spider' if spider else '-O /dev/null'}"
|
||||
|
||||
if opts:
|
||||
cmd += f" {' '.join(opts)}"
|
||||
cmd += f" {scheme}://{host}:{port}{path}"
|
||||
return cmd
|
||||
|
||||
|
||||
def http_test(config: dict) -> str:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", None, True)
|
||||
path = valid_http_path_or_raise(get_key(config, "path", "/", False))
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
|
||||
return f"""/bin/bash -c 'exec {{hc_fd}}<>/dev/tcp/{host}/{port} && echo -e "GET {path} HTTP/1.1\\r\\nHost: {host}\\r\\nConnection: close\\r\\n\\r\\n" >&${{hc_fd}} && cat <&${{hc_fd}} | grep "HTTP" | grep -q "200"'""" # noqa
|
||||
|
||||
|
||||
def netcat_test(config: dict) -> str:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", None, True)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
udp_mode = get_key(config, "udp", False, False)
|
||||
cmd = ["nc", "-z", "-w", "1"]
|
||||
if udp_mode:
|
||||
cmd.append("-u")
|
||||
cmd.extend([host, str(port)])
|
||||
return " ".join(cmd)
|
||||
|
||||
|
||||
def tcp_test(config: dict) -> str:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", None, True)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
|
||||
return f"timeout 1 bash -c 'cat < /dev/null > /dev/tcp/{host}/{port}'"
|
||||
|
||||
|
||||
def redis_test(config: dict) -> str:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", 6379, False)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
|
||||
return f"redis-cli -h {host} -p {port} -a $REDIS_PASSWORD ping | grep -q PONG"
|
||||
|
||||
|
||||
def postgres_test(config: dict) -> str:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", 5432, False)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
|
||||
return f"pg_isready -h {host} -p {port} -U $POSTGRES_USER -d $POSTGRES_DB"
|
||||
|
||||
|
||||
def mariadb_test(config: dict) -> str:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", 3306, False)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
|
||||
return f"mariadb-admin --user=root --host={host} --port={port} --password=$MARIADB_ROOT_PASSWORD ping"
|
||||
|
||||
|
||||
def mongodb_test(config: dict) -> str:
|
||||
config = config or {}
|
||||
port = get_key(config, "port", 27017, False)
|
||||
host = get_key(config, "host", "127.0.0.1", False)
|
||||
|
||||
return f"mongosh --host {host} --port {port} $MONGO_INITDB_DATABASE --eval 'db.adminCommand(\"ping\")' --quiet"
|
||||
37
library/2.1.38/labels.py
Normal file
@@ -0,0 +1,37 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from render import Render

try:
    from .error import RenderError
    from .formatter import escape_dollar
except ImportError:
    from error import RenderError
    from formatter import escape_dollar


class Labels:
    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self._labels: dict[str, str] = {}

    def add_label(self, key: str, value: str):
        if not key:
            raise RenderError("Labels must have a key")

        if key.startswith("com.docker.compose"):
            raise RenderError(f"Label [{key}] cannot start with [com.docker.compose] as it is reserved")

        if key in self._labels.keys():
            raise RenderError(f"Label [{key}] already added")

        self._labels[key] = escape_dollar(str(value))

    def has_labels(self) -> bool:
        return bool(self._labels)

    def render(self) -> dict[str, str]:
        if not self.has_labels():
            return {}
        return {label: value for label, value in sorted(self._labels.items())}
129
library/2.1.38/notes.py
Normal file
@@ -0,0 +1,129 @@
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from render import Render


SHORT_LIVED = "short-lived"


class Notes:
    def __init__(self, render_instance: "Render"):
        self._render_instance = render_instance
        self._app_name: str = ""
        self._app_train: str = ""
        self._warnings: list[str] = []
        self._deprecations: list[str] = []
        self._security: dict[str, list[str]] = {}
        self._header: str = ""
        self._body: str = ""
        self._footer: str = ""

        self._auto_set_app_name()
        self._auto_set_app_train()
        self._auto_set_header()
        self._auto_set_footer()

    def _is_enterprise_train(self):
        if self._app_train == "enterprise":
            return True

    def _auto_set_app_name(self):
        app_name = self._render_instance.values.get("ix_context", {}).get("app_metadata", {}).get("title", "")
        self._app_name = app_name or "<app_name>"

    def _auto_set_app_train(self):
        app_train = self._render_instance.values.get("ix_context", {}).get("app_metadata", {}).get("train", "")
        self._app_train = app_train or "<app_train>"

    def _auto_set_header(self):
        self._header = f"# {self._app_name}\n\n"

    def _auto_set_footer(self):
        url = "https://github.com/truenas/apps"
        if self._is_enterprise_train():
            url = "https://ixsystems.atlassian.net"
        footer = "## Bug Reports and Feature Requests\n\n"
        footer += "If you find a bug in this app or have an idea for a new feature, please file an issue at\n"
        footer += f"{url}\n\n"
        self._footer = footer

    def add_warning(self, warning: str):
        self._warnings.append(warning)

    def _prepend_warning(self, warning: str):
        self._warnings.insert(0, warning)

    def add_deprecation(self, deprecation: str):
        self._deprecations.append(deprecation)

    def set_body(self, body: str):
        self._body = body

    def scan_containers(self):
        for name, c in self._render_instance._containers.items():
            if self._security.get(name) is None:
                self._security[name] = []

            if c.restart._policy == "on-failure":
                self._security[name].append(SHORT_LIVED)

            if c._privileged:
                self._security[name].append("Is running with privileged mode enabled")

            run_as = c._user.split(":") if c._user else [-1, -1]
            if run_as[0] in ["0", -1]:
                self._security[name].append(f"Is running as {'root' if run_as[0] == '0' else 'unknown'} user")
            if run_as[1] in ["0", -1]:
                self._security[name].append(f"Is running as {'root' if run_as[1] == '0' else 'unknown'} group")

            if c._ipc_mode == "host":
                self._security[name].append("Is running with host IPC namespace")
            if c._cgroup == "host":
                self._security[name].append("Is running with host cgroup namespace")
            if "no-new-privileges=true" not in c._security_opt.render():
                self._security[name].append("Is running without [no-new-privileges] security option")
            if c._tty:
                self._prepend_warning(
                    f"Container [{name}] is running with a TTY, "
                    "Logs will not appear correctly in the UI due to an [upstream bug]"
                    "(https://github.com/docker/docker-py/issues/1394)"
                )

        self._security = {k: v for k, v in self._security.items() if v}

    def render(self):
        self.scan_containers()

        result = self._header

        if self._warnings:
            result += "## Warnings\n\n"
            for warning in self._warnings:
                result += f"- {warning}\n"
            result += "\n"

        if self._deprecations:
            result += "## Deprecations\n\n"
            for deprecation in self._deprecations:
                result += f"- {deprecation}\n"
            result += "\n"

        if self._security:
            result += "## Security\n\n"
            for c_name, security in self._security.items():
                # Skip containers whose only note is the short-lived marker
                if SHORT_LIVED in security and len(security) == 1:
                    continue
                result += f"### Container: [{c_name}]"
                if SHORT_LIVED in security:
                    result += "\n\n**This container is short-lived.**"
                result += "\n\n"
                for s in [s for s in security if s != "short-lived"]:
                    result += f"- {s}\n"
                result += "\n"

        if self._body:
            result += self._body.strip() + "\n\n"

        result += self._footer

        return result
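As a usage illustration (not part of the diff): the sketch below renders notes with a stubbed Render object that has no containers, so only the header, warnings, body, and footer appear. The import path and _StubRender are assumptions for the example only.

# Illustrative sketch only -- not part of this diff.
from notes import Notes  # assumption: flat-module layout


class _StubRender:  # hypothetical stand-in for the real Render instance
    values = {"ix_context": {"app_metadata": {"title": "Chatwoot", "train": "community"}}}
    _containers: dict = {}  # no containers, so scan_containers() records no security notes


notes = Notes(_StubRender())
notes.add_warning("Back up the app datasets before major version upgrades.")
notes.set_body("## Getting Started\n\nOpen the web UI and create the first administrator account.")
print(notes.render())
# Produces a markdown document: "# Chatwoot", a Warnings list, the body, then the bug-report footer.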
Some files were not shown because too many files have changed in this diff.