(enh) Docker runtime builder with BuildKit support and enhanced caching (#4009)

This commit is contained in:
tobitege
2024-09-26 08:50:53 +02:00
committed by GitHub
parent ef0b08a46e
commit 2cc1c3ef42
9 changed files with 591 additions and 89 deletions

View File

@@ -14,20 +14,38 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# setting this to "true" frees about 6 GB,
# but it might remove tools that are actually needed
tool-cache: true
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: false
swap-storage: true
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
- name: Install poetry via pipx
run: pipx install poetry
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Set up environment
run: |
curl -sSL https://install.python-poetry.org | python3 -
poetry install --without evaluation,llama-index
poetry run playwright install --with-deps chromium
wget https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/1_Pooling/config.json -P /tmp/llama_index/models--BAAI--bge-small-en-v1.5/snapshots/5c38ec7c405ec4b44b94cc5a9bb96e735b38267a/1_Pooling/
cache: 'poetry'
- name: Install Python dependencies using Poetry
run: poetry install --without evaluation,llama-index
- name: Build Environment
run: make build
- name: Run tests
run: |
set -e
poetry run python openhands/core/main.py -t "do a flip" -d ./workspace/ -c DummyAgent
poetry run python3 openhands/core/main.py -t "do a flip" -d ./workspace/ -c DummyAgent
- name: Check exit code
run: |
if [ $? -ne 0 ]; then

View File

@@ -115,6 +115,23 @@ jobs:
base_image: ['nikolaik']
steps:
- uses: actions/checkout@v4
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# setting this to "true" frees about 6 GB,
# but it might remove tools that are actually needed
tool-cache: true
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: false
swap-storage: true
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
# Forked repos can't push to GHCR, so we need to download the image as an artifact
- name: Download runtime image for fork
if: github.event.pull_request.head.repo.fork
@@ -176,6 +193,23 @@ jobs:
base_image: ['nikolaik']
steps:
- uses: actions/checkout@v4
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# setting this to "true" frees about 6 GB,
# but it might remove tools that are actually needed
tool-cache: true
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: false
swap-storage: true
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
# Forked repos can't push to GHCR, so we need to download the image as an artifact
- name: Download runtime image for fork
if: github.event.pull_request.head.repo.fork
@@ -238,6 +272,23 @@ jobs:
base_image: ['nikolaik']
steps:
- uses: actions/checkout@v4
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# setting this to "true" frees about 6 GB,
# but it might remove tools that are actually needed
tool-cache: true
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: false
swap-storage: true
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
# Forked repos can't push to GHCR, so we need to download the image as an artifact
- name: Download runtime image for fork
if: github.event.pull_request.head.repo.fork

View File

@@ -89,6 +89,9 @@ jobs:
sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock
- name: Build Environment
run: make build
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
- name: Run Tests
run: poetry run pytest --forked --cov=agenthub --cov=openhands --cov-report=xml ./tests/unit
- name: Upload coverage to Codecov
@@ -107,6 +110,9 @@ jobs:
python-version: ['3.11']
steps:
- uses: actions/checkout@v4
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
- name: Install poetry via pipx
run: pipx install poetry
- name: Set up Python

View File

@@ -29,6 +29,9 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
- name: Set up Python
uses: actions/setup-python@v5
with:

View File

@@ -15,6 +15,9 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
- name: Set up Python
uses: actions/setup-python@v5
with:

View File

@@ -21,6 +21,9 @@ jobs:
steps:
- name: install git, github cli
run: apt-get install -y git gh
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
- name: Checkout Repository
uses: actions/checkout@v4
- name: Write Task File

View File

@@ -1,7 +1,12 @@
import datetime
import os
import subprocess
import sys
import time
import docker
from openhands import __version__ as oh_version
from openhands.core.logger import openhands_logger as logger
from openhands.runtime.builder.base import RuntimeBuilder
@@ -10,45 +15,139 @@ class DockerRuntimeBuilder(RuntimeBuilder):
def __init__(self, docker_client: docker.DockerClient):
self.docker_client = docker_client
def build(self, path: str, tags: list[str]) -> str:
version_info = self.docker_client.version()
server_version = version_info.get('Version', '')
if tuple(map(int, server_version.split('.'))) < (18, 9):
raise RuntimeError('Docker server version must be >= 18.09 to use BuildKit')
self.max_lines = 10
self.log_lines = [''] * self.max_lines
def build(
self,
path: str,
tags: list[str],
use_local_cache: bool = False,
extra_build_args: list[str] | None = None,
) -> str:
"""Builds a Docker image using BuildKit and handles the build logs appropriately.
Args:
path (str): The path to the Docker build context.
tags (list[str]): A list of image tags to apply to the built image.
use_local_cache (bool, optional): Whether to use and update the local build cache. Defaults to True.
extra_build_args (list[str], optional): Additional arguments to pass to the Docker build command. Defaults to None.
Returns:
str: The name of the built Docker image.
Raises:
RuntimeError: If the Docker server version is incompatible or if the build process fails.
Note:
This method uses Docker BuildKit for improved build performance and caching capabilities.
If `use_local_cache` is True, it will attempt to use and update the build cache in a local directory.
The `extra_build_args` parameter allows for passing additional Docker build arguments as needed.
"""
self.docker_client = docker.from_env()
version_info = self.docker_client.version()
server_version = version_info.get('Version', '')
if tuple(map(int, server_version.split('.'))) < (18, 9):
raise RuntimeError('Docker server version must be >= 18.09 to use BuildKit')
target_image_hash_name = tags[0]
target_image_repo, target_image_hash_tag = target_image_hash_name.split(':')
target_image_tag = tags[1].split(':')[1] if len(tags) > 1 else None
try:
build_logs = self.docker_client.api.build(
path=path,
tag=target_image_hash_name,
rm=True,
decode=True,
)
except docker.errors.BuildError as e:
logger.error(f'Sandbox image build failed: {e}')
raise RuntimeError(f'Sandbox image build failed: {e}')
# Check if the image exists and pull if necessary
self.image_exists(target_image_repo)
layers: dict[str, dict[str, str]] = {}
previous_layer_count = 0
for log in build_logs:
if 'stream' in log:
logger.info(log['stream'].strip())
elif 'error' in log:
logger.error(log['error'].strip())
elif 'status' in log:
self._output_build_progress(log, layers, previous_layer_count)
previous_layer_count = len(layers)
else:
logger.info(str(log))
buildx_cmd = [
'docker',
'buildx',
'build',
'--progress=plain',
f'--build-arg=OPENHANDS_RUNTIME_VERSION={oh_version}',
f'--build-arg=OPENHANDS_RUNTIME_BUILD_TIME={datetime.datetime.now().isoformat()}',
f'--tag={target_image_hash_name}',
'--load',
]
cache_dir = '/tmp/.buildx-cache'
if use_local_cache and self._is_cache_usable(cache_dir):
buildx_cmd.extend(
[
f'--cache-from=type=local,src={cache_dir}',
f'--cache-to=type=local,dest={cache_dir},mode=max',
]
)
if extra_build_args:
buildx_cmd.extend(extra_build_args)
buildx_cmd.append(path) # must be last!
print('================ DOCKER BUILD STARTED ================')
if sys.stdout.isatty():
sys.stdout.write('\n' * self.max_lines)
sys.stdout.flush()
try:
process = subprocess.Popen(
buildx_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
bufsize=1,
)
if process.stdout:
for line in iter(process.stdout.readline, ''):
line = line.strip()
if line:
self._output_logs(line)
return_code = process.wait()
if return_code != 0:
raise subprocess.CalledProcessError(
return_code,
process.args,
output=None,
stderr=None,
)
except subprocess.CalledProcessError as e:
logger.error(f'Image build failed:\n{e}')
logger.error(f'Command output:\n{e.output}')
raise
except subprocess.TimeoutExpired:
logger.error('Image build timed out')
raise
except FileNotFoundError as e:
logger.error(f'Python executable not found: {e}')
raise
except PermissionError as e:
logger.error(
f'Permission denied when trying to execute the build command:\n{e}'
)
raise
except Exception as e:
logger.error(f'An unexpected error occurred during the build process: {e}')
raise
logger.info(f'Image [{target_image_hash_name}] build finished.')
assert (
target_image_tag
), f'Expected target image tag [{target_image_tag}] is None'
image = self.docker_client.images.get(target_image_hash_name)
image.tag(target_image_repo, target_image_tag)
logger.info(
f'Re-tagged image [{target_image_hash_name}] with more generic tag [{target_image_tag}]'
)
if target_image_tag:
image = self.docker_client.images.get(target_image_hash_name)
image.tag(target_image_repo, target_image_tag)
logger.info(
f'Re-tagged image [{target_image_hash_name}] with more generic tag [{target_image_tag}]'
)
# Check if the image is built successfully
image = self.docker_client.images.get(target_image_hash_name)
@@ -80,13 +179,13 @@ class DockerRuntimeBuilder(RuntimeBuilder):
return False
try:
logger.info(f'Checking, if image exists locally:\n{image_name}')
logger.debug(f'Checking, if image exists locally:\n{image_name}')
self.docker_client.images.get(image_name)
logger.info('Image found locally.')
logger.debug('Image found locally.')
return True
except docker.errors.ImageNotFound:
try:
logger.info(
logger.debug(
'Image not found locally. Trying to pull it, please wait...'
)
@@ -97,10 +196,10 @@ class DockerRuntimeBuilder(RuntimeBuilder):
):
self._output_build_progress(line, layers, previous_layer_count)
previous_layer_count = len(layers)
logger.info('Image pulled')
logger.debug('Image pulled')
return True
except docker.errors.ImageNotFound:
logger.info('Could not find image locally or in registry.')
logger.debug('Could not find image locally or in registry.')
return False
except Exception as e:
msg = 'Image could not be pulled: '
@@ -109,9 +208,30 @@ class DockerRuntimeBuilder(RuntimeBuilder):
msg += 'image not found in registry.'
else:
msg += f'{ex_msg}'
logger.warning(msg)
logger.debug(msg)
return False
def _output_logs(self, new_line: str) -> None:
    """Render a rolling window of the most recent build log lines.

    Keeps only the last ``max_lines`` entries and redraws them in place so
    the console shows a scrolling view instead of an endless stream.
    '\033[F' moves the cursor up one line; '\033[2K' erases the current
    line before it is rewritten. When stdout is not a terminal, the line
    is forwarded to the debug logger instead.
    """
    if not sys.stdout.isatty():
        logger.debug(new_line)
        return
    # Slide the window: drop the oldest entry, append the newest (truncated).
    del self.log_lines[0]
    self.log_lines.append(new_line[:80])
    # Jump the cursor back to the top of the previously drawn block...
    sys.stdout.write('\033[F' * (self.max_lines))
    sys.stdout.flush()
    # ...then clear and redraw every line of the window.
    for entry in self.log_lines:
        sys.stdout.write('\033[2K' + entry + '\n')
        sys.stdout.flush()
def _output_build_progress(
self, current_line: dict, layers: dict, previous_layer_count: int
) -> None:
@@ -126,31 +246,93 @@ class DockerRuntimeBuilder(RuntimeBuilder):
if 'progress' in current_line:
layers[layer_id]['progress'] = current_line['progress']
if (
'total' in current_line['progressDetail']
and 'current' in current_line['progressDetail']
):
total = current_line['progressDetail']['total']
current = current_line['progressDetail']['current']
percentage = (current / total) * 100
else:
percentage = 0
if 'progressDetail' in current_line:
progress_detail = current_line['progressDetail']
if 'total' in progress_detail and 'current' in progress_detail:
total = progress_detail['total']
current = progress_detail['current']
percentage = min(
(current / total) * 100, 100
) # Ensure it doesn't exceed 100%
else:
percentage = (
100 if layers[layer_id]['status'] == 'Download complete' else 0
)
# refresh process bar in console if stdout is a tty
if sys.stdout.isatty():
sys.stdout.write('\033[F' * previous_layer_count)
for lid, layer_data in sorted(layers.items()):
sys.stdout.write('\033[K')
print(
f'Layer {lid}: {layer_data["progress"]} {layer_data["status"]}'
)
sys.stdout.write('\033[2K\r')
status = layer_data['status']
progress = layer_data['progress']
if status == 'Download complete':
print(f'Layer {lid}: Download complete')
elif status == 'Already exists':
print(f'Layer {lid}: Already exists')
else:
print(f'Layer {lid}: {progress} {status}')
sys.stdout.flush()
# otherwise Log only if percentage is at least 10% higher than last logged
elif percentage != 0 and percentage - layers[layer_id]['last_logged'] >= 10:
logger.info(
elif percentage != 0 and (
percentage - layers[layer_id]['last_logged'] >= 10 or percentage == 100
):
logger.debug(
f'Layer {layer_id}: {layers[layer_id]["progress"]} {layers[layer_id]["status"]}'
)
layers[layer_id]['last_logged'] = percentage
elif 'status' in current_line:
logger.info(current_line['status'])
logger.debug(current_line['status'])
def _prune_old_cache_files(self, cache_dir: str, max_age_days: int = 7) -> None:
"""
Prune cache files older than the specified number of days.
Args:
cache_dir (str): The path to the cache directory.
max_age_days (int): The maximum age of cache files in days.
"""
try:
current_time = time.time()
max_age_seconds = max_age_days * 24 * 60 * 60
for root, _, files in os.walk(cache_dir):
for file in files:
file_path = os.path.join(root, file)
try:
file_age = current_time - os.path.getmtime(file_path)
if file_age > max_age_seconds:
os.remove(file_path)
logger.debug(f'Removed old cache file: {file_path}')
except Exception as e:
logger.warning(f'Error processing cache file {file_path}: {e}')
except Exception as e:
logger.warning(f'Error during build cache pruning: {e}')
def _is_cache_usable(self, cache_dir: str) -> bool:
    """Decide whether the local build cache directory can be used.

    Ensures the directory exists (creating it when missing), verifies it
    is writable, and prunes stale entries before reporting success.

    Args:
        cache_dir (str): The path to the cache directory.

    Returns:
        bool: True if the cache directory is usable, False otherwise.
    """
    if not os.path.exists(cache_dir):
        try:
            os.makedirs(cache_dir, exist_ok=True)
        except OSError as e:
            logger.debug(f'Failed to create cache directory {cache_dir}: {e}')
            return False
        logger.debug(f'Created cache directory: {cache_dir}')
    if not os.access(cache_dir, os.W_OK):
        logger.warning(
            f'Cache directory {cache_dir} is not writable. Caches will not be used for Docker builds.'
        )
        return False
    # Cache confirmed writable; evict stale entries before handing it out.
    self._prune_old_cache_files(cache_dir)
    logger.debug(f'Cache directory {cache_dir} is usable')
    return True

View File

@@ -6,11 +6,11 @@ import subprocess
import tempfile
import docker
import toml
from dirhash import dirhash
from jinja2 import Environment, FileSystemLoader
import openhands
from openhands import __version__ as oh_version
from openhands.core.logger import openhands_logger as logger
from openhands.runtime.builder import DockerRuntimeBuilder, RuntimeBuilder
@@ -19,19 +19,6 @@ def get_runtime_image_repo():
return os.getenv('OH_RUNTIME_RUNTIME_IMAGE_REPO', 'ghcr.io/all-hands-ai/runtime')
def _get_package_version():
    """Look up the package version declared in pyproject.toml.

    Returns:
    - The version string found under [tool.poetry] in pyproject.toml
    """
    # pyproject.toml lives one level above the installed openhands package.
    package_dir = os.path.dirname(os.path.abspath(openhands.__file__))
    root_dir = os.path.dirname(package_dir)
    with open(os.path.join(root_dir, 'pyproject.toml'), 'r') as f:
        parsed = toml.load(f)
    return parsed['tool']['poetry']['version']
def _put_source_code_to_dir(temp_dir: str):
"""Builds the project source tarball directly in temp_dir and unpacks it.
The OpenHands source code ends up in the temp_dir/code directory.
@@ -46,7 +33,7 @@ def _put_source_code_to_dir(temp_dir: str):
logger.info(f'Building source distribution using project root: {project_root}')
# Fetch the correct version from pyproject.toml
package_version = _get_package_version()
package_version = oh_version
tarball_filename = f'openhands_ai-{package_version}.tar.gz'
tarball_path = os.path.join(temp_dir, tarball_filename)
@@ -188,7 +175,6 @@ def get_runtime_image_repo_and_tag(base_image: str) -> tuple[str, str]:
if ':' not in base_image:
base_image = base_image + ':latest'
[repo, tag] = base_image.split(':')
oh_version = _get_package_version()
# Hash the repo if it's too long
if len(repo) > 32:
@@ -377,7 +363,7 @@ def _build_sandbox_image(
if not image_name:
raise RuntimeError(f'Build failed for image {target_image_hash_name}')
except Exception as e:
logger.error(f'Sandbox image build failed: {e}')
logger.error(f'Sandbox image build failed: {str(e)}')
raise
return image_name

View File

@@ -1,15 +1,19 @@
import os
import tempfile
import uuid
from importlib.metadata import version
from unittest.mock import ANY, MagicMock, call, patch
import docker
import pytest
import toml
from pytest import TempPathFactory
from openhands import __version__ as oh_version
from openhands.core.logger import openhands_logger as logger
from openhands.runtime.builder.docker import DockerRuntimeBuilder
from openhands.runtime.utils.runtime_build import (
_generate_dockerfile,
_get_package_version,
_put_source_code_to_dir,
build_runtime_image,
get_runtime_image_repo,
@@ -17,7 +21,8 @@ from openhands.runtime.utils.runtime_build import (
prep_docker_build_folder,
)
OH_VERSION = f'oh_v{_get_package_version()}'
OH_VERSION = f'oh_v{oh_version}'
DEFAULT_BASE_IMAGE = 'nikolaik/python-nodejs:python3.11-nodejs22'
@pytest.fixture
@@ -25,6 +30,21 @@ def temp_dir(tmp_path_factory: TempPathFactory) -> str:
return str(tmp_path_factory.mktemp('test_runtime_build'))
@pytest.fixture
def mock_docker_client():
    """Provide a MagicMock standing in for a docker.DockerClient.

    The mocked server version (19.03) satisfies the builder's
    BuildKit minimum of 18.09.
    """
    client = MagicMock(spec=docker.DockerClient)
    client.version.return_value = {'Version': '19.03'}
    return client
@pytest.fixture
def docker_runtime_builder():
    """Build a DockerRuntimeBuilder wired to the local Docker daemon."""
    return DockerRuntimeBuilder(docker.from_env())
def _check_source_code_in_dir(temp_dir):
# assert there is a folder called 'code' in the temp_dir
code_dir = os.path.join(temp_dir, 'code')
@@ -63,7 +83,7 @@ def test_put_source_code_to_dir(temp_dir):
def test_docker_build_folder(temp_dir):
prep_docker_build_folder(
temp_dir,
base_image='nikolaik/python-nodejs:python3.11-nodejs22',
base_image=DEFAULT_BASE_IMAGE,
skip_init=False,
)
@@ -82,14 +102,14 @@ def test_docker_build_folder(temp_dir):
def test_hash_folder_same(temp_dir):
dir_hash_1 = prep_docker_build_folder(
temp_dir,
base_image='nikolaik/python-nodejs:python3.11-nodejs22',
base_image=DEFAULT_BASE_IMAGE,
skip_init=False,
)
with tempfile.TemporaryDirectory() as temp_dir_2:
dir_hash_2 = prep_docker_build_folder(
temp_dir_2,
base_image='nikolaik/python-nodejs:python3.11-nodejs22',
base_image=DEFAULT_BASE_IMAGE,
skip_init=False,
)
assert dir_hash_1 == dir_hash_2
@@ -98,14 +118,14 @@ def test_hash_folder_same(temp_dir):
def test_hash_folder_diff_init(temp_dir):
dir_hash_1 = prep_docker_build_folder(
temp_dir,
base_image='nikolaik/python-nodejs:python3.11-nodejs22',
base_image=DEFAULT_BASE_IMAGE,
skip_init=False,
)
with tempfile.TemporaryDirectory() as temp_dir_2:
dir_hash_2 = prep_docker_build_folder(
temp_dir_2,
base_image='nikolaik/python-nodejs:python3.11-nodejs22',
base_image=DEFAULT_BASE_IMAGE,
skip_init=True,
)
assert dir_hash_1 != dir_hash_2
@@ -114,7 +134,7 @@ def test_hash_folder_diff_init(temp_dir):
def test_hash_folder_diff_image(temp_dir):
dir_hash_1 = prep_docker_build_folder(
temp_dir,
base_image='nikolaik/python-nodejs:python3.11-nodejs22',
base_image=DEFAULT_BASE_IMAGE,
skip_init=False,
)
@@ -179,8 +199,7 @@ def test_get_runtime_image_repo_and_tag_eventstream():
and img_tag == f'{OH_VERSION}_image_debian_tag_11'
)
base_image = 'nikolaik/python-nodejs:python3.11-nodejs22'
img_repo, img_tag = get_runtime_image_repo_and_tag(base_image)
img_repo, img_tag = get_runtime_image_repo_and_tag(DEFAULT_BASE_IMAGE)
assert (
img_repo == f'{get_runtime_image_repo()}'
and img_tag
@@ -290,3 +309,234 @@ def test_build_runtime_image_exact_hash_not_exist(mock_build_sandbox_image, temp
target_image_tag=latest_image_tag,
)
assert image_name == f'{repo}:{from_scratch_hash}'
# ==============================
# DockerRuntimeBuilder Tests
# ==============================
def test_output_progress(docker_runtime_builder):
    """_output_logs should redraw the rolling window when stdout is a TTY."""
    with patch('sys.stdout.isatty', return_value=True), patch(
        'sys.stdout.write'
    ) as fake_write, patch('sys.stdout.flush'):
        docker_runtime_builder._output_logs('new log line')
        # Cursor jumps up over the whole 10-line window, then the newest
        # line is cleared and rewritten at the bottom.
        fake_write.assert_any_call('\033[F' * 10)
        fake_write.assert_any_call('\033[2Knew log line\n')
def test_output_build_progress(docker_runtime_builder):
    """A 'Downloading' status line should be tracked and rendered for its layer."""
    progress_line = {
        'id': 'layer1',
        'status': 'Downloading',
        'progressDetail': {'current': 50, 'total': 100},
    }
    with patch('sys.stdout.isatty', return_value=True), patch(
        'sys.stdout.write'
    ) as fake_write, patch('sys.stdout.flush'):
        tracked_layers = {}
        docker_runtime_builder._output_build_progress(progress_line, tracked_layers, 0)
        # No previously drawn layers, so the cursor does not move up at all.
        fake_write.assert_any_call('\033[F' * 0)
        fake_write.assert_any_call('\033[2K\r')
        assert tracked_layers['layer1']['status'] == 'Downloading'
        assert tracked_layers['layer1']['progress'] == ''
        # 50/100 -> 50% recorded as the last logged percentage.
        assert tracked_layers['layer1']['last_logged'] == 50.0
@pytest.fixture(scope='function')
def live_docker_image():
    """Build a small multi-stage Docker image and yield it to the test.

    The image name carries a random UUID fragment so parallel runs cannot
    collide. The ``base``/``intermediate`` tags point at the same final
    image id; every created tag is force-removed on teardown.

    Yields:
        The built docker image object.
    """
    client = docker.from_env()
    unique_id = str(uuid.uuid4())[:8]  # Use first 8 characters of a UUID
    unique_prefix = f'test_image_{unique_id}'
    dockerfile_content = f"""
# syntax=docker/dockerfile:1.4
FROM {DEFAULT_BASE_IMAGE} AS base
RUN apt-get update && apt-get install -y wget sudo apt-utils
FROM base AS intermediate
RUN mkdir -p /openhands
FROM intermediate AS final
RUN echo "Hello, OpenHands!" > /openhands/hello.txt
"""
    # BUGFIX: must be bound before the try block. Previously `all_tags` was
    # assigned only after a successful build, so a build failure made the
    # `finally` cleanup raise UnboundLocalError and mask the real error.
    all_tags = []
    with tempfile.TemporaryDirectory() as temp_dir:
        dockerfile_path = os.path.join(temp_dir, 'Dockerfile')
        with open(dockerfile_path, 'w') as f:
            f.write(dockerfile_content)
        try:
            # Build logs are not needed; only the image object is kept.
            image, _ = client.images.build(
                path=temp_dir,
                tag=f'{unique_prefix}:final',
                buildargs={'DOCKER_BUILDKIT': '1'},
                labels={'test': 'true'},
                rm=True,
                forcerm=True,
            )
            # Tag intermediary stages
            client.api.tag(image.id, unique_prefix, 'base')
            client.api.tag(image.id, unique_prefix, 'intermediate')
            all_tags = [
                f'{unique_prefix}:final',
                f'{unique_prefix}:base',
                f'{unique_prefix}:intermediate',
            ]
            print(f'\nImage ID: {image.id}')
            print(f'Image tags: {all_tags}\n')
            yield image
        finally:
            # Clean up all tagged images (empty list if the build failed).
            for tag in all_tags:
                try:
                    client.images.remove(tag, force=True)
                    print(f'Removed image: {tag}')
                except Exception as e:
                    print(f'Error removing image {tag}: {str(e)}')
def test_init(docker_runtime_builder):
    """A fresh builder exposes a real client and an empty 10-line log window."""
    builder = docker_runtime_builder
    assert isinstance(builder.docker_client, docker.DockerClient)
    assert builder.max_lines == 10
    assert builder.log_lines == [''] * builder.max_lines
def test_build_image_from_scratch(docker_runtime_builder, tmp_path):
    """End-to-end: build a trivial image via the builder and verify it exists.

    stdout is forced non-TTY so build output goes through the logger path
    rather than the rolling console display. The built image is removed in
    the ``finally`` block regardless of test outcome.
    """
    context_path = str(tmp_path)
    tags = ['test_build:latest']
    # Create a minimal Dockerfile in the context path
    with open(os.path.join(context_path, 'Dockerfile'), 'w') as f:
        f.write("""FROM php:latest
CMD ["sh", "-c", "echo 'Hello, World!'"]
""")
    built_image_name = None
    client = docker.from_env()
    try:
        with patch('sys.stdout.isatty', return_value=False):
            built_image_name = docker_runtime_builder.build(
                context_path,
                tags,
                use_local_cache=False,
            )
        assert built_image_name == f'{tags[0]}'
        # Verify the image was created
        image = client.images.get(tags[0])
        assert image is not None
    except docker.errors.ImageNotFound:
        pytest.fail('test_build_image_from_scratch: test image not found!')
    except Exception as e:
        pytest.fail(f'test_build_image_from_scratch: Build failed with error: {str(e)}')
    finally:
        # NOTE: the old `container` cleanup branch was dead code - no
        # container is ever started by this test - so it was removed.
        # Clean up the image
        if built_image_name:
            try:
                client.images.remove(built_image_name, force=True)
                logger.info(f'Removed test image: `{built_image_name}`')
            except Exception as e:
                logger.warning(
                    f'Failed to remove test image `{built_image_name}`: {str(e)}'
                )
        else:
            logger.warning('No image was built, so no image cleanup was necessary.')
def _format_size_to_gb(bytes_size):
"""Convert bytes to gigabytes with two decimal places."""
return round(bytes_size / (1024**3), 2)
def test_list_dangling_images():
    """Log every dangling image with its size in GB (or 'n/a' when unknown)."""
    client = docker.from_env()
    dangling_images = client.images.list(filters={'dangling': True})
    if not dangling_images:
        logger.info('No dangling images found')
        return
    for image in dangling_images:
        size = image.attrs.get('Size')
        if isinstance(size, int):
            size_gb = _format_size_to_gb(size)
            logger.info(f'Dangling image: {image.tags}, Size: {size_gb} GB')
        else:
            logger.info(f'Dangling image: {image.tags}, Size: n/a')
def test_build_image_from_repo(docker_runtime_builder, tmp_path):
    """Build an image whose base is pulled from a remote repo and verify it.

    BUGFIX: the test previously tagged its build result as the public
    ``alpine:latest`` and force-removed that tag in cleanup, clobbering and
    then deleting any real local alpine image. A uniquely named test tag is
    used instead.
    """
    context_path = str(tmp_path)
    tags = [f'test_build_repo_{uuid.uuid4().hex[:8]}:latest']
    # Create a minimal Dockerfile in the context path
    with open(os.path.join(context_path, 'Dockerfile'), 'w') as f:
        f.write(f"""FROM {DEFAULT_BASE_IMAGE}
CMD ["sh", "-c", "echo 'Hello, World!'"]
""")
    built_image_name = None
    client = docker.from_env()
    try:
        with patch('sys.stdout.isatty', return_value=False):
            built_image_name = docker_runtime_builder.build(
                context_path,
                tags,
                use_local_cache=False,
            )
        assert built_image_name == f'{tags[0]}'
        image = client.images.get(tags[0])
        assert image is not None
    except docker.errors.ImageNotFound:
        pytest.fail('test_build_image_from_repo: test image not found!')
    finally:
        # NOTE: the old `container` cleanup branch was dead code - no
        # container is ever started by this test - so it was removed.
        # Clean up the image
        if built_image_name:
            try:
                client.images.remove(built_image_name, force=True)
                logger.info(f'Removed test image: `{built_image_name}`')
            except Exception as e:
                logger.warning(
                    f'Failed to remove test image `{built_image_name}`: {str(e)}'
                )
        else:
            logger.warning('No image was built, so no image cleanup was necessary.')
def test_image_exists_local(docker_runtime_builder, live_docker_image):
    """image_exists must report True for an image present locally."""
    tags = live_docker_image.tags
    assert tags, 'Image has no tags'
    assert docker_runtime_builder.image_exists(tags[0])
def test_image_exists_not_found(docker_runtime_builder):
    """image_exists must report False for an unknown image reference."""
    missing_ref = 'nonexistent:image'
    assert not docker_runtime_builder.image_exists(missing_ref)