# Copyright (c) 2023 Lincoln D. Stein
"""FastAPI route for model configuration records."""

from hashlib import sha1
from random import randbytes
from typing import Any, Dict, List, Optional, Set

from fastapi import Body, Path, Query, Response
from fastapi.routing import APIRouter
from pydantic import BaseModel, ConfigDict
from starlette.exceptions import HTTPException
from typing_extensions import Annotated

from invokeai.app.services.model_install import ModelInstallJob, ModelSource
from invokeai.app.services.model_records import (
    DuplicateModelException,
    InvalidModelException,
    ModelRecordOrderBy,
    ModelSummary,
    UnknownModelException,
)
from invokeai.app.services.shared.pagination import PaginatedResults
from invokeai.backend.model_manager.config import (
    AnyModelConfig,
    BaseModelType,
    ModelFormat,
    ModelType,
)
from invokeai.backend.model_manager.metadata import AnyModelRepoMetadata

from ..dependencies import ApiDependencies

model_records_router = APIRouter(prefix="/v1/model/record", tags=["model_manager_v2_unstable"])


class ModelsList(BaseModel):
    """Return list of configs."""

    models: List[AnyModelConfig]

    model_config = ConfigDict(use_enum_values=True)


class ModelTagSet(BaseModel):
    """Return tags for a set of models."""

    key: str
    name: str
    author: str
    tags: Set[str]


@model_records_router.get(
    "/",
    operation_id="list_model_records",
)
async def list_model_records(
    base_models: Optional[List[BaseModelType]] = Query(default=None, description="Base models to include"),
    model_type: Optional[ModelType] = Query(default=None, description="The type of model to get"),
    model_name: Optional[str] = Query(default=None, description="Exact match on the name of the model"),
    model_format: Optional[ModelFormat] = Query(
        default=None, description="Exact match on the format of the model (e.g. 'diffusers')"
    ),
) -> ModelsList:
    """Get a list of models."""
    record_store = ApiDependencies.invoker.services.model_records
    found_models: list[AnyModelConfig] = []
    if base_models:
        for base_model in base_models:
            found_models.extend(
                record_store.search_by_attr(
                    base_model=base_model, model_type=model_type, model_name=model_name, model_format=model_format
                )
            )
    else:
        found_models.extend(
            record_store.search_by_attr(model_type=model_type, model_name=model_name, model_format=model_format)
        )
    return ModelsList(models=found_models)
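
# A hedged example of calling the list route above. The "/api" mount point and
# the enum values ("sd-1", "sdxl", "main") are assumptions for illustration;
# repeated `base_models` parameters are combined by the loop in the handler:
#
#   GET /api/v1/model/record/?base_models=sd-1&base_models=sdxl&model_type=main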


@model_records_router.get(
    "/i/{key}",
    operation_id="get_model_record",
    responses={
        200: {"description": "Success"},
        400: {"description": "Bad request"},
        404: {"description": "The model could not be found"},
    },
)
async def get_model_record(
    key: str = Path(description="Key of the model record to fetch."),
) -> AnyModelConfig:
    """Get a model record"""
    record_store = ApiDependencies.invoker.services.model_records
    try:
        return record_store.get_model(key)
    except UnknownModelException as e:
        raise HTTPException(status_code=404, detail=str(e))


@model_records_router.get("/meta", operation_id="list_model_summary")
async def list_model_summary(
    page: int = Query(default=0, description="The page to get"),
    per_page: int = Query(default=10, description="The number of models per page"),
    order_by: ModelRecordOrderBy = Query(default=ModelRecordOrderBy.Default, description="The attribute to order by"),
) -> PaginatedResults[ModelSummary]:
    """Gets a page of model summary data."""
    return ApiDependencies.invoker.services.model_records.list_models(page=page, per_page=per_page, order_by=order_by)


@model_records_router.get(
    "/meta/i/{key}",
    operation_id="get_model_metadata",
    responses={
        200: {"description": "Success"},
        400: {"description": "Bad request"},
        404: {"description": "No metadata available"},
    },
)
async def get_model_metadata(
    key: str = Path(description="Key of the model repo metadata to fetch."),
) -> Optional[AnyModelRepoMetadata]:
    """Get a model metadata object."""
    record_store = ApiDependencies.invoker.services.model_records
    result = record_store.get_metadata(key)
    if not result:
        raise HTTPException(status_code=404, detail="No metadata for a model with this key")
    return result


@model_records_router.get(
    "/tags",
    operation_id="list_tags",
)
async def list_tags() -> Set[str]:
    """Get a unique set of all the model tags."""
    record_store = ApiDependencies.invoker.services.model_records
    return record_store.list_tags()


@model_records_router.get(
    "/tags/search",
    operation_id="search_by_metadata_tags",
)
async def search_by_metadata_tags(
    tags: Set[str] = Query(default=None, description="Tags to search for"),
) -> ModelsList:
    """Get a list of models whose metadata matches the given tags."""
    record_store = ApiDependencies.invoker.services.model_records
    results = record_store.search_by_metadata_tag(tags)
    return ModelsList(models=results)
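
# A hedged example of calling the tag-search route above (the "/api" mount point
# and the tag values are assumptions; repeat the `tags` parameter once per tag):
#
#   GET /api/v1/model/record/tags/search?tags=anime&tags=character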


@model_records_router.patch(
    "/i/{key}",
    operation_id="update_model_record",
    responses={
        200: {"description": "The model was updated successfully"},
        400: {"description": "Bad request"},
        404: {"description": "The model could not be found"},
        409: {"description": "There is already a model corresponding to the new name"},
    },
    status_code=200,
    response_model=AnyModelConfig,
)
async def update_model_record(
    key: Annotated[str, Path(description="Unique key of model")],
    info: Annotated[AnyModelConfig, Body(description="Model config", discriminator="type")],
) -> AnyModelConfig:
    """Update model contents with a new config. If the model name or base fields are changed, then the model is renamed."""
    logger = ApiDependencies.invoker.services.logger
    record_store = ApiDependencies.invoker.services.model_records
    try:
        model_response = record_store.update_model(key, config=info)
        logger.info(f"Updated model: {key}")
    except UnknownModelException as e:
        raise HTTPException(status_code=404, detail=str(e))
    except ValueError as e:
        logger.error(str(e))
        raise HTTPException(status_code=409, detail=str(e))
    return model_response
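
# A minimal client-side sketch of the update flow for the route above: fetch the
# existing config, change a field, and PATCH it back. The "/api" mount point,
# host/port, and the placeholder key are assumptions for illustration only.
#
#   import requests
#
#   base = "http://localhost:9090/api/v1/model/record"
#   cfg = requests.get(f"{base}/i/some-model-key").json()
#   cfg["name"] = "my-renamed-model"
#   requests.patch(f"{base}/i/some-model-key", json=cfg)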


@model_records_router.delete(
    "/i/{key}",
    operation_id="del_model_record",
    responses={
        204: {"description": "Model deleted successfully"},
        404: {"description": "Model not found"},
    },
    status_code=204,
)
async def del_model_record(
    key: str = Path(description="Unique key of model to remove from model registry."),
) -> Response:
    """
    Delete model record from database.

    The configuration record will be removed. The corresponding weights files will be
    deleted as well if they reside within the InvokeAI "models" directory.
    """
    logger = ApiDependencies.invoker.services.logger

    try:
        installer = ApiDependencies.invoker.services.model_install
        installer.delete(key)
        logger.info(f"Deleted model: {key}")
        return Response(status_code=204)
    except UnknownModelException as e:
        logger.error(str(e))
        raise HTTPException(status_code=404, detail=str(e))


@model_records_router.post(
    "/i/",
    operation_id="add_model_record",
    responses={
        201: {"description": "The model added successfully"},
        409: {"description": "There is already a model corresponding to this path or repo_id"},
        415: {"description": "Unrecognized file/folder format"},
    },
    status_code=201,
)
async def add_model_record(
    config: Annotated[AnyModelConfig, Body(description="Model config", discriminator="type")],
) -> AnyModelConfig:
    """Add a model using the configuration information appropriate for its type."""
    logger = ApiDependencies.invoker.services.logger
    record_store = ApiDependencies.invoker.services.model_records
    if config.key == "<NOKEY>":
        config.key = sha1(randbytes(100)).hexdigest()
        logger.info(f"Created model {config.key} for {config.name}")
    try:
        record_store.add_model(config.key, config)
    except DuplicateModelException as e:
        logger.error(str(e))
        raise HTTPException(status_code=409, detail=str(e))
    except InvalidModelException as e:
        logger.error(str(e))
        raise HTTPException(status_code=415)

    # now fetch it out
    return record_store.get_model(config.key)
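
# A hedged illustration of the "<NOKEY>" convention used by the route above:
# posting a config whose key is the sentinel value asks the server to generate a
# random key for it. The field names and values below are illustrative only; the
# exact schema is determined by the AnyModelConfig variant for the model's type.
#
#   config = {
#       "key": "<NOKEY>",
#       "type": "main",
#       "format": "checkpoint",
#       "base": "sd-1",
#       "name": "my-model",
#       "path": "/path/to/model.safetensors",
#   }
#   requests.post("http://localhost:9090/api/v1/model/record/i/", json=config)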


@model_records_router.post(
    "/import",
    operation_id="import_model_record",
    responses={
        201: {"description": "The model imported successfully"},
        415: {"description": "Unrecognized file/folder format"},
        424: {"description": "The model appeared to import successfully, but could not be found in the model manager"},
        409: {"description": "There is already a model corresponding to this path or repo_id"},
    },
    status_code=201,
)
async def import_model(
    source: ModelSource,
    config: Optional[Dict[str, Any]] = Body(
        description="Dict of fields that override auto-probed values in the model config record, such as name, description and prediction_type",
        default=None,
    ),
) -> ModelInstallJob:
    """Add a model using its local path, repo_id, or remote URL.

    Models will be downloaded, probed, configured and installed in a
    series of background threads. The return object has a `status` attribute
    that can be used to monitor progress.

    The source object is a discriminated Union of LocalModelSource,
    HFModelSource and URLModelSource. Set the "type" field to the
    appropriate value:

    * To install a local path using LocalModelSource, pass a source of form:
      `{
        "type": "local",
        "path": "/path/to/model",
        "inplace": false
      }`
      The "inplace" flag, if true, will register the model in place in its
      current filesystem location. Otherwise, the model will be copied
      into the InvokeAI models directory.

    * To install a HuggingFace repo_id using HFModelSource, pass a source of form:
      `{
        "type": "hf",
        "repo_id": "stabilityai/stable-diffusion-2.0",
        "variant": "fp16",
        "subfolder": "vae",
        "access_token": "f5820a918aaf01"
      }`
      The `variant`, `subfolder` and `access_token` fields are optional.

    * To install a remote model using an arbitrary URL, pass:
      `{
        "type": "url",
        "url": "http://www.civitai.com/models/123456",
        "access_token": "f5820a918aaf01"
      }`
      The `access_token` field is optional.

    The model's configuration record will be probed and filled in
    automatically. To override the default guesses, pass `config`
    with a Dict containing the attributes you wish to override.

    Installation occurs in the background. Either use list_model_install_jobs()
    to poll for completion, or listen on the event bus for the following events:

      "model_install_running"
      "model_install_completed"
      "model_install_error"

    On successful completion, the event's payload will contain the field "key"
    containing the installed ID of the model. On an error, the event's payload
    will contain the fields "error_type" and "error" describing the nature of the
    error and its traceback, respectively.
    """
    logger = ApiDependencies.invoker.services.logger

    try:
        installer = ApiDependencies.invoker.services.model_install
        result: ModelInstallJob = installer.import_model(
            source=source,
            config=config,
        )
        logger.info(f"Started installation of {source}")
    except UnknownModelException as e:
        logger.error(str(e))
        raise HTTPException(status_code=424, detail=str(e))
    except InvalidModelException as e:
        logger.error(str(e))
        raise HTTPException(status_code=415)
    except ValueError as e:
        logger.error(str(e))
        raise HTTPException(status_code=409, detail=str(e))
    return result
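
# A minimal, hedged sketch of the install flow documented above: submit an
# HFModelSource and poll list_model_install_jobs() until the job reaches a
# terminal state. The "/api" mount point, host/port, and the literal status
# strings are assumptions for illustration; the authoritative values come from
# ModelInstallJob and the event payloads described in the docstring.
#
#   import time
#
#   import requests
#
#   base = "http://localhost:9090/api/v1/model/record"
#   source = {"type": "hf", "repo_id": "stabilityai/stable-diffusion-2.0", "variant": "fp16"}
#   job = requests.post(f"{base}/import", json={"source": source}).json()
#   while job["status"] not in ("completed", "error", "cancelled"):
#       time.sleep(2)
#       job = next(j for j in requests.get(f"{base}/import").json() if j["id"] == job["id"])
#   print(job["status"])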


@model_records_router.get(
    "/import",
    operation_id="list_model_install_jobs",
)
async def list_model_install_jobs() -> List[ModelInstallJob]:
    """Return list of model install jobs."""
    jobs: List[ModelInstallJob] = ApiDependencies.invoker.services.model_install.list_jobs()
    return jobs


@model_records_router.get(
    "/import/{id}",
    operation_id="get_model_install_job",
    responses={
        200: {"description": "Success"},
        404: {"description": "No such job"},
    },
)
async def get_model_install_job(id: int = Path(description="Model install id")) -> ModelInstallJob:
    """Return the model install job corresponding to the given id."""
    try:
        return ApiDependencies.invoker.services.model_install.get_job_by_id(id)
    except ValueError as e:
        raise HTTPException(status_code=404, detail=str(e))


@model_records_router.delete(
    "/import/{id}",
    operation_id="cancel_model_install_job",
    responses={
        201: {"description": "The job was cancelled successfully"},
        415: {"description": "No such job"},
    },
    status_code=201,
)
async def cancel_model_install_job(id: int = Path(description="Model install job ID")) -> None:
    """Cancel the model install job(s) corresponding to the given job ID."""
    installer = ApiDependencies.invoker.services.model_install
    try:
        job = installer.get_job_by_id(id)
    except ValueError as e:
        raise HTTPException(status_code=415, detail=str(e))
    installer.cancel_job(job)


@model_records_router.patch(
    "/import",
    operation_id="prune_model_install_jobs",
    responses={
        204: {"description": "All completed and errored jobs have been pruned"},
        400: {"description": "Bad request"},
    },
)
async def prune_model_install_jobs() -> Response:
    """Prune all completed and errored jobs from the install job list."""
    ApiDependencies.invoker.services.model_install.prune_jobs()
    return Response(status_code=204)


@model_records_router.patch(
    "/sync",
    operation_id="sync_models_to_config",
    responses={
        204: {"description": "Model config record database resynced with files on disk"},
        400: {"description": "Bad request"},
    },
)
async def sync_models_to_config() -> Response:
    """
    Traverse the models and autoimport directories.

    Model files without a corresponding record in the database are added.
    Orphan records without a models file are deleted.
    """
    ApiDependencies.invoker.services.model_install.sync_to_config()
    return Response(status_code=204)