mirror of https://github.com/invoke-ai/InvokeAI.git
Compare commits
14 Commits
- 2c45697f3d
- 9a0a90e2a2
- 69f17da1a2
- 4d0a49298c
- 55f7a7737a
- adc30045a6
- fdd0e57976
- 9ba5ec4b67
- 8a17616bf4
- f56b9537cd
- a95756f3ed
- 4068e817d6
- a09d705e4c
- 540d506ec9
@@ -40,6 +40,25 @@ Follow the same steps to scan and import the missing models.

- Check the `ram` setting in `invokeai.yaml`. This setting tells Invoke how much of your system RAM can be used to cache models. Having this too high or too low can slow things down. That said, it's generally safest not to set this at all and instead let Invoke manage it.
- Check the `vram` setting in `invokeai.yaml`. This setting tells Invoke how much of your GPU VRAM can be used to cache models. Counter-intuitively, if this setting is too high, Invoke will need to do a lot of shuffling of models as it juggles the VRAM cache and the currently-loaded model. The default value of 0.25 generally works well for GPUs with less than 16GB of VRAM, and even on a 24GB card the default works well. (See the example configuration after this list.)
- Check that your generations are happening on your GPU (if you have one). InvokeAI logs what is being used for generation on startup. If your GPU isn't being used, re-install to ensure the correct versions of torch get installed.
- If you are on Windows, you may have exceeded your GPU's VRAM capacity and be using slower [shared GPU memory](#shared-gpu-memory-windows). There's a guide to opt out of this behaviour in the linked FAQ entry.
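For reference, here is a minimal sketch of how these two settings appear in `invokeai.yaml`. The values below are illustrative assumptions, not recommendations; as noted above, omitting both keys lets Invoke manage the caches itself:

```yaml
# invokeai.yaml (excerpt) - values are hypothetical examples
ram: 12.0   # GB of system RAM the model cache may use
vram: 0.25  # GB of GPU VRAM the model cache may use (0.25 is the documented default)
```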
## Shared GPU Memory (Windows)

!!! tip "Nvidia GPUs with driver 536.40"

    This only applies to Nvidia cards with driver 536.40 or later, released in June 2023.

When the GPU doesn't have enough VRAM for a task, Windows is able to allocate some of its CPU RAM to the GPU. This is much slower than VRAM, but it does allow the system to generate when it otherwise might not have enough VRAM.

When shared GPU memory is used, generation slows down dramatically - but at least it doesn't crash.

If you'd like to opt out of this behavior and instead get an error when you exceed your GPU's VRAM, follow [this guide from Nvidia](https://nvidia.custhelp.com/app/answers/detail/a_id/5490).
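If you want to check how close you are to your card's limit, here is a minimal PyTorch sketch (an illustration, not part of Invoke) that reports free versus total VRAM using the standard `torch.cuda.mem_get_info` API:

```python
import torch

if torch.cuda.is_available():
    # mem_get_info wraps CUDA's cudaMemGetInfo and returns (free_bytes, total_bytes);
    # it requires an indexed device such as "cuda:0".
    free, total = torch.cuda.mem_get_info(torch.device("cuda:0"))
    gib = 1024**3
    print(f"VRAM: {free / gib:.2f} GiB free of {total / gib:.2f} GiB")
else:
    print("No CUDA device available.")
```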
Here's how to get the python path required in the linked guide:

- Run `invoke.bat`.
- Select option 2 for developer console.
- At least one python path will be printed. Copy the path that includes your invoke installation directory (typically the first).

## Installer cannot find python (Windows)
```diff
@@ -67,7 +67,7 @@ class IPAdapterInvocation(BaseInvocation):
     )
     clip_vision_model: Literal["ViT-H", "ViT-G"] = InputField(
         description="CLIP Vision model to use. Overrides model settings. Mandatory for checkpoint models.",
-        default="auto",
+        default="ViT-H",
         ui_order=2,
     )
     weight: Union[float, List[float]] = InputField(
```
```diff
@@ -117,7 +117,7 @@ class ModelCacheBase(ABC, Generic[T]):

     @property
     @abstractmethod
-    def stats(self) -> CacheStats:
+    def stats(self) -> Optional[CacheStats]:
         """Return collected CacheStats object."""
         pass
```
```diff
@@ -269,9 +269,6 @@ class ModelCache(ModelCacheBase[AnyModel]):
         if torch.device(source_device).type == torch.device(target_device).type:
             return

-        # may raise an exception here if insufficient GPU VRAM
-        self._check_free_vram(target_device, cache_entry.size)
-
         start_model_to_time = time.time()
         snapshot_before = self._capture_memory_snapshot()
         cache_entry.model.to(target_device)
@@ -329,11 +326,11 @@ class ModelCache(ModelCacheBase[AnyModel]):
             f" {in_ram_models}/{in_vram_models}({locked_in_vram_models})"
         )

-    def make_room(self, model_size: int) -> None:
+    def make_room(self, size: int) -> None:
         """Make enough room in the cache to accommodate a new model of indicated size."""
         # calculate how much memory this model will require
         # multiplier = 2 if self.precision==torch.float32 else 1
-        bytes_needed = model_size
+        bytes_needed = size
         maximum_size = self.max_cache_size * GIG  # stored in GB, convert to bytes
         current_size = self.cache_size()
@@ -388,7 +385,7 @@ class ModelCache(ModelCacheBase[AnyModel]):
                 # 1 from onnx runtime object
                 if not cache_entry.locked and refs <= (3 if "onnx" in model_key else 2):
                     self.logger.debug(
-                        f"Removing {model_key} from RAM cache to free at least {(model_size/GIG):.2f} GB (-{(cache_entry.size/GIG):.2f} GB)"
+                        f"Removing {model_key} from RAM cache to free at least {(size/GIG):.2f} GB (-{(cache_entry.size/GIG):.2f} GB)"
                     )
                     current_size -= cache_entry.size
                     models_cleared += 1
@@ -420,24 +417,3 @@ class ModelCache(ModelCacheBase[AnyModel]):
             mps.empty_cache()

         self.logger.debug(f"After making room: cached_models={len(self._cached_models)}")
-
-    def _free_vram(self, device: torch.device) -> int:
-        vram_device = (  # mem_get_info() needs an indexed device
-            device if device.index is not None else torch.device(str(device), index=0)
-        )
-        free_mem, _ = torch.cuda.mem_get_info(vram_device)
-        for _, cache_entry in self._cached_models.items():
-            if cache_entry.loaded and not cache_entry.locked:
-                free_mem += cache_entry.size
-        return free_mem
-
-    def _check_free_vram(self, target_device: torch.device, needed_size: int) -> None:
-        if target_device.type != "cuda":
-            return
-        free_mem = self._free_vram(target_device)
-        if needed_size > free_mem:
-            needed_gb = round(needed_size / GIG, 2)
-            free_gb = round(free_mem / GIG, 2)
-            raise torch.cuda.OutOfMemoryError(
-                f"Insufficient VRAM to load model, requested {needed_gb}GB but only had {free_gb}GB free"
-            )
```
Arabic:

```diff
@@ -291,7 +291,6 @@
     "canvasMerged": "تم دمج الخط",
     "sentToImageToImage": "تم إرسال إلى صورة إلى صورة",
     "sentToUnifiedCanvas": "تم إرسال إلى لوحة موحدة",
-    "parametersSet": "تم تعيين المعلمات",
     "parametersNotSet": "لم يتم تعيين المعلمات",
     "metadataLoadFailed": "فشل تحميل البيانات الوصفية"
   },
```

German:

```diff
@@ -480,7 +480,6 @@
     "canvasMerged": "Leinwand zusammengeführt",
     "sentToImageToImage": "Gesendet an Bild zu Bild",
     "sentToUnifiedCanvas": "Gesendet an Leinwand",
-    "parametersSet": "Parameter festlegen",
     "parametersNotSet": "Parameter nicht festgelegt",
     "metadataLoadFailed": "Metadaten konnten nicht geladen werden",
     "setCanvasInitialImage": "Ausgangsbild setzen",
```

English:

```diff
@@ -1041,10 +1041,10 @@
     "metadataLoadFailed": "Failed to load metadata",
     "modelAddedSimple": "Model Added to Queue",
     "modelImportCanceled": "Model Import Canceled",
+    "parameters": "Parameters",
     "parameterNotSet": "{{parameter}} not set",
     "parameterSet": "{{parameter}} set",
     "parametersNotSet": "Parameters Not Set",
-    "parametersSet": "Parameters Set",
     "problemCopyingCanvas": "Problem Copying Canvas",
     "problemCopyingCanvasDesc": "Unable to export base layer",
     "problemCopyingImage": "Unable to Copy Image",
```

Spanish:

```diff
@@ -363,7 +363,6 @@
     "canvasMerged": "Lienzo consolidado",
     "sentToImageToImage": "Enviar hacia Imagen a Imagen",
     "sentToUnifiedCanvas": "Enviar hacia Lienzo Consolidado",
-    "parametersSet": "Parámetros establecidos",
     "parametersNotSet": "Parámetros no establecidos",
     "metadataLoadFailed": "Error al cargar metadatos",
     "serverError": "Error en el servidor",
```

French:

```diff
@@ -298,7 +298,6 @@
     "canvasMerged": "Canvas fusionné",
     "sentToImageToImage": "Envoyé à Image à Image",
     "sentToUnifiedCanvas": "Envoyé à Canvas unifié",
-    "parametersSet": "Paramètres définis",
     "parametersNotSet": "Paramètres non définis",
     "metadataLoadFailed": "Échec du chargement des métadonnées"
   },
```

Hebrew:

```diff
@@ -306,7 +306,6 @@
     "canvasMerged": "קנבס מוזג",
     "sentToImageToImage": "נשלח לתמונה לתמונה",
     "sentToUnifiedCanvas": "נשלח אל קנבס מאוחד",
-    "parametersSet": "הגדרת פרמטרים",
     "parametersNotSet": "פרמטרים לא הוגדרו",
     "metadataLoadFailed": "טעינת מטא-נתונים נכשלה"
   },
```

Italian:

```diff
@@ -569,7 +569,6 @@
     "canvasMerged": "Tela unita",
     "sentToImageToImage": "Inviato a Immagine a Immagine",
     "sentToUnifiedCanvas": "Inviato a Tela Unificata",
-    "parametersSet": "Parametri impostati",
     "parametersNotSet": "Parametri non impostati",
     "metadataLoadFailed": "Impossibile caricare i metadati",
     "serverError": "Errore del Server",
```

Dutch:

```diff
@@ -420,7 +420,6 @@
     "canvasMerged": "Canvas samengevoegd",
     "sentToImageToImage": "Gestuurd naar Afbeelding naar afbeelding",
     "sentToUnifiedCanvas": "Gestuurd naar Centraal canvas",
-    "parametersSet": "Parameters ingesteld",
     "parametersNotSet": "Parameters niet ingesteld",
     "metadataLoadFailed": "Fout bij laden metagegevens",
     "serverError": "Serverfout",
```

Polish:

```diff
@@ -267,7 +267,6 @@
     "canvasMerged": "Scalono widoczne warstwy",
     "sentToImageToImage": "Wysłano do Obraz na obraz",
     "sentToUnifiedCanvas": "Wysłano do trybu uniwersalnego",
-    "parametersSet": "Ustawiono parametry",
     "parametersNotSet": "Nie ustawiono parametrów",
     "metadataLoadFailed": "Błąd wczytywania metadanych"
   },
```

Portuguese:

```diff
@@ -310,7 +310,6 @@
     "canvasMerged": "Tela Fundida",
     "sentToImageToImage": "Mandar Para Imagem Para Imagem",
     "sentToUnifiedCanvas": "Enviada para a Tela Unificada",
-    "parametersSet": "Parâmetros Definidos",
     "parametersNotSet": "Parâmetros Não Definidos",
     "metadataLoadFailed": "Falha ao tentar carregar metadados"
   },
```

Portuguese:

```diff
@@ -307,7 +307,6 @@
     "canvasMerged": "Tela Fundida",
     "sentToImageToImage": "Mandar Para Imagem Para Imagem",
     "sentToUnifiedCanvas": "Enviada para a Tela Unificada",
-    "parametersSet": "Parâmetros Definidos",
     "parametersNotSet": "Parâmetros Não Definidos",
     "metadataLoadFailed": "Falha ao tentar carregar metadados"
   },
```

Russian:

```diff
@@ -575,7 +575,6 @@
     "canvasMerged": "Холст объединен",
     "sentToImageToImage": "Отправить в img2img",
     "sentToUnifiedCanvas": "Отправлено на Единый холст",
-    "parametersSet": "Параметры заданы",
     "parametersNotSet": "Параметры не заданы",
     "metadataLoadFailed": "Не удалось загрузить метаданные",
     "serverError": "Ошибка сервера",
```

Ukrainian:

```diff
@@ -315,7 +315,6 @@
     "canvasMerged": "Полотно об'єднане",
     "sentToImageToImage": "Надіслати до img2img",
     "sentToUnifiedCanvas": "Надіслати на полотно",
-    "parametersSet": "Параметри задані",
     "parametersNotSet": "Параметри не задані",
     "metadataLoadFailed": "Не вдалося завантажити метадані",
     "serverError": "Помилка сервера",
```

Chinese:

```diff
@@ -487,7 +487,6 @@
     "canvasMerged": "画布已合并",
     "sentToImageToImage": "已发送到图生图",
     "sentToUnifiedCanvas": "已发送到统一画布",
-    "parametersSet": "参数已设定",
     "parametersNotSet": "参数未设定",
     "metadataLoadFailed": "加载元数据失败",
     "uploadFailedInvalidUploadDesc": "必须是单张的 PNG 或 JPEG 图片",
```
```diff
@@ -33,6 +33,7 @@ const ImageMetadataActions = (props: Props) => {
       <MetadataItem metadata={metadata} handlers={handlers.scheduler} />
       <MetadataItem metadata={metadata} handlers={handlers.cfgScale} />
       <MetadataItem metadata={metadata} handlers={handlers.cfgRescaleMultiplier} />
+      <MetadataItem metadata={metadata} handlers={handlers.initialImage} />
       <MetadataItem metadata={metadata} handlers={handlers.strength} />
       <MetadataItem metadata={metadata} handlers={handlers.hrfEnabled} />
       <MetadataItem metadata={metadata} handlers={handlers.hrfMethod} />
```

```diff
@@ -189,6 +189,12 @@ export const handlers = {
     recaller: recallers.cfgScale,
   }),
   height: buildHandlers({ getLabel: () => t('metadata.height'), parser: parsers.height, recaller: recallers.height }),
+  initialImage: buildHandlers({
+    getLabel: () => t('metadata.initImage'),
+    parser: parsers.initialImage,
+    recaller: recallers.initialImage,
+    renderValue: async (imageDTO) => imageDTO.image_name,
+  }),
   negativePrompt: buildHandlers({
     getLabel: () => t('metadata.negativePrompt'),
     parser: parsers.negativePrompt,
@@ -405,6 +411,6 @@ export const parseAndRecallAllMetadata = async (metadata: unknown, skip: (keyof
     })
   );
   if (results.some((result) => result.status === 'fulfilled')) {
-    parameterSetToast(t('toast.parametersSet'));
+    parameterSetToast(t('toast.parameters'));
   }
 };
```
```diff
@@ -1,3 +1,4 @@
+import { getStore } from 'app/store/nanostores/store';
 import {
   initialControlNet,
   initialIPAdapter,
@@ -57,6 +58,8 @@ import {
   isParameterWidth,
 } from 'features/parameters/types/parameterSchemas';
 import { get, isArray, isString } from 'lodash-es';
+import { imagesApi } from 'services/api/endpoints/images';
+import type { ImageDTO } from 'services/api/types';
 import {
   isControlNetModelConfig,
   isIPAdapterModelConfig,
@@ -135,6 +138,14 @@ const parseCFGRescaleMultiplier: MetadataParseFunc<ParameterCFGRescaleMultiplier
 const parseScheduler: MetadataParseFunc<ParameterScheduler> = (metadata) =>
   getProperty(metadata, 'scheduler', isParameterScheduler);

+const parseInitialImage: MetadataParseFunc<ImageDTO> = async (metadata) => {
+  const imageName = await getProperty(metadata, 'init_image', isString);
+  const imageDTORequest = getStore().dispatch(imagesApi.endpoints.getImageDTO.initiate(imageName));
+  const imageDTO = await imageDTORequest.unwrap();
+  imageDTORequest.unsubscribe();
+  return imageDTO;
+};
+
 const parseWidth: MetadataParseFunc<ParameterWidth> = (metadata) => getProperty(metadata, 'width', isParameterWidth);

 const parseHeight: MetadataParseFunc<ParameterHeight> = (metadata) =>
@@ -402,6 +413,7 @@ export const parsers = {
   cfgScale: parseCFGScale,
   cfgRescaleMultiplier: parseCFGRescaleMultiplier,
   scheduler: parseScheduler,
+  initialImage: parseInitialImage,
   width: parseWidth,
   height: parseHeight,
   steps: parseSteps,
```
```diff
@@ -17,6 +17,7 @@ import type {
 import { modelSelected } from 'features/parameters/store/actions';
 import {
   heightRecalled,
+  initialImageChanged,
   setCfgRescaleMultiplier,
   setCfgScale,
   setImg2imgStrength,
@@ -61,6 +62,7 @@ import {
   setRefinerStart,
   setRefinerSteps,
 } from 'features/sdxl/store/sdxlSlice';
+import type { ImageDTO } from 'services/api/types';

 const recallPositivePrompt: MetadataRecallFunc<ParameterPositivePrompt> = (positivePrompt) => {
   getStore().dispatch(setPositivePrompt(positivePrompt));
@@ -94,6 +96,10 @@ const recallScheduler: MetadataRecallFunc<ParameterScheduler> = (scheduler) => {
   getStore().dispatch(setScheduler(scheduler));
 };

+const recallInitialImage: MetadataRecallFunc<ImageDTO> = async (imageDTO) => {
+  getStore().dispatch(initialImageChanged(imageDTO));
+};
+
 const recallWidth: MetadataRecallFunc<ParameterWidth> = (width) => {
   getStore().dispatch(widthRecalled(width));
 };
@@ -235,6 +241,7 @@ export const recallers = {
   cfgScale: recallCFGScale,
   cfgRescaleMultiplier: recallCFGRescaleMultiplier,
   scheduler: recallScheduler,
+  initialImage: recallInitialImage,
   width: recallWidth,
   height: recallHeight,
   steps: recallSteps,
```
File diff suppressed because one or more lines are too long
```diff
@@ -27,6 +27,7 @@ from invokeai.app.invocations.fields import (
     OutputField,
     UIComponent,
     UIType,
+    WithBoard,
     WithMetadata,
     WithWorkflow,
 )
@@ -105,6 +106,7 @@ __all__ = [
     "OutputField",
     "UIComponent",
     "UIType",
+    "WithBoard",
     "WithMetadata",
     "WithWorkflow",
     # invokeai.app.invocations.latent
```
```diff
@@ -1 +1 @@
-__version__ = "4.0.3"
+__version__ = "4.0.4"
```