Mirror of https://github.com/invoke-ai/InvokeAI.git
Merge branch 'main' into OMI
@@ -19,6 +19,7 @@ export const CanvasToolbarSaveToGalleryButton = memo(() => {
       onClick={shift ? saveBboxToGallery : saveCanvasToGallery}
       icon={<PiFloppyDiskBold />}
       aria-label={shift ? t('controlLayers.saveBboxToGallery') : t('controlLayers.saveCanvasToGallery')}
       colorScheme="invokeBlue"
       tooltip={shift ? t('controlLayers.saveBboxToGallery') : t('controlLayers.saveCanvasToGallery')}
+      isDisabled={isBusy}
     />
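For readers skimming the diff: the button swaps both its action and its label/tooltip when Shift is held, and is now disabled while the canvas is busy. A minimal sketch of that pattern, where `useShiftModifier`, `saveBboxToGallery`, and `saveCanvasToGallery` are illustrative stand-ins, not the component's exact imports:

```ts
import { useCallback } from 'react';

// Assumed stand-ins for the component's real imports; illustrative only.
const useShiftModifier = (): boolean => false; // the real hook would track the Shift key
const saveBboxToGallery = () => console.log('saving bbox region to gallery');
const saveCanvasToGallery = () => console.log('saving full canvas to gallery');

// Holding Shift swaps the save action, mirroring the aria-label/tooltip swap in the JSX above.
export const useSaveToGalleryAction = () => {
  const shift = useShiftModifier();
  return useCallback(() => (shift ? saveBboxToGallery() : saveCanvasToGallery()), [shift]);
};
```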
@@ -122,11 +122,11 @@ const NODE_TYPE_PUBLISH_DENYLIST = [
   'metadata_to_controlnets',
   'metadata_to_ip_adapters',
   'metadata_to_t2i_adapters',
-  'google_imagen3_generate',
-  'google_imagen3_edit',
-  'google_imagen4_generate',
-  'chatgpt_create_image',
-  'chatgpt_edit_image',
+  'google_imagen3_generate_image',
+  'google_imagen3_edit_image',
+  'google_imagen4_generate_image',
+  'chatgpt_4o_generate_image',
+  'chatgpt_4o_edit_image',
 ];

 export const selectHasUnpublishableNodes = createSelector(selectNodes, (nodes) => {
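The renamed entries must match the node type identifiers exactly, since the selector below the list checks each node against the denylist. A minimal sketch of how that check could work, assuming a simplified node shape and store (`selectNodes`, `RootState`, and the `Node` type here are assumptions, not the repo's actual definitions):

```ts
import { createSelector } from 'reselect';

// Simplified, assumed shapes; the real store and node types are richer.
type Node = { id: string; type: string };
type RootState = { nodes: Node[] };

const NODE_TYPE_PUBLISH_DENYLIST = [
  'google_imagen3_generate_image',
  'google_imagen3_edit_image',
  'google_imagen4_generate_image',
  'chatgpt_4o_generate_image',
  'chatgpt_4o_edit_image',
];

const selectNodes = (state: RootState) => state.nodes;

// A workflow is unpublishable if any node's type appears on the denylist.
export const selectHasUnpublishableNodes = createSelector(selectNodes, (nodes) =>
  nodes.some((node) => NODE_TYPE_PUBLISH_DENYLIST.includes(node.type))
);
```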
@@ -12161,7 +12161,7 @@ export type components = {
      * vram: DEPRECATED: This setting is no longer used. It has been replaced by `max_cache_vram_gb`, but most users will not need to use this config since automatic cache size limits should work well in most cases. This config setting will be removed once the new model cache behavior is stable.
      * lazy_offload: DEPRECATED: This setting is no longer used. Lazy-offloading is enabled by default. This config setting will be removed once the new model cache behavior is stable.
      * pytorch_cuda_alloc_conf: Configure the Torch CUDA memory allocator. This will impact peak reserved VRAM usage and performance. Setting to "backend:cudaMallocAsync" works well on many systems. The optimal configuration is highly dependent on the system configuration (device type, VRAM, CUDA driver version, etc.), so must be tuned experimentally.
-     * device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `cuda:1`, `mps`
+     * device: Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
      * precision: Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.<br>Valid values: `auto`, `float16`, `bfloat16`, `float32`
      * sequential_guidance: Whether to calculate guidance in serial instead of in parallel, lowering memory requirements.
      * attention_type: Attention type.<br>Valid values: `auto`, `normal`, `xformers`, `sliced`, `torch-sdp`
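The key change here is that `cuda:N` now describes a whole family of values instead of the single hard-coded `cuda:1`. A hedged sketch of what validating the widened value could look like on the client side (the `isValidDevice` helper is illustrative, not part of the generated API client):

```ts
// Accepts `auto`, `cpu`, `mps`, bare `cuda`, or `cuda:N` for any device index N.
function isValidDevice(device: string): boolean {
  return ['auto', 'cpu', 'mps', 'cuda'].includes(device) || /^cuda:\d+$/.test(device);
}

console.log(isValidDevice('cuda:3')); // true — any index is now acceptable
console.log(isValidDevice('cuda:1')); // true — the old enum's only indexed option
console.log(isValidDevice('rocm'));   // false
```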
@@ -12436,11 +12436,10 @@ export type components = {
       pytorch_cuda_alloc_conf?: string | null;
       /**
        * Device
-       * @description Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.
+       * @description Preferred execution device. `auto` will choose the device depending on the hardware platform and the installed torch capabilities.<br>Valid values: `auto`, `cpu`, `cuda`, `mps`, `cuda:N` (where N is a device number)
        * @default auto
-       * @enum {string}
        */
-      device?: "auto" | "cpu" | "cuda" | "cuda:1" | "mps";
+      device?: string;
       /**
        * Precision
        * @description Floating point precision. `float16` will consume half the memory of `float32` but produce slightly lower-quality images. The `auto` setting will guess the proper precision based on your video card and operating system.
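Because `device` is widened from a string-literal union to a plain `string`, TypeScript consumers lose the compile-time narrowing the old enum provided and must parse or validate the value at runtime. A small illustrative helper (hypothetical, not part of the generated client) that extracts the device index:

```ts
// Returns the CUDA device index for `cuda:N` values, or null for anything else.
function parseCudaIndex(device: string): number | null {
  const match = /^cuda:(\d+)$/.exec(device);
  return match ? Number(match[1]) : null;
}

console.log(parseCudaIndex('cuda:2')); // 2
console.log(parseCudaIndex('cpu'));    // null
```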