mirror of
https://github.com/invoke-ai/InvokeAI.git
synced 2026-04-23 03:00:31 -04:00
Add util functions calc_tensor_size(...) and calc_tensors_size(...).
This commit is contained in:
@@ -3,6 +3,7 @@ from typing import Dict, Optional
|
||||
import torch
|
||||
|
||||
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
|
||||
from invokeai.backend.util.calc_tensor_size import calc_tensor_size
|
||||
|
||||
|
||||
class FullLayer(LoRALayerBase):
|
||||
@@ -26,9 +27,7 @@ class FullLayer(LoRALayerBase):
|
||||
return self.weight
|
||||
|
||||
def calc_size(self) -> int:
|
||||
model_size = super().calc_size()
|
||||
model_size += self.weight.nelement() * self.weight.element_size()
|
||||
return model_size
|
||||
return calc_tensor_size(self.weight) + super().calc_size()
|
||||
|
||||
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
|
||||
super().to(device=device, dtype=dtype)
|
||||
|
||||
@@ -3,6 +3,7 @@ from typing import Dict, Optional
|
||||
import torch
|
||||
|
||||
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
|
||||
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
|
||||
|
||||
|
||||
class IA3Layer(LoRALayerBase):
|
||||
@@ -30,8 +31,7 @@ class IA3Layer(LoRALayerBase):
|
||||
|
||||
def calc_size(self) -> int:
|
||||
model_size = super().calc_size()
|
||||
model_size += self.weight.nelement() * self.weight.element_size()
|
||||
model_size += self.on_input.nelement() * self.on_input.element_size()
|
||||
model_size += calc_tensors_size([self.weight, self.on_input])
|
||||
return model_size
|
||||
|
||||
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None):
|
||||
|
||||
@@ -3,6 +3,7 @@ from typing import Dict, Optional
|
||||
import torch
|
||||
|
||||
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
|
||||
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
|
||||
|
||||
|
||||
class LoHALayer(LoRALayerBase):
|
||||
@@ -49,9 +50,7 @@ class LoHALayer(LoRALayerBase):
|
||||
|
||||
def calc_size(self) -> int:
|
||||
model_size = super().calc_size()
|
||||
for val in [self.w1_a, self.w1_b, self.w2_a, self.w2_b, self.t1, self.t2]:
|
||||
if val is not None:
|
||||
model_size += val.nelement() * val.element_size()
|
||||
model_size += calc_tensors_size([self.w1_a, self.w1_b, self.w2_a, self.w2_b, self.t1, self.t2])
|
||||
return model_size
|
||||
|
||||
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
|
||||
|
||||
@@ -3,6 +3,7 @@ from typing import Dict, Optional
|
||||
import torch
|
||||
|
||||
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
|
||||
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
|
||||
|
||||
|
||||
class LoKRLayer(LoRALayerBase):
|
||||
@@ -85,9 +86,7 @@ class LoKRLayer(LoRALayerBase):
|
||||
|
||||
def calc_size(self) -> int:
|
||||
model_size = super().calc_size()
|
||||
for val in [self.w1, self.w1_a, self.w1_b, self.w2, self.w2_a, self.w2_b, self.t2]:
|
||||
if val is not None:
|
||||
model_size += val.nelement() * val.element_size()
|
||||
model_size += calc_tensors_size([self.w1, self.w1_a, self.w1_b, self.w2, self.w2_a, self.w2_b, self.t2])
|
||||
return model_size
|
||||
|
||||
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
|
||||
|
||||
@@ -3,6 +3,7 @@ from typing import Dict, Optional
|
||||
import torch
|
||||
|
||||
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
|
||||
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
|
||||
|
||||
|
||||
# TODO: find and debug lora/locon with bias
|
||||
@@ -43,9 +44,7 @@ class LoRALayer(LoRALayerBase):
|
||||
|
||||
def calc_size(self) -> int:
|
||||
model_size = super().calc_size()
|
||||
for val in [self.up, self.mid, self.down]:
|
||||
if val is not None:
|
||||
model_size += val.nelement() * val.element_size()
|
||||
model_size += calc_tensors_size([self.up, self.mid, self.down])
|
||||
return model_size
|
||||
|
||||
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
|
||||
|
||||
@@ -3,6 +3,7 @@ from typing import Dict, Optional, Set
|
||||
import torch
|
||||
|
||||
import invokeai.backend.util.logging as logger
|
||||
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
|
||||
|
||||
|
||||
class LoRALayerBase:
|
||||
@@ -49,11 +50,7 @@ class LoRALayerBase:
|
||||
return params
|
||||
|
||||
def calc_size(self) -> int:
|
||||
model_size = 0
|
||||
for val in [self.bias]:
|
||||
if val is not None:
|
||||
model_size += val.nelement() * val.element_size()
|
||||
return model_size
|
||||
return calc_tensors_size([self.bias])
|
||||
|
||||
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
|
||||
if self.bias is not None:
|
||||
|
||||
@@ -3,6 +3,7 @@ from typing import Dict, Optional
|
||||
import torch
|
||||
|
||||
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
|
||||
from invokeai.backend.util.calc_tensor_size import calc_tensor_size
|
||||
|
||||
|
||||
class NormLayer(LoRALayerBase):
|
||||
@@ -27,7 +28,7 @@ class NormLayer(LoRALayerBase):
|
||||
|
||||
def calc_size(self) -> int:
|
||||
model_size = super().calc_size()
|
||||
model_size += self.weight.nelement() * self.weight.element_size()
|
||||
model_size += calc_tensor_size(self.weight)
|
||||
return model_size
|
||||
|
||||
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
|
||||
|
||||
@@ -20,6 +20,7 @@ from invokeai.backend.model_manager.config import AnyModel
|
||||
from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
|
||||
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
|
||||
from invokeai.backend.textual_inversion import TextualInversionModelRaw
|
||||
from invokeai.backend.util.calc_tensor_size import calc_tensor_size
|
||||
|
||||
|
||||
def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int:
|
||||
@@ -83,10 +84,9 @@ def _calc_pipeline_by_data(pipeline: DiffusionPipeline) -> int:
|
||||
|
||||
def calc_module_size(model: torch.nn.Module) -> int:
|
||||
"""Calculate the size (in bytes) of a torch.nn.Module."""
|
||||
mem_params = sum([param.nelement() * param.element_size() for param in model.parameters()])
|
||||
mem_bufs = sum([buf.nelement() * buf.element_size() for buf in model.buffers()])
|
||||
mem: int = mem_params + mem_bufs # in bytes
|
||||
return mem
|
||||
mem_params = sum([calc_tensor_size(param) for param in model.parameters()])
|
||||
mem_bufs = sum([calc_tensor_size(buf) for buf in model.buffers()])
|
||||
return mem_params + mem_bufs
|
||||
|
||||
|
||||
def _calc_onnx_model_by_data(model: IAIOnnxRuntimeModel) -> int:
|
||||
|
||||
@@ -10,6 +10,7 @@ from transformers import CLIPTokenizer
|
||||
from typing_extensions import Self
|
||||
|
||||
from invokeai.backend.raw_model import RawModel
|
||||
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
|
||||
|
||||
|
||||
class TextualInversionModelRaw(RawModel):
|
||||
@@ -74,11 +75,7 @@ class TextualInversionModelRaw(RawModel):
|
||||
|
||||
def calc_size(self) -> int:
|
||||
"""Get the size of this model in bytes."""
|
||||
embedding_size = self.embedding.element_size() * self.embedding.nelement()
|
||||
embedding_2_size = 0
|
||||
if self.embedding_2 is not None:
|
||||
embedding_2_size = self.embedding_2.element_size() * self.embedding_2.nelement()
|
||||
return embedding_size + embedding_2_size
|
||||
return calc_tensors_size([self.embedding, self.embedding_2])
|
||||
|
||||
|
||||
class TextualInversionManager(BaseTextualInversionManager):
|
||||
|
||||
11
invokeai/backend/util/calc_tensor_size.py
Normal file
@@ -0,0 +1,11 @@
|
||||
import torch
|
||||
|
||||
|
||||
def calc_tensor_size(t: torch.Tensor) -> int:
|
||||
"""Calculate the size of a tensor in bytes."""
|
||||
return t.nelement() * t.element_size()
|
||||
|
||||
|
||||
def calc_tensors_size(tensors: list[torch.Tensor | None]) -> int:
|
||||
"""Calculate the size of a list of tensors in bytes."""
|
||||
return sum(calc_tensor_size(t) for t in tensors if t is not None)
|
||||
Reference in New Issue
Block a user