Add util functions calc_tensor_size(...) and calc_tensors_size(...).

This commit is contained in:
Ryan Dick
2024-09-10 16:02:21 +00:00
parent e0c2b13558
commit c69e272fb3
10 changed files with 31 additions and 29 deletions

View File

@@ -3,6 +3,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.util.calc_tensor_size import calc_tensor_size
class FullLayer(LoRALayerBase):
@@ -26,9 +27,7 @@ class FullLayer(LoRALayerBase):
return self.weight
def calc_size(self) -> int:
model_size = super().calc_size()
model_size += self.weight.nelement() * self.weight.element_size()
return model_size
return calc_tensor_size(self.weight) + super().calc_size()
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
super().to(device=device, dtype=dtype)

View File

@@ -3,6 +3,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
class IA3Layer(LoRALayerBase):
@@ -30,8 +31,7 @@ class IA3Layer(LoRALayerBase):
def calc_size(self) -> int:
model_size = super().calc_size()
model_size += self.weight.nelement() * self.weight.element_size()
model_size += self.on_input.nelement() * self.on_input.element_size()
model_size += calc_tensors_size([self.weight, self.on_input])
return model_size
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None):

View File

@@ -3,6 +3,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
class LoHALayer(LoRALayerBase):
@@ -49,9 +50,7 @@ class LoHALayer(LoRALayerBase):
def calc_size(self) -> int:
model_size = super().calc_size()
for val in [self.w1_a, self.w1_b, self.w2_a, self.w2_b, self.t1, self.t2]:
if val is not None:
model_size += val.nelement() * val.element_size()
model_size += calc_tensors_size([self.w1_a, self.w1_b, self.w2_a, self.w2_b, self.t1, self.t2])
return model_size
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:

View File

@@ -3,6 +3,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
class LoKRLayer(LoRALayerBase):
@@ -85,9 +86,7 @@ class LoKRLayer(LoRALayerBase):
def calc_size(self) -> int:
model_size = super().calc_size()
for val in [self.w1, self.w1_a, self.w1_b, self.w2, self.w2_a, self.w2_b, self.t2]:
if val is not None:
model_size += val.nelement() * val.element_size()
model_size += calc_tensors_size([self.w1, self.w1_a, self.w1_b, self.w2, self.w2_a, self.w2_b, self.t2])
return model_size
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:

View File

@@ -3,6 +3,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
# TODO: find and debug lora/locon with bias
@@ -43,9 +44,7 @@ class LoRALayer(LoRALayerBase):
def calc_size(self) -> int:
model_size = super().calc_size()
for val in [self.up, self.mid, self.down]:
if val is not None:
model_size += val.nelement() * val.element_size()
model_size += calc_tensors_size([self.up, self.mid, self.down])
return model_size
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:

View File

@@ -3,6 +3,7 @@ from typing import Dict, Optional, Set
import torch
import invokeai.backend.util.logging as logger
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
class LoRALayerBase:
@@ -49,11 +50,7 @@ class LoRALayerBase:
return params
def calc_size(self) -> int:
model_size = 0
for val in [self.bias]:
if val is not None:
model_size += val.nelement() * val.element_size()
return model_size
return calc_tensors_size([self.bias])
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:
if self.bias is not None:

View File

@@ -3,6 +3,7 @@ from typing import Dict, Optional
import torch
from invokeai.backend.lora.layers.lora_layer_base import LoRALayerBase
from invokeai.backend.util.calc_tensor_size import calc_tensor_size
class NormLayer(LoRALayerBase):
@@ -27,7 +28,7 @@ class NormLayer(LoRALayerBase):
def calc_size(self) -> int:
model_size = super().calc_size()
model_size += self.weight.nelement() * self.weight.element_size()
model_size += calc_tensor_size(self.weight)
return model_size
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> None:

View File

@@ -20,6 +20,7 @@ from invokeai.backend.model_manager.config import AnyModel
from invokeai.backend.onnx.onnx_runtime import IAIOnnxRuntimeModel
from invokeai.backend.spandrel_image_to_image_model import SpandrelImageToImageModel
from invokeai.backend.textual_inversion import TextualInversionModelRaw
from invokeai.backend.util.calc_tensor_size import calc_tensor_size
def calc_model_size_by_data(logger: logging.Logger, model: AnyModel) -> int:
@@ -83,10 +84,9 @@ def _calc_pipeline_by_data(pipeline: DiffusionPipeline) -> int:
def calc_module_size(model: torch.nn.Module) -> int:
"""Calculate the size (in bytes) of a torch.nn.Module."""
mem_params = sum([param.nelement() * param.element_size() for param in model.parameters()])
mem_bufs = sum([buf.nelement() * buf.element_size() for buf in model.buffers()])
mem: int = mem_params + mem_bufs # in bytes
return mem
mem_params = sum([calc_tensor_size(param) for param in model.parameters()])
mem_bufs = sum([calc_tensor_size(buf) for buf in model.buffers()])
return mem_params + mem_bufs
def _calc_onnx_model_by_data(model: IAIOnnxRuntimeModel) -> int:

View File

@@ -10,6 +10,7 @@ from transformers import CLIPTokenizer
from typing_extensions import Self
from invokeai.backend.raw_model import RawModel
from invokeai.backend.util.calc_tensor_size import calc_tensors_size
class TextualInversionModelRaw(RawModel):
@@ -74,11 +75,7 @@ class TextualInversionModelRaw(RawModel):
def calc_size(self) -> int:
"""Get the size of this model in bytes."""
embedding_size = self.embedding.element_size() * self.embedding.nelement()
embedding_2_size = 0
if self.embedding_2 is not None:
embedding_2_size = self.embedding_2.element_size() * self.embedding_2.nelement()
return embedding_size + embedding_2_size
return calc_tensors_size([self.embedding, self.embedding_2])
class TextualInversionManager(BaseTextualInversionManager):

View File

@@ -0,0 +1,11 @@
import torch
def calc_tensor_size(t: torch.Tensor) -> int:
    """Return the memory footprint of tensor ``t`` in bytes.

    The footprint is the element count multiplied by the per-element
    byte width (e.g. 4 for float32, 8 for int64).
    """
    num_elements = t.nelement()
    bytes_per_element = t.element_size()
    return num_elements * bytes_per_element
def calc_tensors_size(tensors: list[torch.Tensor | None]) -> int:
"""Calculate the size of a list of tensors in bytes."""
return sum(calc_tensor_size(t) for t in tensors if t is not None)