diff --git a/ldm/generate.py b/ldm/generate.py
index 8e3b27b2ac..63432d63b0 100644
--- a/ldm/generate.py
+++ b/ldm/generate.py
@@ -178,7 +178,9 @@ class Generate:
         self.model_hash = None
         self.sampler = None
         self.device = None
-        self.session_peakmem = None
+        self.max_memory_allocated = 0
+        self.memory_allocated = 0
+        self.session_peakmem = 0
         self.base_generator = None
         self.seed = None
         self.outdir = outdir
@@ -786,6 +788,7 @@ class Generate:
                 embiggen_tiles=opt.embiggen_tiles,
                 embiggen_strength=opt.embiggen_strength,
                 image_callback=callback,
+                clear_cuda_cache=self.clear_cuda_cache,
             )
         elif tool == "outpaint":
             from ldm.invoke.restoration.outpaint import Outpaint
diff --git a/ldm/invoke/generator/base.py b/ldm/invoke/generator/base.py
index d89fb48aff..00e395bf83 100644
--- a/ldm/invoke/generator/base.py
+++ b/ldm/invoke/generator/base.py
@@ -126,7 +126,7 @@ class Generator:
                 seed = self.new_seed()
 
                 # Free up memory from the last generation.
-                clear_cuda_cache = kwargs['clear_cuda_cache'] or None
+                clear_cuda_cache = kwargs['clear_cuda_cache'] if 'clear_cuda_cache' in kwargs else None
                 if clear_cuda_cache is not None:
                     clear_cuda_cache()
 
diff --git a/ldm/invoke/generator/diffusers_pipeline.py b/ldm/invoke/generator/diffusers_pipeline.py
index 5f15dea45f..88769a8ba0 100644
--- a/ldm/invoke/generator/diffusers_pipeline.py
+++ b/ldm/invoke/generator/diffusers_pipeline.py
@@ -333,7 +333,7 @@ class StableDiffusionGeneratorPipeline(StableDiffusionPipeline):
                     16 * \
                     latents.size(dim=2) * latents.size(dim=3) * latents.size(dim=2) * latents.size(dim=3) * \
                     bytes_per_element_needed_for_baddbmm_duplication
-                if max_size_required_for_baddbmm > (mem_free * 3.3 / 4.0): # 3.3 / 4.0 is from old Invoke code
+                if max_size_required_for_baddbmm > (mem_free * 3.0 / 4.0): # 3.3 / 4.0 is from old Invoke code
                     self.enable_attention_slicing(slice_size='max')
                 else:
                     self.disable_attention_slicing()
diff --git a/ldm/invoke/generator/embiggen.py b/ldm/invoke/generator/embiggen.py
index e7505aab2f..0a06f90b03 100644
--- a/ldm/invoke/generator/embiggen.py
+++ b/ldm/invoke/generator/embiggen.py
@@ -346,6 +346,7 @@ class Embiggen(Generator):
                     newinitimage = torch.from_numpy(newinitimage)
                     newinitimage = 2.0 * newinitimage - 1.0
                     newinitimage = newinitimage.to(self.model.device)
+                    clear_cuda_cache = kwargs['clear_cuda_cache'] if 'clear_cuda_cache' in kwargs else None
 
                     tile_results = gen_img2img.generate(
                         prompt,
@@ -363,6 +364,7 @@ class Embiggen(Generator):
                         init_image = newinitimage, # notice that init_image is different from init_img
                         mask_image = None,
                         strength = strength,
+                        clear_cuda_cache = clear_cuda_cache
                     )
 
                     emb_tile_store.append(tile_results[0][0])
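
Note: the generate.py hunk passes `clear_cuda_cache=self.clear_cuda_cache`, so it assumes
`Generate` exposes a `clear_cuda_cache` callable that the generators invoke between runs.
That helper is not shown in this patch; a minimal sketch of what such a callable might look
like (hypothetical placement, not part of the diff) is:

    import torch

    def clear_cuda_cache():
        # Release cached CUDA allocations so the next generation starts from a
        # smaller reserved-memory footprint; a no-op when CUDA is unavailable.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    # Callers would then thread it through the generator kwargs, mirroring the
    # hunks above, e.g.:
    #     generator.generate(prompt, ..., clear_cuda_cache=clear_cuda_cache)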