Compare commits

..

6 Commits

Author SHA1 Message Date
Lincoln Stein
48cb6bd200 change workflow to deploy from v2.3 branch 2023-05-06 23:50:34 -04:00
Lincoln Stein
332ac72e0e [Bugfix] Update check failing because process disappears (#3334)
Fixes #3228, where the check to see if invokeai is running fails because
a process no longer exists.
2023-05-04 20:32:51 -04:00
Dan Nguyen
03bbb308c9 [Bugfix] Update check failing because process disappears
Fixes #3228, where the check to see if invokeai is running fails because
a process no longer exists.
2023-05-03 10:54:43 -05:00
Lincoln Stein
1dcac3929b Release v2.3.5 (#3309)
# Version 2.3.5
This will be the 2.3.5 release once it is merged into the `v2.3` branch.
Changes on the RC branch are:

- Bump version number
- Fix bug in LoRA path determination (do it at runtime, not at module
load time, or root will get confused); closes #3293.
- Remove dangling debug statement.
2023-05-01 12:40:47 -04:00
Lincoln Stein
d73f1c363c bump version number 2023-05-01 09:28:49 -04:00
Lincoln Stein
e52e7418bb close #3304 2023-04-29 20:07:21 -04:00
4 changed files with 13 additions and 7 deletions

View File

@@ -41,7 +41,7 @@ jobs:
--verbose
- name: deploy to gh-pages
-        if: ${{ github.ref == 'refs/heads/main' }}
+        if: ${{ github.ref == 'refs/heads/v2.3' }}
run: |
python -m \
mkdocs gh-deploy \

View File

@@ -1 +1 @@
-__version__='2.3.5-rc2'
+__version__='2.3.5'

View File

@@ -39,7 +39,7 @@ def invokeai_is_running()->bool:
if matches:
print(f':exclamation: [bold red]An InvokeAI instance appears to be running as process {p.pid}[/red bold]')
return True
-        except psutil.AccessDenied:
+        except (psutil.AccessDenied,psutil.NoSuchProcess):
continue
return False

View File

@@ -456,11 +456,8 @@ class LoRA:
class KohyaLoraManager:
lora_path = None
vector_length_cache_path = None
def __init__(self, pipe):
-        self.lora_path = Path(global_lora_models_dir())
-        self.vector_length_cache_path = self.lora_path / '.vectorlength.cache'
self.unet = pipe.unet
self.wrapper = LoRAModuleWrapper(pipe.unet, pipe.text_encoder)
@@ -468,6 +465,16 @@ class KohyaLoraManager:
self.device = torch.device(choose_torch_device())
self.dtype = pipe.unet.dtype
+    @classmethod
+    @property
+    def lora_path(cls)->Path:
+        return Path(global_lora_models_dir())
+
+    @classmethod
+    @property
+    def vector_length_cache_path(cls)->Path:
+        return cls.lora_path / '.vectorlength.cache'
def load_lora_module(self, name, path_file, multiplier: float = 1.0):
print(f" | Found lora {name} at {path_file}")
if path_file.suffix == ".safetensors":
@@ -568,7 +575,6 @@ class KohyaLoraManager:
class LoraVectorLengthCache(object):
def __init__(self, cache_path: Path):
self.cache_path = cache_path
-        print(f'DEBUG: lock path = {Path(cache_path.parent, ".cachelock")}')
self.lock = FileLock(Path(cache_path.parent, ".cachelock"))
self.cache = {}