Merge branch 'spezialspezial-patch-9' into development
This merge wires the refined clipseg weights into text-to-mask inference: Txt2Mask gains a refined flag that selects both the complex_trans_conv decoder variant and the rd64-uni-refined.pth weights file, Generate's mask path turns it on, and Outcrop now forwards its callback wrapper only when the caller actually supplied an image_callback.
@@ -896,7 +896,7 @@ class Generate:
         assert os.path.exists(image_path), '** "{image_path}" not found. Please enter the name of an existing image file to mask **'
         basename,_ = os.path.splitext(os.path.basename(image_path))
         if self.txt2mask is None:
-            self.txt2mask = Txt2Mask(device = self.device)
+            self.txt2mask = Txt2Mask(device = self.device, refined=True)
         segmented = self.txt2mask.segment(image_path,prompt)
         trans = segmented.to_transparent()
         inverse = segmented.to_transparent(invert=True)
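
A minimal sketch (not part of the commit) of how this refined mask path might be exercised end to end, assuming the Txt2Mask API shown in the hunks of this commit; the module path, image path, and prompt below are illustrative assumptions, and to_transparent() is assumed to return a PIL image:

# sketch only: module path, file names and prompt are assumptions
from ldm.invoke.txt2mask import Txt2Mask

txt2mask  = Txt2Mask(device='cpu', refined=True)    # refined clipseg weights
segmented = txt2mask.segment('outputs/sample.png', 'a cat')
trans     = segmented.to_transparent()              # prompted region transparent
inverse   = segmented.to_transparent(invert=True)   # everything else transparent
trans.save('outputs/sample.mask.png')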
@@ -41,7 +41,7 @@ class Outcrop(object):
             height         = extended_image.height,
             init_img       = extended_image,
             strength       = 0.90,
-            image_callback = wrapped_callback,
+            image_callback = wrapped_callback if image_callback else None,
             seam_size      = opt.seam_size or 96,
             seam_blur      = opt.seam_blur or 16,
             seam_strength  = opt.seam_strength or 0.7,
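
The fix guards against installing a wrapper around a callback that was never supplied: wrapped_callback closes over image_callback, so calling it with image_callback = None would raise. A sketch of the pattern, with a hypothetical generate() stand-in for the real call site and illustrative keyword arguments:

# sketch of the guard pattern; generate() and use_prefix are hypothetical
def outcrop(generate, image_callback=None):
    def wrapped_callback(img, seed, **kwargs):
        # adjust metadata before handing the image back to the caller
        image_callback(img, seed, use_prefix='outcropped', **kwargs)

    generate(
        # only install the wrapper when a real callback exists
        image_callback = wrapped_callback if image_callback else None,
    )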
@@ -36,6 +36,7 @@ from torchvision import transforms
 
 CLIP_VERSION = 'ViT-B/16'
 CLIPSEG_WEIGHTS = 'src/clipseg/weights/rd64-uni.pth'
+CLIPSEG_WEIGHTS_REFINED = 'src/clipseg/weights/rd64-uni-refined.pth'
 CLIPSEG_SIZE = 352
 
 class SegmentedGrayscale(object):
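
Because the refined checkpoint lives at a fixed path, a hypothetical pre-flight check (not part of the commit) can confirm the file is present before constructing Txt2Mask(refined=True):

# hypothetical check; the path constant is copied from the hunk above
import os

CLIPSEG_WEIGHTS_REFINED = 'src/clipseg/weights/rd64-uni-refined.pth'
if not os.path.exists(CLIPSEG_WEIGHTS_REFINED):
    print(f'** refined clipseg weights not found at {CLIPSEG_WEIGHTS_REFINED} **')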
@@ -72,14 +73,14 @@ class Txt2Mask(object):
     Create new Txt2Mask object. The optional device argument can be one of
     'cuda', 'mps' or 'cpu'.
     '''
-    def __init__(self,device='cpu'):
+    def __init__(self,device='cpu',refined=False):
         print('>> Initializing clipseg model for text to mask inference')
         self.device = device
-        self.model = CLIPDensePredT(version=CLIP_VERSION, reduce_dim=64, )
+        self.model = CLIPDensePredT(version=CLIP_VERSION, reduce_dim=64, complex_trans_conv=refined)
         self.model.eval()
         # initially we keep everything in cpu to conserve space
         self.model.to('cpu')
-        self.model.load_state_dict(torch.load(CLIPSEG_WEIGHTS, map_location=torch.device('cpu')), strict=False)
+        self.model.load_state_dict(torch.load(CLIPSEG_WEIGHTS_REFINED if refined else CLIPSEG_WEIGHTS, map_location=torch.device('cpu')), strict=False)
 
     @torch.no_grad()
     def segment(self, image, prompt:str) -> SegmentedGrayscale:
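
A condensed restatement of the constructor logic above, as a self-contained sketch: the single refined flag drives both the decoder variant and which weights file is loaded, and weights stay on CPU at load time to conserve GPU memory. The CLIPDensePredT import path follows the upstream clipseg repository and is an assumption here:

# sketch; constants and calls mirror the hunk above, import path assumed
import torch
from models.clipseg import CLIPDensePredT

CLIP_VERSION            = 'ViT-B/16'
CLIPSEG_WEIGHTS         = 'src/clipseg/weights/rd64-uni.pth'
CLIPSEG_WEIGHTS_REFINED = 'src/clipseg/weights/rd64-uni-refined.pth'

def load_clipseg(refined: bool = False):
    model = CLIPDensePredT(version=CLIP_VERSION, reduce_dim=64,
                           complex_trans_conv=refined)
    model.eval()
    # load onto cpu first; callers move the model to cuda/mps later
    weights = CLIPSEG_WEIGHTS_REFINED if refined else CLIPSEG_WEIGHTS
    model.load_state_dict(torch.load(weights, map_location='cpu'), strict=False)
    return model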