Mirror of https://github.com/invoke-ai/InvokeAI.git, synced 2026-01-16 01:58:14 -05:00

Compare commits: v2.3.4rc1 ... invokeai-b (33 commits)
Commit SHAs:
dbd2161601, 1f83ac2eae, f7bb68d01c, 8cddf9c5b3, 9b546ccf06, 73dbf73a95,
18a1f3893f, 018d5dab53, 96a5de30e3, 4d62d5b802, 17de5c7008, f95403dcda,
e54d060d17, a01f1d4940, 1873817ac9, 31333a736c, 03274b6da6, 0646649c05,
2af511c98a, f0039cc70a, 8fa7d5ca64, d90aa42799, c5b34d21e5, 40a4867143,
4b25f80427, 894e2e643d, a38ff1a16b, 41f268b475, b3ae3f595f, 29962613d8,
1170cee1d8, 5983e65b22, bc724fcdc3
File diff suppressed because one or more lines are too long

invokeai/frontend/dist/index.html (vendored): 2 changes
@@ -5,7 +5,7 @@
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>InvokeAI - A Stable Diffusion Toolkit</title>
     <link rel="shortcut icon" type="icon" href="./assets/favicon-0d253ced.ico" />
-    <script type="module" crossorigin src="./assets/index-c1535364.js"></script>
+    <script type="module" crossorigin src="./assets/index-f56b39bc.js"></script>
     <link rel="stylesheet" href="./assets/index-2ab0eb58.css">
   </head>
invokeai/frontend/dist/locales/en.json (vendored): 3 changes

@@ -328,8 +328,11 @@
     "updateModel": "Update Model",
     "availableModels": "Available Models",
     "addLora": "Add Lora",
+    "clearLoras": "Clear Loras",
     "noLoraModels": "No Loras Found",
     "addTextualInversionTrigger": "Add Textual Inversion",
+    "addTIToNegative": "Add To Negative",
+    "clearTextualInversions": "Clear Textual Inversions",
     "noTextualInversionTriggers": "No Textual Inversions Found",
     "search": "Search",
     "load": "Load",
@@ -92,7 +92,8 @@ export default function IAISimpleMenu(props: IAIMenuProps) {
   zIndex={15}
   padding={0}
   borderRadius="0.5rem"
-  overflowY="scroll"
+  overflow="scroll"
+  maxWidth={'22.5rem'}
   maxHeight={500}
   backgroundColor="var(--background-color-secondary)"
   color="var(--text-color-secondary)"
@@ -34,7 +34,6 @@ export default function MainWidth() {
   withSliderMarks
-  sliderMarkRightOffset={-8}
   inputWidth="6.2rem"
   inputReadOnly
   sliderNumberInputProps={{ max: 15360 }}
 />
 ) : (
@@ -1,10 +1,15 @@
-import { Box } from '@chakra-ui/react';
+import { Box, Flex } from '@chakra-ui/react';
 import { getLoraModels } from 'app/socketio/actions';
 import { useAppDispatch, useAppSelector } from 'app/storeHooks';
+import IAIIconButton from 'common/components/IAIIconButton';
 import IAISimpleMenu, { IAIMenuItem } from 'common/components/IAISimpleMenu';
-import { setLorasInUse } from 'features/parameters/store/generationSlice';
+import {
+  setClearLoras,
+  setLorasInUse,
+} from 'features/parameters/store/generationSlice';
 import { useEffect } from 'react';
 import { useTranslation } from 'react-i18next';
+import { MdClear } from 'react-icons/md';

 export default function LoraManager() {
   const dispatch = useAppDispatch();
@@ -53,11 +58,20 @@ export default function LoraManager() {
   };

   return foundLoras && foundLoras?.length > 0 ? (
-    <IAISimpleMenu
-      menuItems={makeLoraItems()}
-      menuType="regular"
-      buttonText={`${t('modelManager.addLora')} (${numOfActiveLoras()})`}
-    />
+    <Flex columnGap={2}>
+      <IAISimpleMenu
+        menuItems={makeLoraItems()}
+        menuType="regular"
+        buttonText={`${t('modelManager.addLora')} (${numOfActiveLoras()})`}
+        menuButtonProps={{ width: '100%', padding: '0 1rem' }}
+      />
+      <IAIIconButton
+        icon={<MdClear />}
+        tooltip={t('modelManager.clearLoras')}
+        aria-label={t('modelManager.clearLoras')}
+        onClick={() => dispatch(setClearLoras())}
+      />
+    </Flex>
   ) : (
     <Box
       background="var(--btn-base-color)"
@@ -0,0 +1,12 @@
+import { Flex } from '@chakra-ui/react';
+import LoraManager from './LoraManager/LoraManager';
+import TextualInversionManager from './TextualInversionManager/TextualInversionManager';
+
+export default function PromptExtras() {
+  return (
+    <Flex flexDir="column" rowGap={2}>
+      <LoraManager />
+      <TextualInversionManager />
+    </Flex>
+  );
+}
@@ -1,17 +1,28 @@
-import { Box } from '@chakra-ui/react';
+import { Box, Flex } from '@chakra-ui/react';
 import { getTextualInversionTriggers } from 'app/socketio/actions';
 import { RootState } from 'app/store';
 import { useAppDispatch, useAppSelector } from 'app/storeHooks';
+import IAIIconButton from 'common/components/IAIIconButton';
 import IAISimpleMenu, { IAIMenuItem } from 'common/components/IAISimpleMenu';
-import { setTextualInversionsInUse } from 'features/parameters/store/generationSlice';
+import {
+  setAddTIToNegative,
+  setClearTextualInversions,
+  setTextualInversionsInUse,
+} from 'features/parameters/store/generationSlice';
 import { useEffect } from 'react';
 import { useTranslation } from 'react-i18next';
+import { MdArrowDownward, MdClear } from 'react-icons/md';

 export default function TextualInversionManager() {
   const dispatch = useAppDispatch();
   const textualInversionsInUse = useAppSelector(
     (state: RootState) => state.generation.textualInversionsInUse
   );

+  const negativeTextualInversionsInUse = useAppSelector(
+    (state: RootState) => state.generation.negativeTextualInversionsInUse
+  );
+
   const foundLocalTextualInversionTriggers = useAppSelector(
     (state) => state.system.foundLocalTextualInversionTriggers
   );
@@ -31,6 +42,10 @@ export default function TextualInversionManager() {
     (state) => state.ui.shouldShowHuggingFaceConcepts
   );

+  const addTIToNegative = useAppSelector(
+    (state) => state.generation.addTIToNegative
+  );
+
   const { t } = useTranslation();

   useEffect(() => {
@@ -41,14 +56,25 @@ export default function TextualInversionManager() {
     dispatch(setTextualInversionsInUse(textual_inversion));
   };

-  const renderTextualInversionOption = (textual_inversion: string) => {
-    const thisTIExists = textualInversionsInUse.includes(textual_inversion);
-    const tiExistsStyle = {
-      fontWeight: 'bold',
-      color: 'var(--context-menu-active-item)',
-    };
+  const TIPip = ({ color }: { color: string }) => {
     return (
-      <Box style={thisTIExists ? tiExistsStyle : {}}>{textual_inversion}</Box>
+      <Box width={2} height={2} borderRadius={9999} backgroundColor={color}>
+        {' '}
+      </Box>
     );
   };

+  const renderTextualInversionOption = (textual_inversion: string) => {
+    return (
+      <Flex alignItems="center" columnGap={1}>
+        {textual_inversion}
+        {textualInversionsInUse.includes(textual_inversion) && (
+          <TIPip color="var(--context-menu-active-item)" />
+        )}
+        {negativeTextualInversionsInUse.includes(textual_inversion) && (
+          <TIPip color="var(--status-bad-color)" />
+        )}
+      </Flex>
+    );
+  };
+
@@ -56,8 +82,10 @@ export default function TextualInversionManager() {
     const allTextualInversions = localTextualInversionTriggers.concat(
       huggingFaceTextualInversionConcepts
     );
-    return allTextualInversions.filter((ti) =>
-      textualInversionsInUse.includes(ti)
+    return allTextualInversions.filter(
+      (ti) =>
+        textualInversionsInUse.includes(ti) ||
+        negativeTextualInversionsInUse.includes(ti)
     ).length;
   };
@@ -93,13 +121,34 @@ export default function TextualInversionManager() {
       (foundHuggingFaceTextualInversionTriggers &&
         foundHuggingFaceTextualInversionTriggers?.length > 0 &&
         shouldShowHuggingFaceConcepts)) ? (
-    <IAISimpleMenu
-      menuItems={makeTextualInversionItems()}
-      menuType="regular"
-      buttonText={`${t(
-        'modelManager.addTextualInversionTrigger'
-      )} (${numOfActiveTextualInversions()})`}
-    />
+    <Flex columnGap={2}>
+      <IAISimpleMenu
+        menuItems={makeTextualInversionItems()}
+        menuType="regular"
+        buttonText={`${t(
+          'modelManager.addTextualInversionTrigger'
+        )} (${numOfActiveTextualInversions()})`}
+        menuButtonProps={{
+          width: '100%',
+          padding: '0 1rem',
+        }}
+      />
+      <IAIIconButton
+        icon={<MdArrowDownward />}
+        style={{
+          backgroundColor: addTIToNegative ? 'var(--btn-delete-image)' : '',
+        }}
+        tooltip={t('modelManager.addTIToNegative')}
+        aria-label={t('modelManager.addTIToNegative')}
+        onClick={() => dispatch(setAddTIToNegative(!addTIToNegative))}
+      />
+      <IAIIconButton
+        icon={<MdClear />}
+        tooltip={t('modelManager.clearTextualInversions')}
+        aria-label={t('modelManager.clearTextualInversions')}
+        onClick={() => dispatch(setClearTextualInversions())}
+      />
+    </Flex>
   ) : (
     <Box
       background="var(--btn-base-color)"
@@ -1,24 +1,43 @@
 import { FormControl, Textarea } from '@chakra-ui/react';
 import type { RootState } from 'app/store';
 import { useAppDispatch, useAppSelector } from 'app/storeHooks';
-import { setNegativePrompt } from 'features/parameters/store/generationSlice';
+import {
+  handlePromptCheckers,
+  setNegativePrompt,
+} from 'features/parameters/store/generationSlice';
 import { useTranslation } from 'react-i18next';
+import { ChangeEvent, useState } from 'react';

 const NegativePromptInput = () => {
   const negativePrompt = useAppSelector(
     (state: RootState) => state.generation.negativePrompt
   );

+  const [promptTimer, setPromptTimer] = useState<number | undefined>(undefined);
+
   const dispatch = useAppDispatch();
   const { t } = useTranslation();

+  const handleNegativeChangePrompt = (e: ChangeEvent<HTMLTextAreaElement>) => {
+    dispatch(setNegativePrompt(e.target.value));
+
+    // Debounce Prompt UI Checking
+    clearTimeout(promptTimer);
+    const newPromptTimer = window.setTimeout(() => {
+      dispatch(
+        handlePromptCheckers({ prompt: e.target.value, toNegative: true })
+      );
+    }, 500);
+    setPromptTimer(newPromptTimer);
+  };
+
   return (
     <FormControl>
       <Textarea
         id="negativePrompt"
         name="negativePrompt"
         value={negativePrompt}
-        onChange={(e) => dispatch(setNegativePrompt(e.target.value))}
+        onChange={handleNegativeChangePrompt}
         background="var(--prompt-bg-color)"
         placeholder={t('parameters.negativePrompts')}
         _placeholder={{ fontSize: '0.8rem' }}
@@ -51,7 +51,9 @@ const PromptInput = () => {
     // Debounce Prompt UI Checking
     clearTimeout(promptTimer);
     const newPromptTimer = window.setTimeout(() => {
-      dispatch(handlePromptCheckers(e.target.value));
+      dispatch(
+        handlePromptCheckers({ prompt: e.target.value, toNegative: false })
+      );
     }, 500);
     setPromptTimer(newPromptTimer);
   };
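Both prompt fields now share the same debounce pattern: every keystroke dispatches the prompt update immediately, while the heavier handlePromptCheckers scan is deferred for 500 ms, with any pending timer cancelled first, so the scan fires only once typing pauses. A minimal sketch of that pattern in Python, using threading.Timer in place of window.setTimeout (the Debouncer class and check_prompt callback are illustrative, not part of the codebase):

import threading

class Debouncer:
    """Run a callback only after `delay` seconds with no newer calls."""

    def __init__(self, delay: float):
        self.delay = delay
        self._timer: threading.Timer | None = None

    def call(self, fn, *args):
        # Cancel any pending invocation (equivalent to clearTimeout).
        if self._timer is not None:
            self._timer.cancel()
        # Schedule a fresh one (equivalent to window.setTimeout).
        self._timer = threading.Timer(self.delay, fn, args)
        self._timer.start()

def check_prompt(prompt: str, to_negative: bool):
    print(f"scanning {'negative ' if to_negative else ''}prompt: {prompt!r}")

debouncer = Debouncer(0.5)
for partial in ('a ca', 'a cat', 'a cat withLora(noir,0.75)'):
    debouncer.call(check_prompt, partial, False)  # only the last call fires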
@@ -3,7 +3,11 @@ import { getPromptAndNegative } from 'common/util/getPromptAndNegative';
 import * as InvokeAI from 'app/invokeai';
 import promptToString from 'common/util/promptToString';
 import { useAppDispatch } from 'app/storeHooks';
-import { setNegativePrompt, setPrompt } from '../store/generationSlice';
+import {
+  handlePromptCheckers,
+  setNegativePrompt,
+  setPrompt,
+} from '../store/generationSlice';

 // TECHDEBT: We have two metadata prompt formats and need to handle recalling either of them.
 // This hook provides a function to do that.
@@ -20,6 +24,10 @@ const useSetBothPrompts = () => {

     dispatch(setPrompt(prompt));
     dispatch(setNegativePrompt(negativePrompt));
+    dispatch(handlePromptCheckers({ prompt: prompt, toNegative: false }));
+    dispatch(
+      handlePromptCheckers({ prompt: negativePrompt, toNegative: true })
+    );
   };
 };
@@ -18,9 +18,11 @@ export interface GenerationState {
   prompt: string;
   negativePrompt: string;
   lorasInUse: string[];
-  localTextualInversionTriggers: string[];
   huggingFaceTextualInversionConcepts: string[];
+  localTextualInversionTriggers: string[];
   textualInversionsInUse: string[];
+  negativeTextualInversionsInUse: string[];
+  addTIToNegative: boolean;
   sampler: string;
   seamBlur: number;
   seamless: boolean;
@@ -53,9 +55,11 @@ const initialGenerationState: GenerationState = {
   prompt: '',
   negativePrompt: '',
   lorasInUse: [],
-  localTextualInversionTriggers: [],
   huggingFaceTextualInversionConcepts: [],
+  localTextualInversionTriggers: [],
   textualInversionsInUse: [],
+  negativeTextualInversionsInUse: [],
+  addTIToNegative: false,
   sampler: 'k_lms',
   seamBlur: 16,
   seamless: false,
@@ -85,15 +89,86 @@ const loraExists = (state: GenerationState, lora: string) => {
   return false;
 };

+const getTIRegex = (textualInversion: string) => {
+  if (textualInversion.includes('<' || '>')) {
+    return new RegExp(`${textualInversion}`);
+  } else {
+    return new RegExp(`\\b${textualInversion}\\b`);
+  }
+};
+
 const textualInversionExists = (
   state: GenerationState,
   textualInversion: string
 ) => {
-  const textualInversionRegex = new RegExp(textualInversion);
-  if (state.prompt.match(textualInversionRegex)) return true;
+  const textualInversionRegex = getTIRegex(textualInversion);
+
+  if (!state.addTIToNegative) {
+    if (state.prompt.match(textualInversionRegex)) return true;
+  } else {
+    if (state.negativePrompt.match(textualInversionRegex)) return true;
+  }
   return false;
 };

+const handleTypedTICheck = (
+  state: GenerationState,
+  newPrompt: string,
+  toNegative: boolean
+) => {
+  let textualInversionsInUse = !toNegative
+    ? [...state.textualInversionsInUse]
+    : [...state.negativeTextualInversionsInUse]; // Get Words In Prompt
+
+  const textualInversionRegex = /([\w<>!@%&*_-]+)/g; // Scan For Each Word
+
+  const textualInversionMatches = [
+    ...newPrompt.matchAll(textualInversionRegex),
+  ]; // Match All Words
+
+  if (textualInversionMatches.length > 0) {
+    textualInversionsInUse = []; // Reset Textual Inversions In Use
+
+    textualInversionMatches.forEach((textualInversionMatch) => {
+      const textualInversionName = textualInversionMatch[0];
+      if (
+        (!textualInversionsInUse.includes(textualInversionName) &&
+          state.localTextualInversionTriggers.includes(textualInversionName)) ||
+        state.huggingFaceTextualInversionConcepts.includes(textualInversionName)
+      ) {
+        textualInversionsInUse.push(textualInversionName); // Add Textual Inversions In Prompt
+      }
+    });
+  } else {
+    textualInversionsInUse = []; // If No Matches, Remove Textual Inversions In Use
+  }
+
+  if (!toNegative) {
+    state.textualInversionsInUse = textualInversionsInUse;
+  } else {
+    state.negativeTextualInversionsInUse = textualInversionsInUse;
+  }
+};
+
+const handleTypedLoraCheck = (state: GenerationState, newPrompt: string) => {
+  let lorasInUse = [...state.lorasInUse]; // Get Loras In Prompt
+
+  const loraRegex = /withLora\(([^\\)]+)\)/g; // Scan For Lora Syntax
+  const loraMatches = [...newPrompt.matchAll(loraRegex)]; // Match All Lora Syntaxes
+
+  if (loraMatches.length > 0) {
+    lorasInUse = []; // Reset Loras In Use
+    loraMatches.forEach((loraMatch) => {
+      const loraName = loraMatch[1].split(',')[0];
+      if (!lorasInUse.includes(loraName)) lorasInUse.push(loraName); // Add Loras In Prompt
+    });
+  } else {
+    lorasInUse = []; // If No Matches, Remove Loras In Use
+  }
+
+  state.lorasInUse = lorasInUse;
+};
+
 export const generationSlice = createSlice({
   name: 'generation',
   initialState,
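The helpers above drive the typed-syntax detection: handleTypedLoraCheck rebuilds lorasInUse from every withLora(name,weight) occurrence in the prompt, and handleTypedTICheck tokenizes the prompt into word-like tokens (including <...> style triggers) and keeps those matching a known local trigger or HuggingFace concept. A standalone Python sketch of the same two scans; the trigger list and sample prompt are invented for illustration, and the sketch uses [^)] where the TS literal writes [^\\)] (which additionally excludes backslashes):

import re

# Hypothetical trigger/concept set; in the slice these come from Redux state.
KNOWN_TRIGGERS = {'<gta5-artwork>', 'midjourney-style'}

def scan_loras(prompt: str) -> list[str]:
    # Mirrors /withLora\(([^)]+)\)/g: capture the argument list, keep the name.
    names = []
    for match in re.finditer(r'withLora\(([^)]+)\)', prompt):
        name = match.group(1).split(',')[0]
        if name not in names:
            names.append(name)
    return names

def scan_triggers(prompt: str) -> list[str]:
    # Mirrors /([\w<>!@%&*_-]+)/g: split into word-like tokens, keep known ones.
    tokens = re.findall(r'[\w<>!@%&*_-]+', prompt)
    return [t for t in dict.fromkeys(tokens) if t in KNOWN_TRIGGERS]

prompt = 'a city at night, <gta5-artwork> withLora(noir,0.75) withLora(film-grain)'
print(scan_loras(prompt))     # ['noir', 'film-grain']
print(scan_triggers(prompt))  # ['<gta5-artwork>']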
@@ -118,6 +193,20 @@ export const generationSlice = createSlice({
       state.negativePrompt = promptToString(newPrompt);
     }
   },
+  handlePromptCheckers: (
+    state,
+    action: PayloadAction<{
+      prompt: string | InvokeAI.Prompt;
+      toNegative: boolean;
+    }>
+  ) => {
+    const newPrompt = action.payload.prompt;
+
+    if (typeof newPrompt === 'string') {
+      if (!action.payload.toNegative) handleTypedLoraCheck(state, newPrompt);
+      handleTypedTICheck(state, newPrompt, action.payload.toNegative);
+    }
+  },
   setLorasInUse: (state, action: PayloadAction<string>) => {
     const newLora = action.payload;
     const loras = [...state.lorasInUse];
@@ -128,94 +217,99 @@ export const generationSlice = createSlice({
         'g'
       );
       const newPrompt = state.prompt.replaceAll(loraRegex, '');
-      state.prompt = newPrompt;
+      state.prompt = newPrompt.trim();

       if (loras.includes(newLora)) {
         const newLoraIndex = loras.indexOf(newLora);
         if (newLoraIndex > -1) loras.splice(newLoraIndex, 1);
       }
     } else {
-      state.prompt = `${state.prompt} withLora(${newLora},0.75)`;
+      state.prompt = `${state.prompt.trim()} withLora(${newLora},0.75)`;
       if (!loras.includes(newLora)) loras.push(newLora);
     }
     state.lorasInUse = loras;
   },
-  handlePromptCheckers: (
-    state,
-    action: PayloadAction<string | InvokeAI.Prompt>
-  ) => {
-    const newPrompt = action.payload;
+  setClearLoras: (state) => {
+    const lorasInUse = [...state.lorasInUse];

-    // Tackle User Typed Lora Syntax
-    let lorasInUse = [...state.lorasInUse]; // Get Loras In Prompt
-    const loraRegex = /withLora\(([^\\)]+)\)/g; // Scan For Lora Syntax
-    if (typeof newPrompt === 'string') {
-      const loraMatches = [...newPrompt.matchAll(loraRegex)]; // Match All Lora Syntaxes
-      if (loraMatches.length > 0) {
-        lorasInUse = []; // Reset Loras In Use
-        loraMatches.forEach((loraMatch) => {
-          const loraName = loraMatch[1].split(',')[0];
-          if (!lorasInUse.includes(loraName)) lorasInUse.push(loraName); // Add Loras In Prompt
-        });
-      } else {
-        lorasInUse = []; // If No Matches, Remove Loras In Use
-      }
-    }
-    state.lorasInUse = lorasInUse;
+    lorasInUse.forEach((lora) => {
+      const loraRegex = new RegExp(
+        `withLora\\(${lora},?\\s*([^\\)]+)?\\)`,
+        'g'
+      );
+      const newPrompt = state.prompt.replaceAll(loraRegex, '');
+      state.prompt = newPrompt.trim();
+    });

-    // Tackle User Typed Textual Inversion
-    let textualInversionsInUse = [...state.textualInversionsInUse]; // Get Words In Prompt
-    const textualInversionRegex = /([\w<>!@%&*_-]+)/g; // Scan For Each Word
-    if (typeof newPrompt === 'string') {
-      const textualInversionMatches = [
-        ...newPrompt.matchAll(textualInversionRegex),
-      ]; // Match All Words
-      if (textualInversionMatches.length > 0) {
-        textualInversionsInUse = []; // Reset Textual Inversions In Use
-        console.log(textualInversionMatches);
-        textualInversionMatches.forEach((textualInversionMatch) => {
-          const textualInversionName = textualInversionMatch[0];
-          console.log(textualInversionName);
-          if (
-            !textualInversionsInUse.includes(textualInversionName) &&
-            (state.localTextualInversionTriggers.includes(
-              textualInversionName
-            ) ||
-              state.huggingFaceTextualInversionConcepts.includes(
-                textualInversionName
-              ))
-          )
-            textualInversionsInUse.push(textualInversionName); // Add Textual Inversions In Prompt
-        });
-      } else {
-        textualInversionsInUse = []; // If No Matches, Remove Textual Inversions In Use
-      }
-    }
-
-    console.log([...state.huggingFaceTextualInversionConcepts]);
-    state.textualInversionsInUse = textualInversionsInUse;
+    state.lorasInUse = [];
   },
   setTextualInversionsInUse: (state, action: PayloadAction<string>) => {
     const newTextualInversion = action.payload;
+
     const textualInversions = [...state.textualInversionsInUse];
+    const negativeTextualInversions = [
+      ...state.negativeTextualInversionsInUse,
+    ];

     if (textualInversionExists(state, newTextualInversion)) {
-      const textualInversionRegex = new RegExp(newTextualInversion, 'g');
-      const newPrompt = state.prompt.replaceAll(textualInversionRegex, '');
-      state.prompt = newPrompt;
+      const textualInversionRegex = getTIRegex(newTextualInversion);

-      if (textualInversions.includes(newTextualInversion)) {
-        const newTIIndex = textualInversions.indexOf(newTextualInversion);
-        if (newTIIndex > -1) textualInversions.splice(newTIIndex, 1);
+      if (!state.addTIToNegative) {
+        const newPrompt = state.prompt.replace(textualInversionRegex, '');
+        state.prompt = newPrompt.trim();
+
+        const newTIIndex = textualInversions.indexOf(newTextualInversion);
+        if (newTIIndex > -1) textualInversions.splice(newTIIndex, 1);
+      } else {
+        const newPrompt = state.negativePrompt.replace(
+          textualInversionRegex,
+          ''
+        );
+        state.negativePrompt = newPrompt.trim();
+
+        const newTIIndex =
+          negativeTextualInversions.indexOf(newTextualInversion);
+        if (newTIIndex > -1) negativeTextualInversions.splice(newTIIndex, 1);
       }
     } else {
-      state.prompt = `${state.prompt} ${newTextualInversion}`;
-      if (!textualInversions.includes(newTextualInversion))
+      if (!state.addTIToNegative) {
+        state.prompt = `${state.prompt.trim()} ${newTextualInversion}`;
         textualInversions.push(newTextualInversion);
+      } else {
+        state.negativePrompt = `${state.negativePrompt.trim()} ${newTextualInversion}`;
+        negativeTextualInversions.push(newTextualInversion);
+      }
     }
-    state.lorasInUse = textualInversions;
+
+    state.textualInversionsInUse = textualInversions;
+    state.negativeTextualInversionsInUse = negativeTextualInversions;
   },
+  setClearTextualInversions: (state) => {
+    const textualInversions = [...state.textualInversionsInUse];
+    const negativeTextualInversions = [
+      ...state.negativeTextualInversionsInUse,
+    ];
+
+    textualInversions.forEach((ti) => {
+      const textualInversionRegex = getTIRegex(ti);
+      const newPrompt = state.prompt.replace(textualInversionRegex, '');
+      state.prompt = newPrompt.trim();
+    });
+
+    negativeTextualInversions.forEach((ti) => {
+      const textualInversionRegex = getTIRegex(ti);
+      const newPrompt = state.negativePrompt.replace(
+        textualInversionRegex,
+        ''
+      );
+      state.negativePrompt = newPrompt.trim();
+    });
+
+    state.textualInversionsInUse = [];
+    state.negativeTextualInversionsInUse = [];
+  },
+  setAddTIToNegative: (state, action: PayloadAction<boolean>) => {
+    state.addTIToNegative = action.payload;
+  },
   setLocalTextualInversionTriggers: (
     state,
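getTIRegex is what lets setTextualInversionsInUse and setClearTextualInversions strip a trigger out of the prompt text without clipping substrings of ordinary words: bracketed triggers are matched literally, plain-word triggers get \b anchors. (Note that the TS condition includes('<' || '>') evaluates '<' || '>' to '<' first, so only '<' is actually tested.) A small Python illustration of the same idea, with invented trigger names and re.escape added for safety, which the TS version does not do:

import re

def ti_regex(trigger: str) -> re.Pattern:
    # Bracketed triggers like <gta5-artwork> match literally; plain words
    # get word boundaries so 'cat' is not stripped out of 'catalog'.
    if '<' in trigger or '>' in trigger:
        return re.compile(re.escape(trigger))
    return re.compile(rf'\b{re.escape(trigger)}\b')

prompt = 'a catalog photo of a cat, <gta5-artwork>'
print(ti_regex('cat').sub('', prompt).strip())
# -> 'a catalog photo of a , <gta5-artwork>'  ('catalog' is untouched)
print(ti_regex('<gta5-artwork>').sub('', prompt).strip())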
@@ -509,11 +603,14 @@ export const {
   setPerlin,
   setPrompt,
   setNegativePrompt,
-  setLorasInUse,
-  setLocalTextualInversionTriggers,
-  setHuggingFaceTextualInversionConcepts,
-  setTextualInversionsInUse,
+  handlePromptCheckers,
+  setLorasInUse,
+  setClearLoras,
+  setHuggingFaceTextualInversionConcepts,
+  setLocalTextualInversionTriggers,
+  setTextualInversionsInUse,
+  setAddTIToNegative,
+  setClearTextualInversions,
   setSampler,
   setSeamBlur,
   setSeamless,
@@ -18,8 +18,7 @@ import PromptInput from 'features/parameters/components/PromptInput/PromptInput'
 import InvokeOptionsPanel from 'features/ui/components/InvokeParametersPanel';
 import { useTranslation } from 'react-i18next';
 import ImageToImageOptions from './ImageToImageOptions';
-import LoraManager from 'features/parameters/components/LoraManager/LoraManager';
-import TextualInversionManager from 'features/parameters/components/TextualInversionManager/TextualInversionManager';
+import PromptExtras from 'features/parameters/components/PromptInput/Extras/PromptExtras';

 export default function ImageToImagePanel() {
   const { t } = useTranslation();
@@ -65,8 +64,7 @@ export default function ImageToImagePanel() {
   <Flex flexDir="column" rowGap="0.5rem">
     <PromptInput />
     <NegativePromptInput />
-    <LoraManager />
-    <TextualInversionManager />
+    <PromptExtras />
   </Flex>
   <ProcessButtons />
   <MainSettings />
@@ -10,8 +10,6 @@ import UpscaleSettings from 'features/parameters/components/AdvancedParameters/U
 import UpscaleToggle from 'features/parameters/components/AdvancedParameters/Upscale/UpscaleToggle';
 import GenerateVariationsToggle from 'features/parameters/components/AdvancedParameters/Variations/GenerateVariations';
 import VariationsSettings from 'features/parameters/components/AdvancedParameters/Variations/VariationsSettings';
-import LoraManager from 'features/parameters/components/LoraManager/LoraManager';
-import TextualInversionManager from 'features/parameters/components/TextualInversionManager/TextualInversionManager';
 import MainSettings from 'features/parameters/components/MainParameters/MainParameters';
 import ParametersAccordion from 'features/parameters/components/ParametersAccordion';
 import ProcessButtons from 'features/parameters/components/ProcessButtons/ProcessButtons';
@@ -19,6 +17,7 @@ import NegativePromptInput from 'features/parameters/components/PromptInput/Nega
 import PromptInput from 'features/parameters/components/PromptInput/PromptInput';
 import InvokeOptionsPanel from 'features/ui/components/InvokeParametersPanel';
 import { useTranslation } from 'react-i18next';
+import PromptExtras from 'features/parameters/components/PromptInput/Extras/PromptExtras';

 export default function TextToImagePanel() {
   const { t } = useTranslation();
@@ -64,8 +63,7 @@ export default function TextToImagePanel() {
   <Flex flexDir="column" rowGap="0.5rem">
     <PromptInput />
     <NegativePromptInput />
-    <LoraManager />
-    <TextualInversionManager />
+    <PromptExtras />
   </Flex>
   <ProcessButtons />
   <MainSettings />
@@ -10,8 +10,6 @@ import SymmetryToggle from 'features/parameters/components/AdvancedParameters/Ou
 import SeedSettings from 'features/parameters/components/AdvancedParameters/Seed/SeedSettings';
 import GenerateVariationsToggle from 'features/parameters/components/AdvancedParameters/Variations/GenerateVariations';
 import VariationsSettings from 'features/parameters/components/AdvancedParameters/Variations/VariationsSettings';
-import LoraManager from 'features/parameters/components/LoraManager/LoraManager';
-import TextualInversionManager from 'features/parameters/components/TextualInversionManager/TextualInversionManager';
 import MainSettings from 'features/parameters/components/MainParameters/MainParameters';
 import ParametersAccordion from 'features/parameters/components/ParametersAccordion';
 import ProcessButtons from 'features/parameters/components/ProcessButtons/ProcessButtons';
@@ -19,6 +17,7 @@ import NegativePromptInput from 'features/parameters/components/PromptInput/Nega
 import PromptInput from 'features/parameters/components/PromptInput/PromptInput';
 import InvokeOptionsPanel from 'features/ui/components/InvokeParametersPanel';
 import { useTranslation } from 'react-i18next';
+import PromptExtras from 'features/parameters/components/PromptInput/Extras/PromptExtras';

 export default function UnifiedCanvasPanel() {
   const { t } = useTranslation();
@@ -75,8 +74,7 @@ export default function UnifiedCanvasPanel() {
   <Flex flexDir="column" rowGap="0.5rem">
     <PromptInput />
     <NegativePromptInput />
-    <LoraManager />
-    <TextualInversionManager />
+    <PromptExtras />
   </Flex>
   <ProcessButtons />
   <MainSettings />
File diff suppressed because one or more lines are too long
@@ -17,6 +17,8 @@ if sys.platform == "darwin":

 import pyparsing  # type: ignore

+print(f'DEBUG: [1] All system modules imported', file=sys.stderr)
+
 import ldm.invoke

 from ..generate import Generate
@@ -31,13 +33,21 @@ from .pngwriter import PngWriter, retrieve_metadata, write_metadata
 from .readline import Completer, get_completer
 from ..util import url_attachment_name

+print(f'DEBUG: [2] All invokeai modules imported', file=sys.stderr)
+
 # global used in multiple functions (fix)
 infile = None

 def main():
     """Initialize command-line parsers and the diffusion model"""
     global infile

+    print('DEBUG: [3] Entered main()', file=sys.stderr)
+    print('DEBUG: INVOKEAI ENVIRONMENT:')
+    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
+    print("\n".join([f'{x}:{os.environ[x]}' for x in os.environ.keys()]))
+    print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
+
     opt = Args()
     args = opt.parse_args()
     if not args:
@@ -66,9 +76,13 @@ def main():
     Globals.sequential_guidance = args.sequential_guidance
     Globals.ckpt_convert = True  # always true as of 2.3.4 for LoRA support

+    print(f'DEBUG: [4] Globals initialized', file=sys.stderr)
+
     # run any post-install patches needed
     run_patches()

+    print(f'DEBUG: [5] Patches run', file=sys.stderr)
+
     print(f">> Internet connectivity is {Globals.internet_available}")

     if not args.conf:
@@ -84,8 +98,9 @@ def main():
     # loading here to avoid long delays on startup
     # these two lines prevent a horrible warning message from appearing
     # when the frozen CLIP tokenizer is imported
+    print(f'DEBUG: [6] Importing torch modules', file=sys.stderr)
     import transformers  # type: ignore
     from ldm.generate import Generate

     transformers.logging.set_verbosity_error()
@@ -93,6 +108,7 @@ def main():

     diffusers.logging.set_verbosity_error()

+    print(f'DEBUG: [7] loading restoration models', file=sys.stderr)
     # Loading Face Restoration and ESRGAN Modules
     gfpgan, codeformer, esrgan = load_face_restoration(opt)

@@ -114,6 +130,7 @@ def main():
     Globals.lora_models_dir = opt.lora_path

     # migrate legacy models
+    print(f'DEBUG: [8] migrating models', file=sys.stderr)
     ModelManager.migrate_models()

     # load the infile as a list of lines
@@ -131,6 +148,7 @@ def main():

     model = opt.model or retrieve_last_used_model()

+    print(f'DEBUG: [9] Creating generate object', file=sys.stderr)
     # creating a Generate object:
     try:
         gen = Generate(
@@ -157,6 +175,7 @@ def main():
         print(">> changed to seamless tiling mode")

     # preload the model
+    print(f'DEBUG: [10] Loading default model', file=sys.stderr)
     try:
         gen.load_model()
     except KeyError:
@@ -204,6 +223,7 @@ def main():
 # TODO: main_loop() has gotten busy. Needs to be refactored.
 def main_loop(gen, opt, completer):
     """prompt/read/execute loop"""
+    print(f'DEBUG: [11] In main loop', file=sys.stderr)
     global infile
     done = False
     doneAfterInFile = infile is not None
@@ -1322,15 +1342,16 @@ def install_missing_config_files():
     """
     install ckpt configuration files that may have been added to the
     distro after original root directory configuration
     """
-    import invokeai.configs as conf
-    from shutil import copyfile
+    pass
+    # import invokeai.configs as conf
+    # from shutil import copyfile

-    root_configs = Path(global_config_dir(), 'stable-diffusion')
-    repo_configs = Path(conf.__path__[0], 'stable-diffusion')
-    for src in repo_configs.iterdir():
-        dest = root_configs / src.name
-        if not dest.exists():
-            copyfile(src,dest)
+    # root_configs = Path(global_config_dir(), 'stable-diffusion')
+    # repo_configs = Path(conf.__path__[0], 'stable-diffusion')
+    # for src in repo_configs.iterdir():
+    #     dest = root_configs / src.name
+    #     if not dest.exists():
+    #         copyfile(src,dest)

 def do_version_update(root_version: version.Version, app_version: Union[str, version.Version]):
     """
@@ -1 +1 @@
-__version__='2.3.4rc1'
+__version__='2.3.4'
@@ -12,7 +12,8 @@ from typing import Union, Optional, Any
 from transformers import CLIPTokenizer

 from compel import Compel
-from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser
+from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser, \
+    Conjunction
 from .devices import torch_dtype
 from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from ldm.invoke.globals import Globals
@@ -55,22 +56,25 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_l
     # get rid of any newline characters
     prompt_string = prompt_string.replace("\n", " ")
     positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)

     legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
-    positive_prompt: FlattenedPrompt|Blend
-    lora_conditions = None
+    positive_conjunction: Conjunction
     if legacy_blend is not None:
-        positive_prompt = legacy_blend
+        positive_conjunction = legacy_blend
     else:
         positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
-        positive_prompt = positive_conjunction.prompts[0]
-        should_use_lora_manager = True
-        lora_weights = positive_conjunction.lora_weights
-        if model.peft_manager:
-            should_use_lora_manager = model.peft_manager.should_use(lora_weights)
-            if not should_use_lora_manager:
-                model.peft_manager.set_loras(lora_weights)
-        if model.lora_manager and should_use_lora_manager:
-            lora_conditions = model.lora_manager.set_loras_conditions(lora_weights)
+    positive_prompt = positive_conjunction.prompts[0]
+
+    should_use_lora_manager = True
+    lora_weights = positive_conjunction.lora_weights
+    lora_conditions = None
+    if model.peft_manager:
+        should_use_lora_manager = model.peft_manager.should_use(lora_weights)
+        if not should_use_lora_manager:
+            model.peft_manager.set_loras(lora_weights)
+    if model.lora_manager and should_use_lora_manager:
+        lora_conditions = model.lora_manager.set_loras_conditions(lora_weights)

     negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
     negative_prompt: FlattenedPrompt | Blend = negative_conjunction.prompts[0]
@@ -93,12 +97,12 @@ def get_prompt_structure(prompt_string, skip_normalize_legacy_blend: bool = Fals
                          Union[FlattenedPrompt, Blend], FlattenedPrompt):
     positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)
     legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
-    positive_prompt: FlattenedPrompt|Blend
+    positive_conjunction: Conjunction
     if legacy_blend is not None:
-        positive_prompt = legacy_blend
+        positive_conjunction = legacy_blend
     else:
         positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
-        positive_prompt = positive_conjunction.prompts[0]
+    positive_prompt = positive_conjunction.prompts[0]
     negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
     negative_prompt: FlattenedPrompt|Blend = negative_conjunction.prompts[0]
@@ -217,18 +221,26 @@ def log_tokenization_for_text(text, tokenizer, display_label=None):
         print(f'{discarded}\x1b[0m')


-def try_parse_legacy_blend(text: str, skip_normalize: bool=False) -> Optional[Blend]:
+def try_parse_legacy_blend(text: str, skip_normalize: bool=False) -> Optional[Conjunction]:
     weighted_subprompts = split_weighted_subprompts(text, skip_normalize=skip_normalize)
     if len(weighted_subprompts) <= 1:
         return None
     strings = [x[0] for x in weighted_subprompts]
-    weights = [x[1] for x in weighted_subprompts]

     pp = PromptParser()
     parsed_conjunctions = [pp.parse_conjunction(x) for x in strings]
-    flattened_prompts = [x.prompts[0] for x in parsed_conjunctions]
+    flattened_prompts = []
+    weights = []
+    loras = []
+    for i, x in enumerate(parsed_conjunctions):
+        if len(x.prompts)>0:
+            flattened_prompts.append(x.prompts[0])
+            weights.append(weighted_subprompts[i][1])
+        if len(x.lora_weights)>0:
+            loras.extend(x.lora_weights)

-    return Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)
+    return Conjunction([Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)],
+                       lora_weights = loras)


 def split_weighted_subprompts(text, skip_normalize=False)->list:
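The reworked try_parse_legacy_blend now returns a Conjunction wrapping the Blend, so any withLora(...) terms found inside the blended subprompts are carried out through lora_weights instead of being dropped, and both the legacy and non-legacy paths hand callers the same shape. A rough standalone sketch of that shape change, using plain dataclasses as stand-ins for compel's Blend and Conjunction types (the weighted subprompt input shown is assumed, not quoted from the parser):

from dataclasses import dataclass, field

# Stand-ins for compel's types, just to show the returned shape.
@dataclass
class Blend:
    prompts: list
    weights: list
    normalize_weights: bool = True

@dataclass
class Conjunction:
    prompts: list
    lora_weights: list = field(default_factory=list)

def legacy_blend(subprompts: list[tuple[str, float]], loras: list) -> Conjunction:
    # The old code returned the bare Blend; the new code wraps it so callers
    # can uniformly read .prompts[0] and .lora_weights off a Conjunction.
    strings = [s for s, _ in subprompts]
    weights = [w for _, w in subprompts]
    return Conjunction([Blend(strings, weights)], lora_weights=loras)

c = legacy_blend([('a sunny meadow', 1.0), ('a stormy sea', 0.5)], loras=[])
print(c.prompts[0].weights)  # [1.0, 0.5]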
@@ -4,14 +4,13 @@ pip install <path_to_git_source>.
 '''
 import os
 import platform
+import psutil
 import requests
 from rich import box, print
-from rich.console import Console, Group, group
+from rich.console import Console, group
 from rich.panel import Panel
 from rich.prompt import Prompt
 from rich.style import Style
-from rich.syntax import Syntax
 from rich.text import Text

 from ldm.invoke import __version__
@@ -32,6 +31,19 @@ else:
 def get_versions()->dict:
     return requests.get(url=INVOKE_AI_REL).json()

+def invokeai_is_running()->bool:
+    for p in psutil.process_iter():
+        try:
+            cmdline = p.cmdline()
+            matches = [x for x in cmdline if x.endswith(('invokeai','invokeai.exe'))]
+            if matches:
+                print(f':exclamation: [bold red]An InvokeAI instance appears to be running as process {p.pid}[/red bold]')
+                return True
+        except psutil.AccessDenied:
+            continue
+    return False
+
+
 def welcome(versions: dict):

     @group()
@@ -62,6 +74,10 @@ def welcome(versions: dict):

 def main():
     versions = get_versions()
+    if invokeai_is_running():
+        print(f':exclamation: [bold red]Please terminate all running instances of InvokeAI before updating.[/red bold]')
+        return
+
     welcome(versions)

     tag = None
@@ -32,7 +32,8 @@ def expand_prompts(
     template_file: Path,
     run_invoke: bool = False,
     invoke_model: str = None,
-    invoke_outdir: Path = None,
+    invoke_outdir: str = None,
+    invoke_root: str = None,
     processes_per_gpu: int = 1,
 ):
     """
@@ -61,6 +62,8 @@ def expand_prompts(
     invokeai_args = [shutil.which("invokeai"), "--from_file", "-"]
     if invoke_model:
         invokeai_args.extend(("--model", invoke_model))
+    if invoke_root:
+        invokeai_args.extend(("--root", invoke_root))
     if invoke_outdir:
         outdir = os.path.expanduser(invoke_outdir)
         invokeai_args.extend(("--outdir", outdir))
@@ -79,6 +82,11 @@ def expand_prompts(
     )
     import ldm.invoke.CLI

+    print(f'DEBUG: BATCH PARENT ENVIRONMENT:')
+    print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
+    print("\n".join([f'{x}:{os.environ[x]}' for x in os.environ.keys()]))
+    print('<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
+
     parent_conn, child_conn = Pipe()
     children = set()
     for i in range(processes_to_launch):
@@ -99,8 +107,9 @@ def expand_prompts(
         sequence = 0
         for command in commands:
             sequence += 1
-            parent_conn.send(
-                command + f' --fnformat="dp.{sequence:04}.{{prompt}}.png"'
+            format = _get_fn_format(outdir, sequence)
+            parent_conn.send_bytes(
+                (command + f' --fnformat="{format}"').encode('utf-8')
             )
         parent_conn.close()
     else:
@@ -110,7 +119,27 @@ def expand_prompts(
         for p in children:
             p.terminate()

+def _dummy_cli_main():
+    counter = 0
+    while line := sys.stdin.readline():
+        print(f'[{counter}] {os.getpid()} got command {line.rstrip()}\n')
+        counter += 1
+        time.sleep(1)
+
+def _get_fn_format(directory:str, sequence:int)->str:
+    """
+    Get a filename that doesn't exceed filename length restrictions
+    on the current platform.
+    """
+    try:
+        max_length = os.pathconf(directory,'PC_NAME_MAX')
+    except:
+        max_length = 255
+    prefix = f'dp.{sequence:04}.'
+    suffix = '.png'
+    max_length -= len(prefix)+len(suffix)
+    return f'{prefix}{{prompt:0.{max_length}}}{suffix}'
+
 class MessageToStdin(object):
     def __init__(self, connection: Connection):
         self.connection = connection
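_get_fn_format budgets the per-file name length: it asks the filesystem for its limit via os.pathconf (falling back to 255 where that is unavailable), subtracts the dp.NNNN. prefix and .png suffix, and embeds the remainder as a string precision in the --fnformat template so the prompt text gets truncated instead of overflowing. A quick check of what the template does once expanded (the 255-character limit here is an assumption):

# Reproduces the template _get_fn_format builds, assuming a 255-char limit.
max_length = 255
prefix, suffix = 'dp.0001.', '.png'
budget = max_length - len(prefix) - len(suffix)

template = f'{prefix}{{prompt:0.{budget}}}{suffix}'
print(template)  # dp.0001.{prompt:0.243}.png

# '{prompt:0.N}' is ordinary str.format precision: it truncates the string to N chars.
print(template.format(prompt='a very long prompt ' * 20)[:40])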
@@ -119,7 +148,7 @@ class MessageToStdin(object):
     def readline(self) -> str:
         try:
            if len(self.linebuffer) == 0:
-                message = self.connection.recv()
+                message = self.connection.recv_bytes().decode('utf-8')
                self.linebuffer = message.split("\n")
            result = self.linebuffer.pop(0)
            return result
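This mirrors the parent-side change in the earlier hunk: parent_conn.send_bytes(...) pairs with recv_bytes().decode('utf-8') here, exchanging UTF-8 frames over the Pipe instead of pickled Python objects. A minimal self-contained sketch of that framing (the worker function and sample commands are illustrative):

from multiprocessing import Pipe, Process

def worker(conn):
    # Mirror MessageToStdin: receive raw bytes and decode, not conn.recv().
    while True:
        line = conn.recv_bytes().decode('utf-8')
        if line == 'QUIT':
            break
        print(f'worker got command: {line}')

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=worker, args=(child_conn,))
    p.start()
    # Parent sends encoded bytes, matching parent_conn.send_bytes() above.
    parent_conn.send_bytes('a cat --fnformat="dp.0001.{prompt:0.243}.png"'.encode('utf-8'))
    parent_conn.send_bytes(b'QUIT')
    p.join()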
@@ -165,9 +194,9 @@ def _run_invoke(
     os.environ["CUDA_VISIBLE_DEVICES"] = f"{gpu}"
     sys.argv = args
     sys.stdin = MessageToStdin(conn_in)
-    sys.stdout = FilterStream(sys.stdout, include=re.compile("^\[\d+\]"))
-    with open(logfile, "w") as stderr, redirect_stderr(stderr):
-        entry_point()
+    # sys.stdout = FilterStream(sys.stdout, include=re.compile("^\[\d+\]"))
+    # with open(logfile, "w") as stderr, redirect_stderr(stderr):
+    entry_point()
@@ -224,6 +253,10 @@ def main():
         default=1,
         help="When executing invokeai, how many parallel processes to execute per CUDA GPU.",
     )
+    parser.add_argument(
+        '--root_dir',
+        default=None,
+        help='Path to directory containing "models", "outputs" and "configs". If not present will read from environment variable INVOKEAI_ROOT. Defaults to ~/invokeai' )
     opt = parser.parse_args()

     if opt.example:
@@ -247,6 +280,7 @@ def main():
         run_invoke=opt.invoke,
         invoke_model=opt.model,
         invoke_outdir=opt.outdir,
+        invoke_root=opt.root,
         processes_per_gpu=opt.processes_per_gpu,
     )
@@ -255,8 +255,8 @@ class Inpaint(Img2Img):
         pipeline.scheduler = sampler

         # todo: support cross-attention control
-        uc, c, _ = conditioning
-        conditioning_data = (ConditioningData(uc, c, cfg_scale)
+        uc, c, extra_conditioning_info = conditioning
+        conditioning_data = (ConditioningData(uc, c, cfg_scale, extra_conditioning_info)
                              .add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta))