mirror of
https://github.com/invoke-ai/InvokeAI.git
synced 2026-04-23 03:00:31 -04:00
add readiness logic to video tab
This commit is contained in:
committed by
Mary Hipp Rogers
parent
7e2999649a
commit
cc9a85f7d0
@@ -24,6 +24,8 @@ export const buildRunwayVideoGraph = (arg: GraphBuilderArg): GraphBuilderReturn
|
||||
|
||||
const params = selectParamsSlice(state);
|
||||
const prompts = selectPresetModifiedPrompts(state);
|
||||
assert(prompts.positive.length > 0, 'Runway video requires positive prompt to have at least one character');
|
||||
|
||||
const startingFrameImage = selectStartingFrameImage(state);
|
||||
|
||||
assert(startingFrameImage, 'Video starting frame is required for runway video generation');
|
||||
|
||||
@@ -47,10 +47,35 @@ const TooltipContent = memo(({ prepend = false }: { prepend?: boolean }) => {
|
||||
return <UpscaleTabTooltipContent prepend={prepend} />;
|
||||
}
|
||||
|
||||
if (activeTab === 'video') {
|
||||
return <VideoTabTooltipContent prepend={prepend} />;
|
||||
}
|
||||
|
||||
return null;
|
||||
});
|
||||
TooltipContent.displayName = 'TooltipContent';
|
||||
|
||||
/**
 * Tooltip content for the enqueue button while the Video tab is active.
 *
 * Reads the shared readiness atoms: `$isReadyToEnqueue` for the ready flag and
 * `$reasonsWhyCannotEnqueue` for the list of blocking reasons, and renders the
 * reasons section only when at least one reason exists.
 *
 * @param prepend - Whether the item will be prepended to the queue; forwarded
 *   to `IsReadyText` so its wording matches the action.
 */
const VideoTabTooltipContent = memo(({ prepend = false }: { prepend?: boolean }) => {
  const isReady = useStore($isReadyToEnqueue);
  const reasons = useStore($reasonsWhyCannotEnqueue);

  return (
    <Flex flexDir="column" gap={1}>
      <IsReadyText isReady={isReady} prepend={prepend} />
      {/* NOTE(review): component is named for the canvas/upscale tabs — confirm it is
          intended to be reused on the video tab, or add a video-specific variant. */}
      <QueueCountPredictionCanvasOrUpscaleTab />
      {reasons.length > 0 && (
        <>
          <StyledDivider />
          <ReasonsList reasons={reasons} />
        </>
      )}
      <StyledDivider />
      <AddingToText />
    </Flex>
  );
});
VideoTabTooltipContent.displayName = 'VideoTabTooltipContent';
|
||||
|
||||
const CanvasTabTooltipContent = memo(({ prepend = false }: { prepend?: boolean }) => {
|
||||
const isReady = useStore($isReadyToEnqueue);
|
||||
const reasons = useStore($reasonsWhyCannotEnqueue);
|
||||
|
||||
@@ -35,6 +35,7 @@ import { useIsModelDisabled } from 'features/parameters/hooks/useIsModelDisabled
|
||||
import type { UpscaleState } from 'features/parameters/store/upscaleSlice';
|
||||
import { selectUpscaleSlice } from 'features/parameters/store/upscaleSlice';
|
||||
import { SUPPORTS_REF_IMAGES_BASE_MODELS } from 'features/parameters/types/constants';
|
||||
import { selectVideoSlice, type VideoState } from 'features/parameters/store/videoSlice';
|
||||
import type { ParameterModel } from 'features/parameters/types/parameterSchemas';
|
||||
import { getGridSize } from 'features/parameters/util/optimalDimension';
|
||||
import { promptExpansionApi, type PromptExpansionRequestState } from 'features/prompt/PromptExpansion/state';
|
||||
@@ -92,7 +93,8 @@ const debouncedUpdateReasons = debounce(
|
||||
store: AppStore,
|
||||
isInPublishFlow: boolean,
|
||||
isChatGPT4oHighModelDisabled: (model: ParameterModel) => boolean,
|
||||
promptExpansionRequest: PromptExpansionRequestState
|
||||
promptExpansionRequest: PromptExpansionRequestState,
|
||||
video: VideoState
|
||||
) => {
|
||||
if (tab === 'generate') {
|
||||
const model = selectMainModelConfig(store.getState());
|
||||
@@ -143,6 +145,15 @@ const debouncedUpdateReasons = debounce(
|
||||
promptExpansionRequest,
|
||||
});
|
||||
$reasonsWhyCannotEnqueue.set(reasons);
|
||||
} else if (tab === 'video') {
|
||||
const reasons = getReasonsWhyCannotEnqueueVideoTab({
|
||||
isConnected,
|
||||
video,
|
||||
params,
|
||||
promptExpansionRequest,
|
||||
dynamicPrompts,
|
||||
});
|
||||
$reasonsWhyCannotEnqueue.set(reasons);
|
||||
} else {
|
||||
$reasonsWhyCannotEnqueue.set(EMPTY_ARRAY);
|
||||
}
|
||||
@@ -173,7 +184,7 @@ export const useReadinessWatcher = () => {
|
||||
const isInPublishFlow = useStore($isInPublishFlow);
|
||||
const { isChatGPT4oHighModelDisabled } = useIsModelDisabled();
|
||||
const promptExpansionRequest = useStore(promptExpansionApi.$state);
|
||||
|
||||
const video = useAppSelector(selectVideoSlice);
|
||||
useEffect(() => {
|
||||
debouncedUpdateReasons(
|
||||
tab,
|
||||
@@ -195,7 +206,8 @@ export const useReadinessWatcher = () => {
|
||||
store,
|
||||
isInPublishFlow,
|
||||
isChatGPT4oHighModelDisabled,
|
||||
promptExpansionRequest
|
||||
promptExpansionRequest,
|
||||
video
|
||||
);
|
||||
}, [
|
||||
store,
|
||||
@@ -218,11 +230,44 @@ export const useReadinessWatcher = () => {
|
||||
isInPublishFlow,
|
||||
isChatGPT4oHighModelDisabled,
|
||||
promptExpansionRequest,
|
||||
video,
|
||||
]);
|
||||
};
|
||||
|
||||
/** Builds the "system disconnected" readiness reason using the supplied translator. */
const disconnectedReason = (t: typeof i18n.t) => {
  return { content: t('parameters.invoke.systemDisconnected') };
};
|
||||
|
||||
const getReasonsWhyCannotEnqueueVideoTab = (arg: {
|
||||
isConnected: boolean;
|
||||
video: VideoState;
|
||||
params: ParamsState;
|
||||
dynamicPrompts: DynamicPromptsState;
|
||||
promptExpansionRequest: PromptExpansionRequestState;
|
||||
}) => {
|
||||
const { isConnected, video, params, dynamicPrompts, promptExpansionRequest } = arg;
|
||||
const { positivePrompt } = params;
|
||||
const reasons: Reason[] = [];
|
||||
|
||||
if (!isConnected) {
|
||||
reasons.push(disconnectedReason(i18n.t));
|
||||
}
|
||||
|
||||
if (dynamicPrompts.prompts.length === 0 && getShouldProcessPrompt(positivePrompt)) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noPrompts') });
|
||||
}
|
||||
|
||||
if (promptExpansionRequest.isPending) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.promptExpansionPending') });
|
||||
} else if (promptExpansionRequest.isSuccess) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.promptExpansionResultPending') });
|
||||
}
|
||||
|
||||
if (!video.startingFrameImage?.image_name) {
|
||||
reasons.push({ content: i18n.t('parameters.invoke.noStartingFrameImage') });
|
||||
}
|
||||
|
||||
return reasons;
|
||||
};
|
||||
|
||||
const getReasonsWhyCannotEnqueueGenerateTab = (arg: {
|
||||
isConnected: boolean;
|
||||
model: MainModelConfig | null | undefined;
|
||||
|
||||
Reference in New Issue
Block a user