diff --git a/README.md b/README.md index 3b7d2ad4c4..ffd2e0c542 100644 --- a/README.md +++ b/README.md @@ -108,13 +108,14 @@ you can try starting `dream.py` with the `--precision=float32` flag: - [Image To Image](docs/features/IMG2IMG.md) - [Inpainting Support](docs/features/INPAINTING.md) - [Outpainting Support](docs/features/OUTPAINTING.md) -- [GFPGAN and Real-ESRGAN Support](docs/features/UPSCALE.md) +- [Upscaling, face-restoration and outpainting](docs/features/POSTPROCESS.md) - [Seamless Tiling](docs/features/OTHER.md#seamless-tiling) - [Google Colab](docs/features/OTHER.md#google-colab) - [Web Server](docs/features/WEB.md) - [Reading Prompts From File](docs/features/PROMPTS.md#reading-prompts-from-a-file) - [Shortcut: Reusing Seeds](docs/features/OTHER.md#shortcuts-reusing-seeds) - [Weighted Prompts](docs/features/PROMPTS.md#weighted-prompts) +- [Thresholding and Perlin Noise Initialization Options](/docs/features/OTHER.md#thresholding-and-perlin-noise-initialization-options) - [Negative/Unconditioned Prompts](docs/features/PROMPTS.md#negative-and-unconditioned-prompts) - [Variations](docs/features/VARIATIONS.md) - [Personalizing Text-to-Image Generation](docs/features/TEXTUAL_INVERSION.md) diff --git a/backend/invoke_ai_web_server.py b/backend/invoke_ai_web_server.py index 56696e6b5c..dced69f72f 100644 --- a/backend/invoke_ai_web_server.py +++ b/backend/invoke_ai_web_server.py @@ -3,6 +3,8 @@ import glob import os import shutil import mimetypes +import traceback +import math from flask import Flask, redirect, send_from_directory from flask_socketio import SocketIO @@ -16,6 +18,7 @@ from ldm.dream.conditioning import split_weighted_subprompts from backend.modules.parameters import parameters_to_command + # Loading Arguments opt = Args() args = opt.parse_args() @@ -39,25 +42,25 @@ class InvokeAIWebServer: def setup_flask(self): # Fix missing mimetypes on Windows - mimetypes.add_type("application/javascript", ".js") - mimetypes.add_type("text/css", ".css") + mimetypes.add_type('application/javascript', '.js') + mimetypes.add_type('text/css', '.css') # Socket IO logger = True if args.web_verbose else False engineio_logger = True if args.web_verbose else False max_http_buffer_size = 10000000 # CORS Allowed Setup - cors_allowed_origins = ['http://127.0.0.1:5173', 'http://localhost:5173'] + cors_allowed_origins = [ + 'http://127.0.0.1:5173', + 'http://localhost:5173', + 'http://localhost:9090' + ] additional_allowed_origins = ( opt.cors if opt.cors else [] ) # additional CORS allowed origins - if self.host == '127.0.0.1': - cors_allowed_origins.extend( - [ - f'http://{self.host}:{self.port}', - f'http://localhost:{self.port}', - ] - ) + cors_allowed_origins.append(f'http://{self.host}:{self.port}') + if self.host == '127.0.0.1' or self.host == '0.0.0.0': + cors_allowed_origins.append(f'http://localhost:{self.port}') cors_allowed_origins = ( cors_allowed_origins + additional_allowed_origins ) @@ -76,6 +79,10 @@ class InvokeAIWebServer: ping_timeout=60, ) + # Keep Server Alive Route + @self.app.route('/flaskwebgui-keep-server-alive') + def keep_alive(): + return {'message': 'Server Running'} # Outputs Route self.app.config['OUTPUTS_FOLDER'] = os.path.abspath(args.outdir) @@ -86,7 +93,6 @@ class InvokeAIWebServer: self.app.config['OUTPUTS_FOLDER'], file_path ) - # Base Route @self.app.route('/') def serve(): @@ -99,18 +105,42 @@ class InvokeAIWebServer: self.load_socketio_listeners(self.socketio) - print('>> Started Invoke AI Web Server!') - if self.host == '0.0.0.0': - print( - f"Point your 
browser at http://localhost:{self.port} or use the host's DNS name or IP address." - ) - else: - print( - '>> Default host address now 127.0.0.1 (localhost). Use --host 0.0.0.0 to bind any address.' - ) - print(f'>> Point your browser at http://{self.host}:{self.port}') + if args.gui: + print('>> Launching Invoke AI GUI') + close_server_on_exit = True + if args.web_develop: + close_server_on_exit = False + try: + from flaskwebgui import FlaskUI + FlaskUI( + app=self.app, + socketio=self.socketio, + start_server='flask-socketio', + host=self.host, + port=self.port, + width=1600, + height=1000, + idle_interval=10, + close_server_on_exit=close_server_on_exit, + ).run() + except KeyboardInterrupt: + import sys - self.socketio.run(app=self.app, host=self.host, port=self.port) + sys.exit(0) + else: + print('>> Started Invoke AI Web Server!') + if self.host == '0.0.0.0': + print( + f"Point your browser at http://localhost:{self.port} or use the host's DNS name or IP address." + ) + else: + print( + '>> Default host address now 127.0.0.1 (localhost). Use --host 0.0.0.0 to bind any address.' + ) + print( + f'>> Point your browser at http://{self.host}:{self.port}' + ) + self.socketio.run(app=self.app, host=self.host, port=self.port) def setup_app(self): self.result_url = 'outputs/' @@ -146,276 +176,276 @@ class InvokeAIWebServer: config = self.get_system_config() socketio.emit('systemConfig', config) - @socketio.on('requestImages') - def handle_request_images(page=1, offset=0, last_mtime=None): - chunk_size = 50 + @socketio.on('requestLatestImages') + def handle_request_latest_images(latest_mtime): + try: + paths = glob.glob(os.path.join(self.result_path, '*.png')) - if last_mtime: - print(f'>> Latest images requested') - else: - print( - f'>> Page {page} of images requested (page size {chunk_size} offset {offset})' + image_paths = sorted( + paths, key=lambda x: os.path.getmtime(x), reverse=True ) - paths = glob.glob(os.path.join(self.result_path, '*.png')) - sorted_paths = sorted( - paths, key=lambda x: os.path.getmtime(x), reverse=True - ) - - if last_mtime: - image_paths = filter( - lambda x: os.path.getmtime(x) > last_mtime, sorted_paths - ) - else: - - image_paths = sorted_paths[ - slice( - chunk_size * (page - 1) + offset, - chunk_size * page + offset, + image_paths = list( + filter( + lambda x: os.path.getmtime(x) > latest_mtime, + image_paths, ) - ] - page = page + 1 - - image_array = [] - - for path in image_paths: - metadata = retrieve_metadata(path) - image_array.append( - { - 'url': self.get_url_from_image_path(path), - 'mtime': os.path.getmtime(path), - 'metadata': metadata['sd-metadata'], - } ) - socketio.emit( - 'galleryImages', - { - 'images': image_array, - 'nextPage': page, - 'offset': offset, - 'onlyNewImages': True if last_mtime else False, - }, - ) + image_array = [] + + for path in image_paths: + metadata = retrieve_metadata(path) + image_array.append( + { + 'url': self.get_url_from_image_path(path), + 'mtime': os.path.getmtime(path), + 'metadata': metadata['sd-metadata'], + } + ) + + socketio.emit( + 'galleryImages', + { + 'images': image_array, + }, + ) + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') + + traceback.print_exc() + print('\n') + + @socketio.on('requestImages') + def handle_request_images(earliest_mtime=None): + try: + page_size = 50 + + paths = glob.glob(os.path.join(self.result_path, '*.png')) + + image_paths = sorted( + paths, key=lambda x: os.path.getmtime(x), reverse=True + ) + + if earliest_mtime: + image_paths = 
list( + filter( + lambda x: os.path.getmtime(x) < earliest_mtime, + image_paths, + ) + ) + + areMoreImagesAvailable = len(image_paths) >= page_size + image_paths = image_paths[slice(0, page_size)] + + image_array = [] + + for path in image_paths: + metadata = retrieve_metadata(path) + image_array.append( + { + 'url': self.get_url_from_image_path(path), + 'mtime': os.path.getmtime(path), + 'metadata': metadata['sd-metadata'], + } + ) + + socketio.emit( + 'galleryImages', + { + 'images': image_array, + 'areMoreImagesAvailable': areMoreImagesAvailable, + }, + ) + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') + + traceback.print_exc() + print('\n') @socketio.on('generateImage') def handle_generate_image_event( generation_parameters, esrgan_parameters, gfpgan_parameters ): - print( - f'>> Image generation requested: {generation_parameters}\nESRGAN parameters: {esrgan_parameters}\nGFPGAN parameters: {gfpgan_parameters}' - ) - self.generate_images( - generation_parameters, esrgan_parameters, gfpgan_parameters - ) + try: + print( + f'>> Image generation requested: {generation_parameters}\nESRGAN parameters: {esrgan_parameters}\nGFPGAN parameters: {gfpgan_parameters}' + ) + self.generate_images( + generation_parameters, esrgan_parameters, gfpgan_parameters + ) + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') - @socketio.on('runESRGAN') - def handle_run_esrgan_event(original_image, esrgan_parameters): - print( - f'>> ESRGAN upscale requested for "{original_image["url"]}": {esrgan_parameters}' - ) - progress = { - 'currentStep': 1, - 'totalSteps': 1, - 'currentIteration': 1, - 'totalIterations': 1, - 'currentStatus': 'Preparing', - 'isProcessing': True, - 'currentStatusHasSteps': False, - } + traceback.print_exc() + print('\n') - socketio.emit('progressUpdate', progress) - eventlet.sleep(0) + @socketio.on('runPostprocessing') + def handle_run_postprocessing( + original_image, postprocessing_parameters + ): + try: + print( + f'>> Postprocessing requested for "{original_image["url"]}": {postprocessing_parameters}' + ) - original_image_path = self.get_image_path_from_url(original_image['url']) - # os.path.join(self.result_path, os.path.basename(original_image['url'])) + progress = Progress() - image = Image.open(original_image_path) + socketio.emit('progressUpdate', progress.to_formatted_dict()) + eventlet.sleep(0) - seed = ( - original_image['metadata']['seed'] - if 'seed' in original_image['metadata'] - else 'unknown_seed' - ) + original_image_path = self.get_image_path_from_url( + original_image['url'] + ) - progress['currentStatus'] = 'Upscaling' - socketio.emit('progressUpdate', progress) - eventlet.sleep(0) + image = Image.open(original_image_path) - image = self.esrgan.process( - image=image, - upsampler_scale=esrgan_parameters['upscale'][0], - strength=esrgan_parameters['upscale'][1], - seed=seed, - ) + seed = ( + original_image['metadata']['seed'] + if 'seed' in original_image['metadata'] + else 'unknown_seed' + ) - progress['currentStatus'] = 'Saving image' - socketio.emit('progressUpdate', progress) - eventlet.sleep(0) + if postprocessing_parameters['type'] == 'esrgan': + progress.set_current_status('Upscaling') + elif postprocessing_parameters['type'] == 'gfpgan': + progress.set_current_status('Restoring Faces') - esrgan_parameters['seed'] = seed - metadata = self.parameters_to_post_processed_image_metadata( - parameters=esrgan_parameters, - original_image_path=original_image_path, - type='esrgan', - ) - 
command = parameters_to_command(esrgan_parameters) + socketio.emit('progressUpdate', progress.to_formatted_dict()) + eventlet.sleep(0) - path = self.save_image( - image, - command, - metadata, - self.result_path, - postprocessing='esrgan', - ) + if postprocessing_parameters['type'] == 'esrgan': + image = self.esrgan.process( + image=image, + upsampler_scale=postprocessing_parameters['upscale'][ + 0 + ], + strength=postprocessing_parameters['upscale'][1], + seed=seed, + ) + elif postprocessing_parameters['type'] == 'gfpgan': + image = self.gfpgan.process( + image=image, + strength=postprocessing_parameters['gfpgan_strength'], + seed=seed, + ) + else: + raise TypeError( + f'{postprocessing_parameters["type"]} is not a valid postprocessing type' + ) - self.write_log_message( - f'[Upscaled] "{original_image_path}" > "{path}": {command}' - ) + progress.set_current_status('Saving Image') + socketio.emit('progressUpdate', progress.to_formatted_dict()) + eventlet.sleep(0) - progress['currentStatus'] = 'Finished' - progress['currentStep'] = 0 - progress['totalSteps'] = 0 - progress['currentIteration'] = 0 - progress['totalIterations'] = 0 - progress['isProcessing'] = False - socketio.emit('progressUpdate', progress) - eventlet.sleep(0) + postprocessing_parameters['seed'] = seed + metadata = self.parameters_to_post_processed_image_metadata( + parameters=postprocessing_parameters, + original_image_path=original_image_path, + ) - socketio.emit( - 'esrganResult', - { - 'url': self.get_url_from_image_path(path), - 'mtime': os.path.getmtime(path), - 'metadata': metadata, - }, - ) + command = parameters_to_command(postprocessing_parameters) - @socketio.on('runGFPGAN') - def handle_run_gfpgan_event(original_image, gfpgan_parameters): - print( - f'>> GFPGAN face fix requested for "{original_image["url"]}": {gfpgan_parameters}' - ) - progress = { - 'currentStep': 1, - 'totalSteps': 1, - 'currentIteration': 1, - 'totalIterations': 1, - 'currentStatus': 'Preparing', - 'isProcessing': True, - 'currentStatusHasSteps': False, - } + path = self.save_result_image( + image, + command, + metadata, + self.result_path, + postprocessing=postprocessing_parameters['type'], + ) - socketio.emit('progressUpdate', progress) - eventlet.sleep(0) + self.write_log_message( + f'[Postprocessed] "{original_image_path}" > "{path}": {postprocessing_parameters}' + ) - original_image_path = self.get_image_path_from_url(original_image['url']) + progress.mark_complete() + socketio.emit('progressUpdate', progress.to_formatted_dict()) + eventlet.sleep(0) - image = Image.open(original_image_path) + socketio.emit( + 'postprocessingResult', + { + 'url': self.get_url_from_image_path(path), + 'mtime': os.path.getmtime(path), + 'metadata': metadata, + }, + ) + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') - seed = ( - original_image['metadata']['seed'] - if 'seed' in original_image['metadata'] - else 'unknown_seed' - ) - - progress['currentStatus'] = 'Fixing faces' - socketio.emit('progressUpdate', progress) - eventlet.sleep(0) - - image = self.gfpgan.process( - image=image, - strength=gfpgan_parameters['gfpgan_strength'], - seed=seed, - ) - - progress['currentStatus'] = 'Saving image' - socketio.emit('progressUpdate', progress) - eventlet.sleep(0) - - gfpgan_parameters['seed'] = seed - metadata = self.parameters_to_post_processed_image_metadata( - parameters=gfpgan_parameters, - original_image_path=original_image_path, - type='gfpgan', - ) - command = parameters_to_command(gfpgan_parameters) - - path = 
self.save_image( - image, - command, - metadata, - self.result_path, - postprocessing='gfpgan', - ) - - self.write_log_message( - f'[Fixed faces] "{original_image_path}" > "{path}": {command}' - ) - - progress['currentStatus'] = 'Finished' - progress['currentStep'] = 0 - progress['totalSteps'] = 0 - progress['currentIteration'] = 0 - progress['totalIterations'] = 0 - progress['isProcessing'] = False - socketio.emit('progressUpdate', progress) - eventlet.sleep(0) - - socketio.emit( - 'gfpganResult', - { - 'url': self.get_url_from_image_path(path), - 'mtime': os.path.getmtime(path), - 'metadata': metadata, - }, - ) + traceback.print_exc() + print('\n') @socketio.on('cancel') def handle_cancel(): print(f'>> Cancel processing requested') self.canceled.set() - socketio.emit('processingCanceled') # TODO: I think this needs a safety mechanism. @socketio.on('deleteImage') - def handle_delete_image(path, uuid): - print(f'>> Delete requested "{path}"') - from send2trash import send2trash + def handle_delete_image(url, uuid): + try: + print(f'>> Delete requested "{url}"') + from send2trash import send2trash - path = self.get_image_path_from_url(path) - send2trash(path) - socketio.emit('imageDeleted', {'url': path, 'uuid': uuid}) + path = self.get_image_path_from_url(url) + send2trash(path) + socketio.emit('imageDeleted', {'url': url, 'uuid': uuid}) + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') + + traceback.print_exc() + print('\n') # TODO: I think this needs a safety mechanism. @socketio.on('uploadInitialImage') def handle_upload_initial_image(bytes, name): - print(f'>> Init image upload requested "{name}"') - uuid = uuid4().hex - split = os.path.splitext(name) - name = f'{split[0]}.{uuid}{split[1]}' - file_path = os.path.join(self.init_image_path, name) - os.makedirs(os.path.dirname(file_path), exist_ok=True) - newFile = open(file_path, 'wb') - newFile.write(bytes) + try: + print(f'>> Init image upload requested "{name}"') + file_path = self.save_file_unique_uuid_name( + bytes=bytes, name=name, path=self.init_image_path + ) - socketio.emit( - 'initialImageUploaded', {'url': self.get_url_from_image_path(file_path), 'uuid': ''} - ) + socketio.emit( + 'initialImageUploaded', + { + 'url': self.get_url_from_image_path(file_path), + }, + ) + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') + + traceback.print_exc() + print('\n') # TODO: I think this needs a safety mechanism. 
@socketio.on('uploadMaskImage') def handle_upload_mask_image(bytes, name): - print(f'>> Mask image upload requested "{name}"') - uuid = uuid4().hex - split = os.path.splitext(name) - name = f'{split[0]}.{uuid}{split[1]}' - file_path = os.path.join(self.mask_image_path, name) - os.makedirs(os.path.dirname(file_path), exist_ok=True) - newFile = open(file_path, 'wb') - newFile.write(bytes) + try: + print(f'>> Mask image upload requested "{name}"') - socketio.emit('maskImageUploaded', {'url': self.get_url_from_image_path(file_path), 'uuid': ''}) + file_path = self.save_file_unique_uuid_name( + bytes=bytes, name=name, path=self.mask_image_path + ) + + socketio.emit( + 'maskImageUploaded', + { + 'url': self.get_url_from_image_path(file_path), + }, + ) + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') + + traceback.print_exc() + print('\n') # App Functions def get_system_config(self): @@ -430,244 +460,239 @@ class InvokeAIWebServer: def generate_images( self, generation_parameters, esrgan_parameters, gfpgan_parameters ): - self.canceled.clear() + try: + self.canceled.clear() - step_index = 1 - prior_variations = ( - generation_parameters['with_variations'] - if 'with_variations' in generation_parameters - else [] - ) + step_index = 1 + prior_variations = ( + generation_parameters['with_variations'] + if 'with_variations' in generation_parameters + else [] + ) - """ - TODO: RE-IMPLEMENT THE COMMENTED-OUT CODE - If a result image is used as an init image, and then deleted, we will want to be - able to use it as an init image in the future. Need to copy it. + """ + TODO: + If a result image is used as an init image, and then deleted, we will want to be + able to use it as an init image in the future. Need to handle this case. + """ - If the init/mask image doesn't exist in the init_image_path/mask_image_path, - make a unique filename for it and copy it there. 
- """ - # if 'init_img' in generation_parameters: - # filename = os.path.basename(generation_parameters['init_img']) - # abs_init_image_path = os.path.join(self.init_image_path, filename) - # if not os.path.exists( - # abs_init_image_path - # ): - # unique_filename = self.make_unique_init_image_filename( - # filename - # ) - # new_path = os.path.join(self.init_image_path, unique_filename) - # shutil.copy(abs_init_image_path, new_path) - # generation_parameters['init_img'] = os.path.abspath(new_path) - # else: - # generation_parameters['init_img'] = os.path.abspath(os.path.join(self.init_image_path, filename)) + # We need to give absolute paths to the generator, stash the URLs for later + init_img_url = None + mask_img_url = None - # if 'init_mask' in generation_parameters: - # filename = os.path.basename(generation_parameters['init_mask']) - # if not os.path.exists( - # os.path.join(self.mask_image_path, filename) - # ): - # unique_filename = self.make_unique_init_image_filename( - # filename - # ) - # new_path = os.path.join( - # self.init_image_path, unique_filename - # ) - # shutil.copy(generation_parameters['init_img'], new_path) - # generation_parameters['init_mask'] = os.path.abspath(new_path) - # else: - # generation_parameters['init_mas'] = os.path.abspath(os.path.join(self.mask_image_path, filename)) + if 'init_img' in generation_parameters: + init_img_url = generation_parameters['init_img'] + generation_parameters[ + 'init_img' + ] = self.get_image_path_from_url( + generation_parameters['init_img'] + ) + if 'init_mask' in generation_parameters: + mask_img_url = generation_parameters['init_mask'] + generation_parameters[ + 'init_mask' + ] = self.get_image_path_from_url( + generation_parameters['init_mask'] + ) - # We need to give absolute paths to the generator, stash the URLs for later - init_img_url = None; - mask_img_url = None; + totalSteps = self.calculate_real_steps( + steps=generation_parameters['steps'], + strength=generation_parameters['strength'] + if 'strength' in generation_parameters + else None, + has_init_image='init_img' in generation_parameters, + ) - if 'init_img' in generation_parameters: - init_img_url = generation_parameters['init_img'] - generation_parameters['init_img'] = self.get_image_path_from_url(generation_parameters['init_img']) + progress = Progress(generation_parameters=generation_parameters) - if 'init_mask' in generation_parameters: - mask_img_url = generation_parameters['init_mask'] - generation_parameters['init_mask'] = self.get_image_path_from_url(generation_parameters['init_mask']) + self.socketio.emit('progressUpdate', progress.to_formatted_dict()) + eventlet.sleep(0) - totalSteps = self.calculate_real_steps( - steps=generation_parameters['steps'], - strength=generation_parameters['strength'] - if 'strength' in generation_parameters - else None, - has_init_image='init_img' in generation_parameters, - ) + def image_progress(sample, step): + if self.canceled.is_set(): + raise CanceledException - progress = { - 'currentStep': 1, - 'totalSteps': totalSteps, - 'currentIteration': 1, - 'totalIterations': generation_parameters['iterations'], - 'currentStatus': 'Preparing', - 'isProcessing': True, - 'currentStatusHasSteps': False, - } + nonlocal step_index + nonlocal generation_parameters + nonlocal progress - self.socketio.emit('progressUpdate', progress) - eventlet.sleep(0) + progress.set_current_step(step + 1) + progress.set_current_status('Generating') + progress.set_current_status_has_steps(True) - def image_progress(sample, step): - if 
self.canceled.is_set(): - raise CanceledException + if ( + generation_parameters['progress_images'] + and step % 5 == 0 + and step < generation_parameters['steps'] - 1 + ): + image = self.generate.sample_to_image(sample) + metadata = self.parameters_to_generated_image_metadata( + generation_parameters + ) + command = parameters_to_command(generation_parameters) - nonlocal step_index - nonlocal generation_parameters - nonlocal progress + path = self.save_result_image( + image, + command, + metadata, + self.intermediate_path, + step_index=step_index, + postprocessing=False, + ) - progress['currentStep'] = step + 1 - progress['currentStatus'] = 'Generating' - progress['currentStatusHasSteps'] = True - - if ( - generation_parameters['progress_images'] - and step % 5 == 0 - and step < generation_parameters['steps'] - 1 - ): - image = self.generate.sample_to_image(sample) - metadata = self.parameters_to_generated_image_metadata(generation_parameters) - command = parameters_to_command(generation_parameters) - - path = self.save_image(image, command, metadata, self.intermediate_path, step_index=step_index, postprocessing=False) - - step_index += 1 + step_index += 1 + self.socketio.emit( + 'intermediateResult', + { + 'url': self.get_url_from_image_path(path), + 'mtime': os.path.getmtime(path), + 'metadata': metadata, + }, + ) self.socketio.emit( - 'intermediateResult', + 'progressUpdate', progress.to_formatted_dict() + ) + eventlet.sleep(0) + + def image_done(image, seed, first_seed): + if self.canceled.is_set(): + raise CanceledException + + nonlocal generation_parameters + nonlocal esrgan_parameters + nonlocal gfpgan_parameters + nonlocal progress + + step_index = 1 + nonlocal prior_variations + + progress.set_current_status('Generation Complete') + + self.socketio.emit( + 'progressUpdate', progress.to_formatted_dict() + ) + eventlet.sleep(0) + + all_parameters = generation_parameters + postprocessing = False + + if ( + 'variation_amount' in all_parameters + and all_parameters['variation_amount'] > 0 + ): + first_seed = first_seed or seed + this_variation = [ + [seed, all_parameters['variation_amount']] + ] + all_parameters['with_variations'] = ( + prior_variations + this_variation + ) + all_parameters['seed'] = first_seed + elif 'with_variations' in all_parameters: + all_parameters['seed'] = first_seed + else: + all_parameters['seed'] = seed + + if self.canceled.is_set(): + raise CanceledException + + if esrgan_parameters: + progress.set_current_status('Upscaling') + progress.set_current_status_has_steps(False) + self.socketio.emit( + 'progressUpdate', progress.to_formatted_dict() + ) + eventlet.sleep(0) + + image = self.esrgan.process( + image=image, + upsampler_scale=esrgan_parameters['level'], + strength=esrgan_parameters['strength'], + seed=seed, + ) + + postprocessing = True + all_parameters['upscale'] = [ + esrgan_parameters['level'], + esrgan_parameters['strength'], + ] + + if self.canceled.is_set(): + raise CanceledException + + if gfpgan_parameters: + progress.set_current_status('Restoring Faces') + progress.set_current_status_has_steps(False) + self.socketio.emit( + 'progressUpdate', progress.to_formatted_dict() + ) + eventlet.sleep(0) + + image = self.gfpgan.process( + image=image, + strength=gfpgan_parameters['strength'], + seed=seed, + ) + postprocessing = True + all_parameters['gfpgan_strength'] = gfpgan_parameters[ + 'strength' + ] + + progress.set_current_status('Saving Image') + self.socketio.emit( + 'progressUpdate', progress.to_formatted_dict() + ) + eventlet.sleep(0) + + # 
restore the stashed URLS and discard the paths, we are about to send the result to client + if 'init_img' in all_parameters: + all_parameters['init_img'] = init_img_url + + if 'init_mask' in all_parameters: + all_parameters['init_mask'] = mask_img_url + + metadata = self.parameters_to_generated_image_metadata( + all_parameters + ) + + command = parameters_to_command(all_parameters) + + path = self.save_result_image( + image, + command, + metadata, + self.result_path, + postprocessing=postprocessing, + ) + + print(f'>> Image generated: "{path}"') + self.write_log_message(f'[Generated] "{path}": {command}') + + if progress.total_iterations > progress.current_iteration: + progress.set_current_step(1) + progress.set_current_status('Iteration complete') + progress.set_current_status_has_steps(False) + else: + progress.mark_complete() + + self.socketio.emit( + 'progressUpdate', progress.to_formatted_dict() + ) + eventlet.sleep(0) + + self.socketio.emit( + 'generationResult', { 'url': self.get_url_from_image_path(path), 'mtime': os.path.getmtime(path), 'metadata': metadata, }, ) - self.socketio.emit('progressUpdate', progress) - eventlet.sleep(0) - - def image_done(image, seed, first_seed): - nonlocal generation_parameters - nonlocal esrgan_parameters - nonlocal gfpgan_parameters - nonlocal progress - - step_index = 1 - nonlocal prior_variations - - progress['currentStatus'] = 'Generation complete' - self.socketio.emit('progressUpdate', progress) - eventlet.sleep(0) - - all_parameters = generation_parameters - postprocessing = False - - if ( - 'variation_amount' in all_parameters - and all_parameters['variation_amount'] > 0 - ): - first_seed = first_seed or seed - this_variation = [[seed, all_parameters['variation_amount']]] - all_parameters['with_variations'] = ( - prior_variations + this_variation - ) - all_parameters['seed'] = first_seed - elif 'with_variations' in all_parameters: - all_parameters['seed'] = first_seed - else: - all_parameters['seed'] = seed - - if esrgan_parameters: - progress['currentStatus'] = 'Upscaling' - progress['currentStatusHasSteps'] = False - self.socketio.emit('progressUpdate', progress) eventlet.sleep(0) - image = self.esrgan.process( - image=image, - upsampler_scale=esrgan_parameters['level'], - strength=esrgan_parameters['strength'], - seed=seed, - ) + progress.set_current_iteration(progress.current_iteration + 1) - postprocessing = True - all_parameters['upscale'] = [ - esrgan_parameters['level'], - esrgan_parameters['strength'], - ] - - if gfpgan_parameters: - progress['currentStatus'] = 'Fixing faces' - progress['currentStatusHasSteps'] = False - self.socketio.emit('progressUpdate', progress) - eventlet.sleep(0) - - image = self.gfpgan.process( - image=image, - strength=gfpgan_parameters['strength'], - seed=seed, - ) - postprocessing = True - all_parameters['gfpgan_strength'] = gfpgan_parameters[ - 'strength' - ] - - progress['currentStatus'] = 'Saving image' - self.socketio.emit('progressUpdate', progress) - eventlet.sleep(0) - - # restore the stashed URLS and discard the paths, we are about to send the result to client - if 'init_img' in all_parameters: - all_parameters['init_img'] = init_img_url - - if 'init_mask' in all_parameters: - all_parameters['init_mask'] = mask_img_url - - metadata = self.parameters_to_generated_image_metadata( - all_parameters - ) - - command = parameters_to_command(all_parameters) - - path = self.save_image( - image, - command, - metadata, - self.result_path, - postprocessing=postprocessing, - ) - - print(f'>> Image generated: 
"{path}"') - self.write_log_message(f'[Generated] "{path}": {command}') - - if progress['totalIterations'] > progress['currentIteration']: - progress['currentStep'] = 1 - progress['currentIteration'] += 1 - progress['currentStatus'] = 'Iteration finished' - progress['currentStatusHasSteps'] = False - else: - progress['currentStep'] = 0 - progress['totalSteps'] = 0 - progress['currentIteration'] = 0 - progress['totalIterations'] = 0 - progress['currentStatus'] = 'Finished' - progress['isProcessing'] = False - - self.socketio.emit('progressUpdate', progress) - eventlet.sleep(0) - - self.socketio.emit( - 'generationResult', - { - 'url': self.get_url_from_image_path(path), - 'mtime': os.path.getmtime(path), - 'metadata': metadata, - }, - ) - eventlet.sleep(0) - - try: self.generate.prompt2image( **generation_parameters, step_callback=image_progress, @@ -677,133 +702,177 @@ class InvokeAIWebServer: except KeyboardInterrupt: raise except CanceledException: + self.socketio.emit('processingCanceled') pass except Exception as e: + print(e) self.socketio.emit('error', {'message': (str(e))}) print('\n') - import traceback traceback.print_exc() print('\n') def parameters_to_generated_image_metadata(self, parameters): - # top-level metadata minus `image` or `images` - metadata = self.get_system_config() - # remove any image keys not mentioned in RFC #266 - rfc266_img_fields = [ - 'type', - 'postprocessing', - 'sampler', - 'prompt', - 'seed', - 'variations', - 'steps', - 'cfg_scale', - 'step_number', - 'width', - 'height', - 'extra', - 'seamless', - ] - - rfc_dict = {} - - for item in parameters.items(): - key, value = item - if key in rfc266_img_fields: - rfc_dict[key] = value - - postprocessing = [] - - # 'postprocessing' is either null or an - if 'gfpgan_strength' in parameters: - - postprocessing.append( - { - 'type': 'gfpgan', - 'strength': float(parameters['gfpgan_strength']), - } - ) - - if 'upscale' in parameters: - postprocessing.append( - { - 'type': 'esrgan', - 'scale': int(parameters['upscale'][0]), - 'strength': float(parameters['upscale'][1]), - } - ) - - rfc_dict['postprocessing'] = ( - postprocessing if len(postprocessing) > 0 else None - ) - - # semantic drift - rfc_dict['sampler'] = parameters['sampler_name'] - - # display weighted subprompts (liable to change) - subprompts = split_weighted_subprompts(parameters['prompt']) - subprompts = [{'prompt': x[0], 'weight': x[1]} for x in subprompts] - rfc_dict['prompt'] = subprompts - - # 'variations' should always exist and be an array, empty or consisting of {'seed': seed, 'weight': weight} pairs - variations = [] - - if 'with_variations' in parameters: - variations = [ - {'seed': x[0], 'weight': x[1]} - for x in parameters['with_variations'] + try: + # top-level metadata minus `image` or `images` + metadata = self.get_system_config() + # remove any image keys not mentioned in RFC #266 + rfc266_img_fields = [ + 'type', + 'postprocessing', + 'sampler', + 'prompt', + 'seed', + 'variations', + 'steps', + 'cfg_scale', + 'threshold', + 'perlin', + 'step_number', + 'width', + 'height', + 'extra', + 'seamless', ] - rfc_dict['variations'] = variations + rfc_dict = {} - if 'init_img' in parameters: - rfc_dict['type'] = 'img2img' - rfc_dict['strength'] = parameters['strength'] - rfc_dict['fit'] = parameters['fit'] # TODO: Noncompliant - rfc_dict['orig_hash'] = calculate_init_img_hash(self.get_image_path_from_url(parameters['init_img'])) - rfc_dict['init_image_path'] = parameters[ - 'init_img' - ] # TODO: Noncompliant - rfc_dict[ - 'sampler' - ] = 
'ddim' # TODO: FIX ME WHEN IMG2IMG SUPPORTS ALL SAMPLERS - if 'init_mask' in parameters: - rfc_dict['mask_hash'] = calculate_init_img_hash(self.get_image_path_from_url(parameters['init_mask'])) # TODO: Noncompliant - rfc_dict['mask_image_path'] = parameters[ - 'init_mask' + for item in parameters.items(): + key, value = item + if key in rfc266_img_fields: + rfc_dict[key] = value + + postprocessing = [] + + # 'postprocessing' is either null or an + if 'gfpgan_strength' in parameters: + + postprocessing.append( + { + 'type': 'gfpgan', + 'strength': float(parameters['gfpgan_strength']), + } + ) + + if 'upscale' in parameters: + postprocessing.append( + { + 'type': 'esrgan', + 'scale': int(parameters['upscale'][0]), + 'strength': float(parameters['upscale'][1]), + } + ) + + rfc_dict['postprocessing'] = ( + postprocessing if len(postprocessing) > 0 else None + ) + + # semantic drift + rfc_dict['sampler'] = parameters['sampler_name'] + + # display weighted subprompts (liable to change) + subprompts = split_weighted_subprompts(parameters['prompt']) + subprompts = [{'prompt': x[0], 'weight': x[1]} for x in subprompts] + rfc_dict['prompt'] = subprompts + + # 'variations' should always exist and be an array, empty or consisting of {'seed': seed, 'weight': weight} pairs + variations = [] + + if 'with_variations' in parameters: + variations = [ + {'seed': x[0], 'weight': x[1]} + for x in parameters['with_variations'] + ] + + rfc_dict['variations'] = variations + + if 'init_img' in parameters: + rfc_dict['type'] = 'img2img' + rfc_dict['strength'] = parameters['strength'] + rfc_dict['fit'] = parameters['fit'] # TODO: Noncompliant + rfc_dict['orig_hash'] = calculate_init_img_hash( + self.get_image_path_from_url(parameters['init_img']) + ) + rfc_dict['init_image_path'] = parameters[ + 'init_img' ] # TODO: Noncompliant - else: - rfc_dict['type'] = 'txt2img' + if 'init_mask' in parameters: + rfc_dict['mask_hash'] = calculate_init_img_hash( + self.get_image_path_from_url(parameters['init_mask']) + ) # TODO: Noncompliant + rfc_dict['mask_image_path'] = parameters[ + 'init_mask' + ] # TODO: Noncompliant + else: + rfc_dict['type'] = 'txt2img' - metadata['image'] = rfc_dict + metadata['image'] = rfc_dict - return metadata + return metadata + + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') + + traceback.print_exc() + print('\n') def parameters_to_post_processed_image_metadata( - self, parameters, original_image_path, type + self, parameters, original_image_path ): - # top-level metadata minus `image` or `images` - metadata = self.get_system_config() + try: + current_metadata = retrieve_metadata(original_image_path)[ + 'sd-metadata' + ] + postprocessing_metadata = {} - orig_hash = calculate_init_img_hash(self.get_image_path_from_url(original_image_path)) + """ + if we don't have an original image metadata to reconstruct, + need to record the original image and its hash + """ + if 'image' not in current_metadata: + current_metadata['image'] = {} - image = {'orig_path': original_image_path, 'orig_hash': orig_hash} + orig_hash = calculate_init_img_hash( + self.get_image_path_from_url(original_image_path) + ) - if type == 'esrgan': - image['type'] = 'esrgan' - image['scale'] = parameters['upscale'][0] - image['strength'] = parameters['upscale'][1] - elif type == 'gfpgan': - image['type'] = 'gfpgan' - image['strength'] = parameters['gfpgan_strength'] - else: - raise TypeError(f'Invalid type: {type}') + postprocessing_metadata['orig_path'] = (original_image_path,) + 
postprocessing_metadata['orig_hash'] = orig_hash - metadata['image'] = image - return metadata + if parameters['type'] == 'esrgan': + postprocessing_metadata['type'] = 'esrgan' + postprocessing_metadata['scale'] = parameters['upscale'][0] + postprocessing_metadata['strength'] = parameters['upscale'][1] + elif parameters['type'] == 'gfpgan': + postprocessing_metadata['type'] = 'gfpgan' + postprocessing_metadata['strength'] = parameters[ + 'gfpgan_strength' + ] + else: + raise TypeError(f"Invalid type: {parameters['type']}") - def save_image( + if 'postprocessing' in current_metadata['image'] and isinstance( + current_metadata['image']['postprocessing'], list + ): + current_metadata['image']['postprocessing'].append( + postprocessing_metadata + ) + else: + current_metadata['image']['postprocessing'] = [ + postprocessing_metadata + ] + + return current_metadata + + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') + + traceback.print_exc() + print('\n') + + def save_result_image( self, image, command, @@ -812,69 +881,213 @@ class InvokeAIWebServer: step_index=None, postprocessing=False, ): - pngwriter = PngWriter(output_dir) - prefix = pngwriter.unique_prefix() + try: + pngwriter = PngWriter(output_dir) + prefix = pngwriter.unique_prefix() - seed = 'unknown_seed' + seed = 'unknown_seed' - if 'image' in metadata: - if 'seed' in metadata['image']: - seed = metadata['image']['seed'] + if 'image' in metadata: + if 'seed' in metadata['image']: + seed = metadata['image']['seed'] - filename = f'{prefix}.{seed}' + filename = f'{prefix}.{seed}' - if step_index: - filename += f'.{step_index}' - if postprocessing: - filename += f'.postprocessed' + if step_index: + filename += f'.{step_index}' + if postprocessing: + filename += f'.postprocessed' - filename += '.png' + filename += '.png' - path = pngwriter.save_image_and_prompt_to_png( - image=image, dream_prompt=command, metadata=metadata, name=filename - ) + path = pngwriter.save_image_and_prompt_to_png( + image=image, + dream_prompt=command, + metadata=metadata, + name=filename, + ) - return os.path.abspath(path) + return os.path.abspath(path) + + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') + + traceback.print_exc() + print('\n') def make_unique_init_image_filename(self, name): - uuid = uuid4().hex - split = os.path.splitext(name) - name = f'{split[0]}.{uuid}{split[1]}' - return name + try: + uuid = uuid4().hex + split = os.path.splitext(name) + name = f'{split[0]}.{uuid}{split[1]}' + return name + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') + + traceback.print_exc() + print('\n') def calculate_real_steps(self, steps, strength, has_init_image): import math + return math.floor(strength * steps) if has_init_image else steps def write_log_message(self, message): """Logs the filename and parameters used to generate or process that image to log file""" - message = f'{message}\n' - with open(self.log_path, 'a', encoding='utf-8') as file: - file.writelines(message) + try: + message = f'{message}\n' + with open(self.log_path, 'a', encoding='utf-8') as file: + file.writelines(message) + + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') + + traceback.print_exc() + print('\n') def get_image_path_from_url(self, url): """Given a url to an image used by the client, returns the absolute file path to that image""" - if 'init-images' in url: - return 
os.path.abspath(os.path.join(self.init_image_path, os.path.basename(url))) - elif 'mask-images' in url: - return os.path.abspath(os.path.join(self.mask_image_path, os.path.basename(url))) - elif 'intermediates' in url: - return os.path.abspath(os.path.join(self.intermediate_path, os.path.basename(url))) - else: - return os.path.abspath(os.path.join(self.result_path, os.path.basename(url))) + try: + if 'init-images' in url: + return os.path.abspath( + os.path.join(self.init_image_path, os.path.basename(url)) + ) + elif 'mask-images' in url: + return os.path.abspath( + os.path.join(self.mask_image_path, os.path.basename(url)) + ) + elif 'intermediates' in url: + return os.path.abspath( + os.path.join(self.intermediate_path, os.path.basename(url)) + ) + else: + return os.path.abspath( + os.path.join(self.result_path, os.path.basename(url)) + ) + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') + + traceback.print_exc() + print('\n') def get_url_from_image_path(self, path): """Given an absolute file path to an image, returns the URL that the client can use to load the image""" - if 'init-images' in path: - return os.path.join(self.init_image_url, os.path.basename(path)) - elif 'mask-images' in path: - return os.path.join(self.mask_image_url, os.path.basename(path)) - elif 'intermediates' in path: - return os.path.join(self.intermediate_url, os.path.basename(path)) - else: - return os.path.join(self.result_url, os.path.basename(path)) + try: + if 'init-images' in path: + return os.path.join( + self.init_image_url, os.path.basename(path) + ) + elif 'mask-images' in path: + return os.path.join( + self.mask_image_url, os.path.basename(path) + ) + elif 'intermediates' in path: + return os.path.join( + self.intermediate_url, os.path.basename(path) + ) + else: + return os.path.join(self.result_url, os.path.basename(path)) + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') + traceback.print_exc() + print('\n') + + def save_file_unique_uuid_name(self, bytes, name, path): + try: + uuid = uuid4().hex + split = os.path.splitext(name) + name = f'{split[0]}.{uuid}{split[1]}' + file_path = os.path.join(path, name) + os.makedirs(os.path.dirname(file_path), exist_ok=True) + newFile = open(file_path, 'wb') + newFile.write(bytes) + return file_path + except Exception as e: + self.socketio.emit('error', {'message': (str(e))}) + print('\n') + + traceback.print_exc() + print('\n') + + +class Progress: + def __init__(self, generation_parameters=None): + self.current_step = 1 + self.total_steps = ( + self._calculate_real_steps( + steps=generation_parameters['steps'], + strength=generation_parameters['strength'] + if 'strength' in generation_parameters + else None, + has_init_image='init_img' in generation_parameters, + ) + if generation_parameters + else 1 + ) + self.current_iteration = 1 + self.total_iterations = ( + generation_parameters['iterations'] if generation_parameters else 1 + ) + self.current_status = 'Preparing' + self.is_processing = True + self.current_status_has_steps = False + self.has_error = False + + def set_current_step(self, current_step): + self.current_step = current_step + + def set_total_steps(self, total_steps): + self.total_steps = total_steps + + def set_current_iteration(self, current_iteration): + self.current_iteration = current_iteration + + def set_total_iterations(self, total_iterations): + self.total_iterations = total_iterations + + def set_current_status(self, current_status): + self.current_status = 
current_status + + def set_is_processing(self, is_processing): + self.is_processing = is_processing + + def set_current_status_has_steps(self, current_status_has_steps): + self.current_status_has_steps = current_status_has_steps + + def set_has_error(self, has_error): + self.has_error = has_error + + def mark_complete(self): + self.current_status = 'Processing Complete' + self.current_step = 0 + self.total_steps = 0 + self.current_iteration = 0 + self.total_iterations = 0 + self.is_processing = False + + def to_formatted_dict( + self, + ): + return { + 'currentStep': self.current_step, + 'totalSteps': self.total_steps, + 'currentIteration': self.current_iteration, + 'totalIterations': self.total_iterations, + 'currentStatus': self.current_status, + 'isProcessing': self.is_processing, + 'currentStatusHasSteps': self.current_status_has_steps, + 'hasError': self.has_error, + } + + def _calculate_real_steps(self, steps, strength, has_init_image): + return math.floor(strength * steps) if has_init_image else steps class CanceledException(Exception): - pass \ No newline at end of file + pass diff --git a/backend/server.py b/backend/server.py index de4397502a..d0977b9dc3 100644 --- a/backend/server.py +++ b/backend/server.py @@ -486,6 +486,8 @@ def parameters_to_generated_image_metadata(parameters): "variations", "steps", "cfg_scale", + "threshold", + "perlin", "step_number", "width", "height", diff --git a/configs/stable-diffusion/v1-finetune.yaml b/configs/stable-diffusion/v1-finetune.yaml index 7bc31168e7..df22987fa5 100644 --- a/configs/stable-diffusion/v1-finetune.yaml +++ b/configs/stable-diffusion/v1-finetune.yaml @@ -107,4 +107,4 @@ lightning: benchmark: True max_steps: 4000000 # max_steps: 4000 - \ No newline at end of file + diff --git a/configs/stable-diffusion/v1-inference.yaml b/configs/stable-diffusion/v1-inference.yaml index 59d8f33125..da4770ffc7 100644 --- a/configs/stable-diffusion/v1-inference.yaml +++ b/configs/stable-diffusion/v1-inference.yaml @@ -30,9 +30,9 @@ model: target: ldm.modules.embedding_manager.EmbeddingManager params: placeholder_strings: ["*"] - initializer_words: ["sculpture"] + initializer_words: ['face', 'man', 'photo', 'africanmale'] per_image_tokens: false - num_vectors_per_token: 1 + num_vectors_per_token: 6 progressive_words: False unet_config: diff --git a/configs/stable-diffusion/v1-m1-finetune.yaml b/configs/stable-diffusion/v1-m1-finetune.yaml new file mode 100644 index 0000000000..af37f1ec7e --- /dev/null +++ b/configs/stable-diffusion/v1-m1-finetune.yaml @@ -0,0 +1,110 @@ +model: + base_learning_rate: 5.0e-03 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: image + cond_stage_key: caption + image_size: 64 + channels: 4 + cond_stage_trainable: true # Note: different from the one we trained before + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + embedding_reg_weight: 0.0 + + personalization_config: + target: ldm.modules.embedding_manager.EmbeddingManager + params: + placeholder_strings: ["*"] + initializer_words: ['face', 'man', 'photo', 'africanmale'] + per_image_tokens: false + num_vectors_per_token: 6 + progressive_words: False + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + 
channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenCLIPEmbedder + +data: + target: main.DataModuleFromConfig + params: + batch_size: 1 + num_workers: 2 + wrap: false + train: + target: ldm.data.personalized.PersonalizedBase + params: + size: 512 + set: train + per_image_tokens: false + repeats: 100 + validation: + target: ldm.data.personalized.PersonalizedBase + params: + size: 512 + set: val + per_image_tokens: false + repeats: 10 + +lightning: + modelcheckpoint: + params: + every_n_train_steps: 500 + callbacks: + image_logger: + target: main.ImageLogger + params: + batch_frequency: 500 + max_images: 5 + increase_log_steps: False + + trainer: + benchmark: False + max_steps: 6200 +# max_steps: 4000 + diff --git a/docs/assets/img2img/000019.1592514025.png b/docs/assets/img2img/000019.1592514025.png new file mode 100644 index 0000000000..2bc2d63ffa Binary files /dev/null and b/docs/assets/img2img/000019.1592514025.png differ diff --git a/docs/assets/img2img/000019.steps.png b/docs/assets/img2img/000019.steps.png new file mode 100644 index 0000000000..28899e9111 Binary files /dev/null and b/docs/assets/img2img/000019.steps.png differ diff --git a/docs/assets/img2img/000030.1592514025.png b/docs/assets/img2img/000030.1592514025.png new file mode 100644 index 0000000000..0e1641f7eb Binary files /dev/null and b/docs/assets/img2img/000030.1592514025.png differ diff --git a/docs/assets/img2img/000030.step-0.png b/docs/assets/img2img/000030.step-0.png new file mode 100644 index 0000000000..81beb074ec Binary files /dev/null and b/docs/assets/img2img/000030.step-0.png differ diff --git a/docs/assets/img2img/000030.steps.gravity.png b/docs/assets/img2img/000030.steps.gravity.png new file mode 100644 index 0000000000..2bda935a5f Binary files /dev/null and b/docs/assets/img2img/000030.steps.gravity.png differ diff --git a/docs/assets/img2img/000032.1592514025.png b/docs/assets/img2img/000032.1592514025.png new file mode 100644 index 0000000000..0ed2106ec4 Binary files /dev/null and b/docs/assets/img2img/000032.1592514025.png differ diff --git a/docs/assets/img2img/000032.step-0.png b/docs/assets/img2img/000032.step-0.png new file mode 100644 index 0000000000..cc2da68ee4 Binary files /dev/null and b/docs/assets/img2img/000032.step-0.png differ diff --git a/docs/assets/img2img/000032.steps.gravity.png b/docs/assets/img2img/000032.steps.gravity.png new file mode 100644 index 0000000000..79058c1227 Binary files /dev/null and b/docs/assets/img2img/000032.steps.gravity.png differ diff --git a/docs/assets/img2img/000034.1592514025.png b/docs/assets/img2img/000034.1592514025.png new file mode 100644 index 0000000000..43751da572 Binary files /dev/null and b/docs/assets/img2img/000034.1592514025.png differ diff --git a/docs/assets/img2img/000034.steps.png b/docs/assets/img2img/000034.steps.png new file mode 100644 index 0000000000..216213162f Binary files /dev/null and b/docs/assets/img2img/000034.steps.png differ diff --git a/docs/assets/img2img/000035.1592514025.png 
b/docs/assets/img2img/000035.1592514025.png new file mode 100644 index 0000000000..d298895080 Binary files /dev/null and b/docs/assets/img2img/000035.1592514025.png differ diff --git a/docs/assets/img2img/000035.steps.gravity.png b/docs/assets/img2img/000035.steps.gravity.png new file mode 100644 index 0000000000..122c729e87 Binary files /dev/null and b/docs/assets/img2img/000035.steps.gravity.png differ diff --git a/docs/assets/img2img/000045.1592514025.png b/docs/assets/img2img/000045.1592514025.png new file mode 100644 index 0000000000..5e70f1a5bf Binary files /dev/null and b/docs/assets/img2img/000045.1592514025.png differ diff --git a/docs/assets/img2img/000045.steps.gravity.png b/docs/assets/img2img/000045.steps.gravity.png new file mode 100644 index 0000000000..39e2a9b711 Binary files /dev/null and b/docs/assets/img2img/000045.steps.gravity.png differ diff --git a/docs/assets/img2img/000046.1592514025.png b/docs/assets/img2img/000046.1592514025.png new file mode 100644 index 0000000000..70d248eb61 Binary files /dev/null and b/docs/assets/img2img/000046.1592514025.png differ diff --git a/docs/assets/img2img/000046.steps.gravity.png b/docs/assets/img2img/000046.steps.gravity.png new file mode 100644 index 0000000000..d801a48701 Binary files /dev/null and b/docs/assets/img2img/000046.steps.gravity.png differ diff --git a/docs/assets/img2img/fire-drawing.png b/docs/assets/img2img/fire-drawing.png new file mode 100644 index 0000000000..36e2f111fa Binary files /dev/null and b/docs/assets/img2img/fire-drawing.png differ diff --git a/docs/assets/invoke_web_dark.png b/docs/assets/invoke_web_dark.png new file mode 100644 index 0000000000..9141ab40f3 Binary files /dev/null and b/docs/assets/invoke_web_dark.png differ diff --git a/docs/assets/invoke_web_light.png b/docs/assets/invoke_web_light.png new file mode 100644 index 0000000000..98311ccafd Binary files /dev/null and b/docs/assets/invoke_web_light.png differ diff --git a/docs/assets/outpainting/curly-outcrop.png b/docs/assets/outpainting/curly-outcrop.png new file mode 100644 index 0000000000..ae8d8dacd3 Binary files /dev/null and b/docs/assets/outpainting/curly-outcrop.png differ diff --git a/docs/assets/outpainting/curly-outpaint.png b/docs/assets/outpainting/curly-outpaint.png new file mode 100644 index 0000000000..9f4a2ee431 Binary files /dev/null and b/docs/assets/outpainting/curly-outpaint.png differ diff --git a/docs/assets/outpainting/curly.png b/docs/assets/outpainting/curly.png new file mode 100644 index 0000000000..d9a4cb257e Binary files /dev/null and b/docs/assets/outpainting/curly.png differ diff --git a/docs/assets/outpainting/elven_princess.outpainted.png b/docs/assets/outpainting/elven_princess.outpainted.png deleted file mode 100644 index 98f98564df..0000000000 Binary files a/docs/assets/outpainting/elven_princess.outpainted.png and /dev/null differ diff --git a/docs/assets/outpainting/elven_princess.png b/docs/assets/outpainting/elven_princess.png deleted file mode 100644 index aa5f00ccf7..0000000000 Binary files a/docs/assets/outpainting/elven_princess.png and /dev/null differ diff --git a/docs/assets/truncation_comparison.jpg b/docs/assets/truncation_comparison.jpg new file mode 100644 index 0000000000..a39a804beb Binary files /dev/null and b/docs/assets/truncation_comparison.jpg differ diff --git a/docs/features/CLI.md b/docs/features/CLI.md index 580249f248..184f23c171 100644 --- a/docs/features/CLI.md +++ b/docs/features/CLI.md @@ -146,6 +146,7 @@ Here are the dream> command that apply to txt2img: | --cfg_scale | 
-C | 7.5 | How hard to try to match the prompt to the generated image; any number greater than 1.0 works, but the useful range is roughly 5.0 to 20.0 | | --seed | -S | None | Set the random seed for the next series of images. This can be used to recreate an image generated previously.| | --sampler | -A| k_lms | Sampler to use. Use -h to get list of available samplers. | +| --hires_fix | | | Larger images often have duplication artefacts. This option suppresses duplicates by generating the image at low res, and then using img2img to increase the resolution | | --grid | -g | False | Turn on grid mode to return a single image combining all the images generated by this prompt | | --individual | -i | True | Turn off grid mode (deprecated; leave off --grid instead) | | --outdir | -o | outputs/img_samples | Temporarily change the location of these images | @@ -249,9 +250,9 @@ generated image and either loads them into the command line (Linux|Mac), or prints them out in a comment for copy-and-paste (Windows). You may provide either the name of a file in the current output directory, or a full file path. -Given a wildcard path to a folder with image png files, -command will retrieve the dream command used to generate the images, -and save them to a file commands.txt for further processing +Specify path to a folder with image png files, and wildcard *.png +to retrieve the dream command used to generate the images, +and save them to a file commands.txt for further processing. Name of the saved file could be set as the second argument to !fetch ~~~ @@ -299,10 +300,25 @@ dream> !20 dream> watercolor of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194 ~~~ +## !search + +This is similar to !history but it only returns lines that contain +`search string`. For example: + +~~~ +dream> !search surreal +[21] surrealist painting of beautiful woman sitting under tree wearing broad hat and flowing garment -v0.2 -n6 -S2878767194 +~~~ + +## !clear + +This clears the search history from memory and disk. Be advised that +this operation is irreversible and does not issue any warnings! + # Command-line editing and completion -If you are on a Macintosh or Linux machine, the command-line offers -convenient history tracking, editing, and command completion. +The command-line offers convenient history tracking, editing, and +command completion. - To scroll through previous commands and potentially edit/reuse them, use the up and down cursor keys. - To edit the current command, use the left and right cursor keys to position the cursor, and then backspace, delete or insert characters. @@ -312,7 +328,8 @@ convenient history tracking, editing, and command completion. - To paste a cut section back in, position the cursor where you want to paste, and type CTRL-Y Windows users can get similar, but more limited, functionality if they -launch dream.py with the "winpty" program: +launch dream.py with the "winpty" program and have the `pyreadline3` +library installed: ~~~ > winpty python scripts\dream.py diff --git a/docs/features/IMG2IMG.md b/docs/features/IMG2IMG.md index e61f365c01..47e8317a39 100644 --- a/docs/features/IMG2IMG.md +++ b/docs/features/IMG2IMG.md @@ -9,7 +9,7 @@ drawing or photo. This is a really cool feature that tells stable diffusion to b top of the image you provide, preserving the original's basic shape and layout. 
To use it, provide the `--init_img` option as shown here: -```bash +```commandline dream> "waterfall and rainbow" --init_img=./init-images/crude_drawing.png --strength=0.5 -s100 -n4 ``` @@ -26,5 +26,99 @@ If the initial image contains transparent regions, then Stable Diffusion will on transparent regions, a process called "inpainting". However, for this to work correctly, the color information underneath the transparent regions needs to be preserved, not erased. -More Details can be found here: +More details can be found here: [Creating Transparent Images For Inpainting](./INPAINTING.md#creating-transparent-regions-for-inpainting) + +## How does it actually work, though? + +The main difference between `img2img` and `prompt2img` is the starting point. While `prompt2img` always starts with pure +Gaussian noise and progressively refines it over the requested number of steps, `img2img` skips some of these earlier steps +(how many it skips is indirectly controlled by the `--strength` parameter), and uses instead your initial image mixed with Gaussian noise as the starting image. + +**Let's start** by thinking about vanilla `prompt2img`, just generating an image from a prompt. If the step count is 10, then the "latent space" (Stable Diffusion's internal representation of the image) for the prompt "fire" with seed `1592514025` develops something like this: + +```commandline +dream> "fire" -s10 -W384 -H384 -S1592514025 +``` + +![latent steps](../assets/img2img/000019.steps.png) + +Put simply: starting from a frame of fuzz/static, SD finds details in each frame that it thinks look like "fire" and brings them a little bit more into focus, gradually scrubbing out the fuzz until a clear image remains. + +**When you use `img2img`** some of the earlier steps are cut, and instead an initial image of your choice is used. But because of how the maths behind Stable Diffusion works, this image needs to be mixed with just the right amount of noise (fuzz/static) for where it is being inserted. This is where the strength parameter comes in. Depending on the set strength, your image will be inserted into the sequence at the appropriate point, with just the right amount of noise. + +### A concrete example + +Say I want SD to draw a fire based on this hand-drawn image: + +![drawing of a fireplace](../assets/img2img/fire-drawing.png) + +Let's only do 10 steps, to make it easier to see what's happening. If strength is `0.7`, this is what the internal steps the algorithm has to take will look like: + +![](../assets/img2img/000032.steps.gravity.png) + +With strength `0.4`, the steps look more like this: + +![](../assets/img2img/000030.steps.gravity.png) + +Notice how much more fuzzy the starting image is for strength `0.7` compared to `0.4`, and notice also how much longer the sequence is with `0.7`: + +| | strength = 0.7 | strength = 0.4 | | -- | -- | -- | | initial image that SD sees | ![](../assets/img2img/000032.step-0.png) | ![](../assets/img2img/000030.step-0.png) | | steps argument to `dream>` | `-s10` | `-s10` | | steps actually taken | 7 | 4 | | latent space at each step | ![](../assets/img2img/000032.steps.gravity.png) | ![](../assets/img2img/000030.steps.gravity.png) | | output | ![](../assets/img2img/000032.1592514025.png) | ![](../assets/img2img/000030.1592514025.png) | + +Both of the outputs look kind of like what I was thinking of. With the strength higher, my input becomes more vague, *and* Stable Diffusion has more steps to refine its output. 
But it's not really making what I want, which is a picture of a cheery open fire. With the strength lower, my input is clearer, *but* Stable Diffusion has less chance to refine itself, so the result ends up inheriting all the problems of my bad drawing. + + +If you want to try this out yourself, all of these are using a seed of `1592514025` with a width/height of `384`, step count `10`, the default sampler (`k_lms`), and the single-word prompt `fire`: + +```commandline +dream> "fire" -s10 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png --strength 0.7 +``` + +The code for rendering intermediates is on my (damian0815's) branch [document-img2img](https://github.com/damian0815/InvokeAI/tree/document-img2img) - run `dream.py` and check your `outputs/img-samples/intermediates` folder while generating an image. + +### Compensating for the reduced step count + +After putting this guide together I was curious to see how different the result would be if I increased the step count to compensate, so that SD could have the same number of steps to develop the image regardless of the strength. So I ran the generation again using the same seed, but this time adapting the step count to give each generation 20 steps. + +Here's strength `0.4` (note step count `50`, which is `20 ÷ 0.4` to make sure SD does `20` steps from my image): + +```commandline +dream> "fire" -s50 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.4 +``` + +![](../assets/img2img/000035.1592514025.png) + +and strength `0.7` (note step count `30`, which is roughly `20 ÷ 0.7` to make sure SD does `20` steps from my image): + +```commandline +dream> "fire" -s30 -W384 -H384 -S1592514025 -I /tmp/fire-drawing.png -f 0.7 +``` + +![](../assets/img2img/000046.1592514025.png) + +In both cases the image is nice and clean and "finished", but because at strength `0.7` Stable Diffusion has been given so much more freedom to improve on my badly-drawn flames, they've come out looking much better. You can really see the difference when looking at the latent steps. There's more noise on the first image with strength `0.7`: + +![](../assets/img2img/000046.steps.gravity.png) + +than there is for strength `0.4`: + +![](../assets/img2img/000035.steps.gravity.png) + +and that extra noise gives the algorithm more choices when it is evaluating how to denoise any particular pixel in the image. + +Unfortunately, it seems that `img2img` is very sensitive to the step count. Here's strength `0.7` with a step count of `29` (SD did 19 steps from my image): + +![](../assets/img2img/000045.1592514025.png) + +By comparing the latents we can sort of see that something got interpreted differently enough on the third or fourth step to lead to a rather different interpretation of the flames. + +![](../assets/img2img/000046.steps.gravity.png) +![](../assets/img2img/000045.steps.gravity.png) + +This is the result of a difference in the de-noising "schedule" - basically the noise has to be cleaned by a certain degree each step or the model won't "converge" on the image properly (see https://huggingface.co/blog/stable_diffusion for more about that). A different step count means a different schedule, which means things get interpreted slightly differently at every step. diff --git a/docs/features/OTHER.md b/docs/features/OTHER.md index 5faa1d32d0..6aab528275 100644 --- a/docs/features/OTHER.md +++ b/docs/features/OTHER.md @@ -55,6 +55,43 @@ outputs/img-samples/000040.3498014304.png: "a cute child playing hopscotch" -G1. 
--- +## **Weighted Prompts** + +You may weight different sections of the prompt to tell the sampler to attach different levels of +priority to them, by adding `:(number)` to the end of the section you wish to up- or downweight. For +example consider this prompt: + +```bash +tabby cat:0.25 white duck:0.75 hybrid +``` + +This will tell the sampler to invest 25% of its effort on the tabby cat aspect of the image and 75% +on the white duck aspect (surprisingly, this example actually works). The prompt weights can use any +combination of integers and floating point numbers, and they do not need to add up to 1. + +--- + +## Thresholding and Perlin Noise Initialization Options + +Two new options are the thresholding (`--threshold`) and the perlin noise initialization (`--perlin`) options. Thresholding limits the range of the latent values during optimization, which helps combat oversaturation with higher CFG scale values. Perlin noise initialization starts with a percentage (a value ranging from 0 to 1) of perlin noise mixed into the initial noise. Both features allow for more variations and options in the course of generating images. + +For better intuition into what these options do in practice, [here is a graphic demonstrating them both](static/truncation_comparison.jpg) in use. In generating this graphic, perlin noise at initialization was programmatically varied going across on the diagram by values 0.0, 0.1, 0.2, 0.4, 0.5, 0.6, 0.8, 0.9, 1.0; and the threshold was varied going down from +0, 1, 2, 3, 4, 5, 10, 20, 100. The other options are fixed, so the initial prompt is as follows (no thresholding or perlin noise): + +``` + a portrait of a beautiful young lady -S 1950357039 -s 100 -C 20 -A k_euler_a --threshold 0 --perlin 0 +``` + +Here's an example of another prompt used when setting the threshold to 5 and perlin noise to 0.2: + +``` + a portrait of a beautiful young lady -S 1950357039 -s 100 -C 20 -A k_euler_a --threshold 5 --perlin 0.2 +``` + +Note: currently the thresholding feature is only implemented for the k-diffusion style samplers, and empirically appears to work best with `k_euler_a` and `k_dpm_2_a`. Using 0 disables thresholding. Using 0 for perlin noise disables using perlin noise for initialization. Finally, using 1 for perlin noise uses only perlin noise for initialization. + +--- + ## **Simplified API** For programmers who wish to incorporate stable-diffusion into other products, this repository diff --git a/docs/features/OUTPAINTING.md b/docs/features/OUTPAINTING.md index 9f72a5cb3c..952bbc97fc 100644 --- a/docs/features/OUTPAINTING.md +++ b/docs/features/OUTPAINTING.md @@ -4,75 +4,95 @@ title: Outpainting # :octicons-paintbrush-16: Outpainting -## Continous outpainting +## Outpainting and outcropping -This extension uses the inpainting code to extend an existing image to -any direction of "top", "right", "bottom" or "left". To use it you -need to provide an initial image with -I and an extension direction -with -D (direction). When extending using outpainting a higher img2img -strength value of 0.83 is the default. +Outpainting is a process by which the AI generates parts of the image +that are outside its original frame. It can be used to fix up images +in which the subject is off center, or when some detail (often the top +of someone's head!) is cut off. -The code is not foolproof. Sometimes it will do a good job extending -the image, and other times it will generate ghost images and other -artifacts. 
In addition, the code works best on images that were -generated by dream.py, because it will be able to recover the original -prompt that generated the file and "understand" what you are trying to -achieve. +InvokeAI supports two versions of outpainting, one called "outpaint" +and the other "outcrop." They work slightly differently and each has +its advantages and drawbacks. -### Basic Usage +### Outcrop -To illustrate, consider this image generated with the prompt "fantasy -portrait of eleven princess." It's nice, but rather annoying that the -top of the head has been cropped off. +The `outcrop` extension allows you to extend the image in 64 pixel +increments in any dimension. You can apply the module to any image +previously-generated by InvokeAI. Note that it will **not** work with +arbitrary photographs or Stable Diffusion images created by other +implementations. -![elven_princess](../assets/outpainting/elven_princess.png) +Consider this image: -We can fix that using the `!fix` command! +![curly_woman](../assets/outpainting/curly.png) + +Pretty nice, but it's annoying that the top of her head is cut +off. She's also a bit off center. Let's fix that! ~~~~ -dream> !fix my_images/elven_princess.png -D top 50 +dream> !fix images/curly.png --outcrop top 64 right 64 ~~~~ -This is telling dream.py to open up a rectangle 50 pixels high at the -top of the image and outpaint into it. The result is: +This is saying to apply the `outcrop` extension by extending the top +of the image by 64 pixels, and the right of the image by the same +amount. You can use any combination of top|left|right|bottom, and +specify any number of pixels to extend. You can also abbreviate +`--outcrop` to `-c`. -![elven_princess.fixed](../assets/outpainting/elven_princess.outpainted.png) +The result looks like this: -Viola! You can similarly specify `bottom`, `left` or `right` to -outpaint into these margins. +![curly_woman_outcrop](../assets/outpainting/curly-outcrop.png) -There are some limitations to be aware of: +The new image is actually slightly larger than the original (576x576, +because 64 pixels were added to the top and right sides). -1. You cannot change the size of the image rectangle. In the example, - notice that the whole image is shifted downwards by 50 pixels, rather - than the top being extended upwards. +A number of caveats: -2. Attempting to outpaint larger areas will frequently give rise to ugly +1. Although you can specify any pixel values, they will be rounded up +to the nearest multiple of 64. Smaller values are better. Larger +extensions are more likely to generate artefacts. However, if you wish +you can run the !fix command repeatedly to cautiously expand the +image. + +2. The extension is stochastic, meaning that each time you run it +you'll get a slightly different result. You can run it repeatedly +until you get an image you like. Unfortunately `!fix` does not +currently respect the `-n` (`--iterations`) argument. + +## Outpaint + +The `outpaint` extension does the same thing, but with subtle +differences. Starting with the same image, here is how we would add an +additional 64 pixels to the top of the image: + +~~~ +dream> !fix images/curly.png --out_direction top 64 +~~~ + +(You can abbreviate `--out_direction` as `-D`.) + +The result is shown here: + +![curly_woman_outpaint](../assets/outpainting/curly-outpaint.png) + +Although the effect is similar, there are significant differences from +outcropping: + +1. You can only specify one direction to extend at a time. 2. 
The image is **not** resized. Instead, the image is shifted by the specified +number of pixels. If you look carefully, you'll see that less of the lady's +torso is visible in the image. +3. Because the image dimensions remain the same, there's no rounding +to multiples of 64. +4. Attempting to outpaint larger areas will frequently give rise to ugly ghosting effects. - -3. For best results, try increasing the step number. - -4. If you don't specify a pixel value in -D, it will default to half +5. For best results, try increasing the step number. +6. If you don't specify a pixel value in -D, it will default to half of the whole image, which is likely not what you want. -You can do more with `!fix` including upscaling and facial -reconstruction of previously-generated images. See -[./UPSCALE.md#fixing-previously-generated-images] for the details. - -### Advanced Usage - -For more control over the outpaintihg process, you can provide the -`-D` option at image generation time. This allows you to apply all the -controls, including the ability to resize the image and apply face-fixing -and upscaling. For example: - -~~~~ -dream> man with cat on shoulder -I./images/man.png -D bottom 100 -W960 -H960 -fit -~~~~ - -Or even shorter, since the prompt is read from the metadata of the old image: - -~~~~ -dream> -I./images/man.png -D bottom 100 -W960 -H960 -fit -U2 -G1 -~~~~ +Neither `outpaint` nor `outcrop` are perfect, but we continue to tune +and improve them. If one doesn't work, try the other. You may also +wish to experiment with other `img2img` arguments, such as `-C`, `-f` +and `-s`. diff --git a/docs/features/UPSCALE.md b/docs/features/POSTPROCESS.md similarity index 90% rename from docs/features/UPSCALE.md rename to docs/features/POSTPROCESS.md index 6f720826ac..cd4fd7e9e6 100644 --- a/docs/features/UPSCALE.md +++ b/docs/features/POSTPROCESS.md @@ -1,14 +1,18 @@ --- -title: Upscale +title: Postprocessing --- ## Intro -The script provides the ability to restore faces and upscale. You can apply -these operations at the time you generate the images, or at any time to a -previously-generated PNG file, using the -[!fix](#fixing-previously-generated-images) command. +This extension provides the ability to restore faces and upscale +images. + +Face restoration and upscaling can be applied at the time you generate +the images, or at any later time against a previously-generated PNG +file, using the [!fix](#fixing-previously-generated-images) +command. [Outpainting and outcropping](OUTPAINTING.md) can only be +applied after the fact. ## Face Fixing @@ -31,7 +35,7 @@ into **src/gfpgan/experiments/pretrained_models**. On Mac and Linux systems, here's how you'd do it using **wget**: ```bash -wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth src/gfpgan/experiments/pretrained_models/ +wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P src/gfpgan/experiments/pretrained_models/ ``` Make sure that you're in the InvokeAI directory when you do this. @@ -158,9 +162,9 @@ situations when there is very little facial data to work with. ## Fixing Previously-Generated Images It is easy to apply face restoration and/or upscaling to any -previously-generated file. Just use the syntax -`!fix path/to/file.png `. For example, to apply GFPGAN at strength 0.8 -and upscale 2X for a file named `./outputs/img-samples/000044.2945021133.png`, +previously-generated file. Just use the syntax `!fix path/to/file.png +`. 
For example, to apply GFPGAN at strength 0.8 and upscale +2X for a file named `./outputs/img-samples/000044.2945021133.png`, just run: ``` diff --git a/docs/features/WEB.md b/docs/features/WEB.md index 833b18cdfc..ffc48f47f7 100644 --- a/docs/features/WEB.md +++ b/docs/features/WEB.md @@ -1,12 +1,15 @@ --- -title: Barebones Web Server +title: InvokeAI Web UI & Server --- -# :material-web: Barebones Web Server +# :material-web: InvokeAI Web Server -As of version 1.10, this distribution comes with a bare bones web server (see -screenshot). To use it, run the `dream.py` script by adding the `--web` -option. +As of version 2.0, this distribution's web server has been updated to include +an all-new UI, with optimizations to improve common workflows for image generation. + +## Getting Started & Initialization Commands + +To start the web server, run the `dream.py` script by adding the `--web` parameter. ```bash (ldm) ~/stable-diffusion$ python3 scripts/dream.py --web ``` @@ -15,7 +18,58 @@ option. You can then connect to the server by pointing your web browser at http://localhost:9090, or to the network name or IP address of the server. -Kudos to [Tesseract Cat](https://github.com/TesseractCat) for contributing this -code, and to [dagf2101](https://github.com/dagf2101) for refining it. +### Additional Options + `--web_develop` - Starts the web server in development mode. + + `--web_verbose` - Enables verbose logging + + `--cors [CORS ...]` - Additional allowed origins, comma-separated + + `--host HOST` - Web server: Host or IP to listen on. Set to 0.0.0.0 to + accept traffic from other devices on your network. + + `--port PORT` - Web server: Port to listen on + + `--gui` - Start InvokeAI GUI - This is the "desktop mode" version of the web app. It uses Flask + to create a desktop app experience of the webserver. + + +## Web Specific Features + +The web interface offers an incredibly easy-to-use experience for interacting with the InvokeAI toolkit. +For detailed guidance on individual features, see the Feature-specific help documents available in this directory. +Note that the latest functionality available in the CLI may not always be available in the Web interface. + +### Dark Mode & Light Mode +The InvokeAI interface is available in a nano-carbon black & purple Dark Mode, and a "burn your eyes out Nosferatu" Light Mode. These can be toggled by clicking the Sun/Moon icons at the top right of the interface. + +![InvokeAI Web Server - Dark Mode](../assets/invoke_web_dark.png) + +![InvokeAI Web Server - Light Mode](../assets/invoke_web_light.png) + +### Invocation Toolbar +The left side of the InvokeAI interface is available for customizing the prompt and the settings used for invoking your new image. Typing your prompt into the open text field and clicking the Invoke button will produce the image based on the settings configured in the toolbar. + +See below for additional documentation related to each feature: +- [Core Prompt Settings](./CLI.md) +- [Variations](./VARIATIONS.md) +- [Upscaling](./POSTPROCESS.md) +- [Image to Image](./IMG2IMG.md) +- [Inpainting](./INPAINTING.md) +- [Other](./OTHER.md) + +### Invocation Gallery +The currently selected --outdir (or the default outputs folder) will display all previously generated files on load. As new invocations are generated, these will be dynamically added to the gallery, and can be previewed by selecting them. Each image also has a simple set of actions (e.g., Delete, Use Seed, Use All Parameters, etc.) that can be accessed by hovering over the image. 
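Gallery actions such as "Use Seed" and "Use All Parameters" rely on the generation parameters that `dream.py` embeds in each PNG it writes. If you want to inspect that metadata from your own script, here is a minimal sketch, assuming Pillow is installed and assuming the parameters are stored as a PNG text chunk (the key name `Dream` below is an assumption and may differ between versions):

```python
# Minimal sketch: list the PNG text chunks of a generated image so the
# embedded dream> command (if present) can be copied back into the CLI
# or web UI. Assumes Pillow is installed; the "Dream" key is an assumption.
from PIL import Image


def read_png_text_chunks(path: str) -> dict:
    """Return all PNG tEXt/iTXt chunks of the image as a plain dict."""
    with Image.open(path) as im:
        # Pillow exposes PNG text chunks via the .text mapping on PNG images;
        # fall back to an empty dict for other formats.
        return dict(getattr(im, "text", {}) or {})


if __name__ == "__main__":
    chunks = read_png_text_chunks("outputs/img-samples/000040.3498014304.png")
    for key, value in chunks.items():
        print(f"{key}: {value}")
```

If a `Dream`-style entry is present, its value holds the prompt and the switches used to generate the image, which is the same information the gallery buttons re-apply for you.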
+ +### Image Workspace +When an image from the Invocation Gallery is selected, or is generated, the image will be displayed within the center of the interface. A quickbar of common image interactions are displayed along the top of the image, including: +- Use image in the `Image to Image` workflow +- Initialize Face Restoration on the selected file +- Initialize Upscaling on the selected file +- View File metadata and details +- Delete the file + +## Acknowledgements + +A huge shout-out to the core team working to make this vision a reality, including [psychedelicious](https://github.com/psychedelicious), [Kyle0654](https://github.com/Kyle0654) and [blessedcoolant](https://github.com/blessedcoolant). [hipsterusername](https://github.com/hipsterusername) was the team's unofficial cheerleader and added tooltips/docs. -![Dream Web Server](../assets/dream_web_server.png) diff --git a/docs/other/CONTRIBUTORS.md b/docs/other/CONTRIBUTORS.md index be4f3f407c..82f9132f97 100644 --- a/docs/other/CONTRIBUTORS.md +++ b/docs/other/CONTRIBUTORS.md @@ -58,6 +58,7 @@ We thank them for all of their time and hard work. - [rabidcopy](https://github.com/rabidcopy) - [Dominic Letz](https://github.com/dominicletz) - [Dmitry T.](https://github.com/ArDiouscuros) +- [Kent Keirsey](https://github.com/hipsterusername) ## **Original CompVis Authors:** diff --git a/environment-mac.yml b/environment-mac.yml index dcaed6c88d..70df3b8865 100644 --- a/environment-mac.yml +++ b/environment-mac.yml @@ -33,13 +33,13 @@ dependencies: - openh264==2.3.0 - onnx==1.12.0 - onnxruntime==1.12.1 - - protobuf==3.20.1 + - protobuf==3.19.4 - pudb==2022.1 - - pytorch-lightning==1.6.5 + - pytorch-lightning==1.7.5 - scipy==1.9.1 - streamlit==1.12.2 - sympy==1.10.1 - - tensorboard==2.9.0 + - tensorboard==2.10.0 - torchmetrics==0.9.3 - pip: - flask==2.1.3 diff --git a/environment.yml b/environment.yml index eaf4d0e02a..621ec024b0 100644 --- a/environment.yml +++ b/environment.yml @@ -23,6 +23,7 @@ dependencies: - send2trash==1.8.0 - pillow==9.2.0 - einops==0.3.0 + - pyreadline3 - torch-fidelity==0.3.0 - transformers==4.19.2 - torchmetrics==0.6.0 @@ -34,6 +35,6 @@ dependencies: - kornia==0.6.0 - -e git+https://github.com/openai/CLIP.git@main#egg=clip - -e git+https://github.com/CompVis/taming-transformers.git@master#egg=taming-transformers - - -e git+https://github.com/lstein/k-diffusion.git@master#egg=k-diffusion + - -e git+https://github.com/Birch-san/k-diffusion.git@mps#egg=k_diffusion - -e git+https://github.com/lstein/GFPGAN@fix-dark-cast-images#egg=gfpgan - -e . diff --git a/frontend/README.md b/frontend/README.md index 94934b2bce..f597cc6f23 100644 --- a/frontend/README.md +++ b/frontend/README.md @@ -2,24 +2,22 @@ ## Run -- `python backend/server.py` serves both frontend and backend at http://localhost:9090 +- `python scripts/dream.py --web` serves both frontend and backend at + http://localhost:9090 ## Evironment Install [node](https://nodejs.org/en/download/) (includes npm) and optionally [yarn](https://yarnpkg.com/getting-started/install). -From `frontend/` run `npm install` / `yarn install` to install the frontend packages. +From `frontend/` run `npm install` / `yarn install` to install the frontend +packages. ## Dev 1. From `frontend/`, run `npm dev` / `yarn dev` to start the dev server. -2. Note the address it starts up on (probably `http://localhost:5173/`). -3. Edit `backend/server.py`'s `additional_allowed_origins` to include this address, e.g. - `additional_allowed_origins = ['http://localhost:5173']`. -4. 
Leaving the dev server running, open a new terminal and go to the project root. -5. Run `python backend/server.py`. -6. Navigate to the dev server address e.g. `http://localhost:5173/`. +2. Run `python scripts/dream.py --web`. +3. Navigate to the dev server address e.g. `http://localhost:5173/`. To build for dev: `npm build-dev` / `yarn build-dev` @@ -28,10 +26,3 @@ To build for production: `npm build` / `yarn build` ## TODO - Search repo for "TODO" -- My one gripe with Chakra: no way to disable all animations right now and drop the dependence on - `framer-motion`. I would prefer to save the ~30kb on bundle and have zero animations. This is on - the Chakra roadmap. See https://github.com/chakra-ui/chakra-ui/pull/6368 for last discussion on - this. Need to check in on this issue periodically. -- Mobile friendly layout -- Proper image gallery/viewer/manager -- Help tooltips and such diff --git a/frontend/dist/assets/Inter-Bold.790c108b.ttf b/frontend/dist/assets/Inter-Bold.790c108b.ttf new file mode 100644 index 0000000000..8e82c70d10 Binary files /dev/null and b/frontend/dist/assets/Inter-Bold.790c108b.ttf differ diff --git a/frontend/dist/assets/Inter.b9a8e5e2.ttf b/frontend/dist/assets/Inter.b9a8e5e2.ttf new file mode 100644 index 0000000000..ec3164efa8 Binary files /dev/null and b/frontend/dist/assets/Inter.b9a8e5e2.ttf differ diff --git a/frontend/dist/assets/favicon.0d253ced.ico b/frontend/dist/assets/favicon.0d253ced.ico new file mode 100644 index 0000000000..413340efb2 Binary files /dev/null and b/frontend/dist/assets/favicon.0d253ced.ico differ diff --git a/frontend/dist/assets/index.1332a4e9.js b/frontend/dist/assets/index.1332a4e9.js deleted file mode 100644 index 6174f1baac..0000000000 --- a/frontend/dist/assets/index.1332a4e9.js +++ /dev/null @@ -1,694 +0,0 @@ -function Yj(e,t){for(var n=0;ni[o]})}}}return Object.freeze(Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}))}(function(){const t=document.createElement("link").relList;if(t&&t.supports&&t.supports("modulepreload"))return;for(const o of document.querySelectorAll('link[rel="modulepreload"]'))i(o);new MutationObserver(o=>{for(const c of o)if(c.type==="childList")for(const u of c.addedNodes)u.tagName==="LINK"&&u.rel==="modulepreload"&&i(u)}).observe(document,{childList:!0,subtree:!0});function n(o){const c={};return o.integrity&&(c.integrity=o.integrity),o.referrerpolicy&&(c.referrerPolicy=o.referrerpolicy),o.crossorigin==="use-credentials"?c.credentials="include":o.crossorigin==="anonymous"?c.credentials="omit":c.credentials="same-origin",c}function i(o){if(o.ep)return;o.ep=!0;const c=n(o);fetch(o.href,c)}})();var vc=typeof globalThis<"u"?globalThis:typeof window<"u"?window:typeof global<"u"?global:typeof self<"u"?self:{};function qj(e){return e&&e.__esModule&&Object.prototype.hasOwnProperty.call(e,"default")?e.default:e}var k={exports:{}},e5={exports:{}};/** - * @license React - * react.development.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. 
- */(function(e,t){(function(){typeof __REACT_DEVTOOLS_GLOBAL_HOOK__<"u"&&typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.registerInternalModuleStart=="function"&&__REACT_DEVTOOLS_GLOBAL_HOOK__.registerInternalModuleStart(new Error);var n="18.2.0",i=Symbol.for("react.element"),o=Symbol.for("react.portal"),c=Symbol.for("react.fragment"),u=Symbol.for("react.strict_mode"),m=Symbol.for("react.profiler"),h=Symbol.for("react.provider"),v=Symbol.for("react.context"),b=Symbol.for("react.forward_ref"),x=Symbol.for("react.suspense"),w=Symbol.for("react.suspense_list"),N=Symbol.for("react.memo"),R=Symbol.for("react.lazy"),O=Symbol.for("react.offscreen"),M=Symbol.iterator,B="@@iterator";function I(S){if(S===null||typeof S!="object")return null;var A=M&&S[M]||S[B];return typeof A=="function"?A:null}var F={current:null},z={transition:null},$={current:null,isBatchingLegacy:!1,didScheduleLegacyUpdate:!1},q={current:null},K={},le=null;function me(S){le=S}K.setExtraStackFrame=function(S){le=S},K.getCurrentStack=null,K.getStackAddendum=function(){var S="";le&&(S+=le);var A=K.getCurrentStack;return A&&(S+=A()||""),S};var se=!1,Se=!1,qe=!1,ae=!1,ue=!1,Ee={ReactCurrentDispatcher:F,ReactCurrentBatchConfig:z,ReactCurrentOwner:q};Ee.ReactDebugCurrentFrame=K,Ee.ReactCurrentActQueue=$;function de(S){{for(var A=arguments.length,V=new Array(A>1?A-1:0),G=1;G1?A-1:0),G=1;G1){for(var Ot=Array(St),yt=0;yt1){for(var jt=Array(yt),Tt=0;Tt is not supported and will be removed in a future major release. Did you mean to render instead?")),A.Provider},set:function(we){A.Provider=we}},_currentValue:{get:function(){return A._currentValue},set:function(we){A._currentValue=we}},_currentValue2:{get:function(){return A._currentValue2},set:function(we){A._currentValue2=we}},_threadCount:{get:function(){return A._threadCount},set:function(we){A._threadCount=we}},Consumer:{get:function(){return V||(V=!0,ie("Rendering is not supported and will be removed in a future major release. Did you mean to render instead?")),A.Consumer}},displayName:{get:function(){return A.displayName},set:function(we){te||(de("Setting `displayName` on Context.Consumer has no effect. You should set it directly on the context with Context.displayName = '%s'.",we),te=!0)}}}),A.Consumer=ze}return A._currentRenderer=null,A._currentRenderer2=null,A}var vr=-1,Ia=0,Fi=1,La=2;function X(S){if(S._status===vr){var A=S._result,V=A();if(V.then(function(ze){if(S._status===Ia||S._status===vr){var we=S;we._status=Fi,we._result=ze}},function(ze){if(S._status===Ia||S._status===vr){var we=S;we._status=La,we._result=ze}}),S._status===vr){var G=S;G._status=Ia,G._result=V}}if(S._status===Fi){var te=S._result;return te===void 0&&ie(`lazy: Expected the result of a dynamic import() call. Instead received: %s - -Your code should look like: - const MyComponent = lazy(() => import('./MyComponent')) - -Did you accidentally put curly braces around the import?`,te),"default"in te||ie(`lazy: Expected the result of a dynamic import() call. Instead received: %s - -Your code should look like: - const MyComponent = lazy(() => import('./MyComponent'))`,te),te.default}else throw S._result}function Ue(S){var A={_status:vr,_result:S},V={$$typeof:R,_payload:A,_init:X};{var G,te;Object.defineProperties(V,{defaultProps:{configurable:!0,get:function(){return G},set:function(ze){ie("React.lazy(...): It is not supported to assign `defaultProps` to a lazy component import. 
Either specify them where the component is defined, or create a wrapping component around it."),G=ze,Object.defineProperty(V,"defaultProps",{enumerable:!0})}},propTypes:{configurable:!0,get:function(){return te},set:function(ze){ie("React.lazy(...): It is not supported to assign `propTypes` to a lazy component import. Either specify them where the component is defined, or create a wrapping component around it."),te=ze,Object.defineProperty(V,"propTypes",{enumerable:!0})}}})}return V}function Ke(S){S!=null&&S.$$typeof===N?ie("forwardRef requires a render function but received a `memo` component. Instead of forwardRef(memo(...)), use memo(forwardRef(...))."):typeof S!="function"?ie("forwardRef requires a render function but was given %s.",S===null?"null":typeof S):S.length!==0&&S.length!==2&&ie("forwardRef render functions accept exactly two parameters: props and ref. %s",S.length===1?"Did you forget to use the ref parameter?":"Any additional parameter will be undefined."),S!=null&&(S.defaultProps!=null||S.propTypes!=null)&&ie("forwardRef render functions do not support propTypes or defaultProps. Did you accidentally pass a React component?");var A={$$typeof:b,render:S};{var V;Object.defineProperty(A,"displayName",{enumerable:!1,configurable:!0,get:function(){return V},set:function(G){V=G,!S.name&&!S.displayName&&(S.displayName=G)}})}return A}var Ct;Ct=Symbol.for("react.module.reference");function on(S){return!!(typeof S=="string"||typeof S=="function"||S===c||S===m||ue||S===u||S===x||S===w||ae||S===O||se||Se||qe||typeof S=="object"&&S!==null&&(S.$$typeof===R||S.$$typeof===N||S.$$typeof===h||S.$$typeof===v||S.$$typeof===b||S.$$typeof===Ct||S.getModuleId!==void 0))}function Cn(S,A){on(S)||ie("memo: The first argument must be a component. Instead received: %s",S===null?"null":typeof S);var V={$$typeof:N,type:S,compare:A===void 0?null:A};{var G;Object.defineProperty(V,"displayName",{enumerable:!1,configurable:!0,get:function(){return G},set:function(te){G=te,!S.name&&!S.displayName&&(S.displayName=te)}})}return V}function nt(){var S=F.current;return S===null&&ie(`Invalid hook call. Hooks can only be called inside of the body of a function component. This could happen for one of the following reasons: -1. You might have mismatching versions of React and the renderer (such as React DOM) -2. You might be breaking the Rules of Hooks -3. You might have more than one copy of React in the same app -See https://reactjs.org/link/invalid-hook-call for tips about how to debug and fix this problem.`),S}function qt(S){var A=nt();if(S._context!==void 0){var V=S._context;V.Consumer===S?ie("Calling useContext(Context.Consumer) is not supported, may cause bugs, and will be removed in a future major release. Did you mean to call useContext(Context) instead?"):V.Provider===S&&ie("Calling useContext(Context.Provider) is not supported. 
Did you mean to call useContext(Context) instead?")}return A.useContext(S)}function Vn(S){var A=nt();return A.useState(S)}function Bn(S,A,V){var G=nt();return G.useReducer(S,A,V)}function ln(S){var A=nt();return A.useRef(S)}function Ur(S,A){var V=nt();return V.useEffect(S,A)}function vi(S,A){var V=nt();return V.useInsertionEffect(S,A)}function Po(S,A){var V=nt();return V.useLayoutEffect(S,A)}function ya(S,A){var V=nt();return V.useCallback(S,A)}function oo(S,A){var V=nt();return V.useMemo(S,A)}function Nu(S,A,V){var G=nt();return G.useImperativeHandle(S,A,V)}function gi(S,A){{var V=nt();return V.useDebugValue(S,A)}}function Hs(){var S=nt();return S.useTransition()}function zi(S){var A=nt();return A.useDeferredValue(S)}function Jt(){var S=nt();return S.useId()}function Bi(S,A,V){var G=nt();return G.useSyncExternalStore(S,A,V)}var ba=0,Mo,os,Io,ss,ls,Lo,Fo;function us(){}us.__reactDisabledLog=!0;function Ws(){{if(ba===0){Mo=console.log,os=console.info,Io=console.warn,ss=console.error,ls=console.group,Lo=console.groupCollapsed,Fo=console.groupEnd;var S={configurable:!0,enumerable:!0,value:us,writable:!0};Object.defineProperties(console,{info:S,log:S,warn:S,error:S,group:S,groupCollapsed:S,groupEnd:S})}ba++}}function Gs(){{if(ba--,ba===0){var S={configurable:!0,enumerable:!0,writable:!0};Object.defineProperties(console,{log:Me({},S,{value:Mo}),info:Me({},S,{value:os}),warn:Me({},S,{value:Io}),error:Me({},S,{value:ss}),group:Me({},S,{value:ls}),groupCollapsed:Me({},S,{value:Lo}),groupEnd:Me({},S,{value:Fo})})}ba<0&&ie("disabledDepth fell below zero. This is a bug in React. Please file an issue.")}}var yi=Ee.ReactCurrentDispatcher,Mr;function Fa(S,A,V){{if(Mr===void 0)try{throw Error()}catch(te){var G=te.stack.trim().match(/\n( *(at )?)/);Mr=G&&G[1]||""}return` -`+Mr+S}}var Sa=!1,za;{var cs=typeof WeakMap=="function"?WeakMap:Map;za=new cs}function zo(S,A){if(!S||Sa)return"";{var V=za.get(S);if(V!==void 0)return V}var G;Sa=!0;var te=Error.prepareStackTrace;Error.prepareStackTrace=void 0;var ze;ze=yi.current,yi.current=null,Ws();try{if(A){var we=function(){throw Error()};if(Object.defineProperty(we.prototype,"props",{set:function(){throw Error()}}),typeof Reflect=="object"&&Reflect.construct){try{Reflect.construct(we,[])}catch(Bt){G=Bt}Reflect.construct(S,[],we)}else{try{we.call()}catch(Bt){G=Bt}S.call(we.prototype)}}else{try{throw Error()}catch(Bt){G=Bt}S()}}catch(Bt){if(Bt&&G&&typeof Bt.stack=="string"){for(var Ve=Bt.stack.split(` -`),st=G.stack.split(` -`),St=Ve.length-1,Ot=st.length-1;St>=1&&Ot>=0&&Ve[St]!==st[Ot];)Ot--;for(;St>=1&&Ot>=0;St--,Ot--)if(Ve[St]!==st[Ot]){if(St!==1||Ot!==1)do if(St--,Ot--,Ot<0||Ve[St]!==st[Ot]){var yt=` -`+Ve[St].replace(" at new "," at ");return S.displayName&&yt.includes("")&&(yt=yt.replace("",S.displayName)),typeof S=="function"&&za.set(S,yt),yt}while(St>=1&&Ot>=0);break}}}finally{Sa=!1,yi.current=ze,Gs(),Error.prepareStackTrace=te}var jt=S?S.displayName||S.name:"",Tt=jt?Fa(jt):"";return typeof S=="function"&&za.set(S,Tt),Tt}function fs(S,A,V){return zo(S,!1)}function Ml(S){var A=S.prototype;return!!(A&&A.isReactComponent)}function xa(S,A,V){if(S==null)return"";if(typeof S=="function")return zo(S,Ml(S));if(typeof S=="string")return Fa(S);switch(S){case x:return Fa("Suspense");case w:return Fa("SuspenseList")}if(typeof S=="object")switch(S.$$typeof){case b:return fs(S.render);case N:return xa(S.type,A,V);case R:{var G=S,te=G._payload,ze=G._init;try{return xa(ze(te),A,V)}catch{}}}return""}var Bo={},Ba=Ee.ReactDebugCurrentFrame;function bi(S){if(S){var 
A=S._owner,V=xa(S.type,S._source,A?A.type:null);Ba.setExtraStackFrame(V)}else Ba.setExtraStackFrame(null)}function Ys(S,A,V,G,te){{var ze=Function.call.bind(an);for(var we in S)if(ze(S,we)){var Ve=void 0;try{if(typeof S[we]!="function"){var st=Error((G||"React class")+": "+V+" type `"+we+"` is invalid; it must be a function, usually from the `prop-types` package, but received `"+typeof S[we]+"`.This often happens because of typos such as `PropTypes.function` instead of `PropTypes.func`.");throw st.name="Invariant Violation",st}Ve=S[we](A,we,G,V,null,"SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED")}catch(St){Ve=St}Ve&&!(Ve instanceof Error)&&(bi(te),ie("%s: type specification of %s `%s` is invalid; the type checker function must return `null` or an `Error` but returned a %s. You may have forgotten to pass an argument to the type checker creator (arrayOf, instanceOf, objectOf, oneOf, oneOfType, and shape all require an argument).",G||"React class",V,we,typeof Ve),bi(null)),Ve instanceof Error&&!(Ve.message in Bo)&&(Bo[Ve.message]=!0,bi(te),ie("Failed %s type: %s",V,Ve.message),bi(null))}}}function un(S){if(S){var A=S._owner,V=xa(S.type,S._source,A?A.type:null);me(V)}else me(null)}var Si;Si=!1;function Uo(){if(q.current){var S=Lt(q.current.type);if(S)return` - -Check the render method of \``+S+"`."}return""}function $t(S){if(S!==void 0){var A=S.fileName.replace(/^.*[\\\/]/,""),V=S.lineNumber;return` - -Check your code at `+A+":"+V+"."}return""}function qs(S){return S!=null?$t(S.__source):""}var xr={};function Ui(S){var A=Uo();if(!A){var V=typeof S=="string"?S:S.displayName||S.name;V&&(A=` - -Check the top-level render call using <`+V+">.")}return A}function Ya(S,A){if(!(!S._store||S._store.validated||S.key!=null)){S._store.validated=!0;var V=Ui(A);if(!xr[V]){xr[V]=!0;var G="";S&&S._owner&&S._owner!==q.current&&(G=" It was passed a child from "+Lt(S._owner.type)+"."),un(S),ie('Each child in a list should have a unique "key" prop.%s%s See https://reactjs.org/link/warning-keys for more information.',V,G),un(null)}}}function so(S,A){if(typeof S=="object"){if(Wt(S))for(var V=0;V",te=" Did you accidentally export a JSX literal instead of a component?"):we=typeof S,ie("React.createElement: type is invalid -- expected a string (for built-in components) or a class/function (for composite components) but got: %s.%s",we,te)}var Ve=it.apply(this,arguments);if(Ve==null)return Ve;if(G)for(var st=2;st10&&de("Detected a large number of updates inside startTransition. If this is due to a subscription please re-write it to use React provided hooks. Otherwise concurrent mode guarantees are off the table."),G._updatedFibers.clear()}}}var lo=!1,xi=null;function Ks(S){if(xi===null)try{var A=("require"+Math.random()).slice(0,7),V=e&&e[A];xi=V.call(e,"timers").setImmediate}catch{xi=function(te){lo===!1&&(lo=!0,typeof MessageChannel>"u"&&ie("This browser does not have a MessageChannel implementation, so enqueuing tasks via await act(async () => ...) will fail. 
Please file an issue at https://github.com/facebook/react/issues if you encounter this warning."));var ze=new MessageChannel;ze.port1.onmessage=te,ze.port2.postMessage(void 0)}}return xi(S)}var vn=0,Fn=!1;function Il(S){{var A=vn;vn++,$.current===null&&($.current=[]);var V=$.isBatchingLegacy,G;try{if($.isBatchingLegacy=!0,G=S(),!V&&$.didScheduleLegacyUpdate){var te=$.current;te!==null&&($.didScheduleLegacyUpdate=!1,pe(te))}}catch(jt){throw Ua(A),jt}finally{$.isBatchingLegacy=V}if(G!==null&&typeof G=="object"&&typeof G.then=="function"){var ze=G,we=!1,Ve={then:function(jt,Tt){we=!0,ze.then(function(Bt){Ua(A),vn===0?W(Bt,jt,Tt):jt(Bt)},function(Bt){Ua(A),Tt(Bt)})}};return!Fn&&typeof Promise<"u"&&Promise.resolve().then(function(){}).then(function(){we||(Fn=!0,ie("You called act(async () => ...) without await. This could lead to unexpected testing behaviour, interleaving multiple act calls and mixing their scopes. You should - await act(async () => ...);"))}),Ve}else{var st=G;if(Ua(A),vn===0){var St=$.current;St!==null&&(pe(St),$.current=null);var Ot={then:function(jt,Tt){$.current===null?($.current=[],W(st,jt,Tt)):jt(st)}};return Ot}else{var yt={then:function(jt,Tt){jt(st)}};return yt}}}}function Ua(S){S!==vn-1&&ie("You seem to have overlapping act() calls, this is not supported. Be sure to await previous act() calls before making a new one. "),vn=S}function W(S,A,V){{var G=$.current;if(G!==null)try{pe(G),Ks(function(){G.length===0?($.current=null,A(S)):W(S,A,V)})}catch(te){V(te)}else A(S)}}var Q=!1;function pe(S){if(!Q){Q=!0;var A=0;try{for(;A0;){var Qt=hn-1>>>1,Tn=je[Qt];if(v(Tn,it)>0)je[Qt]=it,je[hn]=Tn,hn=Qt;else return}}function h(je,it,_t){for(var hn=_t,Qt=je.length,Tn=Qt>>>1;hn_t&&(!je||$n()));){var hn=ae.callback;if(typeof hn=="function"){ae.callback=null,ue=ae.priorityLevel;var Qt=ae.expirationTime<=_t,Tn=hn(Qt);_t=e.unstable_now(),typeof Tn=="function"?ae.callback=Tn:ae===c(se)&&u(se),Ce(_t)}else u(se);ae=c(se)}if(ae!==null)return!0;var Ln=c(Se);return Ln!==null&&kt(Me,Ln.startTime-_t),!1}function at(je,it){switch(je){case b:case x:case w:case N:case R:break;default:je=w}var _t=ue;ue=je;try{return it()}finally{ue=_t}}function gt(je){var it;switch(ue){case b:case x:case w:it=w;break;default:it=ue;break}var _t=ue;ue=it;try{return je()}finally{ue=_t}}function Ht(je){var it=ue;return function(){var _t=ue;ue=it;try{return je.apply(this,arguments)}finally{ue=_t}}}function Ze(je,it,_t){var hn=e.unstable_now(),Qt;if(typeof _t=="object"&&_t!==null){var Tn=_t.delay;typeof Tn=="number"&&Tn>0?Qt=hn+Tn:Qt=hn}else Qt=hn;var Ln;switch(je){case b:Ln=$;break;case x:Ln=q;break;case R:Ln=me;break;case N:Ln=le;break;case w:default:Ln=K;break}var kr=Qt+Ln,Dn={id:qe++,callback:it,priorityLevel:je,startTime:Qt,expirationTime:kr,sortIndex:-1};return Qt>hn?(Dn.sortIndex=Qt,o(Se,Dn),c(se)===null&&Dn===c(Se)&&(ie?Oe():ie=!0,kt(Me,Qt-hn))):(Dn.sortIndex=kr,o(se,Dn),!de&&!Ee&&(de=!0,tn(Ie))),Dn}function ct(){}function wt(){!de&&!Ee&&(de=!0,tn(Ie))}function zt(){return c(se)}function Ge(je){je.callback=null}function Wt(){return ue}var ye=!1,et=null,Nt=-1,lt=i,xn=-1;function $n(){var je=e.unstable_now()-xn;return!(je125){console.error("forceFrameRate takes a positive int between 0 and 125, forcing frame rates higher than 125 fps is not supported");return}je>0?lt=Math.floor(1e3/je):lt=i}var On=function(){if(et!==null){var je=e.unstable_now();xn=je;var it=!0,_t=!0;try{_t=et(it,je)}finally{_t?gn():(ye=!1,et=null)}}else ye=!1},gn;if(typeof he=="function")gn=function(){he(On)};else if(typeof MessageChannel<"u"){var 
He=new MessageChannel,Je=He.port2;He.port1.onmessage=On,gn=function(){Je.postMessage(null)}}else gn=function(){xe(On,0)};function tn(je){et=je,ye||(ye=!0,gn())}function kt(je,it){Nt=xe(function(){je(e.unstable_now())},it)}function Oe(){Ne(Nt),Nt=-1}var Yt=Lt,_n=null;e.unstable_IdlePriority=R,e.unstable_ImmediatePriority=b,e.unstable_LowPriority=N,e.unstable_NormalPriority=w,e.unstable_Profiling=_n,e.unstable_UserBlockingPriority=x,e.unstable_cancelCallback=Ge,e.unstable_continueExecution=wt,e.unstable_forceFrameRate=an,e.unstable_getCurrentPriorityLevel=Wt,e.unstable_getFirstCallbackNode=zt,e.unstable_next=gt,e.unstable_pauseExecution=ct,e.unstable_requestPaint=Yt,e.unstable_runWithPriority=at,e.unstable_scheduleCallback=Ze,e.unstable_shouldYield=$n,e.unstable_wrapCallback=Ht,typeof __REACT_DEVTOOLS_GLOBAL_HOOK__<"u"&&typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.registerInternalModuleStop=="function"&&__REACT_DEVTOOLS_GLOBAL_HOOK__.registerInternalModuleStop(new Error)})()})(ZI);(function(e){e.exports=ZI})(XI);/** - * @license React - * react-dom.development.js - * - * Copyright (c) Facebook, Inc. and its affiliates. - * - * This source code is licensed under the MIT license found in the - * LICENSE file in the root directory of this source tree. - */(function(){typeof __REACT_DEVTOOLS_GLOBAL_HOOK__<"u"&&typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.registerInternalModuleStart=="function"&&__REACT_DEVTOOLS_GLOBAL_HOOK__.registerInternalModuleStart(new Error);var e=k.exports,t=XI.exports,n=e.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED,i=!1;function o(r){i=r}function c(r){if(!i){for(var a=arguments.length,s=new Array(a>1?a-1:0),f=1;f1?a-1:0),f=1;f2&&(r[0]==="o"||r[0]==="O")&&(r[1]==="n"||r[1]==="N")}function kr(r,a,s,f){if(s!==null&&s.type===He)return!1;switch(typeof a){case"function":case"symbol":return!0;case"boolean":{if(f)return!1;if(s!==null)return!s.acceptsBooleans;var p=r.toLowerCase().slice(0,5);return p!=="data-"&&p!=="aria-"}default:return!1}}function Dn(r,a,s,f){if(a===null||typeof a>"u"||kr(r,a,s,f))return!0;if(f)return!1;if(s!==null)switch(s.type){case kt:return!a;case Oe:return a===!1;case Yt:return isNaN(a);case _n:return isNaN(a)||a<1}return!1}function va(r){return Rn.hasOwnProperty(r)?Rn[r]:null}function jn(r,a,s,f,p,y,C){this.acceptsBooleans=a===tn||a===kt||a===Oe,this.attributeName=f,this.attributeNamespace=p,this.mustUseProperty=s,this.propertyName=r,this.type=a,this.sanitizeURL=y,this.removeEmptyString=C}var Rn={},ga=["children","dangerouslySetInnerHTML","defaultValue","defaultChecked","innerHTML","suppressContentEditableWarning","suppressHydrationWarning","style"];ga.forEach(function(r){Rn[r]=new jn(r,He,!1,r,null,!1,!1)}),[["acceptCharset","accept-charset"],["className","class"],["htmlFor","for"],["httpEquiv","http-equiv"]].forEach(function(r){var a=r[0],s=r[1];Rn[a]=new jn(a,Je,!1,s,null,!1,!1)}),["contentEditable","draggable","spellCheck","value"].forEach(function(r){Rn[r]=new jn(r,tn,!1,r.toLowerCase(),null,!1,!1)}),["autoReverse","externalResourcesRequired","focusable","preserveAlpha"].forEach(function(r){Rn[r]=new jn(r,tn,!1,r,null,!1,!1)}),["allowFullScreen","async","autoFocus","autoPlay","controls","default","defer","disabled","disablePictureInPicture","disableRemotePlayback","formNoValidate","hidden","loop","noModule","noValidate","open","playsInline","readOnly","required","reversed","scoped","seamless","itemScope"].forEach(function(r){Rn[r]=new jn(r,kt,!1,r.toLowerCase(),null,!1,!1)}),["checked","multiple","muted","selected"].forEach(function(r){Rn[r]=new 
jn(r,kt,!0,r,null,!1,!1)}),["capture","download"].forEach(function(r){Rn[r]=new jn(r,Oe,!1,r,null,!1,!1)}),["cols","rows","size","span"].forEach(function(r){Rn[r]=new jn(r,_n,!1,r,null,!1,!1)}),["rowSpan","start"].forEach(function(r){Rn[r]=new jn(r,Yt,!1,r.toLowerCase(),null,!1,!1)});var Sr=/[\-\:]([a-z])/g,ko=function(r){return r[1].toUpperCase()};["accent-height","alignment-baseline","arabic-form","baseline-shift","cap-height","clip-path","clip-rule","color-interpolation","color-interpolation-filters","color-profile","color-rendering","dominant-baseline","enable-background","fill-opacity","fill-rule","flood-color","flood-opacity","font-family","font-size","font-size-adjust","font-stretch","font-style","font-variant","font-weight","glyph-name","glyph-orientation-horizontal","glyph-orientation-vertical","horiz-adv-x","horiz-origin-x","image-rendering","letter-spacing","lighting-color","marker-end","marker-mid","marker-start","overline-position","overline-thickness","paint-order","panose-1","pointer-events","rendering-intent","shape-rendering","stop-color","stop-opacity","strikethrough-position","strikethrough-thickness","stroke-dasharray","stroke-dashoffset","stroke-linecap","stroke-linejoin","stroke-miterlimit","stroke-opacity","stroke-width","text-anchor","text-decoration","text-rendering","underline-position","underline-thickness","unicode-bidi","unicode-range","units-per-em","v-alphabetic","v-hanging","v-ideographic","v-mathematical","vector-effect","vert-adv-y","vert-origin-x","vert-origin-y","word-spacing","writing-mode","xmlns:xlink","x-height"].forEach(function(r){var a=r.replace(Sr,ko);Rn[a]=new jn(a,Je,!1,r,null,!1,!1)}),["xlink:actuate","xlink:arcrole","xlink:role","xlink:show","xlink:title","xlink:type"].forEach(function(r){var a=r.replace(Sr,ko);Rn[a]=new jn(a,Je,!1,r,"http://www.w3.org/1999/xlink",!1,!1)}),["xml:base","xml:lang","xml:space"].forEach(function(r){var a=r.replace(Sr,ko);Rn[a]=new jn(a,Je,!1,r,"http://www.w3.org/XML/1998/namespace",!1,!1)}),["tabIndex","crossOrigin"].forEach(function(r){Rn[r]=new jn(r,Je,!1,r.toLowerCase(),null,!1,!1)});var as="xlinkHref";Rn[as]=new jn("xlinkHref",Je,!1,"xlink:href","http://www.w3.org/1999/xlink",!0,!1),["src","href","action","formAction"].forEach(function(r){Rn[r]=new jn(r,Je,!1,r.toLowerCase(),null,!0,!0)});var is=/^[\u0000-\u001F ]*j[\r\n\t]*a[\r\n\t]*v[\r\n\t]*a[\r\n\t]*s[\r\n\t]*c[\r\n\t]*r[\r\n\t]*i[\r\n\t]*p[\r\n\t]*t[\r\n\t]*\:/i,Oo=!1;function Do(r){!Oo&&is.test(r)&&(Oo=!0,u("A future version of React will block javascript: URLs as a security precaution. Use event handlers instead if you can. If you need to generate unsafe HTML try using dangerouslySetInnerHTML instead. 
React was passed %s.",JSON.stringify(r)))}function vr(r,a,s,f){if(f.mustUseProperty){var p=f.propertyName;return r[p]}else{xn(s,a),f.sanitizeURL&&Do(""+s);var y=f.attributeName,C=null;if(f.type===Oe){if(r.hasAttribute(y)){var T=r.getAttribute(y);return T===""?!0:Dn(a,s,f,!1)?T:T===""+s?s:T}}else if(r.hasAttribute(y)){if(Dn(a,s,f,!1))return r.getAttribute(y);if(f.type===kt)return s;C=r.getAttribute(y)}return Dn(a,s,f,!1)?C===null?s:C:C===""+s?s:C}}function Ia(r,a,s,f){{if(!Tn(a))return;if(!r.hasAttribute(a))return s===void 0?void 0:null;var p=r.getAttribute(a);return xn(s,a),p===""+s?s:p}}function Fi(r,a,s,f){var p=va(a);if(!Ln(a,p,f)){if(Dn(a,s,p,f)&&(s=null),f||p===null){if(Tn(a)){var y=a;s===null?r.removeAttribute(y):(xn(s,a),r.setAttribute(y,""+s))}return}var C=p.mustUseProperty;if(C){var T=p.propertyName;if(s===null){var D=p.type;r[T]=D===kt?!1:""}else r[T]=s;return}var U=p.attributeName,H=p.attributeNamespace;if(s===null)r.removeAttribute(U);else{var ee=p.type,J;ee===kt||ee===Oe&&s===!0?J="":(xn(s,U),J=""+s,p.sanitizeURL&&Do(J.toString())),H?r.setAttributeNS(H,U,J):r.setAttribute(U,J)}}}var La=Symbol.for("react.element"),X=Symbol.for("react.portal"),Ue=Symbol.for("react.fragment"),Ke=Symbol.for("react.strict_mode"),Ct=Symbol.for("react.profiler"),on=Symbol.for("react.provider"),Cn=Symbol.for("react.context"),nt=Symbol.for("react.forward_ref"),qt=Symbol.for("react.suspense"),Vn=Symbol.for("react.suspense_list"),Bn=Symbol.for("react.memo"),ln=Symbol.for("react.lazy"),Ur=Symbol.for("react.scope"),vi=Symbol.for("react.debug_trace_mode"),Po=Symbol.for("react.offscreen"),ya=Symbol.for("react.legacy_hidden"),oo=Symbol.for("react.cache"),Nu=Symbol.for("react.tracing_marker"),gi=Symbol.iterator,Hs="@@iterator";function zi(r){if(r===null||typeof r!="object")return null;var a=gi&&r[gi]||r[Hs];return typeof a=="function"?a:null}var Jt=Object.assign,Bi=0,ba,Mo,os,Io,ss,ls,Lo;function Fo(){}Fo.__reactDisabledLog=!0;function us(){{if(Bi===0){ba=console.log,Mo=console.info,os=console.warn,Io=console.error,ss=console.group,ls=console.groupCollapsed,Lo=console.groupEnd;var r={configurable:!0,enumerable:!0,value:Fo,writable:!0};Object.defineProperties(console,{info:r,log:r,warn:r,error:r,group:r,groupCollapsed:r,groupEnd:r})}Bi++}}function Ws(){{if(Bi--,Bi===0){var r={configurable:!0,enumerable:!0,writable:!0};Object.defineProperties(console,{log:Jt({},r,{value:ba}),info:Jt({},r,{value:Mo}),warn:Jt({},r,{value:os}),error:Jt({},r,{value:Io}),group:Jt({},r,{value:ss}),groupCollapsed:Jt({},r,{value:ls}),groupEnd:Jt({},r,{value:Lo})})}Bi<0&&u("disabledDepth fell below zero. This is a bug in React. 
Please file an issue.")}}var Gs=n.ReactCurrentDispatcher,yi;function Mr(r,a,s){{if(yi===void 0)try{throw Error()}catch(p){var f=p.stack.trim().match(/\n( *(at )?)/);yi=f&&f[1]||""}return` -`+yi+r}}var Fa=!1,Sa;{var za=typeof WeakMap=="function"?WeakMap:Map;Sa=new za}function cs(r,a){if(!r||Fa)return"";{var s=Sa.get(r);if(s!==void 0)return s}var f;Fa=!0;var p=Error.prepareStackTrace;Error.prepareStackTrace=void 0;var y;y=Gs.current,Gs.current=null,us();try{if(a){var C=function(){throw Error()};if(Object.defineProperty(C.prototype,"props",{set:function(){throw Error()}}),typeof Reflect=="object"&&Reflect.construct){try{Reflect.construct(C,[])}catch(ge){f=ge}Reflect.construct(r,[],C)}else{try{C.call()}catch(ge){f=ge}r.call(C.prototype)}}else{try{throw Error()}catch(ge){f=ge}r()}}catch(ge){if(ge&&f&&typeof ge.stack=="string"){for(var T=ge.stack.split(` -`),D=f.stack.split(` -`),U=T.length-1,H=D.length-1;U>=1&&H>=0&&T[U]!==D[H];)H--;for(;U>=1&&H>=0;U--,H--)if(T[U]!==D[H]){if(U!==1||H!==1)do if(U--,H--,H<0||T[U]!==D[H]){var ee=` -`+T[U].replace(" at new "," at ");return r.displayName&&ee.includes("")&&(ee=ee.replace("",r.displayName)),typeof r=="function"&&Sa.set(r,ee),ee}while(U>=1&&H>=0);break}}}finally{Fa=!1,Gs.current=y,Ws(),Error.prepareStackTrace=p}var J=r?r.displayName||r.name:"",ve=J?Mr(J):"";return typeof r=="function"&&Sa.set(r,ve),ve}function zo(r,a,s){return cs(r,!0)}function fs(r,a,s){return cs(r,!1)}function Ml(r){var a=r.prototype;return!!(a&&a.isReactComponent)}function xa(r,a,s){if(r==null)return"";if(typeof r=="function")return cs(r,Ml(r));if(typeof r=="string")return Mr(r);switch(r){case qt:return Mr("Suspense");case Vn:return Mr("SuspenseList")}if(typeof r=="object")switch(r.$$typeof){case nt:return fs(r.render);case Bn:return xa(r.type,a,s);case ln:{var f=r,p=f._payload,y=f._init;try{return xa(y(p),a,s)}catch{}}}return""}function Bo(r){switch(r._debugOwner&&r._debugOwner.type,r._debugSource,r.tag){case N:return Mr(r.type);case le:return Mr("Lazy");case $:return Mr("Suspense");case Se:return Mr("SuspenseList");case h:case b:case K:return fs(r.type);case F:return fs(r.type.render);case v:return zo(r.type);default:return""}}function Ba(r){try{var a="",s=r;do a+=Bo(s),s=s.return;while(s);return a}catch(f){return` -Error generating stack: `+f.message+` -`+f.stack}}function bi(r,a,s){var f=r.displayName;if(f)return f;var p=a.displayName||a.name||"";return p!==""?s+"("+p+")":s}function Ys(r){return r.displayName||"Context"}function un(r){if(r==null)return null;if(typeof r.tag=="number"&&u("Received an unexpected object in getComponentNameFromType(). This is likely a bug in React. 
Please file an issue."),typeof r=="function")return r.displayName||r.name||null;if(typeof r=="string")return r;switch(r){case Ue:return"Fragment";case X:return"Portal";case Ct:return"Profiler";case Ke:return"StrictMode";case qt:return"Suspense";case Vn:return"SuspenseList"}if(typeof r=="object")switch(r.$$typeof){case Cn:var a=r;return Ys(a)+".Consumer";case on:var s=r;return Ys(s._context)+".Provider";case nt:return bi(r,r.render,"ForwardRef");case Bn:var f=r.displayName||null;return f!==null?f:un(r.type)||"Memo";case ln:{var p=r,y=p._payload,C=p._init;try{return un(C(y))}catch{return null}}}return null}function Si(r,a,s){var f=a.displayName||a.name||"";return r.displayName||(f!==""?s+"("+f+")":s)}function Uo(r){return r.displayName||"Context"}function $t(r){var a=r.tag,s=r.type;switch(a){case Ee:return"Cache";case B:var f=s;return Uo(f)+".Consumer";case I:var p=s;return Uo(p._context)+".Provider";case se:return"DehydratedFragment";case F:return Si(s,s.render,"ForwardRef");case O:return"Fragment";case N:return s;case w:return"Portal";case x:return"Root";case R:return"Text";case le:return un(s);case M:return s===Ke?"StrictMode":"Mode";case ae:return"Offscreen";case z:return"Profiler";case qe:return"Scope";case $:return"Suspense";case Se:return"SuspenseList";case de:return"TracingMarker";case v:case h:case me:case b:case q:case K:if(typeof s=="function")return s.displayName||s.name||null;if(typeof s=="string")return s;break}return null}var qs=n.ReactDebugCurrentFrame,xr=null,Ui=!1;function Ya(){{if(xr===null)return null;var r=xr._debugOwner;if(r!==null&&typeof r<"u")return $t(r)}return null}function so(){return xr===null?"":Ba(xr)}function _r(){qs.getCurrentStack=null,xr=null,Ui=!1}function or(r){qs.getCurrentStack=r===null?null:so,xr=r,Ui=!1}function $o(){return xr}function Kr(r){Ui=r}function pr(r){return""+r}function aa(r){switch(typeof r){case"boolean":case"number":case"string":case"undefined":return r;case"object":return gn(r),r;default:return""}}var Eu={button:!0,checkbox:!0,image:!0,hidden:!0,radio:!0,reset:!0,submit:!0};function lo(r,a){Eu[a.type]||a.onChange||a.onInput||a.readOnly||a.disabled||a.value==null||u("You provided a `value` prop to a form field without an `onChange` handler. This will render a read-only field. If the field should be mutable use `defaultValue`. Otherwise, set either `onChange` or `readOnly`."),a.onChange||a.readOnly||a.disabled||a.checked==null||u("You provided a `checked` prop to a form field without an `onChange` handler. This will render a read-only field. If the field should be mutable use `defaultChecked`. 
Otherwise, set either `onChange` or `readOnly`.")}function xi(r){var a=r.type,s=r.nodeName;return s&&s.toLowerCase()==="input"&&(a==="checkbox"||a==="radio")}function Ks(r){return r._valueTracker}function vn(r){r._valueTracker=null}function Fn(r){var a="";return r&&(xi(r)?a=r.checked?"true":"false":a=r.value),a}function Il(r){var a=xi(r)?"checked":"value",s=Object.getOwnPropertyDescriptor(r.constructor.prototype,a);gn(r[a]);var f=""+r[a];if(!(r.hasOwnProperty(a)||typeof s>"u"||typeof s.get!="function"||typeof s.set!="function")){var p=s.get,y=s.set;Object.defineProperty(r,a,{configurable:!0,get:function(){return p.call(this)},set:function(T){gn(T),f=""+T,y.call(this,T)}}),Object.defineProperty(r,a,{enumerable:s.enumerable});var C={getValue:function(){return f},setValue:function(T){gn(T),f=""+T},stopTracking:function(){vn(r),delete r[a]}};return C}}function Ua(r){Ks(r)||(r._valueTracker=Il(r))}function W(r){if(!r)return!1;var a=Ks(r);if(!a)return!0;var s=a.getValue(),f=Fn(r);return f!==s?(a.setValue(f),!0):!1}function Q(r){if(r=r||(typeof document<"u"?document:void 0),typeof r>"u")return null;try{return r.activeElement||r.body}catch{return r.body}}var pe=!1,ot=!1,cn=!1,zn=!1;function Kt(r){var a=r.type==="checkbox"||r.type==="radio";return a?r.checked!=null:r.value!=null}function S(r,a){var s=r,f=a.checked,p=Jt({},a,{defaultChecked:void 0,defaultValue:void 0,value:void 0,checked:f??s._wrapperState.initialChecked});return p}function A(r,a){lo("input",a),a.checked!==void 0&&a.defaultChecked!==void 0&&!ot&&(u("%s contains an input of type %s with both checked and defaultChecked props. Input elements must be either controlled or uncontrolled (specify either the checked prop, or the defaultChecked prop, but not both). Decide between using a controlled or uncontrolled input element and remove one of these props. More info: https://reactjs.org/link/controlled-components",Ya()||"A component",a.type),ot=!0),a.value!==void 0&&a.defaultValue!==void 0&&!pe&&(u("%s contains an input of type %s with both value and defaultValue props. Input elements must be either controlled or uncontrolled (specify either the value prop, or the defaultValue prop, but not both). Decide between using a controlled or uncontrolled input element and remove one of these props. More info: https://reactjs.org/link/controlled-components",Ya()||"A component",a.type),pe=!0);var s=r,f=a.defaultValue==null?"":a.defaultValue;s._wrapperState={initialChecked:a.checked!=null?a.checked:a.defaultChecked,initialValue:aa(a.value!=null?a.value:f),controlled:Kt(a)}}function V(r,a){var s=r,f=a.checked;f!=null&&Fi(s,"checked",f,!1)}function G(r,a){var s=r;{var f=Kt(a);!s._wrapperState.controlled&&f&&!zn&&(u("A component is changing an uncontrolled input to be controlled. This is likely caused by the value changing from undefined to a defined value, which should not happen. Decide between using a controlled or uncontrolled input element for the lifetime of the component. More info: https://reactjs.org/link/controlled-components"),zn=!0),s._wrapperState.controlled&&!f&&!cn&&(u("A component is changing a controlled input to be uncontrolled. This is likely caused by the value changing from a defined to undefined, which should not happen. Decide between using a controlled or uncontrolled input element for the lifetime of the component. 
More info: https://reactjs.org/link/controlled-components"),cn=!0)}V(r,a);var p=aa(a.value),y=a.type;if(p!=null)y==="number"?(p===0&&s.value===""||s.value!=p)&&(s.value=pr(p)):s.value!==pr(p)&&(s.value=pr(p));else if(y==="submit"||y==="reset"){s.removeAttribute("value");return}a.hasOwnProperty("value")?Ve(s,a.type,p):a.hasOwnProperty("defaultValue")&&Ve(s,a.type,aa(a.defaultValue)),a.checked==null&&a.defaultChecked!=null&&(s.defaultChecked=!!a.defaultChecked)}function te(r,a,s){var f=r;if(a.hasOwnProperty("value")||a.hasOwnProperty("defaultValue")){var p=a.type,y=p==="submit"||p==="reset";if(y&&(a.value===void 0||a.value===null))return;var C=pr(f._wrapperState.initialValue);s||C!==f.value&&(f.value=C),f.defaultValue=C}var T=f.name;T!==""&&(f.name=""),f.defaultChecked=!f.defaultChecked,f.defaultChecked=!!f._wrapperState.initialChecked,T!==""&&(f.name=T)}function ze(r,a){var s=r;G(s,a),we(s,a)}function we(r,a){var s=a.name;if(a.type==="radio"&&s!=null){for(var f=r;f.parentNode;)f=f.parentNode;xn(s,"name");for(var p=f.querySelectorAll("input[name="+JSON.stringify(""+s)+'][type="radio"]'),y=0;y.")))}):a.dangerouslySetInnerHTML!=null&&(Ot||(Ot=!0,u("Pass a `value` prop if you set dangerouslyInnerHTML so React knows which value should be selected.")))),a.selected!=null&&!st&&(u("Use the `defaultValue` or `value` props on must be a scalar value if `multiple` is false.%s",s,Ca())}}}}function Pn(r,a,s,f){var p=r.options;if(a){for(var y=s,C={},T=0;T.");var f=Jt({},a,{value:void 0,defaultValue:void 0,children:pr(s._wrapperState.initialValue)});return f}function Nv(r,a){var s=r;lo("textarea",a),a.value!==void 0&&a.defaultValue!==void 0&&!lb&&(u("%s contains a textarea with both value and defaultValue props. Textarea elements must be either controlled or uncontrolled (specify either the value prop, or the defaultValue prop, but not both). Decide between using a controlled or uncontrolled textarea and remove one of these props. More info: https://reactjs.org/link/controlled-components",Ya()||"A component"),lb=!0);var f=a.value;if(f==null){var p=a.children,y=a.defaultValue;if(p!=null){u("Use the `defaultValue` or `value` props instead of setting children on