diff --git a/gradio_canny2image.py b/gradio_canny2image.py
index f9b1e9f..1db15ce 100644
--- a/gradio_canny2image.py
+++ b/gradio_canny2image.py
@@ -23,7 +23,7 @@ model = model.cuda()
 ddim_sampler = DDIMSampler(model)
 
 
-def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, scale, seed, eta, low_threshold, high_threshold):
+def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, strength, scale, seed, eta, low_threshold, high_threshold):
     with torch.no_grad():
         img = resize_image(HWC3(input_image), image_resolution)
         H, W, C = img.shape
@@ -49,6 +49,7 @@ def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resoluti
         if config.save_memory:
             model.low_vram_shift(is_diffusing=True)
 
+        model.control_scales = [strength] * 13
         samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                      shape, cond, verbose=False, eta=eta,
                                                      unconditional_guidance_scale=scale,
@@ -76,6 +77,7 @@ with block:
             with gr.Accordion("Advanced options", open=False):
                 num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
                 image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=256)
+                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                 low_threshold = gr.Slider(label="Canny low threshold", minimum=1, maximum=255, value=100, step=1)
                 high_threshold = gr.Slider(label="Canny high threshold", minimum=1, maximum=255, value=200, step=1)
                 ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
@@ -87,7 +89,7 @@ with block:
                                       value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
         with gr.Column():
             result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
-    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, scale, seed, eta, low_threshold, high_threshold]
+    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, strength, scale, seed, eta, low_threshold, high_threshold]
     run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
 
 
diff --git a/gradio_depth2image.py b/gradio_depth2image.py
index 6a72de6..0a81bcf 100644
--- a/gradio_depth2image.py
+++ b/gradio_depth2image.py
@@ -23,7 +23,7 @@ model = model.cuda()
 ddim_sampler = DDIMSampler(model)
 
 
-def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, scale, seed, eta):
+def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, strength, scale, seed, eta):
     with torch.no_grad():
         input_image = HWC3(input_image)
         detected_map, _ = apply_midas(resize_image(input_image, detect_resolution))
@@ -51,6 +51,7 @@ def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resoluti
         if config.save_memory:
             model.low_vram_shift(is_diffusing=True)
 
+        model.control_scales = [strength] * 13
         samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                      shape, cond, verbose=False, eta=eta,
                                                      unconditional_guidance_scale=scale,
@@ -78,6 +79,7 @@ with block:
             with gr.Accordion("Advanced options", open=False):
                 num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
                 image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=256)
+                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                 detect_resolution = gr.Slider(label="Depth Resolution", minimum=128, maximum=1024, value=384, step=1)
                 ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                 scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
@@ -88,7 +90,7 @@ with block:
                                       value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
         with gr.Column():
             result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
-    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, scale, seed, eta]
+    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, strength, scale, seed, eta]
     run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
 
 
diff --git a/gradio_fake_scribble2image.py b/gradio_fake_scribble2image.py
index fac13d9..5e38e44 100644
--- a/gradio_fake_scribble2image.py
+++ b/gradio_fake_scribble2image.py
@@ -23,7 +23,7 @@ model = model.cuda()
 ddim_sampler = DDIMSampler(model)
 
 
-def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, scale, seed, eta):
+def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, strength, scale, seed, eta):
     with torch.no_grad():
         input_image = HWC3(input_image)
         detected_map = apply_hed(resize_image(input_image, detect_resolution))
@@ -55,6 +55,7 @@ def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resoluti
         if config.save_memory:
             model.low_vram_shift(is_diffusing=True)
 
+        model.control_scales = [strength] * 13
         samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                      shape, cond, verbose=False, eta=eta,
                                                      unconditional_guidance_scale=scale,
@@ -82,6 +83,7 @@ with block:
             with gr.Accordion("Advanced options", open=False):
                 num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
                 image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=256)
+                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                 detect_resolution = gr.Slider(label="HED Resolution", minimum=128, maximum=1024, value=512, step=1)
                 ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                 scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
@@ -92,7 +94,7 @@ with block:
                                       value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
         with gr.Column():
             result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
-    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, scale, seed, eta]
+    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, strength, scale, seed, eta]
     run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
 
 
diff --git a/gradio_hed2image.py b/gradio_hed2image.py
index 13c1815..39d1b03 100644
--- a/gradio_hed2image.py
+++ b/gradio_hed2image.py
@@ -23,7 +23,7 @@ model = model.cuda()
 ddim_sampler = DDIMSampler(model)
 
 
-def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, scale, seed, eta):
+def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, strength, scale, seed, eta):
     with torch.no_grad():
         input_image = HWC3(input_image)
         detected_map = apply_hed(resize_image(input_image, detect_resolution))
@@ -51,6 +51,7 @@ def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resoluti
         if config.save_memory:
             model.low_vram_shift(is_diffusing=True)
 
+        model.control_scales = [strength] * 13
         samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                      shape, cond, verbose=False, eta=eta,
                                                      unconditional_guidance_scale=scale,
@@ -78,6 +79,7 @@ with block:
             with gr.Accordion("Advanced options", open=False):
                 num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
                 image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=256)
+                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                 detect_resolution = gr.Slider(label="HED Resolution", minimum=128, maximum=1024, value=512, step=1)
                 ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                 scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
@@ -88,7 +90,7 @@ with block:
                                       value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
         with gr.Column():
             result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
-    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, scale, seed, eta]
+    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, strength, scale, seed, eta]
     run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
 
 
diff --git a/gradio_hough2image.py b/gradio_hough2image.py
index 8223cca..c112305 100644
--- a/gradio_hough2image.py
+++ b/gradio_hough2image.py
@@ -23,7 +23,7 @@ model = model.cuda()
 ddim_sampler = DDIMSampler(model)
 
 
-def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, scale, seed, eta, value_threshold, distance_threshold):
+def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, strength, scale, seed, eta, value_threshold, distance_threshold):
     with torch.no_grad():
         input_image = HWC3(input_image)
         detected_map = apply_mlsd(resize_image(input_image, detect_resolution), value_threshold, distance_threshold)
@@ -51,6 +51,7 @@ def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resoluti
         if config.save_memory:
             model.low_vram_shift(is_diffusing=True)
 
+        model.control_scales = [strength] * 13
         samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                      shape, cond, verbose=False, eta=eta,
                                                      unconditional_guidance_scale=scale,
@@ -78,6 +79,7 @@ with block:
             with gr.Accordion("Advanced options", open=False):
                 num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
                 image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=256)
+                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                 detect_resolution = gr.Slider(label="Hough Resolution", minimum=128, maximum=1024, value=512, step=1)
                 value_threshold = gr.Slider(label="Hough value threshold (MLSD)", minimum=0.01, maximum=2.0, value=0.1, step=0.01)
                 distance_threshold = gr.Slider(label="Hough distance threshold (MLSD)", minimum=0.01, maximum=20.0, value=0.1, step=0.01)
@@ -90,7 +92,7 @@ with block:
                                       value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
         with gr.Column():
             result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
-    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, scale, seed, eta, value_threshold, distance_threshold]
+    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, strength, scale, seed, eta, value_threshold, distance_threshold]
     run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
 
 
diff --git a/gradio_normal2image.py b/gradio_normal2image.py
index e9622e0..643b523 100644
--- a/gradio_normal2image.py
+++ b/gradio_normal2image.py
@@ -23,7 +23,7 @@ model = model.cuda()
 ddim_sampler = DDIMSampler(model)
 
 
-def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, scale, seed, eta, bg_threshold):
+def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, strength, scale, seed, eta, bg_threshold):
     with torch.no_grad():
         input_image = HWC3(input_image)
         _, detected_map = apply_midas(resize_image(input_image, detect_resolution), bg_th=bg_threshold)
@@ -51,6 +51,7 @@ def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resoluti
         if config.save_memory:
             model.low_vram_shift(is_diffusing=True)
 
+        model.control_scales = [strength] * 13
         samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                      shape, cond, verbose=False, eta=eta,
                                                      unconditional_guidance_scale=scale,
@@ -78,6 +79,7 @@ with block:
             with gr.Accordion("Advanced options", open=False):
                 num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
                 image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=256)
+                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                 detect_resolution = gr.Slider(label="Normal Resolution", minimum=128, maximum=1024, value=384, step=1)
                 bg_threshold = gr.Slider(label="Normal background threshold", minimum=0.0, maximum=1.0, value=0.4, step=0.01)
                 ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
@@ -89,7 +91,7 @@ with block:
                                       value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
         with gr.Column():
             result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
-    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, scale, seed, eta, bg_threshold]
+    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, strength, scale, seed, eta, bg_threshold]
     run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
 
 
diff --git a/gradio_pose2image.py b/gradio_pose2image.py
index 92384ed..75b3395 100644
--- a/gradio_pose2image.py
+++ b/gradio_pose2image.py
@@ -23,7 +23,7 @@ model = model.cuda()
 ddim_sampler = DDIMSampler(model)
 
 
-def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, scale, seed, eta):
+def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, strength, scale, seed, eta):
     with torch.no_grad():
         input_image = HWC3(input_image)
         detected_map, _ = apply_openpose(resize_image(input_image, detect_resolution))
@@ -51,6 +51,7 @@ def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resoluti
         if config.save_memory:
             model.low_vram_shift(is_diffusing=True)
 
+        model.control_scales = [strength] * 13
         samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                      shape, cond, verbose=False, eta=eta,
                                                      unconditional_guidance_scale=scale,
@@ -78,6 +79,7 @@ with block:
             with gr.Accordion("Advanced options", open=False):
                 num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
                 image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=256)
+                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                 detect_resolution = gr.Slider(label="OpenPose Resolution", minimum=128, maximum=1024, value=512, step=1)
                 ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                 scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
@@ -88,7 +90,7 @@ with block:
                                       value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
         with gr.Column():
             result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
-    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, scale, seed, eta]
+    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, strength, scale, seed, eta]
     run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
 
 
diff --git a/gradio_scribble2image.py b/gradio_scribble2image.py
index 44241d3..908a30d 100644
--- a/gradio_scribble2image.py
+++ b/gradio_scribble2image.py
@@ -20,7 +20,7 @@ model = model.cuda()
 ddim_sampler = DDIMSampler(model)
 
 
-def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, scale, seed, eta):
+def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, strength, scale, seed, eta):
     with torch.no_grad():
         img = resize_image(HWC3(input_image), image_resolution)
         H, W, C = img.shape
@@ -46,6 +46,7 @@ def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resoluti
         if config.save_memory:
             model.low_vram_shift(is_diffusing=True)
 
+        model.control_scales = [strength] * 13
         samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                      shape, cond, verbose=False, eta=eta,
                                                      unconditional_guidance_scale=scale,
@@ -73,6 +74,7 @@ with block:
             with gr.Accordion("Advanced options", open=False):
                 num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
                 image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=256)
+                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                 ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                 scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
                 seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
@@ -82,7 +84,7 @@ with block:
                                       value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
         with gr.Column():
             result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
-    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, scale, seed, eta]
+    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, strength, scale, seed, eta]
     run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
 
 
diff --git a/gradio_scribble2image_interactive.py b/gradio_scribble2image_interactive.py
index 97e7514..02ae0b0 100644
--- a/gradio_scribble2image_interactive.py
+++ b/gradio_scribble2image_interactive.py
@@ -20,7 +20,7 @@ model = model.cuda()
 ddim_sampler = DDIMSampler(model)
 
 
-def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, scale, seed, eta):
+def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, strength, scale, seed, eta):
     with torch.no_grad():
         img = resize_image(HWC3(input_image['mask'][:, :, 0]), image_resolution)
         H, W, C = img.shape
@@ -46,6 +46,7 @@ def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resoluti
         if config.save_memory:
             model.low_vram_shift(is_diffusing=True)
 
+        model.control_scales = [strength] * 13
         samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                      shape, cond, verbose=False, eta=eta,
                                                      unconditional_guidance_scale=scale,
@@ -83,6 +84,7 @@ with block:
             with gr.Accordion("Advanced options", open=False):
                 num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
                 image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=256)
+                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                 ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                 scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
                 seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
@@ -92,7 +94,7 @@ with block:
                                       value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
         with gr.Column():
             result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
-    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, scale, seed, eta]
+    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, strength, scale, seed, eta]
     run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
 
 
diff --git a/gradio_seg2image.py b/gradio_seg2image.py
index 64d156e..70c5077 100644
--- a/gradio_seg2image.py
+++ b/gradio_seg2image.py
@@ -23,7 +23,7 @@ model = model.cuda()
 ddim_sampler = DDIMSampler(model)
 
 
-def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, scale, seed, eta):
+def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, strength, scale, seed, eta):
     with torch.no_grad():
         input_image = HWC3(input_image)
         detected_map = apply_uniformer(resize_image(input_image, detect_resolution))
@@ -50,6 +50,7 @@ def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resoluti
         if config.save_memory:
             model.low_vram_shift(is_diffusing=True)
 
+        model.control_scales = [strength] * 13
         samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                      shape, cond, verbose=False, eta=eta,
                                                      unconditional_guidance_scale=scale,
@@ -77,6 +78,7 @@ with block:
             with gr.Accordion("Advanced options", open=False):
                 num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
                 image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=256)
+                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                 detect_resolution = gr.Slider(label="Segmentation Resolution", minimum=128, maximum=1024, value=512, step=1)
                 ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                 scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
@@ -87,7 +89,7 @@ with block:
                                       value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
         with gr.Column():
             result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
-    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, scale, seed, eta]
+    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, strength, scale, seed, eta]
     run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
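
Background on the new knob (explanatory note, not part of the patch): ControlNet produces 13 control residuals per denoising step, one from each of the SD UNet's 12 encoder blocks plus one from the middle block, so model.control_scales = [strength] * 13 assigns one multiplier per residual. Below is a minimal, self-contained Python sketch of that scaling step with stand-in tensors; the names control_outputs and scaled are illustrative, not the repo's API.

import torch

# Illustrative sketch only. ControlNet emits 13 residual feature maps,
# one per UNet injection point (12 encoder blocks + 1 middle block);
# the patch applies the slider value as a uniform multiplier to each.
strength = 1.0                                    # slider value, range 0.0-2.0
control_outputs = [torch.randn(1, 320, 64, 64) for _ in range(13)]  # stand-ins
control_scales = [strength] * 13                  # as set in the patch above
scaled = [c * s for c, s in zip(control_outputs, control_scales)]

# At strength 0.0 the control branch is muted (plain text-to-image);
# values above 1.0 make the canny/depth/pose/etc. hint steer the UNet harder.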