diff --git a/modules/img2img.py b/modules/img2img.py
index e6707f9600dc761b01af7e2e3512b993e578b66d..600a5172336f4f69315e0b320683222328ad27dc 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -55,7 +55,10 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
         initial_seed = None
         initial_info = None
 
+        state.job_count = n_iter
+
         for i in range(n_iter):
+
             p.n_iter = 1
             p.batch_size = 1
             p.do_not_save_grid = True
@@ -72,6 +75,8 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
             p.denoising_strength = max(p.denoising_strength * 0.95, 0.1)
             history.append(processed.images[0])
 
+            state.nextjob()
+
         grid = images.image_grid(history, batch_size, rows=1)
 
         images.save_image(grid, p.outpath_grids, "grid", initial_seed, prompt, opts.grid_format, info=info, short_filename=not opts.grid_extended_filename)
@@ -103,6 +108,8 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
         batch_count = math.ceil(len(work) / p.batch_size)
         print(f"SD upscaling will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)} in a total of {batch_count} batches.")
 
+        state.job_count = batch_count
+
         for i in range(batch_count):
             p.init_images = work[i*p.batch_size:(i+1)*p.batch_size]
 
@@ -116,6 +123,8 @@ def img2img(prompt: str, init_img, init_img_with_mask, steps: int, sampler_index
             p.seed = processed.seed + 1
             work_results += processed.images
 
+            state.nextjob()
+
         image_index = 0
         for y, h, row in grid.tiles:
             for tiledata in row:
diff --git a/modules/processing.py b/modules/processing.py
index c0c1adb74235c27846815eb5f43da3fe97375c0f..1351579bd3c44cf48b71951ac474c9afc281118b 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -153,6 +153,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
     with torch.no_grad(), precision_scope("cuda"), ema_scope():
         p.init()
 
+        state.job_count = p.n_iter
+
         for n in range(p.n_iter):
             if state.interrupted:
                 break
@@ -207,6 +209,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
 
                 output_images.append(image)
 
+            state.nextjob()
+
         unwanted_grid_because_of_img_count = len(output_images) < 2 and opts.grid_only_if_multiple
         if not p.do_not_save_grid and not unwanted_grid_because_of_img_count:
             return_grid = opts.return_grid
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 6f028f5f6d142d0a980028392610ab888b81a0c7..896e8b3f5fa20c38c961dababfa9bcfde31cfae1 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -1,10 +1,12 @@
 from collections import namedtuple
+
+import ldm.models.diffusion.ddim
 import torch
 import tqdm
 
 import k_diffusion.sampling
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.models.diffusion.plms import PLMSSampler
+import ldm.models.diffusion.ddim
+import ldm.models.diffusion.plms
 
 from modules.shared import opts, cmd_opts, state
 import modules.shared as shared
@@ -29,8 +31,8 @@ samplers_data_k_diffusion = [
 
 samplers = [
     *samplers_data_k_diffusion,
-    SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(DDIMSampler, model), []),
-    SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(PLMSSampler, model), []),
+    SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), []),
+    SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), []),
 ]
 samplers_for_img2img = [x for x in samplers if x.name != 'PLMS']
 
@@ -43,6 +45,23 @@ def p_sample_ddim_hook(sampler_wrapper, x_dec, cond, ts, *args, **kwargs):
     return sampler_wrapper.orig_p_sample_ddim(x_dec, cond, ts, *args, **kwargs)
 
 
+def extended_tqdm(sequence, *args, desc=None, **kwargs):
+    state.sampling_steps = len(sequence)
+    state.sampling_step = 0
+
+    for x in tqdm.tqdm(sequence, *args, desc=state.job, **kwargs):
+        if state.interrupted:
+            break
+
+        yield x
+
+        state.sampling_step += 1
+
+
+ldm.models.diffusion.ddim.tqdm = lambda *args, desc=None, **kwargs: extended_tqdm(*args, desc=desc, **kwargs)
+ldm.models.diffusion.plms.tqdm = lambda *args, desc=None, **kwargs: extended_tqdm(*args, desc=desc, **kwargs)
+
+
 class VanillaStableDiffusionSampler:
     def __init__(self, constructor, sd_model):
         self.sampler = constructor(sd_model)
@@ -102,13 +121,18 @@ class CFGDenoiser(torch.nn.Module):
         return denoised
 
 
-def extended_trange(*args, **kwargs):
-    for x in tqdm.trange(*args, desc=state.job, **kwargs):
+def extended_trange(count, *args, **kwargs):
+    state.sampling_steps = count
+    state.sampling_step = 0
+
+    for x in tqdm.trange(count, *args, desc=state.job, **kwargs):
         if state.interrupted:
             break
 
         yield x
 
+        state.sampling_step += 1
+
 
 class KDiffusionSampler:
     def __init__(self, funcname, sd_model):
diff --git a/modules/shared.py b/modules/shared.py
index 4e36df37fe588828797687936efb2fbb2625ca6b..53861dafbea525a47b4b524b6344a8794e6e3788 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -42,10 +42,18 @@ batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram o
 class State:
     interrupted = False
     job = ""
+    job_no = 0
+    job_count = 0
+    sampling_step = 0
+    sampling_steps = 0
 
     def interrupt(self):
         self.interrupted = True
 
+    def nextjob(self):
+        self.job_no += 1
+        self.sampling_step = 0
+
 state = State()
 
 artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv'))
@@ -89,6 +97,7 @@ class Options:
         "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscaling. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
         "random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
         "upscale_at_full_resolution_padding": OptionInfo(16, "Inpainting at full resolution: padding, in pixels, for the masked region.", gr.Slider, {"minimum": 0, "maximum": 128, "step": 4}),
+        "show_progressbar": OptionInfo(True, "Show progressbar"),
     }
 
     def __init__(self):
diff --git a/modules/ui.py b/modules/ui.py
index aa5a61b7d1de48e789aa401040ef5ba8ace12596..a9e4fd003702a88f2d96248144e6925f78653794 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -48,7 +48,6 @@ css_hide_progressbar = """
 .meta-text { display:none!important; }
 """
 
-
 def plaintext_to_html(text):
     text = "".join([f"<p>{html.escape(x)}</p>\n" for x in text.split('\n')])
     return text
@@ -134,6 +133,24 @@ def wrap_gradio_call(func):
     return f
 
 
+def check_progress_call():
+    if not opts.show_progressbar:
+        return ""
+
+    if shared.state.job_count == 0:
+        return ""
+
+    progress = shared.state.job_no / shared.state.job_count
+    if shared.state.sampling_steps > 0:
+        progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
+
+    progress = min(progress, 1)
+
+    progressbar = f"""<div class='progressDiv'><div class='progress' style="width:{progress * 100}%">{str(int(progress*100))+"%" if progress > 0.01 else ""}</div></div>"""
+
+    return f"<span style='display: none'>{time.time()}</span><p>{progressbar}</p>"
+
+
 def roll_artist(prompt):
     allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories])
     artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats])
@@ -154,8 +171,9 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
         with gr.Row():
             prompt = gr.Textbox(label="Prompt", elem_id="txt2img_prompt", show_label=False, placeholder="Prompt", lines=1)
             negative_prompt = gr.Textbox(label="Negative prompt", elem_id="txt2img_negative_prompt", show_label=False, placeholder="Negative prompt", lines=1, visible=False)
-            roll = gr.Button('Roll', elem_id="txt2img_roll", visible=len(shared.artist_db.artists)>0)
+            roll = gr.Button('Roll', elem_id="txt2img_roll", visible=len(shared.artist_db.artists) > 0)
             submit = gr.Button('Generate', elem_id="txt2img_generate", variant='primary')
+            check_progress = gr.Button('Check progress', elem_id="check_progress", visible=False)
 
         with gr.Row().style(equal_height=False):
             with gr.Column(variant='panel'):
@@ -185,6 +203,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                 with gr.Group():
                     txt2img_gallery = gr.Gallery(label='Output', elem_id='txt2img_gallery')
 
+
                 with gr.Group():
                     with gr.Row():
                         save = gr.Button('Save')
@@ -193,12 +212,16 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                         send_to_extras = gr.Button('Send to extras')
                         interrupt = gr.Button('Interrupt')
 
+                progressbar = gr.HTML(elem_id="progressbar")
+
                 with gr.Group():
                     html_info = gr.HTML()
                     generation_info = gr.Textbox(visible=False)
 
+
             txt2img_args = dict(
                 fn=txt2img,
+                _js="submit",
                 inputs=[
                     prompt,
                     negative_prompt,
@@ -223,6 +246,13 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
             prompt.submit(**txt2img_args)
             submit.click(**txt2img_args)
 
+            check_progress.click(
+                fn=check_progress_call,
+                inputs=[],
+                outputs=[progressbar],
+            )
+
+
             interrupt.click(
                 fn=lambda: shared.state.interrupt(),
                 inputs=[],
@@ -252,10 +282,12 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                 ]
             )
 
+
     with gr.Blocks(analytics_enabled=False) as img2img_interface:
         with gr.Row():
             prompt = gr.Textbox(label="Prompt", elem_id="img2img_prompt", show_label=False, placeholder="Prompt", lines=1)
             submit = gr.Button('Generate', elem_id="img2img_generate", variant='primary')
+            check_progress = gr.Button('Check progress', elem_id="check_progress", visible=False)
 
         with gr.Row().style(equal_height=False):
 
@@ -310,6 +342,8 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                         save = gr.Button('Save')
                         img2img_send_to_extras = gr.Button('Send to extras')
 
+                progressbar = gr.HTML(elem_id="progressbar")
+
                 with gr.Group():
                     html_info = gr.HTML()
                     generation_info = gr.Textbox(visible=False)
@@ -352,6 +386,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
 
             img2img_args = dict(
                 fn=img2img,
+                _js="submit",
                 inputs=[
                     prompt,
                     init_img,
@@ -386,6 +421,12 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
             prompt.submit(**img2img_args)
             submit.click(**img2img_args)
 
+            check_progress.click(
+                fn=check_progress_call,
+                inputs=[],
+                outputs=[progressbar],
+            )
+
             interrupt.click(
                 fn=lambda: shared.state.interrupt(),
                 inputs=[],
diff --git a/script.js b/script.js
index 7aa07e561f6489acdcfdb81ec0c22e2ab9365311..ff301e4960a15a3c990e1098d2dec584ea13876a 100644
--- a/script.js
+++ b/script.js
@@ -51,6 +51,8 @@ function gradioApp(){
     return document.getElementsByTagName('gradio-app')[0].shadowRoot;
 }
 
+var global_progressbar = null
+
 function addTitles(root){
 	root.querySelectorAll('span, button, select').forEach(function(span){
 		tooltip = titles[span.textContent];
@@ -71,6 +73,17 @@ function addTitles(root){
             select.title = titles[select.value] || "";
 	    }
 	})
+
+	var progressbar = root.getElementById('progressbar')
+	if(progressbar!= null && progressbar != global_progressbar){
+	    global_progressbar = progressbar
+
+        var mutationObserver = new MutationObserver(function(m){
+            window.setTimeout(requestProgress, 500)
+        });
+        mutationObserver.observe( progressbar, { childList:true, subtree:true })
+	}
+
 }
 
 document.addEventListener("DOMContentLoaded", function() {
@@ -78,7 +91,6 @@ document.addEventListener("DOMContentLoaded", function() {
         addTitles(gradioApp());
     });
     mutationObserver.observe( gradioApp(), { childList:true, subtree:true })
-
 });
 
 function selected_gallery_index(){
@@ -105,3 +117,21 @@
 
     return gallery[index];
 }
+
+
+function requestProgress(){
+    var btn = gradioApp().getElementById("check_progress");
+    if(btn==null) return;
+
+    btn.click();
+}
+
+function submit(){
+    window.setTimeout(requestProgress, 500)
+
+    var res = []
+    for(var i=0;i<arguments.length;i++){
+        res.push(arguments[i])
+    }
+    return res
+}
\ No newline at end of file
diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py
index 98e1def0055662fb3fb933cdd0854ca10470ffa6..149d8ac526f0bea04977b5511f0a323350da566e 100644
--- a/scripts/poor_mans_outpainting.py
+++ b/scripts/poor_mans_outpainting.py
@@ -78,6 +78,8 @@ class Script(scripts.Script):
         batch_count = len(work)
         print(f"Poor man's outpainting will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)}.")
 
+        state.job_count = batch_count
+
         for i in range(batch_count):
             p.init_images = [work[i]]
             p.image_mask = work_mask[i]
@@ -93,6 +95,8 @@ class Script(scripts.Script):
             p.seed = processed.seed + 1
             work_results += processed.images
 
+            state.nextjob()
+
         image_index = 0
         for y, h, row in grid.tiles:
             for tiledata in row:
diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py
index ed31dd36b38cad7e5501f2e7b260ee8eda661f9c..720d7583fddaaf6a7a8cb5ae32652c68692e8ac0 100644
--- a/scripts/prompt_matrix.py
+++ b/scripts/prompt_matrix.py
@@ -20,6 +20,8 @@ def draw_xy_grid(xs, ys, x_label, y_label, cell):
 
     first_pocessed = None
 
+    state.job_count = len(xs) * len(ys)
+
     for iy, y in enumerate(ys):
         for ix, x in enumerate(xs):
             state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
@@ -29,6 +31,7 @@ def draw_xy_grid(xs, ys, x_label, y_label, cell):
                 first_pocessed = processed
 
             res.append(processed.images[0])
+            state.nextjob()
 
     grid = images.image_grid(res, rows=len(ys))
     grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 4a00b4a413fdaf956fede2f625e2ed4038ed1768..87692983c822e5f47a06725943bf17f262db8231 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -67,6 +67,8 @@ def draw_xy_grid(xs, ys, x_label, y_label, cell):
 
     first_pocessed = None
 
+    state.job_count = len(xs) * len(ys)
+
     for iy, y in enumerate(ys):
         for ix, x in enumerate(xs):
             state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
@@ -77,6 +79,8 @@ def draw_xy_grid(xs, ys, x_label, y_label, cell):
 
             res.append(processed.images[0])
 
+            state.nextjob()
+
     grid = images.image_grid(res, rows=len(ys))
     grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)
 
diff --git a/style.css b/style.css
index f10e7ee0984159592d9529adf44a93a1361a9ae1..93bdcb132fa65a14031e218f7cc1ed2e24b62869 100644
--- a/style.css
+++ b/style.css
@@ -71,3 +71,28 @@ input[type="range"]{
     padding-left: 0.6em;
     padding-right: 0.6em;
 }
+
+
+
+.progressDiv{
+  width: 100%;
+  height: 30px;
+  background: #b4c0cc;
+  border-radius: 8px;
+}
+
+.dark .progressDiv{
+  background: #424c5b;
+}
+
+.progressDiv .progress{
+  width: 0%;
+  height: 30px;
+  background: #0060df;
+  color: white;
+  font-weight: bold;
+  line-height: 30px;
+  padding: 0 8px 0 0;
+  text-align: right;
+  border-radius: 8px;
+}
diff --git a/webui.py b/webui.py
index 8eebeeee34b8ab2437eee60b8873002a0c345654..e703b3431aa293fbdc372ccf289846a2d4d46915 100644
--- a/webui.py
+++ b/webui.py
@@ -53,6 +53,7 @@ def load_model_from_config(config, ckpt, verbose=False):
 
 cached_images = {}
 
+
 def run_extras(image, gfpgan_strength, upscaling_resize, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility):
     processing.torch_gc()
 
@@ -121,10 +122,16 @@ queue_lock = threading.Lock()
 
 def wrap_gradio_gpu_call(func):
     def f(*args, **kwargs):
+        shared.state.sampling_step = 0
+        shared.state.job_count = 1
+        shared.state.job_no = 0
+
+
         with queue_lock:
             res = func(*args, **kwargs)
 
         shared.state.job = ""
+        shared.state.job_count = 0
 
         return res