diff --git a/modules/api/api.py b/modules/api/api.py
index 279c384acb9882c1cfa2ddadb885404f1c4e16b6..3ea099ad8c44730bd0b20cc1007238a79dfb7aea 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -327,7 +327,7 @@ class Api:
             p.outpath_grids = opts.outdir_txt2img_grids
             p.outpath_samples = opts.outdir_txt2img_samples
 
-            shared.state.begin()
+            shared.state.begin(job="scripts_txt2img")
             if selectable_scripts is not None:
                 p.script_args = script_args
                 processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
@@ -384,7 +384,7 @@ class Api:
             p.outpath_grids = opts.outdir_img2img_grids
             p.outpath_samples = opts.outdir_img2img_samples
 
-            shared.state.begin()
+            shared.state.begin(job="scripts_img2img")
             if selectable_scripts is not None:
                 p.script_args = script_args
                 processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
@@ -599,7 +599,7 @@ class Api:
 
     def create_embedding(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="create_embedding")
             filename = create_embedding(**args) # create empty embedding
             sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used
             shared.state.end()
@@ -610,7 +610,7 @@ class Api:
 
     def create_hypernetwork(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="create_hypernetwork")
             filename = create_hypernetwork(**args) # create empty hypernetwork
             shared.state.end()
             return models.CreateResponse(info=f"create hypernetwork filename: {filename}")
@@ -620,7 +620,7 @@ class Api:
 
     def preprocess(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="preprocess")
             preprocess(**args) # quick operation unless blip/booru interrogation is enabled
             shared.state.end()
             return models.PreprocessResponse(info = 'preprocess complete')
@@ -636,7 +636,7 @@ class Api:
 
     def train_embedding(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="train_embedding")
             apply_optimizations = shared.opts.training_xattention_optimizations
             error = None
             filename = ''
@@ -657,7 +657,7 @@ class Api:
 
     def train_hypernetwork(self, args: dict):
         try:
-            shared.state.begin()
+            shared.state.begin(job="train_hypernetwork")
             shared.loaded_hypernetworks = []
             apply_optimizations = shared.opts.training_xattention_optimizations
             error = None
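
Aside (illustration, not part of the patch): every api.py hunk above follows the same begin/end bracket, only the job label changes. A minimal sketch of that bracket for a hypothetical endpoint; the endpoint name, do_work() and the generic exception handling are placeholders, not identifiers or behavior taken from the codebase:

    # assumes `from modules import shared`; do_work() stands in for the endpoint's real work
    def some_endpoint(self, args: dict):
        try:
            shared.state.begin(job="some_endpoint")  # label the job so state/progress queries can report it
            result = do_work(**args)                 # endpoint-specific processing (placeholder)
            shared.state.end()                       # close out the shared job state
            return result
        except Exception:
            shared.state.end()                       # clear the job state on failure as well
            raise
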
diff --git a/modules/call_queue.py b/modules/call_queue.py
index 69bf63d2b9edd3cab7fea7b3dfdf7ebecd246360..3b94f8a4c8e2739b246ba9070ef679e811e01c09 100644
--- a/modules/call_queue.py
+++ b/modules/call_queue.py
@@ -30,7 +30,7 @@ def wrap_gradio_gpu_call(func, extra_outputs=None):
             id_task = None
 
         with queue_lock:
-            shared.state.begin()
+            shared.state.begin(job=id_task)
             progress.start_task(id_task)
 
             try:
diff --git a/modules/extras.py b/modules/extras.py
index 830b53aa2b6af8addac7fe0fbfa3d49462b75f17..e9c0263ec7d83d112c42645c18a3cbda64ed911e 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -73,8 +73,7 @@ def to_half(tensor, enable):
 
 
 def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights, save_metadata):
-    shared.state.begin()
-    shared.state.job = 'model-merge'
+    shared.state.begin(job="model-merge")
 
     def fail(message):
         shared.state.textinfo = message
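
Aside (illustration, not part of the patch): this hunk, and the interrogate.py and postprocessing.py hunks below, all collapse the same two-step idiom into a single call. Side by side, taken directly from this file's change:

    # before: the job label was assigned after entering the begin/end bracket
    shared.state.begin()
    shared.state.job = 'model-merge'

    # after: the label is passed to begin() itself
    shared.state.begin(job="model-merge")
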
diff --git a/modules/interrogate.py b/modules/interrogate.py
index 9b2c5b60efbd10bd6b652c34ad3b62d9e1ca9f3e..a3ae1dd5c4c0663fe3548ee85d2856866c04b1b8 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -184,8 +184,7 @@ class InterrogateModels:
 
     def interrogate(self, pil_image):
         res = ""
-        shared.state.begin()
-        shared.state.job = 'interrogate'
+        shared.state.begin(job="interrogate")
         try:
             if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
                 lowvram.send_everything_to_cpu()
diff --git a/modules/postprocessing.py b/modules/postprocessing.py
index 736315e2d7acc82be80834ea7c2e57a44f58a9f4..544b2f7208c55bd466d09027c5333e13931c13e8 100644
--- a/modules/postprocessing.py
+++ b/modules/postprocessing.py
@@ -9,8 +9,7 @@ from modules.shared import opts
 def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True):
     devices.torch_gc()
 
-    shared.state.begin()
-    shared.state.job = 'extras'
+    shared.state.begin(job="extras")
 
     image_data = []
     image_names = []
diff --git a/modules/shared.py b/modules/shared.py
index 203ee1b9e56a47e4ab0a0393f1f90b67e6271f4f..7df2879cc8cc210c92e37f66955921e2065375b7 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -173,7 +173,7 @@ class State:
 
         return obj
 
-    def begin(self):
+    def begin(self, job: str = "(unknown)"):
         self.sampling_step = 0
         self.job_count = -1
         self.processing_has_refined_job_count = False
@@ -187,7 +187,7 @@ class State:
         self.interrupted = False
         self.textinfo = None
         self.time_start = time.time()
-
+        self.job = job
         devices.torch_gc()
 
     def end(self):
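
Aside (illustration, not part of the patch): with this change State.begin() always assigns self.job, either to the caller-supplied label or to the "(unknown)" default, so the per-caller `shared.state.job = ...` assignments removed above are no longer needed. A minimal usage sketch, with labels taken from the hunks above:

    shared.state.begin()                   # self.job becomes "(unknown)"
    shared.state.begin(job="interrogate")  # self.job becomes "interrogate"
    shared.state.begin(job=None)           # e.g. call_queue.py above when no task id is found: self.job becomes None, not "(unknown)"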