Commit 0114b59

refactor of progress monitoring

Signed-off-by: Vladimir Mandic <[email protected]>
1 parent 669799b

24 files changed: +177 -84 lines

.eslintrc.json (+1)

@@ -22,6 +22,7 @@
     "default-case":"off",
     "no-await-in-loop":"off",
     "no-bitwise":"off",
+    "no-continue":"off",
     "no-confusing-arrow":"off",
     "no-console":"off",
     "no-empty":"off",

CHANGELOG.md (+2)

@@ -20,6 +20,8 @@
   - add explicit detailer steps setting
 - **SysInfo**:
   - update to collected data and benchmarks
+- **Progress**:
+  - refactored progress monitoring, job updates and live preview
 - **Metadata**:
   - improved metadata save and restore
 - **Fixes**:

javascript/progressBar.js (+22 -15)

@@ -20,29 +20,36 @@ function checkPaused(state) {
 
 function setProgress(res) {
   const elements = ['txt2img_generate', 'img2img_generate', 'extras_generate', 'control_generate'];
-  const progress = (res?.progress || 0);
-  let job = res?.job || '';
-  job = job.replace('txt2img', 'Generate').replace('img2img', 'Generate');
-  const perc = res && (progress > 0) ? `${Math.round(100.0 * progress)}%` : '';
-  let sec = res?.eta || 0;
+  const progress = res?.progress || 0;
+  const job = res?.job || '';
+  let perc = '';
   let eta = '';
-  if (res?.paused) eta = 'Paused';
-  else if (res?.completed || (progress > 0.99)) eta = 'Finishing';
-  else if (sec === 0) eta = 'Starting';
+  if (job === 'VAE') perc = 'Decode';
   else {
-    const min = Math.floor(sec / 60);
-    sec %= 60;
-    eta = min > 0 ? `${Math.round(min)}m ${Math.round(sec)}s` : `${Math.round(sec)}s`;
+    perc = res && (progress > 0) && (progress < 1) ? `${Math.round(100.0 * progress)}% ` : '';
+    let sec = res?.eta || 0;
+    if (res?.paused) eta = 'Paused';
+    else if (res?.completed || (progress > 0.99)) eta = 'Finishing';
+    else if (sec === 0) eta = 'Start';
+    else {
+      const min = Math.floor(sec / 60);
+      sec %= 60;
+      eta = min > 0 ? `${Math.round(min)}m ${Math.round(sec)}s` : `${Math.round(sec)}s`;
+    }
   }
   document.title = `SD.Next ${perc}`;
   for (const elId of elements) {
     const el = document.getElementById(elId);
     if (el) {
-      el.innerText = (res ? `${job} ${perc} ${eta}` : 'Generate');
+      const jobLabel = (res ? `${job} ${perc}${eta}` : 'Generate').trim();
+      el.innerText = jobLabel;
       if (!window.waitForUiReady) {
-        el.style.background = res && (progress > 0)
-          ? `linear-gradient(to right, var(--primary-500) 0%, var(--primary-800) ${perc}, var(--neutral-700) ${perc})`
-          : 'var(--button-primary-background-fill)';
+        const gradient = perc !== '' ? perc : '100%';
+        if (jobLabel === 'Generate') el.style.background = 'var(--primary-500)';
+        else if (jobLabel.endsWith('Decode')) continue;
+        else if (jobLabel.endsWith('Start') || jobLabel.endsWith('Finishing')) el.style.background = 'var(--primary-800)';
+        else if (res && progress > 0 && progress < 1) el.style.background = `linear-gradient(to right, var(--primary-500) 0%, var(--primary-800) ${gradient}, var(--neutral-700) ${gradient})`;
+        else el.style.background = 'var(--primary-500)';
       }
     }
   }
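
For context, setProgress() consumes the JSON payload polled from the progress API implemented in modules/progress.py (see that diff below). A minimal sketch of the fields the new logic reads, inferred from this commit rather than taken from an authoritative schema:

    # Hypothetical sketch of the progress payload consumed by setProgress();
    # field names are inferred from the progress.py diff in this commit.
    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class ProgressPayload:
        job: str = ''                # e.g. 'Text', 'Hires', 'VAE' (see get_job_name below)
        progress: float = 0.0        # 0..1, rounded to 2 decimals server-side
        eta: Optional[float] = None  # seconds remaining; formatted as 'Xm Ys' by the JS
        paused: bool = False         # rendered as 'Paused'
        completed: bool = False      # rendered as 'Finishing'

    payload = ProgressPayload(job='Text', progress=0.42, eta=75)
    # setProgress() would render this as the button label 'Text 42% 1m 15s'.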

modules/call_queue.py (+2 -2)

@@ -15,8 +15,8 @@ def f(*args, **kwargs):
     return f
 
 
-def wrap_gradio_gpu_call(func, extra_outputs=None):
-    name = func.__name__
+def wrap_gradio_gpu_call(func, extra_outputs=None, name=None):
+    name = name or func.__name__
     def f(*args, **kwargs):
         # if the first argument is a string that says "task(...)", it is treated as a job id
         if len(args) > 0 and type(args[0]) == str and args[0][0:5] == "task(" and args[0][-1] == ")":
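
The new optional name argument lets callers override the job label otherwise derived from the wrapped function's __name__; the modules/lora/lora_extract.py change below is the caller that uses it. A short usage sketch (make_lora matches that diff; my_task is a hypothetical placeholder):

    # Label defaults to the function name, exactly as before:
    fn = wrap_gradio_gpu_call(my_task, extra_outputs=[])                 # name == 'my_task'
    # The new keyword yields a friendlier label in the progress UI:
    fn = wrap_gradio_gpu_call(make_lora, extra_outputs=[], name='LoRA')  # name == 'LoRA'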

modules/gr_tempdir.py (+1)

@@ -71,6 +71,7 @@ def pil_to_temp_file(self, img: Image, dir: str, format="png") -> str: # pylint:
     img.already_saved_as = name
     size = os.path.getsize(name)
     shared.log.debug(f'Save temp: image="{name}" width={img.width} height={img.height} size={size}')
+    shared.state.image_history += 1
     params = ', '.join([f'{k}: {v}' for k, v in img.info.items()])
     params = params[12:] if params.startswith('parameters: ') else params
     with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:

modules/history.py (+1)

@@ -62,6 +62,7 @@ def find(self, name):
         return -1
 
     def add(self, latent, preview=None, info=None, ops=[]):
+        shared.state.latent_history += 1
         if shared.opts.latent_history == 0:
             return
         if torch.is_tensor(latent):

modules/images.py (+3)

@@ -29,6 +29,7 @@ def atomically_save_image():
     Image.MAX_IMAGE_PIXELS = None # disable check in Pillow and rely on check below to allow large custom image sizes
     while True:
         image, filename, extension, params, exifinfo, filename_txt = save_queue.get()
+        shared.state.image_history += 1
         with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
             file.write(exifinfo)
         fn = filename + extension
@@ -49,6 +50,7 @@ def atomically_save_image():
             shared.log.info(f'Save: text="{filename_txt}" len={len(exifinfo)}')
         except Exception as e:
             shared.log.warning(f'Save failed: description={filename_txt} {e}')
+
         # actual save
         if image_format == 'PNG':
             pnginfo_data = PngImagePlugin.PngInfo()
@@ -79,6 +81,7 @@ def atomically_save_image():
         errors.display(e, 'Image save')
     size = os.path.getsize(fn) if os.path.exists(fn) else 0
     shared.log.info(f'Save: image="{fn}" type={image_format} width={image.width} height={image.height} size={size}')
+
     if shared.opts.save_log_fn != '' and len(exifinfo) > 0:
         fn = os.path.join(paths.data_path, shared.opts.save_log_fn)
         if not fn.endswith('.json'):

modules/lora/lora_extract.py (+1 -1)

@@ -265,7 +265,7 @@ def gr_show(visible=True):
 
     auto_rank.change(fn=lambda x: gr_show(x), inputs=[auto_rank], outputs=[rank_ratio])
     extract.click(
-        fn=wrap_gradio_gpu_call(make_lora, extra_outputs=[]),
+        fn=wrap_gradio_gpu_call(make_lora, extra_outputs=[], name='LoRA'),
         inputs=[filename, rank, auto_rank, rank_ratio, modules, overwrite],
         outputs=[status]
     )

modules/processing.py (+5 -2)

@@ -280,19 +280,22 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
     output_images = []
 
     process_init(p)
-    if os.path.exists(shared.opts.embeddings_dir) and not p.do_not_reload_embeddings and not shared.native:
+    if not shared.native and os.path.exists(shared.opts.embeddings_dir) and not p.do_not_reload_embeddings:
         modules.sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=False)
     if p.scripts is not None and isinstance(p.scripts, scripts.ScriptRunner):
         p.scripts.process(p)
 
     ema_scope_context = p.sd_model.ema_scope if not shared.native else nullcontext
-    shared.state.job_count = p.n_iter
+    if not shared.native:
+        shared.state.job_count = p.n_iter
     with devices.inference_context(), ema_scope_context():
         t0 = time.time()
         if not hasattr(p, 'skip_init'):
             p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
         debug(f'Processing inner: args={vars(p)}')
         for n in range(p.n_iter):
+            # if hasattr(p, 'skip_processing'):
+            #     continue
             pag.apply(p)
             debug(f'Processing inner: iteration={n+1}/{p.n_iter}')
             p.iteration = n

modules/processing_args.py (+2 -1)

@@ -15,6 +15,7 @@
 
 debug_enabled = os.environ.get('SD_DIFFUSERS_DEBUG', None)
 debug_log = shared.log.trace if os.environ.get('SD_DIFFUSERS_DEBUG', None) is not None else lambda *args, **kwargs: None
+disable_pbar = os.environ.get('SD_DISABLE_PBAR', None) is not None
 
 
 def task_specific_kwargs(p, model):
@@ -107,7 +108,7 @@ def set_pipeline_args(p, model, prompts:list, negative_prompts:list, prompts_2:t
         shared.sd_model = sd_models.apply_balanced_offload(shared.sd_model)
     apply_circular(p.tiling, model)
     if hasattr(model, "set_progress_bar_config"):
-        model.set_progress_bar_config(bar_format='Progress {rate_fmt}{postfix} {bar} {percentage:3.0f}% {n_fmt}/{total_fmt} {elapsed} {remaining} ' + '\x1b[38;5;71m' + desc, ncols=80, colour='#327fba')
+        model.set_progress_bar_config(bar_format='Progress {rate_fmt}{postfix} {bar} {percentage:3.0f}% {n_fmt}/{total_fmt} {elapsed} {remaining} ' + '\x1b[38;5;71m' + desc, ncols=80, colour='#327fba', disable=disable_pbar)
     args = {}
     has_vae = hasattr(model, 'vae') or (hasattr(model, 'pipe') and hasattr(model.pipe, 'vae'))
     if hasattr(model, 'pipe') and not hasattr(model, 'no_recurse'): # recurse
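
Setting the new SD_DISABLE_PBAR environment variable suppresses the diffusers console progress bar; only the variable's presence is checked, not its value. A standalone sketch of the same gating pattern, assuming only that set_progress_bar_config forwards these keyword arguments to tqdm (which accepts disable):

    import os
    from tqdm import tqdm

    # Presence check mirroring the diff above: SD_DISABLE_PBAR=1 (or any value) disables output.
    disable_pbar = os.environ.get('SD_DISABLE_PBAR', None) is not None

    for _ in tqdm(range(100), desc='Progress', ncols=80, disable=disable_pbar):
        pass  # no bar is printed when the variable is set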

modules/processing_callbacks.py (+3 -2)

@@ -56,8 +56,9 @@ def diffusers_callback(pipe, step: int = 0, timestep: int = 0, kwargs: dict = {}
     latents = kwargs.get('latents', None)
     if debug:
         debug_callback(f'Callback: step={step} timestep={timestep} latents={latents.shape if latents is not None else None} kwargs={list(kwargs)}')
-    order = getattr(pipe.scheduler, "order", 1) if hasattr(pipe, 'scheduler') else 1
-    shared.state.sampling_step = step // order
+    shared.state.step()
+    # order = getattr(pipe.scheduler, "order", 1) if hasattr(pipe, 'scheduler') else 1
+    # shared.state.sampling_step = step // order
     if shared.state.interrupted or shared.state.skipped:
         raise AssertionError('Interrupted...')
     if shared.state.paused:
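
The callback now delegates step accounting to shared.state.step() and keeps the old scheduler-order arithmetic as commented-out reference. State.step() itself is not part of this diff; a plausible sketch, assuming it simply advances the counter that modules/progress.py reads:

    # Hypothetical sketch; the real State class is not shown in this commit.
    class State:
        sampling_step: int = 0
        sampling_steps: int = 0

        def step(self, n: int = 1):
            # advance the sampling counter; progress.py derives
            # progress as sampling_step / sampling_steps
            self.sampling_step += n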

modules/processing_class.py (+1 -1)

@@ -581,7 +581,7 @@ def init_hr(self, scale = None, upscaler = None, force = False):
         else:
             self.hr_upscale_to_x, self.hr_upscale_to_y = self.hr_resize_x, self.hr_resize_y
         # hypertile_set(self, hr=True)
-        shared.state.job_count = 2 * self.n_iter
+        # shared.state.job_count = 2 * self.n_iter
         shared.log.debug(f'Control hires: upscaler="{self.hr_upscaler}" scale={scale} fixed={not use_scale} size={self.hr_upscale_to_x}x{self.hr_upscale_to_y}')
 

modules/processing_diffusers.py (+11 -11)

@@ -6,7 +6,7 @@
 import torchvision.transforms.functional as TF
 from PIL import Image
 from modules import shared, devices, processing, sd_models, errors, sd_hijack_hypertile, processing_vae, sd_models_compile, hidiffusion, timer, modelstats, extra_networks
-from modules.processing_helpers import resize_hires, calculate_base_steps, calculate_hires_steps, calculate_refiner_steps, save_intermediate, update_sampler, is_txt2img, is_refiner_enabled
+from modules.processing_helpers import resize_hires, calculate_base_steps, calculate_hires_steps, calculate_refiner_steps, save_intermediate, update_sampler, is_txt2img, is_refiner_enabled, get_job_name
 from modules.processing_args import set_pipeline_args
 from modules.onnx_impl import preprocess_pipeline as preprocess_onnx_pipeline, check_parameters_changed as olive_check_parameters_changed
 from modules.lora import networks
@@ -53,8 +53,9 @@ def restore_state(p: processing.StableDiffusionProcessing):
 
 
 def process_base(p: processing.StableDiffusionProcessing):
-    use_refiner_start = is_txt2img() and is_refiner_enabled(p) and not p.is_hr_pass and p.refiner_start > 0 and p.refiner_start < 1
-    use_denoise_start = not is_txt2img() and p.refiner_start > 0 and p.refiner_start < 1
+    txt2img = is_txt2img()
+    use_refiner_start = txt2img and is_refiner_enabled(p) and not p.is_hr_pass and p.refiner_start > 0 and p.refiner_start < 1
+    use_denoise_start = not txt2img and p.refiner_start > 0 and p.refiner_start < 1
 
     shared.sd_model = update_pipeline(shared.sd_model, p)
     update_sampler(p, shared.sd_model)
@@ -76,7 +77,8 @@ def process_base(p: processing.StableDiffusionProcessing):
         clip_skip=p.clip_skip,
         desc='Base',
     )
-    shared.state.sampling_steps = base_args.get('prior_num_inference_steps', None) or p.steps or base_args.get('num_inference_steps', None)
+    base_steps = base_args.get('prior_num_inference_steps', None) or p.steps or base_args.get('num_inference_steps', None)
+    shared.state.update(get_job_name(p, shared.sd_model), base_steps, 1)
     if shared.opts.scheduler_eta is not None and shared.opts.scheduler_eta > 0 and shared.opts.scheduler_eta < 1:
         p.extra_generation_params["Sampler Eta"] = shared.opts.scheduler_eta
     output = None
@@ -172,7 +174,7 @@ def process_hires(p: processing.StableDiffusionProcessing, output):
         p.ops.append('upscale')
     if shared.opts.samples_save and not p.do_not_save_samples and shared.opts.save_images_before_highres_fix and hasattr(shared.sd_model, 'vae'):
         save_intermediate(p, latents=output.images, suffix="-before-hires")
-    shared.state.job = 'Upscale'
+    shared.state.update('Upscale', 0, 1)
     output.images = resize_hires(p, latents=output.images)
     sd_hijack_hypertile.hypertile_set(p, hr=True)
 
@@ -190,7 +192,6 @@ def process_hires(p: processing.StableDiffusionProcessing, output):
         shared.log.warning('HiRes skip: denoising=0')
         p.hr_force = False
     if p.hr_force:
-        shared.state.job_count = 2 * p.n_iter
         shared.sd_model = sd_models.set_diffuser_pipe(shared.sd_model, sd_models.DiffusersTaskType.IMAGE_2_IMAGE)
         if 'Upscale' in shared.sd_model.__class__.__name__ or 'Flux' in shared.sd_model.__class__.__name__ or 'Kandinsky' in shared.sd_model.__class__.__name__:
             output.images = processing_vae.vae_decode(latents=output.images, model=shared.sd_model, full_quality=p.full_quality, output_type='pil', width=p.width, height=p.height)
@@ -217,8 +218,8 @@ def process_hires(p: processing.StableDiffusionProcessing, output):
             strength=strength,
             desc='Hires',
         )
-        shared.state.job = 'HiRes'
-        shared.state.sampling_steps = hires_args.get('prior_num_inference_steps', None) or p.steps or hires_args.get('num_inference_steps', None)
+        hires_steps = hires_args.get('prior_num_inference_steps', None) or p.hr_second_pass_steps or hires_args.get('num_inference_steps', None)
+        shared.state.update(get_job_name(p, shared.sd_model), hires_steps, 1)
         try:
             shared.sd_model = sd_models.apply_balanced_offload(shared.sd_model)
             sd_models.move_model(shared.sd_model, devices.device)
@@ -255,8 +256,6 @@ def process_refine(p: processing.StableDiffusionProcessing, output):
     # optional refiner pass or decode
     if is_refiner_enabled(p):
         prev_job = shared.state.job
-        shared.state.job = 'Refine'
-        shared.state.job_count +=1
         if shared.opts.samples_save and not p.do_not_save_samples and shared.opts.save_images_before_refiner and hasattr(shared.sd_model, 'vae'):
             save_intermediate(p, latents=output.images, suffix="-before-refiner")
         if shared.opts.diffusers_move_base:
@@ -306,7 +305,8 @@ def process_refine(p: processing.StableDiffusionProcessing, output):
             prompt_attention='fixed',
             desc='Refiner',
         )
-        shared.state.sampling_steps = refiner_args.get('prior_num_inference_steps', None) or p.steps or refiner_args.get('num_inference_steps', None)
+        refiner_steps = refiner_args.get('prior_num_inference_steps', None) or p.steps or refiner_args.get('num_inference_steps', None)
+        shared.state.update(get_job_name(p, shared.sd_refiner), refiner_steps, 1)
         try:
             if 'requires_aesthetics_score' in shared.sd_refiner.config: # sdxl-model needs false and sdxl-refiner needs true
                 shared.sd_refiner.register_to_config(requires_aesthetics_score = getattr(shared.sd_refiner, 'tokenizer', None) is None)
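
All three passes now funnel through a single shared.state.update(job, steps, jobs) call instead of separately assigning state.job, state.sampling_steps, and state.job_count. The method itself is not shown in this commit; a hedged sketch of what the call sites imply (parameter names are assumptions):

    def update(self, job: str, steps: int, jobs: int):
        # Hypothetical consolidation of the per-pass assignments this commit removes:
        # a UI job label, the expected step count for the pass, and the number of
        # jobs to add to the running total.
        if job != 'Ignore':  # get_job_name() returns 'Ignore' for xyz grid, which manages its own jobs
            self.job = job
        self.sampling_step = 0
        self.sampling_steps = steps or 0
        self.job_count += jobs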

modules/processing_helpers.py (+23)

@@ -584,3 +584,26 @@ def update_sampler(p, sd_model, second_pass=False):
             sampler_options.append('low order')
     if len(sampler_options) > 0:
         p.extra_generation_params['Sampler options'] = '/'.join(sampler_options)
+
+
+def get_job_name(p, model):
+    if hasattr(model, 'pipe'):
+        model = model.pipe
+    if hasattr(p, 'xyz'):
+        return 'Ignore' # xyz grid handles its own jobs
+    if sd_models.get_diffusers_task(model) == sd_models.DiffusersTaskType.TEXT_2_IMAGE:
+        return 'Text'
+    elif sd_models.get_diffusers_task(model) == sd_models.DiffusersTaskType.IMAGE_2_IMAGE:
+        if p.is_refiner_pass:
+            return 'Refiner'
+        elif p.is_hr_pass:
+            return 'Hires'
+        else:
+            return 'Image'
+    elif sd_models.get_diffusers_task(model) == sd_models.DiffusersTaskType.INPAINTING:
+        if p.detailer:
+            return 'Detailer'
+        else:
+            return 'Inpaint'
+    else:
+        return 'Unknown'
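
The label returned here feeds directly into the per-pass state updates in modules/processing_diffusers.py above, e.g.:

    # from process_base() in the diff above:
    base_steps = base_args.get('prior_num_inference_steps', None) or p.steps or base_args.get('num_inference_steps', None)
    shared.state.update(get_job_name(p, shared.sd_model), base_steps, 1)
    # txt2img -> 'Text', hires pass -> 'Hires', detailer inpaint -> 'Detailer', xyz grid -> 'Ignore'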

modules/progress.py (+4 -11)

@@ -64,23 +64,16 @@ def progressapi(req: ProgressRequest):
     queued = req.id_task in pending_tasks
     completed = req.id_task in finished_tasks
     paused = shared.state.paused
-    shared.state.job_count = max(shared.state.frame_count, shared.state.job_count, shared.state.job_no)
-    batch_x = max(shared.state.job_no, 0)
-    batch_y = max(shared.state.job_count, 1)
-    step_x = max(shared.state.sampling_step, 0)
-    step_y = max(shared.state.sampling_steps, 1)
-    current = step_y * batch_x + step_x
-    total = step_y * batch_y
-    while total < current:
-        total += step_y
-    progress = min(1, abs(current / total) if total > 0 else 0)
+    step = max(shared.state.sampling_step, 0)
+    steps = max(shared.state.sampling_steps, 1)
+    progress = round(min(1, abs(step / steps) if steps > 0 else 0), 2)
     elapsed = time.time() - shared.state.time_start if shared.state.time_start is not None else 0
     predicted = elapsed / progress if progress > 0 else None
     eta = predicted - elapsed if predicted is not None else None
     id_live_preview = req.id_live_preview
     live_preview = None
     updated = shared.state.set_current_image()
-    debug_log(f'Preview: job={shared.state.job} active={active} progress={current}/{total} step={shared.state.current_image_sampling_step}/{step_x}/{step_y} request={id_live_preview} last={shared.state.id_live_preview} enabled={shared.opts.live_previews_enable} job={shared.state.preview_job} updated={updated} image={shared.state.current_image} elapsed={elapsed:.3f}')
+    debug_log(f'Preview: job={shared.state.job} active={active} progress={step}/{steps}/{progress} image={shared.state.current_image_sampling_step} request={id_live_preview} last={shared.state.id_live_preview} enabled={shared.opts.live_previews_enable} job={shared.state.preview_job} updated={updated} image={shared.state.current_image} elapsed={elapsed:.3f}')
     if not active:
         return InternalProgressResponse(job=shared.state.job, active=active, queued=queued, paused=paused, completed=completed, id_live_preview=-1, debug=debug, textinfo="Queued..." if queued else "Waiting...")
     if shared.opts.live_previews_enable and (shared.state.id_live_preview != id_live_preview) and (shared.state.current_image is not None):
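
The rewritten computation drops the old batch bookkeeping (job_no/job_count) and derives progress from the sampling step alone, extrapolating the ETA linearly from elapsed time. A worked example of the new arithmetic:

    # step 12 of 50 with 6 s elapsed:
    step, steps, elapsed = 12, 50, 6.0
    progress = round(min(1, abs(step / steps) if steps > 0 else 0), 2)  # 0.24
    predicted = elapsed / progress if progress > 0 else None            # 25.0 s total
    eta = predicted - elapsed if predicted is not None else None        # 19.0 s remaining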
