Skip to content

[Bug]: Z-Image-Turbo Text to Image Template throws OOM #340

@Matzebhv

Description

@Matzebhv

ComfyUI and Frontend Versions

Image

Description

The workflow throws an OOM error after any image generation on my 16 GB RTX 5060 Ti.

Steps to Reproduce

Generate a picture; after that, try to generate the next picture.

Debug Logs

`got prompt
Requested to load Lumina2
loaded completely; 12030.40 MB usable, 11739.55 MB loaded, full load: True
  0%|                                                                                                                               | 0/9 [00:02<?, ?it/s]
!!! Exception during processing !!! Allocation on device
Traceback (most recent call last):
  File "F:\ComfyUI\ComfyUI.torch2.9\execution.py", line 510, in execute
    output_data, output_ui, has_subgraph, has_pending_tasks = await get_output_data(prompt_id, unique_id, obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, hidden_inputs=hidden_inputs)
  File "F:\ComfyUI\ComfyUI.torch2.9\execution.py", line 324, in get_output_data
    return_values = await _async_map_node_over_list(prompt_id, unique_id, obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb, hidden_inputs=hidden_inputs)
  File "F:\ComfyUI\ComfyUI.torch2.9\execution.py", line 298, in _async_map_node_over_list
    await process_inputs(input_dict, i)
  File "F:\ComfyUI\ComfyUI.torch2.9\execution.py", line 286, in process_inputs
    result = f(**inputs)
  File "F:\ComfyUI\ComfyUI.torch2.9\nodes.py", line 1525, in sample
    return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)
  File "F:\ComfyUI\ComfyUI.torch2.9\nodes.py", line 1492, in common_ksampler
    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
  File "F:\ComfyUI\ComfyUI.torch2.9\comfy\sample.py", line 60, in sample
    samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
  File "F:\ComfyUI\ComfyUI.torch2.9\custom_nodes\ComfyUI-TiledDiffusion\utils.py", line 51, in KSampler_sample
    return orig_fn(*args, **kwargs)
  File "F:\ComfyUI\ComfyUI.torch2.9\comfy\samplers.py", line 1163, in sample
    return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
  File "F:\ComfyUI\ComfyUI.torch2.9\comfy\samplers.py", line 1053, in sample
    return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed)
  File "F:\ComfyUI\ComfyUI.torch2.9\comfy\samplers.py", line 1035, in sample
    output = executor.execute(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed, latent_shapes=latent_shapes)
  File "F:\ComfyUI\ComfyUI.torch2.9\comfy\patcher_extension.py", line 112, in execute
    return self.original(*args, **kwargs)
  File "F:\ComfyUI\ComfyUI.torch2.9\comfy\samplers.py", line 997, in outer_sample
    output = self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed, latent_shapes=latent_shapes)
  File "F:\ComfyUI\ComfyUI.torch2.9\comfy\samplers.py", line 980, in inner_sample
    samples = executor.execute(self, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar)
  File "F:\ComfyUI\ComfyUI.torch2.9\comfy\patcher_extension.py", line 112, in execute
    return self.original(*args, **kwargs)
  File "F:\ComfyUI\ComfyUI.torch2.9\custom_nodes\ComfyUI-TiledDiffusion\utils.py", line 34, in KSAMPLER_sample
    return orig_fn(*args, **kwargs)
  File "F:\ComfyUI\ComfyUI.torch2.9\comfy\samplers.py", line 752, in sample
    samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options)
  File "F:\ComfyUI\ComfyUI.torch2.9\venv\lib\site-packages\torch\utils\_contextlib.py", line 120, in decorate_context
    return func(*args, **kwargs)
  File "F:\ComfyUI\ComfyUI.torch2.9\comfy\k_diffusion\sampling.py", line 1429, in sample_res_multistep
    return res_multistep(model, x, sigmas, extra_args=extra_args, callback=callback, disable=disable, s_noise=s_noise, noise_sampler=noise_sampler, eta=0., cfg_pp=False)
  File "F:\ComfyUI\ComfyUI.torch2.9\venv\lib\site-packages\torch\utils\_contextlib.py", line 120, in decorate_context
    return func(*args, **kwargs)
  File "F:\ComfyUI\ComfyUI.torch2.9\comfy\k_diffusion\sampling.py", line 1390, in res_multistep
    callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
  File "F:\ComfyUI\ComfyUI.torch2.9\comfy\samplers.py", line 750, in <lambda>
    k_callback = lambda x: callback(x["i"], x["denoised"], x["x"], total_steps)
  File "F:\ComfyUI\ComfyUI.torch2.9\latent_preview.py", line 105, in callback
    preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
  File "F:\ComfyUI\ComfyUI.torch2.9\latent_preview.py", line 27, in decode_latent_to_preview_image
    preview_image = self.decode_latent_to_preview(x0)
  File "F:\ComfyUI\ComfyUI.torch2.9\latent_preview.py", line 35, in decode_latent_to_preview
    x_sample = self.taesd.decode(x0[:1])[0].movedim(0, 2)
  File "F:\ComfyUI\ComfyUI.torch2.9\comfy\taesd\taesd.py", line 74, in decode
    x_sample = self.taesd_decoder((x - self.vae_shift) * self.vae_scale)
  File "F:\ComfyUI\ComfyUI.torch2.9\venv\lib\site-packages\torch\nn\modules\module.py", line 1777, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "F:\ComfyUI\ComfyUI.torch2.9\venv\lib\site-packages\torch\nn\modules\module.py", line 1788, in _call_impl
    return forward_call(*args, **kwargs)
  File "F:\ComfyUI\ComfyUI.torch2.9\venv\lib\site-packages\torch\nn\modules\container.py", line 250, in forward
    input = module(input)
  File "F:\ComfyUI\ComfyUI.torch2.9\venv\lib\site-packages\torch\nn\modules\module.py", line 1777, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "F:\ComfyUI\ComfyUI.torch2.9\venv\lib\site-packages\torch\nn\modules\module.py", line 1788, in _call_impl
    return forward_call(*args, **kwargs)
  File "F:\ComfyUI\ComfyUI.torch2.9\comfy\taesd\taesd.py", line 26, in forward
    return self.fuse(self.conv(x) + self.skip(x))
  File "F:\ComfyUI\ComfyUI.torch2.9\venv\lib\site-packages\torch\nn\modules\module.py", line 1777, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "F:\ComfyUI\ComfyUI.torch2.9\venv\lib\site-packages\torch\nn\modules\module.py", line 1788, in _call_impl
    return forward_call(*args, **kwargs)
  File "F:\ComfyUI\ComfyUI.torch2.9\venv\lib\site-packages\torch\nn\modules\container.py", line 250, in forward
    input = module(input)
  File "F:\ComfyUI\ComfyUI.torch2.9\venv\lib\site-packages\torch\nn\modules\module.py", line 1777, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "F:\ComfyUI\ComfyUI.torch2.9\venv\lib\site-packages\torch\nn\modules\module.py", line 1788, in _call_impl
    return forward_call(*args, **kwargs)
  File "F:\ComfyUI\ComfyUI.torch2.9\venv\lib\site-packages\torch\nn\modules\activation.py", line 144, in forward
    return F.relu(input, inplace=self.inplace)
  File "F:\ComfyUI\ComfyUI.torch2.9\venv\lib\site-packages\torch\nn\functional.py", line 1697, in relu
    result = torch.relu(input)
torch.OutOfMemoryError: Allocation on device

Got an OOM, unloading all loaded models.
Prompt executed in 36.75 seconds`

Browser Logs

--

Setting JSON

--

What browsers do you use to access the UI?

Google Chrome

Metadata

Metadata

Assignees

No one assigned

    Labels

    Potential Bug — Bug that has not been confirmed/reproduced yet

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions