I have been using Automatic1111 with AnimateDiff + ControlNet + ADetailer for txt2img generation. It worked fine on a project for a week, then a few days ago it suddenly started erroring out: generation stops right after the ADetailer step. I narrowed it down to ADetailer, since everything works when I disable it, but I need ADetailer, so I have been trying to fix it. I tried updating it, which didn't help. I even formatted my computer and reinstalled everything, and it's still the same. I am a newbie, so please help me out. Thank you very much. This is the error I get:
*** Error running postprocess_image: C:\Users\laugh\Downloads\sd.webui\webui\extensions\adetailer\scripts\!adetailer.py
Traceback (most recent call last):
File "C:\Users\laugh\Downloads\sd.webui\webui\modules\scripts.py", line 856, in postprocess_image
script.postprocess_image(p, pp, *script_args)
File "C:\Users\laugh\Downloads\sd.webui\webui\extensions\adetailer\adetailer\traceback.py", line 159, in wrapper
raise error from None
RuntimeError:
╭──────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
│ System info │
│ ┏━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ │
│ ┃ ┃ Value ┃ │
│ ┡━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ │
│ │ Platform │ Windows-10-10.0.22621-SP0 │ │
│ │ Python │ 3.10.6 (tags/v3.10.6:9c7b4bd, Aug 1 2022, 21:53:49) [MSC v.1932 64 bit (AMD64)] │ │
│ │ Version │ v1.8.0 │ │
│ │ Commit │ bef51aed032c0aaa5cfd80445bc4cf0d85b408b5 │ │
│ │ Commandline │ ['launch.py', '--no-download-sd-model', '--disable-nan-check', '--skip-version-check', │ │
│ │ │ '--no-half-vae', '--upcast-sampling', '--opt-sdp-attention', '--xformers', │ │
│ │ │ '--force-enable-xformers', '--autolaunch'] │ │
│ │ Libraries │ {'torch': '2.1.2+cu121', 'torchvision': '0.16.2+cu121', 'ultralytics': '8.1.22', 'mediapipe': │ │
│ │ │ '0.10.10'} │ │
│ └─────────────┴────────────────────────────────────────────────────────────────────────────────────────────────┘ │
│ Inputs │
│ ┏━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ │
│ ┃ ┃ Value ┃ │
│ ┡━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ │
│ │ prompt │ girls runinng │ │
│ │ negative_prompt │ │ │
│ │ n_iter │ 1 │ │
│ │ batch_size │ 16 │ │
│ │ width │ 512 │ │
│ │ height │ 512 │ │
│ │ sampler_name │ DPM++ 2M Karras │ │
│ │ enable_hr │ False │ │
│ │ hr_upscaler │ Latent │ │
│ │ checkpoint │ Dreamshaper8.safetensors [879db523c3] │ │
│ │ vae │ Automatic │ │
│ │ unet │ Automatic │ │
│ └─────────────────┴───────────────────────────────────────┘ │
│ ADetailer │
│ ┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓ │
│ ┃ ┃ Value ┃ │
│ ┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩ │
│ │ version │ 24.3.0 │ │
│ │ ad_model │ face_yolov8n.pt │ │
│ │ ad_prompt │ │ │
│ │ ad_negative_prompt │ │ │
│ │ ad_controlnet_model │ None │ │
│ │ is_api │ False │ │
│ └─────────────────────┴─────────────────┘ │
│ ╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │
│ │ C:\Users\laugh\Downloads\sd.webui\webui\extensions\adetailer\adetailer\traceback.py:139 in │ │
│ │ wrapper │ │
│ │ │ │
│ │ 138 │ │ try: │ │
│ │ ❱ 139 │ │ │ return func(*args, **kwargs) │ │
│ │ 140 │ │ except Exception as e: │ │
│ │ │ │
│ │ C:\Users\laugh\Downloads\sd.webui\webui\extensions\adetailer\scripts\!adetailer.py:778 in │ │
│ │ postprocess_image │ │
│ │ │ │
│ │ 777 │ │ │ │ │ continue │ │
│ │ ❱ 778 │ │ │ │ is_processed |= self._postprocess_image_inner(p, pp, args, n=n) │ │
│ │ 779 │ │
│ │ │ │
│ │ C:\Users\laugh\Downloads\sd.webui\webui\extensions\adetailer\scripts\!adetailer.py:701 in │ │
│ │ _postprocess_image_inner │ │
│ │ │ │
│ │ 700 │ │ with change_torch_load(): │ │
│ │ ❱ 701 │ │ │ pred = predictor(ad_model, pp.image, args.ad_confidence, **kwargs) │ │
│ │ 702 │ │
│ │ │ │
│ │ C:\Users\laugh\Downloads\sd.webui\webui\extensions\adetailer\adetailer\ultralytics.py:29 in │ │
│ │ ultralytics_predict │ │
│ │ │ │
│ │ 28 │ apply_classes(model, model_path, classes) │ │
│ │ ❱ 29 │ pred = model(image, conf=confidence, device=device) │ │
│ │ 30 │ │
│ │ │ │
│ │ C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\ultralytics\engine\model.py:16 │ │
│ │ 9 in __call__ │ │
│ │ │ │
│ │ 168 │ │ """ │ │
│ │ ❱ 169 │ │ return self.predict(source, stream, **kwargs) │ │
│ │ 170 │ │
│ │ │ │
│ │ C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\ultralytics\engine\model.py:43 │ │
│ │ 2 in predict │ │
│ │ │ │
│ │ 431 │ │ │ self.predictor = predictor or self._smart_load("predictor")(overrides=args, │ │
│ │ ❱ 432 │ │ │ self.predictor.setup_model(model=self.model, verbose=is_cli) │ │
│ │ 433 │ │ else: # only update args if predictor is already setup │ │
│ │ │ │
│ │ C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\ultralytics\engine\predictor.p │ │
│ │ y:343 in setup_model │ │
│ │ │ │
│ │ 342 │ │ """Initialize YOLO model with given parameters and set it to evaluation mode.""" │ │
│ │ ❱ 343 │ │ self.model = AutoBackend( │ │
│ │ 344 │ │ │ model or self.args.model, │ │
│ │ │ │
│ │ C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\torch\utils\_contextlib.py:115 │ │
│ │ in decorate_context │ │
│ │ │ │
│ │ 114 │ │ with ctx_factory(): │ │
│ │ ❱ 115 │ │ │ return func(*args, **kwargs) │ │
│ │ 116 │ │
│ │ │ │
│ │ C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\ultralytics\nn\autobackend.py: │ │
│ │ 140 in __init__ │ │
│ │ │ │
│ │ 139 │ │ if nn_module: # in-memory PyTorch model │ │
│ │ ❱ 140 │ │ │ model = weights.to(device) │ │
│ │ 141 │ │ │ model = model.fuse(verbose=verbose) if fuse else model │ │
│ │ │ │
│ │ C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\torch\nn\modules\module.py:116 │ │
│ │ 0 in to │ │
│ │ │ │
│ │ 1159 │ │ │ │
│ │ ❱ 1160 │ │ return self._apply(convert) │ │
│ │ 1161 │ │
│ │ │ │
│ │ C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\ultralytics\nn\tasks.py:226 in │ │
│ │ _apply │ │
│ │ │ │
│ │ 225 │ │ """ │ │
│ │ ❱ 226 │ │ self = super()._apply(fn) │ │
│ │ 227 │ │ m = self.model[-1] # Detect() │ │
│ │ │ │
│ │ C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\torch\nn\modules\module.py:810 │ │
│ │ in _apply │ │
│ │ │ │
│ │ 809 │ │ │ for module in self.children(): │ │
│ │ ❱ 810 │ │ │ │ module._apply(fn) │ │
│ │ 811 │ │
│ │ │ │
│ │ C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\torch\nn\modules\module.py:810 │ │
│ │ in _apply │ │
│ │ │ │
│ │ 809 │ │ │ for module in self.children(): │ │
│ │ ❱ 810 │ │ │ │ module._apply(fn) │ │
│ │ 811 │ │
│ │ │ │
│ │ C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\torch\nn\modules\module.py:810 │ │
│ │ in _apply │ │
│ │ │ │
│ │ 809 │ │ │ for module in self.children(): │ │
│ │ ❱ 810 │ │ │ │ module._apply(fn) │ │
│ │ 811 │ │
│ │ │ │
│ │ C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\torch\nn\modules\module.py:833 │ │
│ │ in _apply │ │
│ │ │ │
│ │ 832 │ │ │ with torch.no_grad(): │ │
│ │ ❱ 833 │ │ │ │ param_applied = fn(param) │ │
│ │ 834 │ │ │ should_use_set_data = compute_should_use_set_data(param, param_applied) │ │
│ │ │ │
│ │ C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\torch\nn\modules\module.py:115 │ │
│ │ 8 in convert │ │
│ │ │ │
│ │ 1157 │ │ │ │ │ │ │ non_blocking, memory_format=convert_to_format) │ │
│ │ ❱ 1158 │ │ │ return t.to(device, dtype if t.is_floating_point() or t.is_complex() else No │ │
│ │ 1159 │ │
│ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ │
│ RuntimeError: CUDA error: device-side assert triggered │
│ Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions. │
│ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
---
*** Error completing request
*** Arguments: ('task(nhh84s7w8xorjgy)', <gradio.routes.Request object at 0x000001F95012BCD0>, 'girls runinng', '', [], 20, 'DPM++ 2M Karras', 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], 0, False, '', 0.8, -1, False, -1, 0, 0, 0, True, False, {'ad_model': 'face_yolov8n.pt', 'ad_model_classes': '', 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_k_largest': 0, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_checkpoint': False, 'ad_checkpoint': 'Use same checkpoint', 'ad_use_vae': False, 'ad_vae': 'Use same VAE', 'ad_use_sampler': False, 'ad_sampler': 'DPM++ 2M Karras', 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_use_clip_skip': False, 'ad_clip_skip': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'None', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': ()}, {'ad_model': 'None', 'ad_model_classes': '', 'ad_prompt': '', 'ad_negative_prompt': '', 'ad_confidence': 0.3, 'ad_mask_k_largest': 0, 'ad_mask_min_ratio': 0, 'ad_mask_max_ratio': 1, 'ad_x_offset': 0, 'ad_y_offset': 0, 'ad_dilate_erode': 4, 'ad_mask_merge_invert': 'None', 'ad_mask_blur': 4, 'ad_denoising_strength': 0.4, 'ad_inpaint_only_masked': True, 'ad_inpaint_only_masked_padding': 32, 'ad_use_inpaint_width_height': False, 'ad_inpaint_width': 512, 'ad_inpaint_height': 512, 'ad_use_steps': False, 'ad_steps': 28, 'ad_use_cfg_scale': False, 'ad_cfg_scale': 7, 'ad_use_checkpoint': False, 'ad_checkpoint': 'Use same checkpoint', 'ad_use_vae': False, 'ad_vae': 'Use same VAE', 'ad_use_sampler': False, 'ad_sampler': 'DPM++ 2M Karras', 'ad_use_noise_multiplier': False, 'ad_noise_multiplier': 1, 'ad_use_clip_skip': False, 'ad_clip_skip': 1, 'ad_restore_face': False, 'ad_controlnet_model': 'None', 'ad_controlnet_module': 'None', 'ad_controlnet_weight': 1, 'ad_controlnet_guidance_start': 0, 'ad_controlnet_guidance_end': 1, 'is_api': ()}, <scripts.animatediff_ui.AnimateDiffProcess object at 0x000001F9500268C0>, UiControlNetUnit(enabled=True, module='ip-adapter_clip_sd15', model='ip-adapter-plus_sd15 [836b5c2e]', weight=1, image={'image': array([[[ 99, 67, 45],
**
***
*** [0, 0, 0]]], dtype=uint8)}, resize_mode='Crop and Resize', low_vram=False, processor_res=512, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', inpaint_crop_input_image=False, hr_option='Both', save_detected_map=True, advanced_weighting=None), UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', inpaint_crop_input_image=False, hr_option='Both', save_detected_map=True, advanced_weighting=None), UiControlNetUnit(enabled=False, module='none', model='None', weight=1, image=None, resize_mode='Crop and Resize', low_vram=False, processor_res=-1, threshold_a=-1, threshold_b=-1, guidance_start=0, guidance_end=1, pixel_perfect=False, control_mode='Balanced', inpaint_crop_input_image=False, hr_option='Both', save_detected_map=True, advanced_weighting=None), False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False, None, None, False, None, None, False, None, None, False, 50) {}
Traceback (most recent call last):
File "C:\Users\laugh\Downloads\sd.webui\webui\modules\call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "C:\Users\laugh\Downloads\sd.webui\webui\modules\call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "C:\Users\laugh\Downloads\sd.webui\webui\modules\txt2img.py", line 110, in txt2img
processed = processing.process_images(p)
File "C:\Users\laugh\Downloads\sd.webui\webui\modules\processing.py", line 785, in process_images
res = process_images_inner(p)
File "C:\Users\laugh\Downloads\sd.webui\webui\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 48, in processing_process_images_hijack
return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
File "C:\Users\laugh\Downloads\sd.webui\webui\modules\processing.py", line 1041, in process_images_inner
devices.torch_gc()
File "C:\Users\laugh\Downloads\sd.webui\webui\modules\devices.py", line 81, in torch_gc
torch.cuda.empty_cache()
File "C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\torch\cuda\memory.py", line 159, in empty_cache
torch._C._cuda_emptyCache()
RuntimeError: CUDA error: device-side assert triggered
Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.
---
Traceback (most recent call last):
File "C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\gradio\routes.py", line 488, in run_predict
output = await app.get_blocks().process_api(
File "C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\gradio\blocks.py", line 1431, in process_api
result = await self.call_function(
File "C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\gradio\blocks.py", line 1103, in call_function
prediction = await anyio.to_thread.run_sync(
File "C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\anyio\to_thread.py", line 33, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\anyio\_backends\_asyncio.py", line 877, in run_sync_in_worker_thread
return await future
File "C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\anyio\_backends\_asyncio.py", line 807, in run
result = context.run(func, *args)
File "C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\gradio\utils.py", line 707, in wrapper
response = f(*args, **kwargs)
File "C:\Users\laugh\Downloads\sd.webui\webui\modules\call_queue.py", line 77, in f
devices.torch_gc()
File "C:\Users\laugh\Downloads\sd.webui\webui\modules\devices.py", line 81, in torch_gc
torch.cuda.empty_cache()
File "C:\Users\laugh\Downloads\sd.webui\system\python\lib\site-packages\torch\cuda\memory.py", line 159, in empty_cache
torch._C._cuda_emptyCache()
RuntimeError: CUDA error: device-side assert triggered
Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.
As I said, I already tried formatting my computer and reinstalling everything. I also came across a suggestion to add '--skip-torch-cuda-test' to COMMANDLINE_ARGS, but I'm not sure that applies to this error.
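
In case it helps, here is a minimal standalone check I was thinking of running to see whether the CUDA assert comes from the YOLO face model itself or from something earlier in the generation. This is just a sketch based on the ultralytics calls shown in the traceback; the model path and the test image path are placeholders for my install, so please correct me if this isn't a valid way to test it:

```python
# Sketch: try loading ADetailer's YOLO face model on the GPU outside of the webui,
# to see whether the "device-side assert" is reproducible on its own.
# Paths below are placeholders for my setup; adjust to wherever the model and a
# test image actually live.
import torch
from ultralytics import YOLO

print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("Device:", torch.cuda.get_device_name(0))

# face_yolov8n.pt normally ends up under the webui's ADetailer models folder
# (placeholder path, assumption on my part)
model = YOLO(r"C:\Users\laugh\Downloads\sd.webui\webui\models\adetailer\face_yolov8n.pt")

# Run a single prediction on the GPU, mirroring the call in adetailer/ultralytics.py
results = model.predict(source=r"C:\path\to\any_test_image.png", conf=0.3, device=0)
print("Detections:", len(results[0].boxes))
```

My thinking is that if this small script also crashes with the same CUDA assert, the problem is probably the GPU/driver or the torch install rather than ADetailer itself, and if it runs fine, then something earlier in my generation is likely leaving CUDA in a bad state before ADetailer runs. Is that a reasonable way to narrow it down?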