Checklist
The issue is caused by an extension, but I believe it is caused by a bug in the webui
The issue exists in the current version of the webui
The issue has not been reported before recently
The issue has been reported before but has not been fixed yet
What happened?
I installed v1.9.3-amd because it is the fastest version for image generation on my RX 580 with DirectML; newer versions slow it down significantly.
It worked fine as a clean install for a long time, until this bug appeared.
Steps to reproduce the problem
1. Start webui-user.bat.
2. Wait for the installation to finish.
3. The launch process begins.
4. torch complains about a missing fp8 dtype, even though I don't use Flux, only SD 1.5 and SDXL with fp16 (see the quick check below).
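A quick way to confirm the mismatch from inside the venv (a minimal sketch; it only assumes the same venv python.exe that appears in the log below):

```python
# Run with the venv's python.exe; confirms the fp8 dtype torch complains about is missing.
import torch

print(torch.__version__)                # "2.0.0+cpu" on this install
# torch.float8_e4m3fn only exists from torch 2.1 onwards, so this prints False here:
print(hasattr(torch, "float8_e4m3fn"))
```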
What should have happened?
The WebUI should launch and reach the UI without errors at startup.
Console logs
venv "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\Scripts\Python.exe"
Python 3.10.6 (tags/v3.10.6:9c7b4bd, Aug 1 2022, 21:53:49) [MSC v.1932 64 bit (AMD64)]
Version: v1.9.3-amd
Commit hash: 6130ef9e7a194bb0a212d12a697347153c4f31e2
Skipping onnxruntime installation.
C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\timm\models\layers\__init__.py:48: FutureWarning: Importing from timm.models.layers is deprecated, please import via timm.layers
warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.layers", FutureWarning)
no module 'xformers'. Processing without...
no module 'xformers'. Processing without...
No module 'xformers'. Proceeding without it.
C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\pytorch_lightning\utilities\distributed.py:258: LightningDeprecationWarning: `pytorch_lightning.utilities.distributed.rank_zero_only` has been deprecated in v1.8.1 and will be removed in v2.0.0. You can import it from `pytorch_lightning.utilities` instead.
rank_zero_deprecation(
Launching Web UI with arguments: --use-directml --skip-ort --opt-sdp-attention --medvram --medvram-sdxl --do-not-download-clip --no-download-sd-model
==============================================================================
You are running torch 2.0.0+cpu.
The program is tested to work with torch 2.1.2.
To reinstall the desired version, run with commandline flag --reinstall-torch.
Beware that this will cause a lot of large files to be downloaded, as well as
there are reports of issues with training tab on the latest version.
Use --skip-version-check commandline argument to disable this check.
==============================================================================
Calculating sha256 for C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\models\Stable-diffusion\illustriousSNAFUSuper_ilxlSNAFUV10.safetensors: Traceback (most recent call last):
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\utils\import_utils.py", line 920, in _get_module
return importlib.import_module("." + module_name, self.__name__)
File "C:\Users\Martin\AppData\Local\Programs\Python\Python310\lib\importlib\__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1050, in _gcd_import
File "<frozen importlib._bootstrap>", line 1027, in _find_and_load
File "<frozen importlib._bootstrap>", line 992, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1050, in _gcd_import
File "<frozen importlib._bootstrap>", line 1027, in _find_and_load
File "<frozen importlib._bootstrap>", line 1006, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 688, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 883, in exec_module
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\models\autoencoders\__init__.py", line 1, in<module>
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\models\autoencoders\autoencoder_asym_kl.py", line 22, in<module>
from ..modeling_utils import ModelMixin
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\models\modeling_utils.py", line 35, in<module>
from ..quantizers import DiffusersAutoQuantizer, DiffusersQuantizer
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\quantizers\__init__.py", line 15, in<module>
from .auto import DiffusersAutoQuantizer
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\quantizers\auto.py", line 31, in<module>
from .torchao import TorchAoHfQuantizer
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\quantizers\torchao\__init__.py", line 15, in<module>
from .torchao_quantizer import TorchAoHfQuantizer
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\quantizers\torchao\torchao_quantizer.py", line 57, in<module>
torch.float8_e4m3fn,
AttributeError: module 'torch' has no attribute 'float8_e4m3fn'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\utils\import_utils.py", line 920, in _get_module
return importlib.import_module("." + module_name, self.__name__)
File "C:\Users\Martin\AppData\Local\Programs\Python\Python310\lib\importlib\__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1050, in _gcd_import
File "<frozen importlib._bootstrap>", line 1027, in _find_and_load
File "<frozen importlib._bootstrap>", line 1006, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 688, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 883, in exec_module
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\pipelines\stable_diffusion\pipeline_onnx_stable_diffusion.py", line 26, in<module>
from ..pipeline_utils import DiffusionPipeline
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\pipelines\pipeline_utils.py", line 45, in<module>
from ..models import AutoencoderKL
File "<frozen importlib._bootstrap>", line 1075, in _handle_fromlist
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\utils\import_utils.py", line 910, in __getattr__
module = self._get_module(self._class_to_module[name])
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\utils\import_utils.py", line 922, in _get_module
raise RuntimeError(
RuntimeError: Failed to import diffusers.models.autoencoders.autoencoder_kl because of the following error (look up to see its traceback):
module 'torch' has no attribute 'float8_e4m3fn'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\launch.py", line 48, in<module>main()
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\launch.py", line 44, in main
start()
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\modules\launch_utils.py", line 701, in start
webui.webui()
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\webui.py", line 64, in webui
shared.demo = ui.create_ui()
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\modules\ui.py", line 1681, in create_ui
settings.create_ui(loadsave, dummy_component)
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\modules\ui_settings.py", line 161, in create_ui
component = create_setting_component(k)
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\modules\ui_settings.py", line 29, in create_setting_component
args = info.component_args() if callable(info.component_args) else info.component_args
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\modules\shared_options.py", line 216, in<lambda>"diffusers_pipeline": OptionInfo('ONNX Stable Diffusion', 'Diffusers pipeline', gr.Dropdown, lambda: {"choices": list(shared_items.get_pipelines())}),
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\modules\shared_items.py", line 97, in get_pipelines
'ONNX Stable Diffusion': getattr(diffusers, 'OnnxStableDiffusionPipeline', None),
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\utils\import_utils.py", line 911, in __getattr__
value = getattr(module, name)
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\utils\import_utils.py", line 911, in __getattr__
value = getattr(module, name)
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\utils\import_utils.py", line 910, in __getattr__
module = self._get_module(self._class_to_module[name])
File "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\diffusers\utils\import_utils.py", line 922, in _get_module
raise RuntimeError(
RuntimeError: Failed to import diffusers.pipelines.stable_diffusion.pipeline_onnx_stable_diffusion because of the following error (look up to see its traceback):
Failed to import diffusers.models.autoencoders.autoencoder_kl because of the following error (look up to see its traceback):
module 'torch' has no attribute 'float8_e4m3fn'
91fea33f81d1e7a25c63bd587def3c92d2c7a912dc3bf722705c5e5ce364cd21
Loading weights [91fea33f81] from C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\models\Stable-diffusion\illustriousSNAFUSuper_ilxlSNAFUV10.safetensors
Creating model from config: C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\repositories\generative-models\configs\inference\sd_xl_base.yaml
C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\venv\lib\site-packages\huggingface_hub\file_download.py:797: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
warnings.warn(
Applying attention optimization: sdp... done.
Model loaded in 30.9s (calculate hash: 20.1s, create model: 0.5s, apply weights to model: 8.5s, apply half(): 0.2s, hijack: 0.2s, calculate empty prompt: 1.2s).
Press any key to continue...
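For what it's worth, the failure can be reproduced outside the webui. The sketch below assumes the same torch 2.0.0+cpu / diffusers 0.32.2 combination from the sysinfo further down is installed in the venv. The attribute lookup is the same one modules/shared_items.py performs when building the pipeline dropdown; it triggers diffusers' lazy import of the torchao quantizer module, which references torch.float8_e4m3fn at import time:

```python
# Minimal reproduction sketch, run with the venv's python.exe.
import torch
import diffusers

print(torch.__version__, diffusers.__version__)   # 2.0.0+cpu, 0.32.2 here

try:
    # Same call as modules/shared_items.py line 97. The default value only
    # guards against AttributeError; diffusers' lazy module raises a
    # RuntimeError instead, so it propagates and kills UI creation.
    getattr(diffusers, "OnnxStableDiffusionPipeline", None)
except RuntimeError as err:
    print(err)   # "... module 'torch' has no attribute 'float8_e4m3fn'"
```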
Additional information
AMD driver WHQL 24.9.1
Windows 10 build 19045
Opera GX 116.0.5366.148
Generation time for SDXL Illustrious:
~ 1 minute 20 seconds (Original requirements)
~ 1 minute 15 seconds (Fixed requirements)
Tests done with RX 580 8GB on v1.9.3-amd branch with these args:
--use-directml --skip-ort --opt-sdp-attention --medvram --medvram-sdxl --do-not-download-clip --no-download-sd-model
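For reference, the timings above can be reproduced with a small script against the webui API. This is only a sketch: it assumes the UI is relaunched with --api added to the arguments and listening on the default http://127.0.0.1:7860, and the prompt, resolution, and step count are placeholders rather than the settings used for the numbers above:

```python
# Rough timing of one SDXL txt2img generation via the built-in API.
import time
import requests

payload = {
    "prompt": "test prompt",   # placeholder, not the prompt used for the timings above
    "steps": 20,
    "width": 1024,
    "height": 1024,
}

start = time.time()
resp = requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload, timeout=600)
resp.raise_for_status()
print(f"generation took {time.time() - start:.1f} s")
```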
What browsers do you use to access the UI ?
Other
Sysinfo
{
"Platform": "Windows-10-10.0.19045-SP0",
"Python": "3.10.6",
"Version": "v1.9.3-amd",
"Commit": "6130ef9e7a194bb0a212d12a697347153c4f31e2",
"Script path": "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu",
"Data path": "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu",
"Extensions dir": "C:\Users\Martin\Downloads\stable-diffusion-webui-amdgpu\extensions",
"Checksum": "88a9666aa54568c4b1b246753dcb06557a0ce1747983d5bb9b6355a6715e2ff4",
"Commandline": [
"launch.py",
"--use-directml",
"--skip-ort",
"--opt-sdp-attention",
"--medvram",
"--medvram-sdxl",
"--do-not-download-clip",
"--no-download-sd-model",
"--dump-sysinfo"
],
"Torch env info": {
"torch_version": "2.0.0+cpu",
"is_debug_build": "False",
"cuda_compiled_version": null,
"gcc_version": null,
"clang_version": null,
"cmake_version": null,
"os": "Microsoft Windows 10 Pro",
"libc_version": "N/A",
"python_version": "3.10.6 (tags/v3.10.6:9c7b4bd, Aug 1 2022, 21:53:49) [MSC v.1932 64 bit (AMD64)] (64-bit runtime)",
"python_platform": "Windows-10-10.0.19045-SP0",
"is_cuda_available": "False",
"cuda_runtime_version": null,
"cuda_module_loading": "N/A",
"nvidia_driver_version": null,
"nvidia_gpu_models": null,
"cudnn_version": null,
"pip_version": "pip3",
"pip_packages": [
"numpy==1.26.2",
"open-clip-torch==2.20.0",
"pytorch-lightning==1.9.4",
"torch==2.0.0",
"torch-directml==0.2.0.dev230426",
"torchdiffeq==0.2.3",
"torchmetrics==1.6.1",
"torchsde==0.2.6",
"torchvision==0.15.1"
],
"conda_packages": null,
"hip_compiled_version": "N/A",
"hip_runtime_version": "N/A",
"miopen_runtime_version": "N/A",
"caching_allocator_config": "",
"is_xnnpack_available": "True",
"cpu_info": [
"Architecture=9",
"CurrentClockSpeed=3600",
"DeviceID=CPU0",
"Family=107",
"L2CacheSize=3072",
"L2CacheSpeed=",
"Manufacturer=AuthenticAMD",
"MaxClockSpeed=3600",
"Name=AMD Ryzen 5 3600 6-Core Processor ",
"ProcessorType=3",
"Revision=28928"
]
},
"Exceptions": [],
"CPU": {
"model": "AMD64 Family 23 Model 113 Stepping 0, AuthenticAMD",
"count logical": 12,
"count physical": 6
},
"RAM": {
"total": "16GB",
"used": "5GB",
"free": "11GB"
},
"GPU": "DirectML is not initialized",
"Extensions": [],
"Inactive extensions": [],
"Environment": {
"COMMANDLINE_ARGS": "--use-directml --skip-ort --opt-sdp-attention --medvram --medvram-sdxl --do-not-download-clip --no-download-sd-model --dump-sysinfo",
"GRADIO_ANALYTICS_ENABLED": "False"
},
"Config": "'NoneType' object has no attribute 'data'",
"Startup": null,
"Packages": [
"accelerate==0.21.0",
"aenum==3.1.15",
"aiofiles==23.2.1",
"aiohappyeyeballs==2.4.6",
"aiohttp==3.11.12",
"aiosignal==1.3.2",
"alembic==1.14.1",
"altair==5.5.0",
"antlr4-python3-runtime==4.9.3",
"anyio==3.7.1",
"async-timeout==5.0.1",
"attrs==25.1.0",
"blendmodes==2022",
"certifi==2025.1.31",
"charset-normalizer==3.4.1",
"clean-fid==0.1.35",
"click==8.1.8",
"clip==1.0",
"colorama==0.4.6",
"coloredlogs==15.0.1",
"colorlog==6.9.0",
"contourpy==1.3.1",
"cycler==0.12.1",
"deprecation==2.1.0",
"diffusers==0.32.2",
"diskcache==5.6.3",
"einops==0.4.1",
"exceptiongroup==1.2.2",
"facexlib==0.3.0",
"fastapi==0.94.0",
"ffmpy==0.5.0",
"filelock==3.17.0",
"filterpy==1.4.5",
"flatbuffers==25.2.10",
"fonttools==4.56.0",
"frozenlist==1.5.0",
"fsspec==2025.2.0",
"ftfy==6.3.1",
"gitdb==4.0.12",
"gitpython==3.1.32",
"gradio-client==0.5.0",
"gradio==3.41.2",
"greenlet==3.1.1",
"h11==0.12.0",
"httpcore==0.15.0",
"httpx==0.24.1",
"huggingface-hub==0.29.1",
"humanfriendly==10.0",
"idna==3.10",
"imageio==2.37.0",
"importlib-metadata==8.6.1",
"importlib-resources==6.5.2",
"inflection==0.5.1",
"jinja2==3.1.5",
"jsonmerge==1.8.0",
"jsonschema-specifications==2024.10.1",
"jsonschema==4.23.0",
"kiwisolver==1.4.8",
"kornia==0.6.7",
"lark==1.1.2",
"lazy-loader==0.4",
"lightning-utilities==0.12.0",
"llvmlite==0.44.0",
"mako==1.3.9",
"markupsafe==2.1.5",
"matplotlib==3.10.0",
"mpmath==1.3.0",
"multidict==6.1.0",
"narwhals==1.27.1",
"networkx==3.4.2",
"numba==0.61.0",
"numpy==1.26.2",
"olive-ai==0.7.1.1",
"omegaconf==2.2.3",
"onnx==1.17.0",
"onnxruntime==1.20.1",
"open-clip-torch==2.20.0",
"opencv-python==4.11.0.86",
"optimum==1.24.0",
"optuna==4.2.1",
"orjson==3.10.15",
"packaging==24.2",
"pandas==2.2.3",
"piexif==1.1.3",
"pillow-avif-plugin==1.4.3",
"pillow==9.5.0",
"pip==22.2.1",
"propcache==0.3.0",
"protobuf==3.20.3",
"psutil==5.9.5",
"pydantic==1.10.21",
"pydub==0.25.1",
"pyparsing==3.2.1",
"pyreadline3==3.5.4",
"python-dateutil==2.9.0.post0",
"python-multipart==0.0.20",
"pytorch-lightning==1.9.4",
"pytz==2025.1",
"pywavelets==1.8.0",
"pyyaml==6.0.2",
"referencing==0.36.2",
"regex==2024.11.6",
"requests==2.32.3",
"resize-right==0.0.2",
"rpds-py==0.23.1",
"safetensors==0.4.2",
"scikit-image==0.21.0",
"scipy==1.15.2",
"semantic-version==2.10.0",
"sentencepiece==0.2.0",
"setuptools==63.2.0",
"six==1.17.0",
"smmap==5.0.2",
"sniffio==1.3.1",
"spandrel==0.1.6",
"sqlalchemy==2.0.38",
"starlette==0.26.1",
"sympy==1.13.3",
"tifffile==2025.2.18",
"timm==1.0.14",
"tokenizers==0.13.3",
"tomesd==0.1.3",
"torch-directml==0.2.0.dev230426",
"torch==2.0.0",
"torchdiffeq==0.2.3",
"torchmetrics==1.6.1",
"torchsde==0.2.6",
"torchvision==0.15.1",
"tqdm==4.67.1",
"trampoline==0.1.2",
"transformers==4.30.2",
"typing-extensions==4.12.2",
"tzdata==2025.1",
"urllib3==2.3.0",
"uvicorn==0.34.0",
"wcwidth==0.2.13",
"websockets==11.0.3",
"yarl==1.18.3",
"zipp==3.21.0"
]
}
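The "GPU": "DirectML is not initialized" entry above is likely just because --dump-sysinfo runs before the device is set up, but if there is any doubt that torch-directml can see the RX 580, a quick check like the following should confirm it (a sketch only; device indices and the reported adapter name depend on the machine):

```python
# Quick torch-directml sanity check, run with the venv's python.exe.
import torch
import torch_directml

print(torch_directml.device_count())     # >= 1 if the RX 580 is visible
print(torch_directml.device_name(0))     # e.g. the Radeon RX 580 adapter name
dml = torch_directml.device(0)           # usable as a torch.device
print(torch.ones(2, 2, device=dml) + 1)  # tiny op to confirm the device works
```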