fix: SDXL DoRA LoRA fails with enable_partial_loading=true
cast_to_device returns a plain torch.Tensor instead of a torch.nn.Parameter, so _aggregate_patch_parameters replaces valid weights with meta-device dummies and falsely triggers DoRA's quantization guard. Fixes invoke-ai/InvokeAI#8624
commit a0a87212a0
parent 7eaf1d5bd0
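For context, a minimal sketch of the failure mode described in the commit message: when a torch.nn.Parameter is copied to a new device or dtype via Tensor.to() (presumably what InvokeAI's cast_to_device does under the hood), the result is a plain torch.Tensor, so the strict `type(param) is torch.nn.Parameter` check in the pre-fix code no longer matches. The shapes and variable names below are illustrative only, and exact subclass behavior can vary across PyTorch versions.

import torch

# A Parameter standing in for an original module weight.
param = torch.nn.Parameter(torch.randn(4, 4))

# Casting to a new dtype forces a fresh tensor to be allocated. nn.Parameter
# disables __torch_function__ re-wrapping, so the result is a plain Tensor
# (nn.Module._apply re-wraps such results in Parameter for exactly this reason).
moved = param.to(dtype=torch.float16)

print(type(param))   # <class 'torch.nn.parameter.Parameter'>
print(type(moved))   # <class 'torch.Tensor'>

# The pre-fix guard only recognized Parameters, so a tensor like `moved`
# fell through to the fallback path described in the commit message.
print(type(moved) is torch.nn.Parameter)  # False
print(type(moved) is torch.Tensor)        # True -- what the fix now checks first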
@@ -49,7 +49,10 @@ class CustomModuleMixin:
         # parameters. But, of course, any sub-layers that need to access the actual values of the parameters will fail.
         for param_name in orig_params.keys():
             param = orig_params[param_name]
-            if type(param) is torch.nn.Parameter and type(param.data) is torch.Tensor:
+            if type(param) is torch.Tensor:
+                # Plain tensor (e.g. after cast_to_device moved a Parameter to another device).
+                pass
+            elif type(param) is torch.nn.Parameter and type(param.data) is torch.Tensor:
                 pass
             elif type(param) is GGMLTensor:
                 # Move to device and dequantize here. Doing it in the patch layer can result in redundant casts /
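Below is a self-contained sketch of the type dispatch the hunk above introduces, rewritten as a standalone function so it can be run outside InvokeAI. The function name check_patchable_param is illustrative, not part of the repo, and the GGMLTensor branch is reduced to a comment because that type is internal to InvokeAI.

import torch

def check_patchable_param(param: torch.Tensor) -> torch.Tensor:
    # Mirrors the post-fix dispatch for the two plain-tensor cases.
    if type(param) is torch.Tensor:
        # Plain tensor, e.g. after a Parameter was moved to another device.
        return param
    elif type(param) is torch.nn.Parameter and type(param.data) is torch.Tensor:
        return param
    # The real code has further branches (e.g. GGMLTensor is moved and
    # dequantized); this sketch simply rejects anything else.
    raise RuntimeError(f"Unexpected parameter type: {type(param)}")

# Both calls are accepted after the fix; before it, the second one fell
# through to the fallback path described in the commit message.
check_patchable_param(torch.nn.Parameter(torch.randn(2, 2)))
check_patchable_param(torch.nn.Parameter(torch.randn(2, 2)).to(torch.float16))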