Upload lora-scripts/sd-scripts/library/ipex/gradscaler.py with huggingface_hub
lora-scripts/sd-scripts/library/ipex/gradscaler.py
ADDED
@@ -0,0 +1,183 @@
from collections import defaultdict
import torch
import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
import intel_extension_for_pytorch._C as core # pylint: disable=import-error, unused-import

# pylint: disable=protected-access, missing-function-docstring, line-too-long

device_supports_fp64 = torch.xpu.has_fp64_dtype()
OptState = ipex.cpu.autocast._grad_scaler.OptState
_MultiDeviceReplicator = ipex.cpu.autocast._grad_scaler._MultiDeviceReplicator
_refresh_per_optimizer_state = ipex.cpu.autocast._grad_scaler._refresh_per_optimizer_state

def _unscale_grads_(self, optimizer, inv_scale, found_inf, allow_fp16): # pylint: disable=unused-argument
    per_device_inv_scale = _MultiDeviceReplicator(inv_scale)
    per_device_found_inf = _MultiDeviceReplicator(found_inf)

    # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype.
    # There could be hundreds of grads, so we'd like to iterate through them just once.
    # However, we don't know their devices or dtypes in advance.

    # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict
    # Google says mypy struggles with defaultdicts type annotations.
    per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) # type: ignore[var-annotated]
    # sync grad to master weight
    if hasattr(optimizer, "sync_grad"):
        optimizer.sync_grad()
    with torch.no_grad():
        for group in optimizer.param_groups:
            for param in group["params"]:
                if param.grad is None:
                    continue
                if (not allow_fp16) and param.grad.dtype == torch.float16:
                    raise ValueError("Attempting to unscale FP16 gradients.")
                if param.grad.is_sparse:
                    # is_coalesced() == False means the sparse grad has values with duplicate indices.
                    # coalesce() deduplicates indices and adds all values that have the same index.
                    # For scaled fp16 values, there's a good chance coalescing will cause overflow,
                    # so we should check the coalesced _values().
                    if param.grad.dtype is torch.float16:
                        param.grad = param.grad.coalesce()
                    to_unscale = param.grad._values()
                else:
                    to_unscale = param.grad

                # -: is there a way to split by device and dtype without appending in the inner loop?
                to_unscale = to_unscale.to("cpu")
                per_device_and_dtype_grads[to_unscale.device][
                    to_unscale.dtype
                ].append(to_unscale)

        for _, per_dtype_grads in per_device_and_dtype_grads.items():
            for grads in per_dtype_grads.values():
                core._amp_foreach_non_finite_check_and_unscale_(
                    grads,
                    per_device_found_inf.get("cpu"),
                    per_device_inv_scale.get("cpu"),
                )

    return per_device_found_inf._per_device_tensors

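# Illustration (added note, not part of the upstream file): the
# defaultdict-of-defaultdict bucketing above groups gradients in a single pass
# so that the fused foreach kernel runs once per (device, dtype) bucket rather
# than once per gradient; ``all_grads`` is a hypothetical flat list of grads:
#
#     buckets = defaultdict(lambda: defaultdict(list))
#     for g in all_grads:
#         buckets[g.device][g.dtype].append(g)
#     # buckets[device][dtype] -> one list per fused kernel call
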
def unscale_(self, optimizer):
    """
    Divides ("unscales") the optimizer's gradient tensors by the scale factor.
    :meth:`unscale_` is optional, serving cases where you need to
    :ref:`modify or inspect gradients<working-with-unscaled-gradients>`
    between the backward pass(es) and :meth:`step`.
    If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`.
    Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients::
        ...
        scaler.scale(loss).backward()
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        scaler.step(optimizer)
        scaler.update()
    Args:
        optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled.
    .. warning::
        :meth:`unscale_` should only be called once per optimizer per :meth:`step` call,
        and only after all gradients for that optimizer's assigned parameters have been accumulated.
        Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError.
    .. warning::
        :meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute.
    """
    if not self._enabled:
        return

    self._check_scale_growth_tracker("unscale_")

    optimizer_state = self._per_optimizer_states[id(optimizer)]

    if optimizer_state["stage"] is OptState.UNSCALED: # pylint: disable=no-else-raise
        raise RuntimeError(
            "unscale_() has already been called on this optimizer since the last update()."
        )
    elif optimizer_state["stage"] is OptState.STEPPED:
        raise RuntimeError("unscale_() is being called after step().")

    # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
    assert self._scale is not None
    if device_supports_fp64:
        inv_scale = self._scale.double().reciprocal().float()
    else:
        inv_scale = self._scale.to("cpu").double().reciprocal().float().to(self._scale.device)
    found_inf = torch.full(
        (1,), 0.0, dtype=torch.float32, device=self._scale.device
    )

    optimizer_state["found_inf_per_device"] = self._unscale_grads_(
        optimizer, inv_scale, found_inf, False
    )
    optimizer_state["stage"] = OptState.UNSCALED

def update(self, new_scale=None):
    """
    Updates the scale factor.
    If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
    to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
    the scale is multiplied by ``growth_factor`` to increase it.
    Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
    used directly, it's used to fill GradScaler's internal scale tensor. So if
    ``new_scale`` was a tensor, later in-place changes to that tensor will not further
    affect the scale GradScaler uses internally.)
    Args:
        new_scale (float or :class:`torch.FloatTensor`, optional, default=None): New scale factor.
    .. warning::
        :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
        been invoked for all optimizers used this iteration.
    """
    if not self._enabled:
        return

    _scale, _growth_tracker = self._check_scale_growth_tracker("update")

    if new_scale is not None:
        # Accept a new user-defined scale.
        if isinstance(new_scale, float):
            self._scale.fill_(new_scale) # type: ignore[union-attr]
        else:
            reason = "new_scale should be a float or a 1-element torch.FloatTensor with requires_grad=False."
            assert isinstance(new_scale, torch.FloatTensor), reason # type: ignore[attr-defined]
            assert new_scale.numel() == 1, reason
            assert new_scale.requires_grad is False, reason
            self._scale.copy_(new_scale) # type: ignore[union-attr]
    else:
        # Consume shared inf/nan data collected from optimizers to update the scale.
        # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
        found_infs = [
            found_inf.to(device="cpu", non_blocking=True)
            for state in self._per_optimizer_states.values()
            for found_inf in state["found_inf_per_device"].values()
        ]

        assert len(found_infs) > 0, "No inf checks were recorded prior to update."

        found_inf_combined = found_infs[0]
        if len(found_infs) > 1:
            for i in range(1, len(found_infs)):
                found_inf_combined += found_infs[i]

        to_device = _scale.device
        _scale = _scale.to("cpu")
        _growth_tracker = _growth_tracker.to("cpu")

        core._amp_update_scale_(
            _scale,
            _growth_tracker,
            found_inf_combined,
            self._growth_factor,
            self._backoff_factor,
            self._growth_interval,
        )

        _scale = _scale.to(to_device)
        _growth_tracker = _growth_tracker.to(to_device)
    # To prepare for next iteration, clear the data collected from optimizers this iteration.
    self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)

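# Illustration (added note, not part of the upstream file): per the docstring
# above, ``new_scale`` is copied into the scaler's internal scale tensor, so
# later in-place changes to a passed-in tensor do not affect the scaler;
# ``scaler`` is a hypothetical GradScaler instance:
#
#     scaler.update(new_scale=2.0 ** 16)    # fill from a float
#     t = torch.FloatTensor([1024.0])
#     scaler.update(new_scale=t)            # value is copied in
#     t.fill_(0.0)                          # scaler still uses 1024.0
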
def gradscaler_init():
    torch.xpu.amp.GradScaler = ipex.cpu.autocast._grad_scaler.GradScaler
    torch.xpu.amp.GradScaler._unscale_grads_ = _unscale_grads_
    torch.xpu.amp.GradScaler.unscale_ = unscale_
    torch.xpu.amp.GradScaler.update = update
    return torch.xpu.amp.GradScaler
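
# Usage sketch (added note, not part of the upstream file; assumes an Intel
# XPU build of PyTorch with intel_extension_for_pytorch installed). After
# ``gradscaler_init()`` re-points ``torch.xpu.amp.GradScaler`` at the IPEX CPU
# GradScaler with the CPU-side overrides above, training loops use the usual
# GradScaler API; ``model``, ``optimizer``, ``loss_fn`` and ``loader`` are
# hypothetical placeholders:
#
#     GradScaler = gradscaler_init()
#     scaler = GradScaler()
#     for inputs, targets in loader:
#         optimizer.zero_grad(set_to_none=True)
#         with torch.xpu.amp.autocast(enabled=True, dtype=torch.float16):
#             loss = loss_fn(model(inputs), targets)
#         scaler.scale(loss).backward()
#         scaler.step(optimizer)    # grads are unscaled here if unscale_ was not called
#         scaler.update()           # grow or back off the scale for the next step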