Source code for torch.optim.rmsprop
```python
import torch
from torch import Tensor
from .optimizer import (
    Optimizer,
    _default_to_fused_or_foreach,
    _use_grad_for_differentiable,
    _differentiable_doc,
    _foreach_doc,
    _maximize_doc,
    _view_as_real,
)
from typing import List, Optional

__all__ = ["RMSprop", "rmsprop"]


class RMSprop(Optimizer):
    def __init__(
        self,
        params,
        lr=1e-2,
        alpha=0.99,
        eps=1e-8,
        weight_decay=0,
        momentum=0,
        centered=False,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        differentiable: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= momentum:
            raise ValueError(f"Invalid momentum value: {momentum}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        if not 0.0 <= alpha:
            raise ValueError(f"Invalid alpha value: {alpha}")

        defaults = dict(
            lr=lr,
            momentum=momentum,
            alpha=alpha,
            eps=eps,
            centered=centered,
            weight_decay=weight_decay,
            foreach=foreach,
            maximize=maximize,
            differentiable=differentiable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("momentum", 0)
            group.setdefault("centered", False)
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)

    def _init_group(
        self, group, params_with_grad, grads, square_avgs, momentum_buffer_list, grad_avgs
    ):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)

            if p.grad.is_sparse:
                raise RuntimeError("RMSprop does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state["step"] = 0
                state["square_avg"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                if group["momentum"] > 0:
                    state["momentum_buffer"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
                if group["centered"]:
                    state["grad_avg"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
```
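As a usage sketch (not part of the source above), the optimizer is typically constructed from model parameters and stepped inside a training loop. The model, data, and hyperparameter values below are illustrative assumptions; the keyword names match the `__init__` signature shown above, and `momentum` / `centered` correspond to the `momentum_buffer` and `grad_avg` state tensors created in `_init_group`.

```python
import torch
import torch.nn as nn

# Hypothetical model and data, used only to drive the optimizer.
model = nn.Linear(10, 1)
inputs = torch.randn(32, 10)
targets = torch.randn(32, 1)

optimizer = torch.optim.RMSprop(
    model.parameters(),
    lr=1e-2,        # learning rate, validated to be >= 0 in __init__
    alpha=0.99,     # smoothing constant for the squared-gradient running average
    momentum=0.9,   # > 0 allocates the momentum_buffer state per parameter
    centered=True,  # allocates the grad_avg state used by the centered variant
)

for _ in range(5):
    optimizer.zero_grad()
    loss = nn.functional.mse_loss(model(inputs), targets)
    loss.backward()
    optimizer.step()
```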