import torch
from torch.optim import Optimizer


class LARC(Optimizer):
    """
    :class:`LARC` is a pytorch implementation of both the scaling and clipping variants of LARC,
    in which the ratio between gradient and parameter magnitudes is used to calculate an adaptive
    local learning rate for each individual parameter. The algorithm is designed to improve
    convergence of large batch training.

    See https://arxiv.org/abs/1708.03888 for calculation of the local learning rate.
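
    Concretely, the local learning rate computed in `step` for a parameter `p` with gradient `g` is
    `local_lr = trust_coefficient * ||p|| / (||g|| + weight_decay * ||p|| + eps)`.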

    In practice it modifies the gradients of parameters as a proxy for modifying the learning rate
    of the parameters. This design allows it to be used as a wrapper around any torch.optim Optimizer.

    ```
    model = ...
    optim = torch.optim.Adam(model.parameters(), lr=...)
    optim = LARC(optim)
    ```

    It can even be used in conjunction with apex.fp16_utils.FP16_Optimizer.

    ```
    model = ...
    optim = torch.optim.Adam(model.parameters(), lr=...)
    optim = LARC(optim)
    optim = apex.fp16_utils.FP16_Optimizer(optim)
    ```
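
    Weight decay set on the wrapped optimizer is applied by LARC itself inside `step` (it is folded into
    the rescaled gradient, and the wrapped optimizer's `weight_decay` is restored afterwards), so it can be
    configured on the inner optimizer as usual, for example:

    ```
    optim = torch.optim.SGD(model.parameters(), lr=..., momentum=..., weight_decay=...)
    optim = LARC(optim)
    ```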

    Args:
        optimizer: Pytorch optimizer to wrap and modify learning rate for.
        trust_coefficient: Trust coefficient for calculating the lr. See https://arxiv.org/abs/1708.03888
        clip: Decides between clipping or scaling mode of LARC. If `clip=True` the learning rate is set to `min(optimizer_lr, local_lr)` for each parameter. If `clip=False` the learning rate is set to `local_lr*optimizer_lr`.
        eps: epsilon kludge to help with numerical stability while calculating adaptive_lr
        epsilon: Local learning rate used as a fallback when either the parameter norm or the gradient norm is zero.
    """

    def __init__(self, optimizer, trust_coefficient=0.002, clip=True, eps=1e-8, epsilon=0.008):
        # The default for `epsilon` is an assumption; the remaining defaults follow the original module.
        self.param_groups = optimizer.param_groups
        self.optim = optimizer
        self.trust_coefficient = trust_coefficient
        self.eps = eps
        self.clip = clip
        self.epsilon = epsilon

    def __getstate__(self):
        return self.optim.__getstate__()

    def __setstate__(self, state):
        self.optim.__setstate__(state)

    def __repr__(self):
        return self.optim.__repr__()

    def state_dict(self):
        return self.optim.state_dict()

    def load_state_dict(self, state_dict):
        self.optim.load_state_dict(state_dict)

    def zero_grad(self):
        self.optim.zero_grad()

    def add_param_group(self, param_group):
        self.optim.add_param_group(param_group)
    def step(self):
        with torch.no_grad():
            weight_decays = []
            for group in self.optim.param_groups:
                # Absorb weight decay control from the wrapped optimizer.
                weight_decay = group['weight_decay'] if 'weight_decay' in group else 0
                weight_decays.append(weight_decay)
                group['weight_decay'] = 0
                for p in group['params']:
                    if p.grad is None:
                        continue
                    param_norm = torch.norm(p.data)
                    grad_norm = torch.norm(p.grad.data)

                    if param_norm != 0 and grad_norm != 0:
                        # Adaptive local learning rate, with weight decay folded into the denominator.
                        larc_local_lr = self.trust_coefficient * param_norm / (grad_norm + weight_decay * param_norm + self.eps)
                    else:
                        larc_local_lr = self.epsilon

                    if self.clip:
                        # Clip so that the effective learning rate is min(optimizer_lr, local_lr).
                        adaptive_lr = min(larc_local_lr / group['lr'], 1)
                    else:
                        adaptive_lr = larc_local_lr

                    # Apply weight decay and the adaptive lr by rescaling the gradient in place.
                    p.grad.data += weight_decay * p.data
                    p.grad.data *= adaptive_lr

        self.optim.step()

        # Return weight decay control to the wrapped optimizer.
        for i, group in enumerate(self.optim.param_groups):
            group['weight_decay'] = weight_decays[i]