import contextlib
import warnings

from torch._C import default_generator


def set_rng_state(new_state):
    r"""Sets the random number generator state.

    Args:
        new_state (torch.ByteTensor): The desired state
    """
    default_generator.set_state(new_state)


def get_rng_state():
    r"""Returns the random number generator state as a `torch.ByteTensor`."""
    return default_generator.get_state()


def manual_seed(seed):
    r"""Sets the seed for generating random numbers. Returns a
    `torch._C.Generator` object.

    Args:
        seed (int): The desired seed.
    """
    seed = int(seed)

    import torch.cuda

    # Also seed every CUDA device, unless we are in a "bad fork" (a subprocess
    # forked after CUDA was already initialized), where touching CUDA would fail.
    if not torch.cuda._in_bad_fork:
        torch.cuda.manual_seed_all(seed)

    return default_generator.manual_seed(seed)
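

# Illustrative usage sketch, not part of the recovered module: re-seeding the
# global generator replays the same sequence, and ``initial_seed`` reports the
# seed that was set.
def _example_manual_seed():
    import torch

    manual_seed(42)
    first = torch.randn(3)
    assert initial_seed() == 42                 # the seed we just set

    manual_seed(42)                             # same seed, same sequence
    assert torch.equal(first, torch.randn(3))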


def initial_seed():
    r"""Returns the initial seed for generating random numbers as a
    Python `long`.
    """
    return default_generator.initial_seed()


_fork_rng_warned_already = False


@contextlib.contextmanager
def fork_rng(devices=None, enabled=True, _caller="fork_rng", _devices_kw="devices"):
    """
    Forks the RNG, so that when you return, the RNG is reset
    to the state that it was previously in.

    Arguments:
        devices (iterable of CUDA IDs): CUDA devices for which to fork
            the RNG.  CPU RNG state is always forked.  By default, :meth:`fork_rng` operates
            on all devices, but will emit a warning if your machine has a lot
            of devices, since this function will run very slowly in that case.
            If you explicitly specify devices, this warning will be suppressed.
        enabled (bool): if ``False``, the RNG is not forked.  This is a convenience
            argument for easily disabling the context manager without having
            to reindent your Python code.
    """

    import torch

    global _fork_rng_warned_already

    # _caller and _devices_kw are internal: they let wrappers of fork_rng
    # attribute the warning below to themselves.

    if not enabled:
        yield
        return

    if devices is None:
        num_devices = torch.cuda.device_count()
        if num_devices > 1 and not _fork_rng_warned_already:
            warnings.warn(
                ("CUDA reports that you have {num_devices} available devices, and you "
                 "have used {caller} without explicitly specifying which devices are being used. "
                 "For safety, we initialize *every* CUDA device by default, which "
                 "can be quite slow if you have a lot of GPUs.  If you know that you are only "
                 "making use of a few CUDA devices, set the environment variable CUDA_VISIBLE_DEVICES "
                 "or the '{devices_kw}' keyword argument of {caller} with the set "
                 "of devices you are actually using.  For example, if you are using CPU only, "
                 "set CUDA_VISIBLE_DEVICES= or devices=[]; if you are using "
                 "GPU 0 only, set CUDA_VISIBLE_DEVICES=0 or devices=[0].  To initialize "
                 "all devices and suppress this warning, set the '{devices_kw}' keyword argument "
                 "to `range(torch.cuda.device_count())`."
                 ).format(num_devices=num_devices, caller=_caller, devices_kw=_devices_kw))
            _fork_rng_warned_already = True
        devices = list(range(num_devices))
    else:
        # The argument may be a one-shot iterable; materialize it, since it is
        # traversed twice (once to save state, once to restore it).
        devices = list(devices)

    # Save the CPU RNG state and the RNG state of every requested CUDA device.
    cpu_rng_state = torch.get_rng_state()
    gpu_rng_states = []
    for device in devices:
        with torch.cuda.device(device):
            gpu_rng_states.append(torch.cuda.get_rng_state())

    try:
        yield
    finally:
        # Restore all saved states, even if the body raised.
        torch.set_rng_state(cpu_rng_state)
        for device, gpu_rng_state in zip(devices, gpu_rng_states):
            with torch.cuda.device(device):
                torch.cuda.set_rng_state(gpu_rng_state)
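

# Illustrative usage sketch, not part of the recovered module: randomness
# consumed under ``fork_rng`` leaves the surrounding RNG state untouched.
# ``devices=[]`` skips CUDA state entirely, so this runs on CPU only.
def _example_fork_rng():
    import torch

    before = get_rng_state()
    with fork_rng(devices=[]):
        torch.randn(100)                         # draws inside the fork...
    assert torch.equal(before, get_rng_state())  # ...leave no trace outside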