from __future__ import absolute_import
import six
from six.moves import zip

from . import backend as K
from .utils.generic_utils import serialize_keras_object
from .utils.generic_utils import deserialize_keras_object

if K.backend() == 'tensorflow':
    import tensorflow as tf


def clip_norm(g, c, n):
    # Rescale gradient `g` so that its contribution to the global norm `n`
    # does not exceed the clipping threshold `c`.
    if c > 0:
        g = K.switch(n >= c, g * c / n, g)
    return g


class Optimizer(object):
    """Abstract optimizer base class.

    Note: this is the parent class of all optimizers, not an actual optimizer
    that can be used for training models.

    All Keras optimizers support the following keyword arguments:

        clipnorm: float >= 0. Gradients will be clipped
            when their L2 norm exceeds this value.
        clipvalue: float >= 0. Gradients will be clipped
            when their absolute value exceeds this value.
    """

    def __init__(self, **kwargs):
        allowed_kwargs = {'clipnorm', 'clipvalue'}
        for k in kwargs:
            if k not in allowed_kwargs:
                raise TypeError('Unexpected keyword argument '
                                'passed to optimizer: ' + str(k))
        self.__dict__.update(kwargs)
        self.updates = []
        self.weights = []

    def get_updates(self, params, constraints, loss):
        raise NotImplementedError

    def get_gradients(self, loss, params):
        grads = K.gradients(loss, params)
        if hasattr(self, 'clipnorm') and self.clipnorm > 0:
            norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
            grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
        if hasattr(self, 'clipvalue') and self.clipvalue > 0:
            grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
        return grads

    def set_weights(self, weights):
        """Sets the weights of the optimizer, from Numpy arrays.

        Should only be called after computing the gradients
        (otherwise the optimizer has no weights).

        # Arguments
            weights: a list of Numpy arrays. The number
                of arrays and their shape must match
                number of the dimensions of the weights
                of the optimizer (i.e. it should match the
                output of `get_weights`).

        # Raises
            ValueError: in case of incompatible weight shapes.
        """
        params = self.weights
        weight_value_tuples = []
        param_values = K.batch_get_value(params)
        for pv, p, w in zip(param_values, params, weights):
            if pv.shape != w.shape:
                raise ValueError('Optimizer weight shape ' +
                                 str(pv.shape) +
                                 ' not compatible with '
                                 'provided weight shape ' + str(w.shape))
            weight_value_tuples.append((p, w))
        K.batch_set_value(weight_value_tuples)

    def get_weights(self):
        """Returns the current value of the weights of the optimizer.

        # Returns
            A list of numpy arrays.
        """
        return K.batch_get_value(self.weights)

    def get_config(self):
        config = {}
        if hasattr(self, 'clipnorm'):
            config['clipnorm'] = self.clipnorm
        if hasattr(self, 'clipvalue'):
            config['clipvalue'] = self.clipvalue
        return config

    @classmethod
    def from_config(cls, config):
        return cls(**config)
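

# Illustrative sketch (not part of the original module): how the `clipnorm`
# and `clipvalue` keyword arguments accepted by every Optimizer subclass are
# typically used.  The model, layer sizes and loss below are hypothetical.
#
#     from keras.models import Sequential
#     from keras.layers import Dense
#     from keras.optimizers import SGD
#
#     model = Sequential([Dense(1, input_shape=(10,))])
#     # Rescale gradients whenever their global L2 norm exceeds 1.0, and
#     # additionally clip each gradient element to the range [-0.5, 0.5].
#     opt = SGD(lr=0.01, clipnorm=1.0, clipvalue=0.5)
#     model.compile(loss='mse', optimizer=opt)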


class SGD(Optimizer):
    """Stochastic gradient descent optimizer.

    Includes support for momentum,
    learning rate decay, and Nesterov momentum.

    # Arguments
        lr: float >= 0. Learning rate.
        momentum: float >= 0. Parameter updates momentum.
        decay: float >= 0. Learning rate decay over each update.
        nesterov: boolean. Whether to apply Nesterov momentum.
    """

    def __init__(self, lr=0.01, momentum=0., decay=0.,
                 nesterov=False, **kwargs):
        super(SGD, self).__init__(**kwargs)
        self.iterations = K.variable(0., name='iterations')
        self.lr = K.variable(lr, name='lr')
        self.momentum = K.variable(momentum, name='momentum')
        self.decay = K.variable(decay, name='decay')
        self.initial_decay = decay
        self.nesterov = nesterov

    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = []

        lr = self.lr
        if self.initial_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))
            self.updates.append(K.update_add(self.iterations, 1))

        # momentum
        shapes = [K.get_variable_shape(p) for p in params]
        moments = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + moments
        for p, g, m in zip(params, grads, moments):
            v = self.momentum * m - lr * g  # velocity
            self.updates.append(K.update(m, v))

            if self.nesterov:
                new_p = p + self.momentum * v - lr * g
            else:
                new_p = p + v

            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)

            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'momentum': float(K.get_value(self.momentum)),
                  'decay': float(K.get_value(self.decay)),
                  'nesterov': self.nesterov}
        base_config = super(SGD, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
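

# Illustrative note (not part of the original module): with its default
# arguments the SGD class above performs plain gradient descent.  With
# momentum and decay enabled, the per-step update is, in pseudo-math
# (t counts calls to `get_updates`):
#
#     lr_t = lr / (1 + decay * t)
#     v_t  = momentum * v_{t-1} - lr_t * grad
#     p_t  = p_{t-1} + v_t                             # nesterov=False
#     p_t  = p_{t-1} + momentum * v_t - lr_t * grad    # nesterov=True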


class RMSprop(Optimizer):
    """RMSProp optimizer.

    It is recommended to leave the parameters of this optimizer
    at their default values
    (except the learning rate, which can be freely tuned).

    This optimizer is usually a good choice for recurrent
    neural networks.

    # Arguments
        lr: float >= 0. Learning rate.
        rho: float >= 0.
        epsilon: float >= 0. Fuzz factor.
        decay: float >= 0. Learning rate decay over each update.

    # References
        - [rmsprop: Divide the gradient by a running average of its recent magnitude](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
    """

    def __init__(self, lr=0.001, rho=0.9, epsilon=1e-8, decay=0., **kwargs):
        super(RMSprop, self).__init__(**kwargs)
        self.lr = K.variable(lr, name='lr')
        self.rho = K.variable(rho, name='rho')
        self.epsilon = epsilon
        self.decay = K.variable(decay, name='decay')
        self.initial_decay = decay
        self.iterations = K.variable(0., name='iterations')

    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.get_variable_shape(p) for p in params]
        accumulators = [K.zeros(shape) for shape in shapes]
        self.weights = accumulators
        self.updates = []

        lr = self.lr
        if self.initial_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))
            self.updates.append(K.update_add(self.iterations, 1))

        for p, g, a in zip(params, grads, accumulators):
            # update accumulator
            new_a = self.rho * a + (1. - self.rho) * K.square(g)
            self.updates.append(K.update(a, new_a))
            new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)

            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'rho': float(K.get_value(self.rho)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(RMSprop, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
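

# Illustrative note (not part of the original module): RMSprop above keeps one
# running average of squared gradients per parameter and divides each step by
# its root, in pseudo-math:
#
#     E[g^2]_t = rho * E[g^2]_{t-1} + (1 - rho) * g_t^2
#     p_t      = p_{t-1} - lr * g_t / (sqrt(E[g^2]_t) + epsilon)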


class Adagrad(Optimizer):
    """Adagrad optimizer.

    It is recommended to leave the parameters of this optimizer
    at their default values.

    # Arguments
        lr: float >= 0. Learning rate.
        epsilon: float >= 0.
        decay: float >= 0. Learning rate decay over each update.

    # References
        - [Adaptive Subgradient Methods for Online Learning and Stochastic Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
    """

    def __init__(self, lr=0.01, epsilon=1e-8, decay=0., **kwargs):
        super(Adagrad, self).__init__(**kwargs)
        self.lr = K.variable(lr, name='lr')
        self.epsilon = epsilon
        self.decay = K.variable(decay, name='decay')
        self.initial_decay = decay
        self.iterations = K.variable(0., name='iterations')

    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.get_variable_shape(p) for p in params]
        accumulators = [K.zeros(shape) for shape in shapes]
        self.weights = accumulators
        self.updates = []

        lr = self.lr
        if self.initial_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))
            self.updates.append(K.update_add(self.iterations, 1))

        for p, g, a in zip(params, grads, accumulators):
            new_a = a + K.square(g)  # update accumulator
            self.updates.append(K.update(a, new_a))
            new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(Adagrad, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class Adadelta(Optimizer):
    """Adadelta optimizer.

    It is recommended to leave the parameters of this optimizer
    at their default values.

    # Arguments
        lr: float >= 0. Learning rate.
            It is recommended to leave it at the default value.
        rho: float >= 0.
        epsilon: float >= 0. Fuzz factor.
        decay: float >= 0. Learning rate decay over each update.

    # References
        - [Adadelta - an adaptive learning rate method](http://arxiv.org/abs/1212.5701)
    """

    def __init__(self, lr=1.0, rho=0.95, epsilon=1e-8, decay=0., **kwargs):
        super(Adadelta, self).__init__(**kwargs)
        self.lr = K.variable(lr, name='lr')
        self.rho = rho
        self.epsilon = epsilon
        self.decay = K.variable(decay, name='decay')
        self.initial_decay = decay
        self.iterations = K.variable(0., name='iterations')

    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.get_variable_shape(p) for p in params]
        accumulators = [K.zeros(shape) for shape in shapes]
        delta_accumulators = [K.zeros(shape) for shape in shapes]
        self.weights = accumulators + delta_accumulators
        self.updates = []

        lr = self.lr
        if self.initial_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))
            self.updates.append(K.update_add(self.iterations, 1))

        for p, g, a, d_a in zip(params, grads, accumulators,
                                delta_accumulators):
            # update accumulator
            new_a = self.rho * a + (1. - self.rho) * K.square(g)
            self.updates.append(K.update(a, new_a))

            # use the new accumulator and the *old* delta accumulator
            update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)

            new_p = p - lr * update
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))

            # update delta accumulator
            new_d_a = self.rho * d_a + (1. - self.rho) * K.square(update)
            self.updates.append(K.update(d_a, new_d_a))
        return self.updates

    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'rho': self.rho,
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(Adadelta, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class Adam(Optimizer):
    """Adam optimizer.

    Default parameters follow those provided in the original paper.

    # Arguments
        lr: float >= 0. Learning rate.
        beta_1: float, 0 < beta < 1. Generally close to 1.
        beta_2: float, 0 < beta < 1. Generally close to 1.
        epsilon: float >= 0. Fuzz factor.
        decay: float >= 0. Learning rate decay over each update.

    # References
        - [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
    """

    def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8, decay=0., **kwargs):
        super(Adam, self).__init__(**kwargs)
        self.iterations = K.variable(0, name='iterations')
        self.lr = K.variable(lr, name='lr')
        self.beta_1 = K.variable(beta_1, name='beta_1')
        self.beta_2 = K.variable(beta_2, name='beta_2')
        self.epsilon = epsilon
        self.decay = K.variable(decay, name='decay')
        self.initial_decay = decay

    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.initial_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))

        # bias-corrected step size: lr_t = lr * sqrt(1 - beta_2^t) / (1 - beta_1^t)
        t = self.iterations + 1
        lr_t = lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))

        shapes = [K.get_variable_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))

            new_p = p_t
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(Adam, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class Adamax(Optimizer):
    """Adamax optimizer from Adam paper's Section 7.

    It is a variant of Adam based on the infinity norm.
    Default parameters follow those provided in the paper.

    # Arguments
        lr: float >= 0. Learning rate.
        beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
        epsilon: float >= 0. Fuzz factor.
        decay: float >= 0. Learning rate decay over each update.

    # References
        - [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
    """

    def __init__(self, lr=0.002, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8, decay=0., **kwargs):
        super(Adamax, self).__init__(**kwargs)
        self.iterations = K.variable(0., name='iterations')
        self.lr = K.variable(lr, name='lr')
        self.beta_1 = K.variable(beta_1, name='beta_1')
        self.beta_2 = K.variable(beta_2, name='beta_2')
        self.epsilon = epsilon
        self.decay = K.variable(decay, name='decay')
        self.initial_decay = decay

    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.initial_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))

        t = self.iterations + 1
        lr_t = lr / (1. - K.pow(self.beta_1, t))

        shapes = [K.get_variable_shape(p) for p in params]
        # zero init of 1st moment
        ms = [K.zeros(shape) for shape in shapes]
        # zero init of exponentially weighted infinity norm
        us = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + ms + us

        for p, g, m, u in zip(params, grads, ms, us):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            u_t = K.maximum(self.beta_2 * u, K.abs(g))
            p_t = p - lr_t * m_t / (u_t + self.epsilon)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(u, u_t))

            new_p = p_t
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(Adamax, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class Nadam(Optimizer):
    """Nesterov Adam optimizer.

    Much like Adam is essentially RMSprop with momentum,
    Nadam is Adam with Nesterov momentum.

    Default parameters follow those provided in the paper.
    It is recommended to leave the parameters of this optimizer
    at their default values.

    # Arguments
        lr: float >= 0. Learning rate.
        beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
        epsilon: float >= 0. Fuzz factor.

    # References
        - [Nadam report](http://cs229.stanford.edu/proj2015/054_report.pdf)
        - [On the importance of initialization and momentum in deep learning](http://www.cs.toronto.edu/~fritz/absps/momentum.pdf)
    """

    def __init__(self, lr=0.002, beta_1=0.9, beta_2=0.999,
                 epsilon=1e-8, schedule_decay=0.004, **kwargs):
        super(Nadam, self).__init__(**kwargs)
        self.iterations = K.variable(0., name='iterations')
        self.m_schedule = K.variable(1., name='m_schedule')
        self.lr = K.variable(lr, name='lr')
        self.beta_1 = K.variable(beta_1, name='beta_1')
        self.beta_2 = K.variable(beta_2, name='beta_2')
        self.epsilon = epsilon
        self.schedule_decay = schedule_decay

    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        t = self.iterations + 1

        # Due to the recommendations in [2], i.e. warming momentum schedule
        momentum_cache_t = self.beta_1 * (
            1. - 0.5 * (K.pow(0.96, t * self.schedule_decay)))
        momentum_cache_t_1 = self.beta_1 * (
            1. - 0.5 * (K.pow(0.96, (t + 1) * self.schedule_decay)))
        m_schedule_new = self.m_schedule * momentum_cache_t
        m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
        self.updates.append((self.m_schedule, m_schedule_new))

        shapes = [K.get_variable_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):
            # the following equations are given in [1]
            g_prime = g / (1. - m_schedule_new)
            m_t = self.beta_1 * m + (1. - self.beta_1) * g
            m_t_prime = m_t / (1. - m_schedule_next)
            v_t = self.beta_2 * v + (1. - self.beta_2) * K.square(g)
            v_t_prime = v_t / (1. - K.pow(self.beta_2, t))
            m_t_bar = (1. - momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))

            p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
            new_p = p_t

            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates

    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'beta_1': float(K.get_value(self.beta_1)),
                  'beta_2': float(K.get_value(self.beta_2)),
                  'epsilon': self.epsilon,
                  'schedule_decay': self.schedule_decay}
        base_config = super(Nadam, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


class TFOptimizer(Optimizer):
    """Wrapper class for native TensorFlow optimizers.
    """

    def __init__(self, optimizer):
        self.optimizer = optimizer
        self.iterations = K.variable(0., name='iterations')
        self.updates = []

    def get_updates(self, params, constraints, loss):
        if constraints:
            raise ValueError('TF optimizers do not support '
                             'weights constraints. Either remove '
                             'all weights constraints in your model, '
                             'or use a Keras optimizer.')
        grads = self.optimizer.compute_gradients(loss, params)
        opt_update = self.optimizer.apply_gradients(
            grads, global_step=self.iterations)
        self.updates.append(opt_update)
        return self.updates

    @property
    def weights(self):
        raise NotImplementedError

    def get_config(self):
        raise NotImplementedError

    def from_config(self, config):
        raise NotImplementedError


# aliases
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
adamax = Adamax
nadam = Nadam


def serialize(optimizer):
    return serialize_keras_object(optimizer)


def deserialize(config, custom_objects=None):
    """Inverse of the `serialize` function.

    # Arguments
        config: Optimizer configuration dictionary.
        custom_objects: Optional dictionary mapping
            names (strings) to custom objects
            (classes and functions)
            to be considered during deserialization.

    # Returns
        A Keras Optimizer instance.
    """
    all_classes = {'sgd': SGD,
                   'rmsprop': RMSprop,
                   'adagrad': Adagrad,
                   'adadelta': Adadelta,
                   'adam': Adam,
                   'adamax': Adamax,
                   'nadam': Nadam,
                   'tfoptimizer': TFOptimizer}
    # Make deserialization case-insensitive for the built-in optimizers.
    if config['class_name'].lower() in all_classes:
        config['class_name'] = config['class_name'].lower()
    return deserialize_keras_object(config,
                                    module_objects=all_classes,
                                    custom_objects=custom_objects,
                                    printable_module_name='optimizer')


def get(identifier):
    """Retrieves a Keras Optimizer instance.

    # Arguments
        identifier: Optimizer identifier, one of
            - String: name of an optimizer
            - Dictionary: configuration dictionary.
            - Keras Optimizer instance (it will be returned unchanged).
            - TensorFlow Optimizer instance
                (it will be wrapped as a Keras Optimizer).

    # Returns
        A Keras Optimizer instance.

    # Raises
        ValueError: If `identifier` cannot be interpreted.
    """
    if K.backend() == 'tensorflow':
        # Wrap native TF optimizer instances.
        if isinstance(identifier, tf.train.Optimizer):
            return TFOptimizer(identifier)
    if isinstance(identifier, dict):
        return deserialize(identifier)
    elif isinstance(identifier, six.string_types):
        config = {'class_name': str(identifier), 'config': {}}
        return deserialize(config)
    if isinstance(identifier, Optimizer):
        return identifier
    else:
        raise ValueError('Could not interpret optimizer identifier:',
                         identifier)
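

# Illustrative sketch (not part of the original module): how the helpers above
# behave, assuming the TensorFlow backend.  `model.compile(optimizer=...)`
# resolves its argument through `get`.
#
#     from keras import optimizers
#     import tensorflow as tf
#
#     opt = optimizers.get('adam')          # string -> Adam() with defaults
#     cfg = optimizers.serialize(opt)       # -> {'class_name': 'Adam', 'config': {...}}
#     opt2 = optimizers.deserialize(cfg)    # config dict -> new Adam instance
#     wrapped = optimizers.get(tf.train.GradientDescentOptimizer(0.1))
#     # `wrapped` is a TFOptimizer; weight constraints are not supported with it.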