from __future__ import absolute_import
from .. import initializers
from .. import regularizers
from .. import constraints
from ..engine import Layer
from ..engine import InputSpec
from .. import backend as K
from ..legacy import interfaces


class LeakyReLU(Layer):
    """Leaky version of a Rectified Linear Unit.

    It allows a small gradient when the unit is not active:
    `f(x) = alpha * x for x < 0`,
    `f(x) = x for x >= 0`.

    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    # Output shape
        Same shape as the input.

    # Arguments
        alpha: float >= 0. Negative slope coefficient.

    # References
        - [Rectifier Nonlinearities Improve Neural Network Acoustic Models](https://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf)
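
    # Example
        A minimal usage sketch; the surrounding `Dense` model is
        illustrative, not part of this layer's contract:

    ```python
        from keras.models import Sequential
        from keras.layers import Dense
        from keras.layers.advanced_activations import LeakyReLU

        model = Sequential()
        model.add(Dense(64, input_shape=(32,)))
        # Applied as a standalone layer, rather than via the
        # `activation=` argument of `Dense`.
        model.add(LeakyReLU(alpha=0.1))
    ```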
    """

    def __init__(self, alpha=0.3, **kwargs):
        super(LeakyReLU, self).__init__(**kwargs)
        self.supports_masking = True
        self.alpha = K.cast_to_floatx(alpha)

    def call(self, inputs):
        return K.relu(inputs, alpha=self.alpha)

    def get_config(self):
        config = {'alpha': self.alpha}
        base_config = super(LeakyReLU, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
   R   R   (    (    (    s@   /tmp/pip-build-isqEY4/keras/keras/layers/advanced_activations.pyR      s   	t   PReLUc           B@  sJ   e  Z d  Z e j d d d d d „ ƒ Z d „  Z d d „ Z d „  Z	 RS(   s¼  Parametric Rectified Linear Unit.

    It follows:
    `f(x) = alpha * x for x < 0`,
    `f(x) = x for x >= 0`,
    where `alpha` is a learned array with the same shape as x.

    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    # Output shape
        Same shape as the input.

    # Arguments
        alpha_initializer: initializer function for the weights.
        alpha_regularizer: regularizer for the weights.
        alpha_constraint: constraint for the weights.
        shared_axes: the axes along which to share learnable
            parameters for the activation function.
            For example, if the incoming feature maps
            are from a 2D convolution
            with output shape `(batch, height, width, channels)`,
            and you wish to share parameters across space
            so that each filter only has one set of parameters,
            set `shared_axes=[1, 2]`.

    # References
        - [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/abs/1502.01852)
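
    # Example
        A minimal usage sketch of the `shared_axes` behaviour described
        above; the convolutional model is illustrative and assumes the
        default `channels_last` data format:

    ```python
        from keras.models import Sequential
        from keras.layers import Conv2D
        from keras.layers.advanced_activations import PReLU

        model = Sequential()
        model.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1)))
        # Output shape is (batch, 26, 26, 32); sharing over axes 1 and 2
        # leaves one learned alpha per filter: 32 parameters instead of
        # 26 * 26 * 32.
        model.add(PReLU(shared_axes=[1, 2]))
    ```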
    t   zerosc         K@  s¤   t  t |  ƒ j |   t |  _ t j | ƒ |  _ t j | ƒ |  _	 t
 j | ƒ |  _ | d  k rm d  |  _ n3 t | t t f ƒ s‘ | g |  _ n t | ƒ |  _ d  S(   N(   R	   R   R
   R   R   R   t   gett   alpha_initializerR   t   alpha_regularizerR   t   alpha_constraintt   Nonet   shared_axest
   isinstanceR   t   tuple(   R   R!   R"   R#   R%   R   (    (    s@   /tmp/pip-build-isqEY4/keras/keras/layers/advanced_activations.pyR
   R   s    	c      
   C@  s  t  | d ƒ } t g t | ƒ |  _ |  j d  k	 rk x3 |  j D]% } d | | d <t |  j | d <q? Wn  |  j | d d d |  j d |  j	 d |  j
 ƒ|  _ i  } |  j rð x@ t d t | ƒ ƒ D]& } | |  j k rÃ | | | | <qÃ qÃ Wn  t d t | ƒ d | ƒ |  _ t |  _ d  S(	   Ni   t   nameR   t   initializert   regularizert
   constraintt   ndimt   axes(   R   t   Falset   lent   param_broadcastR%   R$   R   t
   add_weightR!   R"   R#   R   t   rangeR   t
   input_spect   built(   R   t   input_shapet   param_shapet   iR-   (    (    s@   /tmp/pip-build-isqEY4/keras/keras/layers/advanced_activations.pyt   buildd   s$    			c         C@  sq   t  j | ƒ } t  j ƒ  d k rQ t  j |  j |  j ƒ | t  j | ƒ d } n |  j t  j | ƒ } | | S(   Nt   theanog      à?(   R   R   R   t   pattern_broadcastR   R0   t   abs(   R   R   t   maskt   post   neg(    (    s@   /tmp/pip-build-isqEY4/keras/keras/layers/advanced_activations.pyR   y   s
    0c         C@  s„   i t  j |  j ƒ d 6t j |  j ƒ d 6t j |  j ƒ d 6|  j d 6} t t	 |  ƒ j
 ƒ  } t t | j ƒ  ƒ t | j ƒ  ƒ ƒ S(   NR!   R"   R#   R%   (   R   t	   serializeR!   R   R"   R   R#   R%   R	   R   R   R   R   R   (   R   R   R   (    (    s@   /tmp/pip-build-isqEY4/keras/keras/layers/advanced_activations.pyR   ‚   s    N(
   R   R   R   R   t   legacy_prelu_supportR$   R
   R8   R   R   (    (    (    s@   /tmp/pip-build-isqEY4/keras/keras/layers/advanced_activations.pyR   1   s   		t   ELUc           B@  s,   e  Z d  Z d d „ Z d „  Z d „  Z RS(   sB  Exponential Linear Unit.

    It follows:
    `f(x) = alpha * (exp(x) - 1.) for x < 0`,
    `f(x) = x for x >= 0`.

    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    # Output shape
        Same shape as the input.

    # Arguments
        alpha: scale for the negative factor.

    # References
        - [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](https://arxiv.org/abs/1511.07289v1)
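
    # Example
        A minimal usage sketch; the surrounding model is illustrative:

    ```python
        from keras.models import Sequential
        from keras.layers import Dense
        from keras.layers.advanced_activations import ELU

        model = Sequential()
        model.add(Dense(64, input_shape=(32,)))
        # For x < 0 the output saturates smoothly towards -alpha.
        model.add(ELU(alpha=1.0))
    ```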
    g      ð?c         K@  s5   t  t |  ƒ j |   t |  _ t j | ƒ |  _ d  S(   N(   R	   RA   R
   R   R   R   R   R   (   R   R   R   (    (    s@   /tmp/pip-build-isqEY4/keras/keras/layers/advanced_activations.pyR
   £   s    	c         C@  s   t  j | |  j ƒ S(   N(   R   t   eluR   (   R   R   (    (    s@   /tmp/pip-build-isqEY4/keras/keras/layers/advanced_activations.pyR   ¨   s    c         C@  sQ   i t  |  j ƒ d 6} t t |  ƒ j ƒ  } t t | j ƒ  ƒ t | j ƒ  ƒ ƒ S(   NR   (   t   floatR   R	   RA   R   R   R   R   (   R   R   R   (    (    s@   /tmp/pip-build-isqEY4/keras/keras/layers/advanced_activations.pyR   «   s    (   R   R   R   R
   R   R   (    (    (    s@   /tmp/pip-build-isqEY4/keras/keras/layers/advanced_activations.pyRA      s   	t   ThresholdedReLUc           B@  s/   e  Z d  Z d d „ Z d d „ Z d „  Z RS(   s:  Thresholded Rectified Linear Unit.

    It follows:
    `f(x) = x for x > theta`,
    `f(x) = 0 otherwise`.

    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    # Output shape
        Same shape as the input.

    # Arguments
        theta: float >= 0. Threshold location of activation.

    # References
        - [Zero-Bias Autoencoders and the Benefits of Co-Adapting Features](http://arxiv.org/abs/1402.3337)
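
    # Example
        A minimal usage sketch; the surrounding model is illustrative:

    ```python
        from keras.models import Sequential
        from keras.layers import Dense
        from keras.layers.advanced_activations import ThresholdedReLU

        model = Sequential()
        model.add(Dense(64, input_shape=(32,)))
        # Activations at or below theta are zeroed out.
        model.add(ThresholdedReLU(theta=1.0))
    ```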
    g      ð?c         K@  s5   t  t |  ƒ j |   t |  _ t j | ƒ |  _ d  S(   N(   R	   RD   R
   R   R   R   R   t   theta(   R   RE   R   (    (    s@   /tmp/pip-build-isqEY4/keras/keras/layers/advanced_activations.pyR
   Ç   s    	c         C@  s#   | t  j | |  j k t  j ƒ  ƒ S(   N(   R   t   castRE   t   floatx(   R   R   R<   (    (    s@   /tmp/pip-build-isqEY4/keras/keras/layers/advanced_activations.pyR   Ì   s    c         C@  sQ   i t  |  j ƒ d 6} t t |  ƒ j ƒ  } t t | j ƒ  ƒ t | j ƒ  ƒ ƒ S(   NRE   (   RC   RE   R	   RD   R   R   R   R   (   R   R   R   (    (    s@   /tmp/pip-build-isqEY4/keras/keras/layers/advanced_activations.pyR   Ï   s    N(   R   R   R   R
   R$   R   R   (    (    (    s@   /tmp/pip-build-isqEY4/keras/keras/layers/advanced_activations.pyRD   ±   s   N(   t
   __future__R    t    R   R   R   t   engineR   R   R   R   t   legacyR   R   R   RA   RD   (    (    (    s@   /tmp/pip-build-isqEY4/keras/keras/layers/advanced_activations.pyt   <module>   s   $\$