
from __future__ import absolute_import, print_function, division

from .nnet import (
    CrossentropyCategorical1Hot, CrossentropyCategorical1HotGrad,
    CrossentropySoftmax1HotWithBiasDx, CrossentropySoftmaxArgmax1HotWithBias,
    LogSoftmax, Prepend_scalar_constant_to_each_row,
    Prepend_scalar_to_each_row, Softmax, SoftmaxGrad, SoftmaxWithBias,
    binary_crossentropy, categorical_crossentropy,
    crossentropy_categorical_1hot, crossentropy_categorical_1hot_grad,
    crossentropy_softmax_1hot, crossentropy_softmax_1hot_with_bias,
    crossentropy_softmax_1hot_with_bias_dx,
    crossentropy_softmax_argmax_1hot_with_bias,
    crossentropy_softmax_max_and_argmax_1hot,
    crossentropy_softmax_max_and_argmax_1hot_with_bias,
    crossentropy_to_crossentropy_with_softmax,
    crossentropy_to_crossentropy_with_softmax_with_bias,
    graph_merge_softmax_with_crossentropy_softmax, h_softmax,
    logsoftmax, logsoftmax_op, prepend_0_to_each_row, prepend_1_to_each_row,
    prepend_scalar_to_each_row, relu, softmax, softmax_grad, softmax_graph,
    softmax_op, softmax_simplifier, softmax_with_bias, elu,
    confusion_matrix, softsign)
from . import opt
from .conv import ConvOp
from .Conv3D import *
from .ConvGrad3D import *
from .ConvTransp3D import *
from .sigm import (softplus, sigmoid, sigmoid_inplace, scalar_sigmoid,
                   ultra_fast_sigmoid, hard_sigmoid)
from .bn import batch_normalization

import warnings

from .abstract_conv import conv2d as abstract_conv2d
from .abstract_conv import conv2d_grad_wrt_inputs
from .abstract_conv import conv3d


def conv2d(input, filters, input_shape=None, filter_shape=None,
           border_mode='valid', subsample=(1, 1), filter_flip=True,
           image_shape=None, filter_dilation=(1, 1), **kwargs):
    """
    This function will build the symbolic graph for convolving a mini-batch of a
    stack of 2D inputs with a set of 2D filters. The implementation is modelled
    after Convolutional Neural Networks (CNN).


    Parameters
    ----------
    input: symbolic 4D tensor
        Mini-batch of feature map stacks, of shape
        (batch size, input channels, input rows, input columns).
        See the optional parameter ``input_shape``.

    filters: symbolic 4D tensor
        Set of filters used in CNN layer of shape
        (output channels, input channels, filter rows, filter columns).
        See the optional parameter ``filter_shape``.

    input_shape: None, tuple/list of len 4 of int or Constant variable
        The shape of the input parameter.
        Optional, possibly used to choose an optimal implementation.
        You can give ``None`` for any element of the list to specify that this
        element is not known at compile time.

    filter_shape: None, tuple/list of len 4 of int or Constant variable
        The shape of the filters parameter.
        Optional, possibly used to choose an optimal implementation.
        You can give ``None`` for any element of the list to specify that this
        element is not known at compile time.

    border_mode: str, int or tuple of two int
        One of the following:

        ``'valid'``: apply filter wherever it completely overlaps with the
            input. Generates output of shape: input shape - filter shape + 1
        ``'full'``: apply filter wherever it partly overlaps with the input.
            Generates output of shape: input shape + filter shape - 1
        ``'half'``: pad input with a symmetric border of ``filter rows // 2``
            rows and ``filter columns // 2`` columns, then perform a valid
            convolution. For filters with an odd number of rows and columns, this
            leads to the output shape being equal to the input shape.
        ``int``: pad input with a symmetric border of zeros of the given
            width, then perform a valid convolution.
        ``(int1, int2)``: pad input with a symmetric border of ``int1`` rows
            and ``int2`` columns, then perform a valid convolution.
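
        As a worked example of the shape formulas above: in ``'valid'`` mode,
        a 7x7 input convolved with a 3x3 filter gives a 5x5 output
        (7 - 3 + 1 = 5), while ``'full'`` mode gives a 9x9 output
        (7 + 3 - 1 = 9).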

    subsample: tuple of len 2
        Factor by which to subsample the output.
        Also called strides elsewhere.

    filter_flip: bool
        If ``True``, will flip the filter rows and columns
        before sliding them over the input. This operation is normally referred
        to as a convolution, and this is the default. If ``False``, the filters
        are not flipped and the operation is referred to as a cross-correlation.

    image_shape: None, tuple/list of len 4 of int or Constant variable
        Deprecated alias for input_shape.

    filter_dilation: tuple of len 2
        Factor by which to dilate the filters, i.e. the stride between the
        input samples seen by adjacent filter taps.
        Also called dilation elsewhere.

    kwargs: Any other keyword arguments are accepted for backwards
            compatibility, but will be ignored.

    Returns
    -------
    Symbolic 4D tensor
        Set of feature maps generated by convolutional layer. Tensor is
        of shape (batch size, output channels, output rows, output columns)

    Notes
    -----
        If cuDNN is available, it will be used on the
        GPU. Otherwise, the *CorrMM* convolution (a "caffe-style"
        convolution) will be used.

        This interface is only supported in Theano 0.8, or in the
        development version until 0.8 is released.

        The parameter filter_dilation is an implementation of `dilated
        convolution <https://arxiv.org/pdf/1511.07122v3.pdf>`_.
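
    Examples
    --------
    A minimal usage sketch; the variable names and the parameter values
    below are illustrative only:

    >>> import theano
    >>> from theano import tensor as T
    >>> from theano.tensor.nnet import conv2d
    >>> x = T.tensor4('x')  # (batch size, input channels, rows, columns)
    >>> w = T.tensor4('w')  # (output channels, input channels, rows, columns)
    >>> y = conv2d(x, w, border_mode='valid', subsample=(1, 1))
    >>> f = theano.function([x, w], y)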

    """
    if 'imshp_logical' in kwargs or 'kshp_logical' in kwargs:
        raise ValueError(
            "Keyword arguments 'imshp_logical' and 'kshp_logical' for conv2d "
            "are not supported anymore (and have not been a reliable way to "
            "perform upsampling). That feature is still available by calling "
            "theano.tensor.nnet.conv.conv2d() for the time being.")

    if len(kwargs.keys()) > 0:
        warnings.warn(str(kwargs.keys()) +
                      " are now deprecated in "
                      "`tensor.nnet.abstract_conv.conv2d` interface "
                      "and will be ignored.",
                      stacklevel=2)

    if image_shape is not None:
        warnings.warn("The `image_shape` keyword argument to "
                      "`tensor.nnet.conv2d` is deprecated, it has been "
                      "renamed to `input_shape`.",
                      stacklevel=2)
        if input_shape is None:
            input_shape = image_shape
        else:
            raise ValueError("input_shape and image_shape should not"
                             " be provided at the same time.")

    return abstract_conv2d(input, filters, input_shape, filter_shape,
                           border_mode, subsample, filter_flip,
                           filter_dilation)


def conv2d_transpose(input, filters, output_shape, filter_shape=None,
                     border_mode='valid', input_dilation=(1, 1),
                     filter_flip=True, filter_dilation=(1, 1)):
    """
    This function will build the symbolic graph for applying a transposed
    convolution over a mini-batch of a stack of 2D inputs with a set of 2D
    filters.


    Parameters
    ----------
    input: symbolic 4D tensor
        Mini-batch of feature map stacks, of shape
        (batch size, input channels, input rows, input columns).
        See the optional parameter ``input_shape``.

    filters: symbolic 4D tensor
        Set of filters used in CNN layer of shape
        (input channels, output channels, filter rows, filter columns).
        See the optional parameter ``filter_shape``. **Note: the order for
        ``output_channels`` and ``input_channels`` is reversed with respect to
        ``conv2d``.**

    output_shape: tuple/list of len 4 of int or Constant variable
        The shape of the output of ``conv2d_transpose``. The last two elements
        are allowed to be ``tensor.scalar`` variables.

    filter_shape: None, tuple/list of len 4 of int or Constant variable
        The shape of the filters parameter.
        Optional, possibly used to choose an optimal implementation.
        You can give ``None`` for any element of the list to specify that this
        element is not known at compile time.

    border_mode: str, int or tuple of two int
        Refers to the ``border_mode`` argument of the corresponding forward
        (non-transposed) convolution. See the argument description in
        ``conv2d``. The ``padding`` applied by the forward convolution
        corresponds to ``cropping`` of the output of the transposed one:
        ``valid`` means no cropping, ``full`` means maximal cropping.

    input_dilation: tuple of len 2
        Corresponds to ``subsample`` (also called strides elsewhere) in the
        non-transposed convolution.

    filter_flip: bool
        If ``True``, will flip the filter rows and columns
        before sliding them over the input. This operation is normally referred
        to as a convolution, and this is the default. If ``False``, the filters
        are not flipped and the operation is referred to as a cross-correlation.

    filter_dilation: tuple of len 2
        Factor by which to dilate the filters, i.e. the stride between the
        input samples seen by adjacent filter taps.
        Also called dilation elsewhere.

    Returns
    -------
    Symbolic 4D tensor
        Set of feature maps generated by the transposed convolution. Tensor is
        of shape (batch size, output channels, output rows, output columns)

    Notes
    -----
        If cuDNN is available, it will be used on the
        GPU. Otherwise, the *CorrMM* convolution (a "caffe-style"
        convolution) will be used.

        This operation is also sometimes called "deconvolution".

        The parameter filter_dilation is an implementation of `dilated
        convolution <https://arxiv.org/pdf/1511.07122v3.pdf>`_.
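
    Examples
    --------
    A minimal usage sketch; the variable names and the output/filter shapes
    below are illustrative only:

    >>> from theano import tensor as T
    >>> from theano.tensor.nnet import conv2d_transpose
    >>> y = T.tensor4('y')  # (batch size, input channels, rows, columns)
    >>> w = T.tensor4('w')  # (input channels, output channels, rows, columns)
    >>> # With the default border_mode='valid', the forward convolution of a
    >>> # 9x9 input with a 3x3 filter gives 7x7, so y is expected to be
    >>> # (16, 1, 7, 7) for this output_shape.
    >>> x = conv2d_transpose(y, w, output_shape=(16, 1, 9, 9),
    ...                      filter_shape=(1, 1, 3, 3))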

    """
    return conv2d_grad_wrt_inputs(output_grad=input,
                                  filters=filters,
                                  input_shape=output_shape,
                                  filter_shape=filter_shape,
                                  border_mode=border_mode,
                                  subsample=input_dilation,
                                  filter_flip=filter_flip,
                                  filter_dilation=filter_dilation)