ó
àÆ÷Xc           @` sJ  d  Z  d d l m Z m Z m Z d d l Z d d l m Z m Z d d l	 Z	 d d l
 Z
 d d l m Z m Z d d l m Z m Z d d l m Z d d l m Z m Z d d	 l m Z d d l Z d d l Z d d l Z y6 d d
 l m Z m Z m Z d d l m Z e  Z! Wn e" k
 r1e# Z! n Xd Z$ e j% d ƒ Z& d d „ Z( d d „ Z) d d „ Z* d „  Z+ d d „ Z, d „  Z- d d „ Z. d „  Z/ d d „ Z0 d d d d7 e  d8 d „ Z1 d d d d9 e  d: d „ Z2 d d d; e  d< d „ Z3 d d d= e  d> d „ Z4 d d d? e  d@ d „ Z5 d d dA e  dB d „ Z6 e  d  „ Z7 e  d! „ Z8 d d e  d" „ Z9 d# e f d$ „  ƒ  YZ: d% e: f d& „  ƒ  YZ; d' e; f d( „  ƒ  YZ< d) e; f d* „  ƒ  YZ= d+ e: f d, „  ƒ  YZ> d- e> f d. „  ƒ  YZ? d/ e> f d0 „  ƒ  YZ@ d1 e: f d2 „  ƒ  YZA d3 eA f d4 „  ƒ  YZB d5 eA f d6 „  ƒ  YZC d S(C   s   
Abstract conv interface
i    (   t   absolute_importt   print_functiont   divisionN(   t   reraiset   integer_types(   t   as_tensor_variablet   patternbroadcast(   t   get_scalar_constant_valuet   NotScalarConstantError(   t   Assert(   t   Applyt   Op(   t   xrange(   t   _valfrommodet   _bvalfromboundaryt   convolve(   t   _convolve2ds   restructuredtext ens    theano.tensor.nnet.abstract_convc         ` sÜ   |  d |  d } ‰ | d | d } ‰ ˆ d k rT t j t ˆ ƒ d d ƒ‰ n  t ˆ  t ƒ rš t ‡  ‡ ‡ ‡ ‡ f d †  t t ˆ ƒ ƒ Dƒ ƒ } n4 t ‡  ‡ ‡ ‡ ‡ f d †  t t ˆ ƒ ƒ Dƒ ƒ } | | f | S(   s8  
    This function compute the output shape of convolution operation.

    Parameters
    ----------
    image_shape: tuple of int (symbolic or numeric) corresponding to the input
        image shape. Its four (or five) element must correspond respectively
        to: batch size, number of input channels, height and width (and
        possibly depth) of the image. None where undefined.
    kernel_shape: tuple of int (symbolic or numeric) corresponding to the
        kernel shape. Its four (or five) elements must correspond respectively
        to: number of output channels, number of input channels, height and
        width (and possibly depth) of the kernel. None where undefined.
    border_mode: string, int (symbolic or numeric) or tuple of int (symbolic
        or numeric). If it is a string, it must be 'valid', 'half' or 'full'.
        If it is a tuple, its two (or three) elements respectively correspond
        to the padding on height and width (and possibly depth) axis.
    subsample: tuple of int (symbolic or numeric). Its two or three elements
        espectively correspond to the subsampling on height and width (and
        possibly depth) axis.
    filter_dilation: tuple of int (symbolic or numeric). Its two or three
        elements correspond respectively to the dilation on height and width axis.

    Returns
    -------
    output_shape: tuple of int corresponding to the output image shape. Its
        four element must correspond respectively to: batch size, number of
        output channels, height and width of the image. None where undefined.

    i    i   t   dtypet   intc         3` s;   |  ]1 } t  ˆ | ˆ | ˆ  | ˆ | ˆ | ƒ Vq d  S(   N(   t   get_conv_shape_1axis(   t   .0t   i(   t   border_modet   filter_dilationt   imshpt   kshpt	   subsample(    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>K   s   c         3` s7   |  ]- } t  ˆ | ˆ | ˆ  ˆ | ˆ | ƒ Vq d  S(   N(   R   (   R   R   (   R   R   R   R   R   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>O   s   N(   t   Nonet   numpyt   onest   lent
   isinstancet   tuplet   range(   t   image_shapet   kernel_shapeR   R   R   t   bsizet   nkernt   out_shp(    (   R   R   R   R   R   s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   get_conv_output_shape#   s    !i   c         C` së   d |  | | | | g k r d S| d | d } | d k rJ | d } nO | d k rc | d } n6 | d k rx d } n! | } | d k  r™ t d ƒ ‚ n  | d k r² |  | } n |  d | | } | d k rÝ | | } n  | d } | S(	   s1  
    This function compute the output shape of convolution operation.

    Parameters
    ----------
    image_shape: int or None. Corresponds to the input image shape on a
        given axis. None if undefined.
    kernel_shape: int or None. Corresponds to the kernel shape on a given
        axis. None if undefined.
    border_mode: string or int. If it is a string, it must be
        'valid', 'half' or 'full'. If it is an integer, it must correspond to
        the padding on the considered axis.
    subsample: int. It must correspond to the subsampling on the
        considered axis.
    dilation: int. It must correspond to the dilation on the
        considered axis.

    Returns
    -------
    out_shp: int corresponding to the output image shape on the
        considered axis. None if undefined.

    i   t   halfi   t   fullt   validi    s   border_mode must be >= 0N(   R   t
   ValueError(   R"   R#   R   R   t   dilationt   dil_kernel_shapet   padR&   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR   W   s(    	
c         ` sÜ   |  d |  d } ‰ | d | d } ‰ ˆ d k rT t j t ˆ ƒ d d ƒ‰ n  t ˆ  t ƒ rš t ‡  ‡ ‡ ‡ ‡ f d †  t t ˆ ƒ ƒ Dƒ ƒ } n4 t ‡  ‡ ‡ ‡ ‡ f d †  t t ˆ ƒ ƒ Dƒ ƒ } | | f | S(   s  
    This function tries to compute the kernel shape of convolution gradWeights.

    The weights shape can only be computed exactly when subsample is 1 and
    border_mode is not 'half'. If subsample is not 1 or border_mode is 'half',
    this function will return None.

    Parameters
    ----------
    image_shape: tuple of int corresponding to the input image shape. Its
        four (or five) elements must correspond respectively to: batch size,
        number of output channels, height and width of the image. None where
        undefined.
    top_shape: tuple of int (symbolic or numeric) corresponding to the top
        image shape. Its four (or five) element must correspond respectively
        to: batch size, number of output channels, height and width (and
        possibly depth) of the image. None where undefined.
    border_mode: string, int (symbolic or numeric) or tuple of int (symbolic
        or numeric). If it is a string, it must be 'valid', 'half' or 'full'.
        If it is a tuple, its two (or three) elements respectively correspond
        to the padding on height and width (and possibly depth) axis.
    subsample: tuple of int (symbolic or numeric). Its two or three elements
        respectively correspond to the subsampling on height and width (and
        possibly depth) axis.
    filter_dilation: tuple of int (symbolic or numeric). Its two or three
        elements correspond respectively to the dilation on height and
        width axis.

    Returns
    -------
    kernel_shape: tuple of int (symbolic or numeric) corresponding to the
        kernel shape. Its four (or five) elements correspond respectively
        to: number of output channels, number of input channels, height and
        width (and possibly depth) of the kernel. None where undefined.

    i   i   R   R   c         3` s;   |  ]1 } t  ˆ | ˆ | ˆ  | ˆ | ˆ | ƒ Vq d  S(   N(   t    get_conv_gradweights_shape_1axis(   R   R   (   R   R   R   R   t   topshp(    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>»   s   c         3` s7   |  ]- } t  ˆ | ˆ | ˆ  ˆ | ˆ | ƒ Vq d  S(   N(   R/   (   R   R   (   R   R   R   R   R0   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>¿   s   N(   R   R   R   R   R   R    R!   (   R"   t	   top_shapeR   R   R   R%   t   nchanR&   (    (   R   R   R   R   R0   s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   get_conv_gradweights_shape   s    'c         C` s»   d |  | | | | g k r d S| d k s7 | d k r; d S| d k rT | |  } nF | d k rm |  | } n- | d k  rˆ t d ƒ ‚ n  |  d | | } | d k r³ | | } n  | d S(	   só  
    This function tries to compute the image shape of convolution gradWeights.

    The weights shape can only be computed exactly when subsample is 1 and
    border_mode is not 'half'. If subsample is not 1 or border_mode is 'half',
    this function will return None.

    Parameters
    ----------
    image_shape: int or None. Corresponds to the input image shape on a
        given axis. None if undefined.
    top_shape: int or None. Corresponds to the top shape on a given axis.
        None if undefined.
    border_mode: string or int. If it is a string, it must be
        'valid', 'half' or 'full'. If it is an integer, it must correspond to
        the padding on the considered axis.
    subsample: int. It must correspond to the subsampling on the
        considered axis.
    dilation: int. It must correspond to the dilation on the
        considered axis.

    Returns
    -------
    kernel_shape: int or None. Corresponds to the kernel shape on a given
        axis. None if undefined.

    i   R(   R)   R*   i    s   border_mode must be >= 0i   N(   R   R+   (   R"   R1   R   R   R,   R#   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR/   Å   s    c         ` sÜ   | d | d } ‰ |  d |  d } ‰ ˆ d k rT t j t ˆ ƒ d d ƒ‰ n  t ˆ  t ƒ rš t ‡  ‡ ‡ ‡ ‡ f d †  t t ˆ ƒ ƒ Dƒ ƒ } n4 t ‡  ‡ ‡ ‡ ‡ f d †  t t ˆ ƒ ƒ Dƒ ƒ } | | f | S(	   sî  
    This function tries to compute the image shape of convolution gradInputs.

    The image shape can only be computed exactly when subsample is 1.
    If subsample for a dimension is not 1, this function will return None for
    that dimension.

    Parameters
    ----------
    kernel_shape: tuple of int (symbolic or numeric) corresponding to the
        kernel shape. Its four (or five) elements must correspond respectively
        to: number of output channels, number of input channels, height and
        width (and possibly depth) of the kernel. None where undefined.
    top_shape: tuple of int (symbolic or numeric) corresponding to the top
        image shape. Its four (or five) element must correspond respectively
        to: batch size, number of output channels, height and width (and
        possibly depth) of the image. None where undefined.
    border_mode: string, int (symbolic or numeric) or tuple of int (symbolic
        or numeric). If it is a string, it must be 'valid', 'half' or 'full'.
        If it is a tuple, its two (or three) elements respectively correspond
        to the padding on height and width (and possibly depth) axis.
    subsample: tuple of int (symbolic or numeric). Its two or three elements
        respectively correspond to the subsampling on height and width (and
        possibly depth) axis.
    filter_dilation: tuple of int (symbolic or numeric). Its two or three
        elements correspond respectively to the dilation on height and
        width axis.

    Returns
    -------
    image_shape: tuple of int corresponding to the input image shape. Its
        four element must correspond respectively to: batch size, number of
        output channels, height and width of the image. None where undefined.

    i    i   i   R   R   c         3` s;   |  ]1 } t  ˆ | ˆ | ˆ  | ˆ | ˆ | ƒ Vq d  S(   N(   t   get_conv_gradinputs_shape_1axis(   R   R   (   R   R   R   R   R0   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>$  s   c         3` s7   |  ]- } t  ˆ | ˆ | ˆ  ˆ | ˆ | ƒ Vq d  S(   N(   R4   (   R   R   (   R   R   R   R   R0   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>(  s   N(   R   R   R   R   R   R    R!   (   R#   R1   R   R   R   R$   R%   R&   (    (   R   R   R   R   R0   s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   get_conv_gradinputs_shape÷   s    &c         C` sà   d |  | | | | g k r d S| d k r/ d S|  d | d } | d k rZ | d } nO | d k rs | d } n6 | d k rˆ d } n! | } | d k  r© t d ƒ ‚ n  | d k rÆ | | d } n | d | | d } | S(	   sµ  
    This function tries to compute the image shape of convolution gradInputs.

    The image shape can only be computed exactly when subsample is 1.
    If subsample is not 1, this function will return None.

    Parameters
    ----------
    kernel_shape: int or None. Corresponds to the kernel shape on a given
        axis. None if undefined.
    top_shape: int or None. Corresponds to the top shape on a given axis.
        None if undefined.
    border_mode: string or int. If it is a string, it must be
        'valid', 'half' or 'full'. If it is an integer, it must correspond to
        the padding on the considered axis.
    subsample: int. It must correspond to the subsampling on the
        considered axis.
    dilation: int. It must correspond to the dilation on the
        considered axis.

    Returns
    -------
    image_shape: int or None. Corresponds to the input image shape on a
        given axis. None if undefined.

    i   R(   i   R)   R*   i    s   border_mode must be >= 0N(   R   R+   (   R#   R1   R   R   R,   R-   R.   R"   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR4   .  s&    	c         ` sè   t  |  ƒ }  t  | ƒ } t  | ƒ } t |  ƒ t | ƒ k sT t |  ƒ t | ƒ k rX t St |  ƒ d t | ƒ k rx t S| d k	 r¤ t |  ƒ d t | ƒ k r¤ t St |  | | | | ƒ } d „  ‰  t ‡  f d †  t | | ƒ Dƒ ƒ S(   sÑ  
    This function checks if the given image shapes are consistent.

    Parameters
    ----------
    image_shape: tuple of int (symbolic or numeric) corresponding to the input
        image shape. Its four (or five) element must correspond respectively
        to: batch size, number of input channels, height and width (and
        possibly depth) of the image. None where undefined.
    kernel_shape: tuple of int (symbolic or numeric) corresponding to the
        kernel shape. Its four (or five) elements must correspond respectively
        to: number of output channels, number of input channels, height and
        width (and possibly depth) of the kernel. None where undefined.
    output_shape: tuple of int (symbolic or numeric) corresponding to the
        output shape. Its four (or five) elements must correspond respectively
        to: batch size, number of output channels, height and width
        (and possibly depth) of the output. None where undefined.
    border_mode: string, int (symbolic or numeric) or tuple of int (symbolic
        or numeric). If it is a string, it must be 'valid', 'half' or 'full'.
        If it is a tuple, its two (or three) elements respectively correspond
        to the padding on height and width (and possibly depth) axis.
    subsample: tuple of int (symbolic or numeric). Its two or three elements
        respectively correspond to the subsampling on height and width (and
        possibly depth) axis.
    filter_dilation: tuple of int (symbolic or numeric). Its two or three
        elements correspond respectively to the dilation on height and
        width axis.

    Returns
    -------
    Returns False if a convolution with the given input shape, kernel shape
    and parameters would not have produced the given output shape.

    Returns True in all other cases: if the given output shape matches the
    computed output shape, but also if the shape could not be checked because
    because the shape contains symbolic values.

    i   c         S` sg   |  d  k s | d  k r t Sy2 t |  ƒ }  t | ƒ } t |  ƒ t | ƒ k SWn t k
 rb t SXd  S(   N(   R   t   TrueR   R   R   (   t   givent   computed(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt	   check_dim¡  s    c         3` s$   |  ] \ } } ˆ  | | ƒ Vq d  S(   N(    (   R   R7   R8   (   R9   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>¬  s   N(   R    R   t   FalseR   R'   t   allt   zip(   R"   R#   t   output_shapeR   R   R   t   computed_output_shape(    (   R9   s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   check_conv_gradinputs_shapeh  s    )0(	c         C` s0  g  } xt  |  ƒ D]\ } } yv t | ƒ } | d k  rb | d k  r‡ t d | | f ƒ ‚ q‡ n% | d k r‡ t d | | f ƒ ‚ n  | j | ƒ Wq t k
 r!| d k  ré t d | ƒ } | j | | t j j | d ƒ ƒ ƒ q"t d | ƒ } | j | | t j j	 | d ƒ ƒ ƒ q Xq Wt
 | ƒ S(   s¼  This function adds Assert nodes that check if shape is a valid convolution shape.

    The first two dimensions should be larger than or equal to zero. The convolution
    dimensions should be larger than zero.

    Parameters
    ----------
    shape: tuple of int (symbolic or numeric) corresponding to the input, output or
        kernel shape of a convolution. For input and output, the first elements should
        should be the batch size and number of channels. For kernels, the first and
        second elements should contain the number of input and output channels.
        The remaining dimensions are the convolution dimensions.

    Returns
    -------
    Returns a tuple similar to the given `shape`. For constant elements in `shape`,
    the function checks the value and raises a `ValueError` if the dimension is invalid.
    The elements that are not constant are wrapped in an `Assert` op that checks the
    dimension at run time.
    i   i    sA   The convolution would produce an invalid shape (dim[%d]: %d < 0).sB   The convolution would produce an invalid shape (dim[%d]: %d <= 0).s=   The convolution would produce an invalid shape (dim[%d] < 0).s>   The convolution would produce an invalid shape (dim[%d] <= 0).(   t	   enumerateR   R+   t   appendR   R	   t   theanot   tensort   get   gtR    (   t   shapet	   out_shapeR   t   nt   const_nt
   assert_shp(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   assert_conv_shape°  s"    (-s   Unexpected shape.c         C` s¡   | d k s t j j j r  |  S|  j } g  } xN t |  j ƒ D]= } | | d k	 r? | j t j	 j
 | | | | ƒ ƒ q? q? W| r™ t | ƒ |  | Œ S|  Sd S(   s  Wraps `x` in an `Assert` to check its shape.

    Parameters
    ----------
    x : Tensor
        x will be wrapped in an `Assert`.
    expected_shape : tuple or list
        The expected shape of `x`. The size of a dimension can be None,
        which means it will not be checked.
    msg : str
        The error message of the `Assert`.

    Returns
    -------
    Tensor
        `x` wrapped in an `Assert`. At execution time, this will throw an
        AssertionError if the shape of `x` does not match `expected_shape`.
        If `expected_shape` is None or contains only Nones, the function
        will return `x` directly.

    N(   R   RB   t   configt   convt   assert_shapeRF   R!   t   ndimRA   RC   t   eqR	   (   t   xt   expected_shapet   msgRF   t   testsR   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRN   Ú  s    	+R*   c   	      C` sR   t  |  ƒ }  t  | ƒ } t d | d | d | d | d | d | ƒ } | |  | ƒ S(   s1  This function will build the symbolic graph for convolving a mini-batch of a
    stack of 2D inputs with a set of 2D filters. The implementation is modelled
    after Convolutional Neural Networks (CNN).

    Refer to :func:`nnet.conv2d <theano.tensor.nnet.conv2d>` for a more detailed documentation.
    R   R   R   R   t   filter_flipR   (   R   t   AbstractConv2d(	   t   inputt   filterst   input_shapet   filter_shapeR   R   RU   R   t   conv_op(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   conv2dý  s    	c   	      C` sR   t  |  ƒ }  t  | ƒ } t d | d | d | d | d | d | ƒ } | |  | ƒ S(   s
  
    This function will build the symbolic graph for convolving a mini-batch of a
    stack of 3D inputs with a set of 3D filters. The implementation is modelled
    after Convolutional Neural Networks (CNN).


    Parameters
    ----------
    input: symbolic 5D tensor
        Mini-batch of feature map stacks, of shape
        (batch size, input channels, input depth, input rows, input columns).
        See the optional parameter ``input_shape``.

    filters: symbolic 5D tensor
        Set of filters used in CNN layer of shape
        (output channels, input channels, filter depth, filter rows, filter columns).
        See the optional parameter ``filter_shape``.

    input_shape: None, tuple/list of len 5 of int or Constant variable
        The shape of the input parameter.
        Optional, possibly used to choose an optimal implementation.
        You can give ``None`` for any element of the list to specify that this
        element is not known at compile time.

    filter_shape: None, tuple/list of len 5 of int or Constant variable
        The shape of the filters parameter.
        Optional, possibly used to choose an optimal implementation.
        You can give ``None`` for any element of the list to specify that this
        element is not known at compile time.

    border_mode: str, int or tuple of three int
        Either of the following:

        ``'valid'``: apply filter wherever it completely overlaps with the
            input. Generates output of shape: input shape - filter shape + 1
        ``'full'``: apply filter wherever it partly overlaps with the input.
            Generates output of shape: input shape + filter shape - 1
        ``'half'``: pad input with a symmetric border of ``filter // 2``,
            then perform a valid convolution. For filters with an odd
            number of slices, rows and columns, this leads to the output
            shape being equal to the input shape.
        ``int``: pad input with a symmetric border of zeros of the given
            width, then perform a valid convolution.
        ``(int1, int2, int3)``
            pad input with a symmetric border of ``int1``, ``int2`` and
            ``int3`` columns, then perform a valid convolution.

    subsample: tuple of len 3
        Factor by which to subsample the output.
        Also called strides elsewhere.

    filter_flip: bool
        If ``True``, will flip the filter x, y and z dimensions before
        sliding them over the input. This operation is normally
        referred to as a convolution, and this is the default. If
        ``False``, the filters are not flipped and the operation is
        referred to as a cross-correlation.

    filter_dilation: tuple of len 3
        Factor by which to subsample (stride) the input.
        Also called dilation elsewhere.

    Returns
    -------
    Symbolic 5D tensor
        Set of feature maps generated by convolutional layer. Tensor is
        is of shape (batch size, output channels, output depth,
        output rows, output columns)

    Notes
    -----
        If cuDNN is available, it will be used on the
        GPU. Otherwise, it is the *Corr3dMM* convolution that will be used
        "caffe style convolution".

        This is only supported in Theano 0.8 or the development
        version until it is released.

    R   R   R   R   RU   R   (   R   t   AbstractConv3d(	   RW   RX   RY   RZ   R   R   RU   R   R[   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   conv3d  s    X	c         C` s†  t  | ƒ } t  |  ƒ }  xB d d g D]4 } t | | t j j t t d ƒ f ƒ s% t ‚ q% WxB d d g D]4 } t | | t j j	 t j j t f ƒ sj t ‚ qj W| d k	 rü xK d d d d g D]4 } t | | t j j t t d ƒ f ƒ sÁ t ‚ qÁ Wn  t
 | ƒ }	 x: d d g D], } t | | t j j	 ƒ rd |	 | <qqWt d |	 d | d | d | d	 | d
 | ƒ }
 |
 | |  | d ƒ S(   sï  Compute conv output gradient w.r.t its inputs

    This function builds the symbolic graph for getting the
    gradient of the output of a convolution (namely output_grad)
    w.r.t the input of the convolution, given a set of 2D filters
    used by the convolution, such that the output_grad is upsampled
    to the input_shape.

    Parameters
    ----------
    output_grad : symbolic 4D tensor
        mini-batch of feature map stacks, of shape (batch size, input
        channels, input rows, input columns).  This is the tensor that
        will be upsampled or the output gradient of the convolution
        whose gradient will be taken with respect to the input of the
        convolution.
    filters : symbolic 4D tensor
        set of filters used in CNN layer of shape (output channels,
        input channels, filter rows, filter columns).  See the
        optional parameter ``filter_shape``.
    input_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2
        The shape of the input (upsampled) parameter.
        A tuple/list of len 4, with the first two dimensions
        being None or int or Constant and the last two dimensions being
        Tensor or int or Constant.
        Not Optional, since given the output_grad shape
        and the subsample values, multiple input_shape may be
        plausible.
    filter_shape : None or [None/int/Constant] * 4
        The shape of the filters parameter. None or a tuple/list of len 4.
        Optional, possibly used  to choose an optimal implementation.
        You can give ``None`` for any element of the list to specify that
        this element is not known at compile time.
    border_mode : str, int or tuple of two int
        Either of the following:

          ``'valid'``
            apply filter wherever it completely overlaps with the
            input. Generates output of shape: input shape - filter
            shape + 1

          ``'full'``
            apply filter wherever it partly overlaps with the input.
            Generates output of shape: input shape + filter shape - 1

          ``'half'``
            pad input with a symmetric border of ``filter rows // 2``
            rows and ``filter columns // 2`` columns, then perform a
            valid convolution. For filters with an odd number of rows
            and columns, this leads to the output shape being equal to
            the input shape. It is known as 'same' elsewhere.

          ``int``
            pad input with a symmetric border of zeros of the given
            width, then perform a valid convolution.

          ``(int1, int2)``
            pad input with a symmetric border of ``int1`` rows and
            ``int2`` columns, then perform a valid convolution.

    subsample : tuple of len 2
        The subsampling used in the forward pass.  Also called strides
        elsewhere.
    filter_flip : bool
        If ``True``, will flip the filter rows and columns before
        sliding them over the input. This operation is normally
        referred to as a convolution, and this is the default. If
        ``False``, the filters are not flipped and the operation is
        referred to as a cross-correlation.
    filter_dilation : tuple of len 2
        The filter dilation used in the forward pass.
        Also known as input striding.

    Returns
    -------
    symbolic 4D tensor
        set of feature maps generated by convolutional layer. Tensor
        is of shape (batch size, output channels, output rows, output
        columns)

    Notes
    -----

    :note: If cuDNN is available, it will be used on the
        GPU. Otherwise, it is the *CorrMM* convolution that will be used
        "caffe style convolution".

    :note: This is only supported in Theano 0.8 or the development
        version until it is released.

    i    i   i   i   R   R   R   R   RU   R   iþÿÿÿN(   R   R   RB   RC   t   TensorConstantR   t   typeR   t   AssertionErrort   TensorVariablet   listt   AbstractConv2d_gradInputs(   t   output_gradRX   RY   RZ   R   R   RU   R   t   dimt   numerical_input_shapet   grad_input_op(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   conv2d_grad_wrt_inputsz  s0    d	"	c         C` s  t  | ƒ } t  |  ƒ }  xB d d g D]4 } t | | t j j t t d ƒ f ƒ s% t ‚ q% WxE d d d g D]4 } t | | t j j	 t j j t f ƒ sm t ‚ qm W| d k	 rxN d d d d d g D]4 } t | | t j j t t d ƒ f ƒ sÇ t ‚ qÇ Wn  t
 | ƒ }	 x= d d d g D], } t | | t j j	 ƒ rd |	 | <qqWt d |	 d | d | d	 | d
 | d | ƒ }
 |
 | |  | d ƒ S(   s  Compute conv output gradient w.r.t its inputs

    This function builds the symbolic graph for getting the
    gradient of the output of a convolution (namely output_grad)
    w.r.t the input of the convolution, given a set of 3D filters
    used by the convolution, such that the output_grad is upsampled
    to the input_shape.

    Parameters
    ----------
    output_grad : symbolic 5D tensor
        mini-batch of feature map stacks, of shape (batch size, input
        channels, input depth, input rows, input columns).  This is the
        tensor that will be upsampled or the output gradient of the
        convolution whose gradient will be taken with respect to the
        input of the convolution.
    filters : symbolic 5D tensor
        set of filters used in CNN layer of shape (output channels,
        input channels, filter depth, filter rows, filter columns).
        See the optional parameter ``filter_shape``.
    input_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2
        The shape of the input (upsampled) parameter.
        A tuple/list of len 5, with the first two dimensions
        being None or int or Constant and the last three dimensions being
        Tensor or int or Constant.
        Not Optional, since given the output_grad shape
        and the subsample values, multiple input_shape may be
        plausible.
    filter_shape : None or [None/int/Constant] * 5
        The shape of the filters parameter. None or a tuple/list of len 5.
        Optional, possibly used  to choose an optimal implementation.
        You can give ``None`` for any element of the list to specify that
        this element is not known at compile time.
    border_mode : str, int or tuple of three int
        Either of the following:

          ``'valid'``
            apply filter wherever it completely overlaps with the
            input. Generates output of shape: input shape - filter
            shape + 1

          ``'full'``
            apply filter wherever it partly overlaps with the input.
            Generates output of shape: input shape + filter shape - 1

          ``'half'``
            pad input with a symmetric border of ``filter // 2``,
            then perform a valid convolution. For filters with an odd
            number of slices, rows and columns, this leads to the output
            shape being equal to the input shape. It is known as 'same'
            elsewhere.

          ``int``
            pad input with a symmetric border of zeros of the given
            width, then perform a valid convolution.

          ``(int1, int2, int3)``
            pad input with a symmetric border of ``int1``, ``int2`` and
            ``int3`` columns, then perform a valid convolution.

    subsample : tuple of len 3
        The subsampling used in the forward pass.  Also called strides
        elsewhere.
    filter_flip : bool
        If ``True``, will flip the filter x, y and z dimensions before
        sliding them over the input. This operation is normally
        referred to as a convolution, and this is the default. If
        ``False``, the filters are not flipped and the operation is
        referred to as a cross-correlation.
    filter_dilation : tuple of len 3
        The filter dilation used in the forward pass.
        Also known as input striding.

    Returns
    -------
    symbolic 5D tensor
        set of feature maps generated by convolutional layer. Tensor
        is of shape (batch size, output channels, output depth,
        output rows, output columns)

    Notes
    -----

    :note: If cuDNN is available, it will be used on the
        GPU. Otherwise, it is the *Corr3dMM* convolution that will be used
        "caffe style convolution".

    :note: This is only supported in Theano 0.8 or the development
        version until it is released.

    i    i   i   i   i   R   R   R   R   RU   R   iýÿÿÿN(   R   R   RB   RC   R_   R   R`   R   Ra   Rb   Rc   t   AbstractConv3d_gradInputs(   Re   RX   RY   RZ   R   R   RU   R   Rf   Rg   Rh   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   conv3d_grad_wrt_inputs  s0    d	"	c         C` s†  t  |  ƒ }  t  | ƒ } xB d d g D]4 } t | | t j j t t d ƒ f ƒ s% t ‚ q% WxB d d g D]4 } t | | t j j	 t j j t f ƒ sj t ‚ qj W| d k	 rü xK d d d d g D]4 } t | | t j j t t d ƒ f ƒ sÁ t ‚ qÁ Wn  t
 | ƒ }	 x: d d g D], } t | | t j j	 ƒ rd |	 | <qqWt d | d |	 d | d | d	 | d
 | ƒ }
 |
 |  | | d ƒ S(   sè  Compute conv output gradient w.r.t its weights

    This function will build the symbolic graph for getting the
    gradient of the output of a convolution (output_grad) w.r.t its wights.

    Parameters
    ----------
    input : symbolic 4D tensor
        mini-batch of feature map stacks, of shape (batch size, input
        channels, input rows, input columns).  This is the input of
        the convolution in the forward pass.
    output_grad : symbolic 4D tensor
        mini-batch of feature map stacks, of shape (batch size, input
        channels, input rows, input columns).  This is the gradient of
        the output of convolution.
    filter_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2
        The shape of the filter parameter.  A tuple/list of len 4, with the
        first two dimensions being None or int or Constant and the last two
        dimensions being Tensor or int or Constant.
        Not Optional, since given the output_grad shape and
        the input_shape, multiple filter_shape may be plausible.
    input_shape : None or [None/int/Constant] * 4
        The shape of the input parameter. None or a tuple/list of len 4.
        Optional, possibly used to choose an optimal implementation.
        You can give ``None`` for any element of the list to specify
        that this element is not known at compile time.
    border_mode : str, int or tuple of two ints
        Either of the following:

          ``'valid'``
            apply filter wherever it completely overlaps with the
            input. Generates output of shape: input shape - filter
            shape + 1

          ``'full'``
            apply filter wherever it partly overlaps with the input.
            Generates output of shape: input shape + filter shape - 1

          ``'half'``
            pad input with a symmetric border of ``filter rows // 2``
            rows and ``filter columns // 2`` columns, then perform a
            valid convolution. For filters with an odd number of rows
            and columns, this leads to the output shape being equal to
            the input shape. It is known as 'same' elsewhere.

          ``int``
            pad input with a symmetric border of zeros of the given
            width, then perform a valid convolution.

          ``(int1, int2)``
            pad input with a symmetric border of ``int1`` rows and
            ``int2`` columns, then perform a valid convolution.
    subsample : tuple of len 2
        The subsampling used in the forward pass of the convolutional
        operation.  Also called strides elsewhere.
    filter_flip : bool
        If ``True``, will flip the filter rows and columns before
        sliding them over the input. This operation is normally
        referred to as a convolution, and this is the default. If
        ``False``, the filters are not flipped and the operation is
        referred to as a cross-correlation.
    filter_dilation : tuple of len 2
        The filter dilation used in the forward pass.
        Also known as input striding.

    Returns
    -------
    symbolic 4D tensor
        set of feature maps generated by convolutional layer. Tensor
        is of shape (batch size, output channels, output rows, output
        columns)

    Notes
    -----

    :note: If cuDNN is available, it will be used on the
        GPU. Otherwise, it is the *CorrMM* convolution that will be used
        "caffe style convolution".

    :note: This is only supported in Theano 0.8 or the development
        version until it is released.

    i    i   i   i   R   R   R   R   RU   R   iþÿÿÿN(   R   R   RB   RC   R_   R   R`   R   Ra   Rb   Rc   t   AbstractConv2d_gradWeights(   RW   Re   RZ   RY   R   R   RU   R   Rf   t   numerical_filter_shapet   gradWeight_op(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   conv2d_grad_wrt_weightsˆ  s0    \	"	c         C` s  t  |  ƒ }  t  | ƒ } xB d d g D]4 } t | | t j j t t d ƒ f ƒ s% t ‚ q% WxE d d d g D]4 } t | | t j j	 t j j t f ƒ sm t ‚ qm W| d k	 rxN d d d d d g D]4 } t | | t j j t t d ƒ f ƒ sÇ t ‚ qÇ Wn  t
 | ƒ }	 x= d d d g D], } t | | t j j	 ƒ rd |	 | <qqWt d | d |	 d | d	 | d
 | d | ƒ }
 |
 |  | | d ƒ S(   sÿ  Compute conv output gradient w.r.t its weights

    This function will build the symbolic graph for getting the
    gradient of the output of a convolution (output_grad) w.r.t its weights.

    Parameters
    ----------
    input : symbolic 5D tensor
        mini-batch of feature map stacks, of shape (batch size, input
        channels, input depth, input rows, input columns).  This is the input
        of the convolution in the forward pass.
    output_grad : symbolic 5D tensor
        mini-batch of feature map stacks, of shape (batch size, input
        channels, input depth, input rows, input columns).  This is the
        gradient of the output of convolution.
    filter_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2
        The shape of the filter parameter.  A tuple/list of len 5, with the
        first two dimensions being None or int or Constant and the last three
        dimensions being Tensor or int or Constant.
        Not Optional, since given the output_grad shape and
        the input_shape, multiple filter_shape may be plausible.
    input_shape : None or [None/int/Constant] * 5
        The shape of the input parameter. None or a tuple/list of len 5.
        Optional, possibly used to choose an optimal implementation.
        You can give ``None`` for any element of the list to specify
        that this element is not known at compile time.
    border_mode : str, int or tuple of two ints
        Either of the following:

          ``'valid'``
            apply filter wherever it completely overlaps with the
            input. Generates output of shape: input shape - filter
            shape + 1

          ``'full'``
            apply filter wherever it partly overlaps with the input.
            Generates output of shape: input shape + filter shape - 1

          ``'half'``
            pad input with a symmetric border of ``filter rows // 2``
            rows and ``filter columns // 2`` columns, then perform a
            valid convolution. For filters with an odd number of rows
            and columns, this leads to the output shape being equal to
            the input shape. It is known as 'same' elsewhere.

          ``int``
            pad input with a symmetric border of zeros of the given
            width, then perform a valid convolution.

          ``(int1, int2, int3)``
            pad input with a symmetric border of ``int1``, ``int2`` and
            ``int3``, then perform a valid convolution.
    subsample : tuple of len 3
        The subsampling used in the forward pass of the convolutional
        operation.  Also called strides elsewhere.
    filter_flip : bool
        If ``True``, will flip the filters before sliding them over the
        input. This operation is normally referred to as a convolution,
        and this is the default. If ``False``, the filters are not
        flipped and the operation is referred to as a cross-correlation.
    filter_dilation : tuple of len 3
        The filter dilation used in the forward pass.
        Also known as input striding.

    Returns
    -------
    symbolic 5D tensor
        set of feature maps generated by convolutional layer. Tensor
        is of shape (batch size, output channels, output time, output
        rows, output columns)

    Notes
    -----

    :note: If cuDNN is available, it will be used on the
        GPU. Otherwise, it is the *Corr3dMM* convolution that will be used
        "caffe style convolution".

    :note: This is only supported in Theano 0.8 or the development
        version until it is released.

    i    i   i   i   i   R   R   R   R   RU   R   iýÿÿÿN(   R   R   RB   RC   R_   R   R`   R   Ra   Rb   Rc   t   AbstractConv3d_gradWeights(   RW   Re   RZ   RY   R   R   RU   R   Rf   Rm   Rn   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   conv3d_grad_wrt_weights  s0    [	"	c         C` sP   t  d |  d | ƒ j d d ƒ } t  d |  d | ƒ j d d ƒ } | | } | S(   sº  Compute 2D kernel for bilinear upsampling

    This function builds the 2D kernel that can be used to upsample
    a tensor by the given ratio using bilinear interpolation.

    Parameters
    ----------
    ratio: int or Constant/Scalar Theano tensor of int* dtype
        the ratio by which an image will be upsampled by the returned filter
        in the 2D space.

    normalize: bool
        param normalize: indicates whether to normalize the kernel or not.
        Default is True.

    Returns
    -------
    symbolic 2D tensor
        the 2D kernels that can be applied to any given image to upsample it
        by the indicated ratio using bilinear interpolation in two dimensions.

    t   ratiot	   normalizeRQ   i    (   t   bilinear_kernel_1Dt
   dimshuffle(   Rr   Rs   t   hkernt   vkernt   kern(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   bilinear_kernel_2D…  s    !!
c         C` sd   t  j } | j d |  d d t  j j ƒ} | j | | d d d … g ƒ } | r` | |  } n  | S(   s¹  Compute 1D kernel for bilinear upsampling

    This function builds the 1D kernel that can be used to upsample
    a tensor by the given ratio using bilinear interpolation.

    Parameters
    ----------
    ratio: int or Constant/Scalar Theano tensor of int* dtype
        the ratio by which an image will be upsampled by the returned filter
        in the 2D space.

    normalize: bool
        param normalize: indicates whether to normalize the kernel or not.
        Default is True.

    Returns
    -------
    symbolic 1D tensor
        the 1D kernels that can be applied to any given image to upsample it
        by the indicated ratio using bilinear interpolation in one dimension.

    i   R   iþÿÿÿNiÿÿÿÿ(   RB   RC   t   arangeRL   t   floatXt   concatenate(   Rr   Rs   t   Tt	   half_kernRx   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRt   £  s    	""c         C` s(  t  j } y | | } Wn t k
 r0 d } n X|  j d \ } } |  j d d | | f ƒ }	 | j |	 d d … d d … d d … d d … f |	 |	 d d … d d … d d … d d … f f d d ƒ}
 | j |
 d d … d d … d d … d d … f |
 |
 d d … d d … d d … d d … f f d d ƒ}
 | d } d | | d d d } | rit d | d t ƒ } t	 d	 |
 d
 | t
 j t
 j d d … t
 j f d | d | | | f d d d | d f d | d f d t d d ƒ } t	 d	 | d
 | t
 j t
 j t
 j d d … f d | d | | | | f d d d d | f d d | f d t d d ƒ } n t d | d t ƒ } t	 d	 |
 d
 | t
 j t
 j d d … d d … f d | d | | | | f d d d | | f d | | f d t d d ƒ } | j |  j d |  j d | | | | f ƒ S(   s}  Compute bilinear upsampling

    This function will build the symbolic graph for upsampling
    a tensor by the given ratio using bilinear interpolation.

    Parameters
    ----------
    input: symbolic 4D tensor
        mini-batch of feature map stacks, of shape (batch size,
        input channels, input rows, input columns) that will be upsampled.

    ratio: `int or Constant or Scalar Tensor of int* dtype`
        the ratio by which the input is upsampled in the 2D space (row and
        col size).

    batch_size: None, int or Constant variable
        The size of the first dimension of the input variable.
        Optional, possibly used to choose an optimal implementation.
        batch_size will be used only if num_input_channels is not None.

    num_input_channels: None, int or Constant variable
        The size of the second dimension of the input variable.
        Optional, possibly used to choose an optimal implementation.
        num_input_channels will be used only if batch_size is not None.

    use_1D_kernel: bool
        if set to true, row and column will be upsampled seperately by 1D
        kernels, otherwise they are upsampled together using a 2D kernel. The
        final result is the same, only the speed can differ, given factors such
        as upsampling ratio.

    Returns
    -------
    symbolic 4D tensor
        set of feature maps generated by bilinear upsampling. Tensor
        is of shape (batch size, num_input_channels, input row size * ratio,
        input column size * ratio)

    Notes
    -----

    :note: The kernel used for bilinear interpolation is fixed (not learned).

    :note: When the upsampling ratio is even, the last row and column is
        repeated one extra time compared to the first row and column which makes
        the upsampled tensor asymmetrical on both sides. This does not happen when
        the upsampling ratio is odd.

    i   iÿÿÿÿi   Nt   axisi   Rr   Rs   Re   RX   RY   RZ   R   i    R   RU   R   (   i   i   Ni   (   i   i   (   i   i   i   N(   i   i   (   i   i   NN(   i   i   (   RB   RC   t	   TypeErrorR   RF   t   reshapeR|   Rt   R6   Ri   t   npt   newaxisRy   (   RW   Rr   t
   batch_sizet   num_input_channelst   use_1D_kernelR}   t   up_bst   rowt   colt   up_inputt
   concat_matt
   concat_colR.   Rx   t   upsampled_rowt   upsampled_mat(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   bilinear_upsamplingÄ  sj    7	
4:4:
						t   BaseAbstractConvc           B` sV   e  Z d  Z e Z d Z d d d d e d d	 „ Z d
 „  Z	 d „  Z
 d d d „ Z RS(   s÷	  Base class for AbstractConv

    Parameters
    ----------
     convdim: The number of convolution dimensions (2 or 3).

     imshp: None, tuple/list of len ``(2 + convdim)`` of int or Constant variable
        The shape of the input parameter.
        Optional, possibly used to choose an optimal implementation.
        You can give ``None`` for any element of the list to specify that this
        element is not known at compile time.
        imshp is defined w.r.t the forward conv.

     kshp: None, tuple/list of len ``(2 + convdim)`` of int or Constant variable
        The shape of the filters parameter.
        Optional, possibly used to choose an optimal implementation.
        You can give ``None`` for any element of the list to specify that this
        element is not known at compile time.
        kshp is defined w.r.t the forward conv.

     border_mode: str, int or tuple of ``convdim`` ints
        Either of the following:

        ``'valid'``: apply filter wherever it completely overlaps with the
            input. Generates output of shape: input shape - filter shape + 1
        ``'full'``: apply filter wherever it partly overlaps with the input.
            Generates output of shape: input shape + filter shape - 1
        ``'half'``: pad input with a symmetric border of ``filter size // 2``
            in each convolution dimension, then perform a valid convolution.
            For filters with an odd filter size, this leads to the output
            shape being equal to the input shape.
        ``int``: pad input with a symmetric border of zeros of the given
            width, then perform a valid convolution.
        ``(int1, int2)``: (for 2D) pad input with a symmetric border of ``int1``,
            ``int2``, then perform a valid convolution.
        ``(int1, int2, int3)``: (for 3D) pad input with a symmetric border of
            ``int1``, ``int2`` and ``int3``, then perform a valid convolution.

    subsample: tuple of len ``convdim``
        Factor by which to subsample the output.
        Also called strides elsewhere.

    filter_flip: bool
        If ``True``, will flip the filter rows and columns
        before sliding them over the input. This operation is normally referred
        to as a convolution, and this is the default. If ``False``, the filters
        are not flipped and the operation is referred to as a
        cross-correlation.

    filter_dilation: tuple of len ``convdim``
        Factor by which to subsample (stride) the input.
        Also called dilation factor.
    t   convdimR   R   RU   R   R   R   R*   c   
      C` sÑ  | |  _  | d k r' t d | ƒ ‚ n  | d  k r@ d | } n  | d  k rY d | } n  t | t ƒ rx | f | } n  t | t ƒ rÌ t | ƒ | k r´ t d j | | ƒ ƒ ‚ n  t t t	 | ƒ ƒ } n  | d | k rå d } n  t | t ƒ rt
 | ƒ d k p| d k s-t d
 j | | ƒ ƒ ‚ n  | r?t | ƒ n d d | |  _ xh |  j D]] } | d  k	 rZy t | d t ƒWq·t k
 r³t t t d ƒ t j ƒ  d ƒ q·XqZqZW| rÍt | ƒ n d d | |  _ xh |  j D]] }	 |	 d  k	 rèy t |	 d t ƒWqEt k
 rAt t t d ƒ t j ƒ  d ƒ qEXqèqèW| |  _ | |  _ t | ƒ | k r…t d j | ƒ ƒ ‚ n  t | ƒ |  _ t | ƒ | k r¾t d j | ƒ ƒ ‚ n  t | ƒ |  _ d  S(   Ni   i   s)   convolution dimension {} is not supportedi   s3   border mode must have exactly {} values, but was {}i    R*   R)   R(   sj   invalid border_mode {}, which must be either "valid", "full", "half", an integer or a tuple of {} integerst   only_process_constantss6   imshp should be None or a tuple of constant int valuess5   kshp should be None or a tuple of constant int valuess   subsample must have {} elementss%   filter_dilation must have {} elements(   i   i   (   i   (   i   (   i    (   s   valids   fulls   half(   N(   N(   R‘   R+   R   R   R   R    R   t   formatt   mapR   t   minR   R   R6   R   R   t   syst   exc_infoR   R   RU   R   R   (
   t   selfR‘   R   R   R   R   RU   R   t   imshp_it   kshp_i(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   __init__v  sf    		!#		#				c         C` s   t  S(   N(   R:   (   R˜   t   node(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   do_constant_folding·  s    c         C` s¢   |  j  d k rŒ | \ } } | \ } | d | d k s> t ‚ | d | d d } | | d | d 9} | | d | d | d 9} | St d |  j  ƒ ‚ d S(   s6    Useful with the hack in profiling to print the MFlopsi   i   i   i    s$   flops not implemented for convdim={}N(   R‘   Ra   t   NotImplementedError(   R˜   t   inpt   outpt   inputsRX   t   outputst   flops(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR£   ¼  s    	i   c         ` s  t  s t d ƒ ‚ n  | d k r9 t d j | ƒ ƒ ‚ n  t ˆ  t ƒ r[ ˆ  f ˆ j ‰  n  t ˆ  ƒ ˆ j k rŽ t d j ˆ  ˆ j ƒ ƒ ‚ n  t | j	 ˆ j	 | d g ˆ j ˆ  ƒ } t
 j | d | j ƒ} ˆ j	 ˆ j  t ‡  ‡ ‡ f d †  t ˆ j ƒ Dƒ ƒ } t
 j | d ˆ j ƒ} ˆ | t d ƒ t d ƒ f t ‡  f d	 †  t ˆ j ƒ Dƒ ƒ <ˆ j d
 k rQt | ƒ }	 t d ƒ }
 t j ƒ  º t j d t
 j ƒ xŸ t | j	 d ƒ D]Š } x t ˆ j	 d ƒ D]l } xc t | j	 d ƒ D]N } | | | d f c t | | | d f | | | d f d |	 |
 d ƒ 7<qîWqÔWqºWWd QXn· ˆ j d k rüx¥ t | j	 d ƒ D] } xx t ˆ j	 d ƒ D]c } xZ t | j	 d ƒ D]E } | | | d f c t | | | d f | | | d f | ƒ 7<q¨WqŽWqtWn t d ƒ ‚ | S(   sF   
        Basic slow Python 2D or 3D convolution for DebugMode
        sR   AbstractConv perform requires the python package for scipy.signal to be installed.R*   R)   s7   invalid mode {}, which must be either "valid" or "full"s'   invalid dilation {}, expected {} valuesi   R   c         3` s4   |  ]* } ˆ j  ˆ j | d  ˆ  | d  Vq d S(   i   N(   RF   R‘   (   R   R   (   R,   Rx   R˜   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>é  s   c         3` s%   |  ] } t  d  d  ˆ  | ƒ Vq d  S(   N(   t   sliceR   (   R   R   (   R,   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>í  s    i   t   fillt   ignorei    .Ni   s*   only 2D and 3D convolution are implemented(   s   valids   full(   t   imported_scipy_signalRž   R+   R“   R   R   R‘   R   R'   RF   R   t   zerosR   R    R!   R¤   R   R   R   t   warningst   catch_warningst   simplefiltert   ComplexWarningR   R   R   (   R˜   t   imgRx   t   modeR,   RG   t   outt   dil_kern_shpt   dilated_kernt   valt   bvalt   bRH   t   im0(    (   R,   Rx   R˜   s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRM   Ð  sR    	?&'&(   s   convdims   border_modes	   subsamples   filter_flips   imshps   kshps   filter_dilationN(   t   __name__t
   __module__t   __doc__R:   t   check_broadcastt	   __props__R   R6   R›   R   R£   RM   (    (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR   <  s   5 	?		t   AbstractConvc           B` sM   e  Z d  Z d d d d e d d „ Z d „  Z d „  Z d „  Z d „  Z	 RS(   s¯    Abstract Op for the forward convolution.
    Refer to :func:`BaseAbstractConv <theano.tensor.nnet.abstract_conv.BaseAbstractConv>`
    for a more detailed documentation.
    R*   c         C` sA   t  t |  ƒ j d | d | d | d | d | d | d | ƒ d  S(   NR‘   R   R   R   R   RU   R   (   t   superR»   R›   (   R˜   R‘   R   R   R   R   RU   R   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR›     s    c         C` s[  t  | t j ƒ s! t | ƒ } n  t  | t j ƒ sB t | ƒ } n  | j j d | j d | j ƒ } | j | ƒ } | j j	 d |  j
 k r¥ t d d |  j
 ƒ ‚ n  | j j	 d |  j
 k rØ t d d |  j
 ƒ ‚ n  t | |  j d ƒ } t | |  j d ƒ } | j d | j d g t g |  j
 } | j j d | ƒ ƒ  } t |  | | g | g ƒ S(	   NR   t   broadcastablei   s   img must be %dD tensors   kern must be %dD tensorsG   AbstractConv shape mismatch: shape of image does not match given imshp.sH   AbstractConv shape mismatch: shape of filters does not match given kshp.i    (   R   RB   t   VariableR   R`   t   cloneR   R½   t   filter_variableRO   R‘   R€   RN   R   R   R:   R
   (   R˜   R­   Rx   t   ktypeR½   t   output(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt	   make_node  s&    		
c         ` s«  | \ ‰ ‰ t  j ˆ ƒ ‰ t  j ˆ ƒ ‰ t ‡ ‡ f d †  t ˆ j ƒ Dƒ ƒ ‰  | \ } ˆ j ‰ t ˆ t ƒ r… t ˆ ƒ d k pŽ ˆ d k s© t d j	 ˆ ƒ ƒ ‚ n  ˆ d k rÝ t ‡  f d †  t ˆ j ƒ Dƒ ƒ ‰ n4 ˆ d k rt ‡  f d †  t ˆ j ƒ Dƒ ƒ ‰ n  t ˆ t ƒ rèt ‡ f d	 †  t ˆ j ƒ Dƒ ƒ ‰ d ‰ t  j
 ˆ j d ˆ j d
 f t ‡ ‡ f d †  t ˆ j ƒ Dƒ ƒ d ˆ j ƒ} ˆ | t d  ƒ t d  ƒ f t ‡ ‡ f d †  t ˆ j ƒ Dƒ ƒ <| ‰ n  ˆ j s*ˆ t d  ƒ t d  ƒ f t d  d  d ƒ f ˆ j ‰ n  ˆ j ˆ ˆ d d d ˆ j ƒ} | t d  ƒ t d  ƒ f t ‡ f d †  t ˆ j ƒ Dƒ ƒ } | j d j j | ƒ | d <d  S(   Nc         3` s3   |  ]) } ˆ  j  d  | d ˆ j | d Vq d S(   i   i   N(   RF   R   (   R   R   (   Rx   R˜   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>=  s   i    R*   R)   R(   sg   invalid border_mode {}, which must be either "valid", "full", "half", an integer or a tuple of integersc         3` s   |  ] } ˆ  | d  Vq d S(   i   N(    (   R   R   (   t   dil_kernshp(    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>J  s    c         3` s   |  ] } ˆ  | d  Vq d S(   i   N(    (   R   R   (   RÄ   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>L  s    c         3` s   |  ] } t  ˆ  | ƒ Vq d  S(   N(   R   (   R   R   (   R®   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>N  s    i   c         3` s,   |  ]" } ˆ  j  | d  d  ˆ | Vq d S(   i   N(   RF   (   R   R   (   R­   R.   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>Q  s   R   c         3` s5   |  ]+ } t  ˆ | ˆ  j | d  ˆ | ƒ Vq d S(   i   N(   R¤   RF   (   R   R   (   R­   R.   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>U  s   iÿÿÿÿR®   R,   c         3` s(   |  ] } t  d  d  ˆ  j | ƒ Vq d  S(   N(   R¤   R   R   (   R   R   (   R˜   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>\  s   (   s   valids   fulls   half(   R   t   asarrayR    R!   R‘   R   R   R•   R+   R“   R¨   RF   R   R¤   R   RU   RM   R   R¢   R`   t   filter(   R˜   Rœ   RŸ   t   out_t   ot   new_imgt   conv_out(    (   RÄ   R­   Rx   R®   R.   R˜   s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   perform9  s@    		!((%B		9!c         C` s©   d  } | d d  k	 r: |  j | d | d ƒ j d } n  | d d  k	 r¢ | d  k rz |  j | d | d ƒ j d } q¢ | |  j | d | d ƒ j d 7} n  | g S(   Ni    i   (   R   RÃ   R¢   (   R˜   R¡   t   eval_pointst   rval(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   R_opa  s    $$(c         C` sô   | d } | d } |  j  d  k	 rp g  t d |  j ƒ D]0 } |  j  | d  k rZ | | n
 |  j  | ^ q7 } n  |  j d  k	 rÌ g  t d |  j ƒ D]0 } |  j | d  k r¶ | | n
 |  j | ^ q“ } n  t | | |  j |  j |  j ƒ } | g S(   Ni    i   i   (	   R   R   R!   R‘   R   R'   R   R   R   (   R˜   Rœ   t   input_shapesR   R   R   t   res(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   infer_shapel  s    

JJN(
   R¶   R·   R¸   R   R6   R›   RÃ   RË   RÎ   RÑ   (    (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR»   	  s   		(	RV   c           B` s2   e  Z d  Z d d d d e d d „ Z d „  Z RS(   s¯    Abstract Op for the forward convolution.
    Refer to :func:`BaseAbstractConv <theano.tensor.nnet.abstract_conv.BaseAbstractConv>`
    for a more detailed documentation.
    R*   i   c         C` sA   t  t |  ƒ j d d d | d | d | d | d | d | ƒ d  S(	   NR‘   i   R   R   R   R   RU   R   (   R¼   RV   R›   (   R˜   R   R   R   R   RU   R   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR›   ‚  s    c         C` só   | \ } } | \ } t  |  j |  j |  j |  j |  j |  j ƒ | | | j d d t ƒ} t	 |  j |  j |  j |  j |  j |  j ƒ | | | j d d t ƒ} t
 | | j ƒ } | j j | ƒ } t
 | | j ƒ } | j j | ƒ } | | f S(   Niþÿÿÿt   add_assert_shape(   Rd   R   R   R   R   RU   R   RF   R:   Rl   R   R½   R`   RÀ   (   R˜   RŸ   t   gradst   bottomt   weightst   topt   d_bottomt	   d_weights(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   grad  s&    			N(   i   i   (   i   i   (   R¶   R·   R¸   R   R6   R›   RÙ   (    (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRV   |  s   R]   c           B` s2   e  Z d  Z d d d d e d d „ Z d „  Z RS(   s¯    Abstract Op for the forward convolution.
    Refer to :func:`BaseAbstractConv <theano.tensor.nnet.abstract_conv.BaseAbstractConv>`
    for a more detailed documentation.
    R*   i   c         C` sA   t  t |  ƒ j d d d | d | d | d | d | d | ƒ d  S(	   NR‘   i   R   R   R   R   RU   R   (   R¼   R]   R›   (   R˜   R   R   R   R   RU   R   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR›   ´  s    c         C` sç   | \ } } | \ } t  |  j |  j |  j |  j |  j |  j ƒ | | | j d ƒ } t |  j |  j |  j |  j |  j |  j ƒ | | | j d ƒ } t	 | | j
 ƒ } | j j | ƒ } t	 | | j
 ƒ } | j j | ƒ } | | f S(   Niýÿÿÿ(   Rj   R   R   R   R   RU   R   RF   Rp   R   R½   R`   RÀ   (   R˜   RŸ   RÓ   RÔ   RÕ   RÖ   R×   RØ   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRÙ   Â  s&    			N(   i   i   i   (   i   i   i   (   R¶   R·   R¸   R   R6   R›   RÙ   (    (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR]   ®  s   t   AbstractConv_gradWeightsc           B` sP   e  Z d  Z d d d d e d d „ Z e d „ Z d „  Z d „  Z d „  Z	 RS(   sT  Gradient wrt. filters for `AbstractConv`.
    Refer to :func:`BaseAbstractConv <theano.tensor.nnet.abstract_conv.BaseAbstractConv>`
    for a more detailed documentation.

    :note: You will not want to use this directly, but rely on
           Theano's automatic differentiation or graph optimization to
           use it as needed.

    R*   c         C` sA   t  t |  ƒ j d | d | d | d | d | d | d | ƒ d  S(   NR‘   R   R   R   R   RU   R   (   R¼   RÚ   R›   (   R˜   R‘   R   R   R   R   RU   R   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR›   é  s    c         C` s^  t  | t j ƒ s! t | ƒ } n  t  | t j ƒ sB t | ƒ } n  | j j d | j d | j ƒ } | j | ƒ } | j j	 d |  j
 k r¥ t d d |  j
 ƒ ‚ n  | j j	 d |  j
 k rØ t d d |  j
 ƒ ‚ n  | rö t | |  j d ƒ } n  t | ƒ } | j d | j d g t g |  j
 } | j j d | ƒ ƒ  } t |  | | | g | g ƒ S(   NR   R½   i   s   img must be %dD tensors   topgrad must be %dD tensorsS   AbstractConv_gradWeights shape mismatch: shape of image does not match given imshp.i   (   R   RB   R¾   R   R`   R¿   R   R½   RÀ   RO   R‘   R€   RN   R   R:   R
   (   R˜   R­   t   topgradRF   RÒ   t   gtypeR½   RÂ   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRÃ   ù  s&    
c         ` sò  | \ ‰ } ‰ t  j ˆ ƒ ‰ t  j | ƒ } | \ } ˆ j ‰ t ˆ t ƒ r` t ˆ ƒ d k pi ˆ d k s„ t d j ˆ ƒ ƒ ‚ n  t ‡ ‡ f d †  t ˆ j	 ƒ Dƒ ƒ ‰  ˆ d k rà t ‡  f d †  t ˆ j	 ƒ Dƒ ƒ ‰ n4 ˆ d k rt ‡  f d †  t ˆ j	 ƒ Dƒ ƒ ‰ n  t ˆ t ƒ rët ‡ f d	 †  t ˆ j	 ƒ Dƒ ƒ ‰ d ‰ t  j
 ˆ j d ˆ j d
 f t ‡ ‡ f d †  t ˆ j	 ƒ Dƒ ƒ d ˆ j ƒ} ˆ | t d  ƒ t d  ƒ f t ‡ ‡ f d †  t ˆ j	 ƒ Dƒ ƒ <| ‰ n  t ‡ f d †  t ˆ j	 ƒ Dƒ ƒ r°| j d | j d
 f t ‡  ‡ f d †  t ˆ j	 ƒ Dƒ ƒ } t  j
 | d | j ƒ} | | t d  ƒ t d  ƒ f t ‡ f d †  t ˆ j	 ƒ Dƒ ƒ <| } n  d t t d ˆ j	 d ƒ ƒ }	 t d  ƒ t d  ƒ f t d  d  d ƒ f ˆ j	 }
 | j |	 ƒ |
 } ˆ j |	 ƒ ‰ ˆ j ˆ | d d ƒ} t ‡ f d †  t ˆ j	 ƒ Dƒ ƒ r£| t d  ƒ t d  ƒ f t ‡ f d †  t ˆ j	 ƒ Dƒ ƒ } n  ˆ j rÂ| j |	 ƒ |
 } n | j |	 ƒ } | j d j j | ƒ | d <d  S(   Ni    R*   R)   R(   sg   invalid border_mode {}, which must be either "valid", "full", "half", an integer or a tuple of integersc         3` s,   |  ]" } ˆ | d  ˆ  j  | d  Vq d S(   i   N(   R   (   R   R   (   R˜   RF   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>!  s   c         3` s   |  ] } ˆ  | d  Vq d S(   i   N(    (   R   R   (   t	   dil_shape(    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>%  s    c         3` s   |  ] } ˆ  | d  Vq d S(   i   N(    (   R   R   (   RÝ   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>'  s    c         3` s   |  ] } t  ˆ  | ƒ Vq d  S(   N(   R   (   R   R   (   R®   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>)  s    i   c         3` s,   |  ]" } ˆ  j  | d  d  ˆ | Vq d S(   i   N(   RF   (   R   R   (   R­   R.   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>-  s   R   c         3` s5   |  ]+ } t  ˆ | ˆ  j | d  ˆ | ƒ Vq d S(   i   N(   R¤   RF   (   R   R   (   R­   R.   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>1  s   c         3` s"   |  ] } ˆ  j  | d  k Vq d S(   i   N(   R   (   R   R   (   R˜   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>5  s    c         3` s,   |  ]" } ˆ j  | d  ˆ  | d Vq d S(   i   i   N(   RF   (   R   R   (   RÝ   R­   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>7  s   c         3` s(   |  ] } t  d  d  ˆ  j | ƒ Vq d  S(   N(   R¤   R   R   (   R   R   (   R˜   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>;  s   i   iÿÿÿÿR®   c         3` s"   |  ] } ˆ  j  | d  k Vq d S(   i   N(   R   (   R   R   (   R˜   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>E  s    c         3` s(   |  ] } t  d  d  ˆ  j | ƒ Vq d  S(   N(   R¤   R   R   (   R   R   (   R˜   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>G  s   (   s   valids   fulls   half(   i   i    (   R   RÅ   R   R   R    R•   R+   R“   R!   R‘   R¨   RF   R   R¤   R   t   anyt	   transposeRM   RU   R¢   R`   RÆ   (   R˜   Rœ   RŸ   RÇ   RÛ   RÈ   RÉ   t	   new_shapet   new_topgradt
   axes_ordert   flip_filtersRx   (    (   RÝ   R­   R®   R.   R˜   RF   s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRË     s\    		!((%B	%?	 %	c         C` s   d g d g d g g S(   Ni   i    (    (   R˜   Rœ   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   connection_patternO  s    c         C` sË   | d } | d } |  j  d  k	 r- |  j  n d  g d |  j } | d | d g g  t |  j ƒ D] } | j d | ^ qb } g  t d |  j ƒ D]* } | | d  k r´ | | n | | ^ q” } | g S(   Ni    i   i   (   R   R   R‘   R!   R¡   (   R˜   Rœ   RÏ   R   R0   R   R   t   fallback_kshp(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRÑ   R  s    

-.AN(
   R¶   R·   R¸   R   R6   R›   RÃ   RË   Rä   RÑ   (    (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRÚ   ß  s   			=	Rl   c           B` s2   e  Z d  Z d d d d e d d „ Z d „  Z RS(   sV  Gradient wrt. filters for `AbstractConv2d`.
    Refer to :func:`BaseAbstractConv <theano.tensor.nnet.abstract_conv.BaseAbstractConv>`
    for a more detailed documentation.

    :note: You will not want to use this directly, but rely on
           Theano's automatic differentiation or graph optimization to
           use it as needed.

    R*   i   c         C` sA   t  t |  ƒ j d d d | d | d | d | d | d | ƒ d  S(	   NR‘   i   R   R   R   R   RU   R   (   R¼   Rl   R›   (   R˜   R   R   R   R   RU   R   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR›   k  s    c   	      C` sú   | d  \ } } | \ } t  |  j |  j |  j |  j |  j |  j ƒ | | | j d ƒ } t |  j |  j |  j |  j |  j |  j ƒ | | ƒ } t	 | | j
 ƒ } | j j | ƒ } t	 | | j
 ƒ } | j j | ƒ } t j j ƒ  ƒ  f } | | f | S(   Ni   iþÿÿÿ(   Rd   R   R   R   R   RU   R   RF   RV   R   R½   R`   RÀ   RB   t   gradientt   DisconnectedType(	   R˜   RŸ   RÓ   RÔ   RÖ   RÕ   R×   t   d_topt   d_height_width(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRÙ   y  s*    		N(   i   i   (   i   i   (   R¶   R·   R¸   R   R6   R›   RÙ   (    (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRl   a  s   	Rp   c           B` s2   e  Z d  Z d d d d e d d „ Z d „  Z RS(   sV  Gradient wrt. filters for `AbstractConv3d`.
    Refer to :func:`BaseAbstractConv <theano.tensor.nnet.abstract_conv.BaseAbstractConv>`
    for a more detailed documentation.

    :note: You will not want to use this directly, but rely on
           Theano's automatic differentiation or graph optimization to
           use it as needed.

    R*   i   c         C` sA   t  t |  ƒ j d d d | d | d | d | d | d | ƒ d  S(	   NR‘   i   R   R   R   R   RU   R   (   R¼   Rp   R›   (   R˜   R   R   R   R   RU   R   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR›   ¡  s    c   	      C` sú   | d  \ } } | \ } t  |  j |  j |  j |  j |  j |  j ƒ | | | j d ƒ } t |  j |  j |  j |  j |  j |  j ƒ | | ƒ } t	 | | j
 ƒ } | j j | ƒ } t	 | | j
 ƒ } | j j | ƒ } t j j ƒ  ƒ  f } | | f | S(   Ni   iýÿÿÿ(   Rj   R   R   R   R   RU   R   RF   R]   R   R½   R`   RÀ   RB   Ræ   Rç   (	   R˜   RŸ   RÓ   RÔ   RÖ   RÕ   R×   Rè   t   d_depth_height_width(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRÙ   ¯  s*    		N(   i   i   i   (   i   i   i   (   R¶   R·   R¸   R   R6   R›   RÙ   (    (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRp   —  s   	t   AbstractConv_gradInputsc           B` sP   e  Z d  Z d d d d e d d „ Z e d „ Z d „  Z d „  Z d „  Z	 RS(   sS  Gradient wrt. inputs for `AbstractConv`.
    Refer to :func:`BaseAbstractConv <theano.tensor.nnet.abstract_conv.BaseAbstractConv>`
    for a more detailed documentation.

    :note: You will not want to use this directly, but rely on
           Theano's automatic differentiation or graph optimization to
           use it as needed.

    R*   c         C` sA   t  t |  ƒ j d | d | d | d | d | d | d | ƒ d  S(   NR‘   R   R   R   R   RU   R   (   R¼   Rë   R›   (   R˜   R‘   R   R   R   R   RU   R   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR›   Ø  s    c         C` sd  t  | t j ƒ s! t | ƒ } n  t  | t j ƒ sB t | ƒ } n  | j j d | j d | j ƒ } | j | ƒ } | j j	 d |  j
 k r¥ t d d |  j
 ƒ ‚ n  | j j	 d |  j
 k rØ t d d |  j
 ƒ ‚ n  | rö t | |  j d ƒ } n  t | ƒ } | j j d | j j d g t g |  j
 } | j j d | ƒ ƒ  } t |  | | | g | g ƒ S(	   NR   R½   i   s   kern must be %dD tensors   topgrad must be %dD tensorsS   AbstractConv_gradInputs shape mismatch: shape of filters does not match given kshp.i    i   (   R   RB   R¾   R   R`   R¿   R   R½   RÀ   RO   R‘   R€   RN   R   R:   R
   (   R˜   Rx   RÛ   RF   RÒ   RÜ   R½   RÂ   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRÃ   è  s&    !c         ` sb  | \ ‰ } ‰ t  j ˆ ƒ ‰ t  j | ƒ } | \ } ˆ j ‰ t ˆ t ƒ r` t ˆ ƒ d k pi ˆ d k s„ t d j ˆ ƒ ƒ ‚ n  ˆ j d  k	 r ˆ j n d  g d ˆ j
 } | j d ˆ j d g g  t ˆ j
 ƒ D] } ˆ | ^ qØ } g  t d ˆ j
 ƒ D]* } | | d  k r#| | n | | ^ q} t | ˆ j ˆ j ˆ j ˆ j ƒ }	 t |	 ƒ t | j ƒ k sœt d j t |	 ƒ t | j ƒ ƒ ƒ ‚ n  t ‡ ‡ f d	 †  t ˆ j
 ƒ Dƒ ƒ ‰  d ˆ j
 ‰ ˆ d k rt ‡  f d
 †  t ˆ j
 ƒ Dƒ ƒ ‰ nk ˆ d k r9t ‡  f d †  t ˆ j
 ƒ Dƒ ƒ ‰ n7 t ˆ t ƒ rpt ‡ f d †  t ˆ j
 ƒ Dƒ ƒ ‰ n  t ‡ f d †  t ˆ j
 ƒ Dƒ ƒ r8| j d | j d f t ‡  ‡ ‡ f d †  t ˆ j
 ƒ Dƒ ƒ }
 t  j |
 d | j ƒ} | | t d  ƒ t d  ƒ f t ‡ f d †  t ˆ j
 ƒ Dƒ ƒ <| } n  d t t d ˆ j
 d ƒ ƒ } t d  ƒ t d  ƒ f t d  d  d ƒ f ˆ j
 } ˆ j | ƒ ‰ ˆ j r¯| | } n  ˆ j | ˆ d d d ˆ j ƒ‰ ˆ j ræˆ | ‰ n  t d „  ˆ Dƒ ƒ rAˆ t d  ƒ t d  ƒ f t ‡ ‡ f d †  t ˆ j
 ƒ Dƒ ƒ ‰ n  | j d j j ˆ ƒ | d <d  S(   Ni    R*   R)   R(   sg   invalid border_mode {}, which must be either "valid", "full", "half", an integer or a tuple of integersi   i   s   invalid input_shape for gradInputs: the given input_shape would produce an output of shape {}, but the given topgrad has shape {}c         3` s3   |  ]) } ˆ  j  | d  d ˆ j | d Vq d S(   i   i   N(   RF   R   (   R   R   (   Rx   R˜   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>  s   c         3` s   |  ] } ˆ  | d  Vq d S(   i   N(    (   R   R   (   RÄ   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>#  s    c         3` s   |  ] } ˆ  | d  Vq d S(   i   N(    (   R   R   (   RÄ   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>%  s    c         3` s   |  ] } ˆ  | Vq d  S(   N(    (   R   R   (   R®   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>'  s    c         3` s"   |  ] } ˆ  j  | d  k Vq d S(   i   N(   R   (   R   R   (   R˜   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>(  s    c         3` s1   |  ]' } ˆ | d  ˆ | ˆ  | d Vq d S(   i   i   N(    (   R   R   (   RÄ   R.   RF   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>*  s   R   c         3` s(   |  ] } t  d  d  ˆ  j | ƒ Vq d  S(   N(   R¤   R   R   (   R   R   (   R˜   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>.  s   iÿÿÿÿR®   R,   c         s` s   |  ] } | d  k Vq d S(   i    N(    (   R   t   p(    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>;  s    c         3` s5   |  ]+ } t  ˆ | ˆ  j | d  ˆ | ƒ Vq d S(   i   N(   R¤   RF   (   R   R   (   R­   R.   (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pys	   <genexpr>=  s   (   s   valids   fulls   half(   i    (   i   i    (   R   RÅ   R   R   R    R•   R+   R“   R   R   R‘   RF   R!   R'   R   R   RÞ   R¨   R   R¤   Rß   RU   RM   R¢   R`   RÆ   (   R˜   Rœ   RŸ   RÇ   RÛ   RÈ   R   R   t   fallback_imshpt   expected_topgrad_shapeRà   Rá   Râ   Rã   (    (   RÄ   R­   Rx   R®   R.   R˜   RF   s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRË     sj    		!-'A		(((%?	 	!	c         C` s   d g d g d g g S(   Ni   i    (    (   R˜   Rœ   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRä   A  s    c         C` sË   | d } | d } |  j  d  k	 r- |  j  n d  g d |  j } | d | d g g  t |  j ƒ D] } | j d | ^ qb } g  t d |  j ƒ D]* } | | d  k r´ | | n | | ^ q” } | g S(   Ni    i   i   (   R   R   R‘   R!   R¡   (   R˜   Rœ   RÏ   R   R0   R   R   Rí   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRÑ   D  s    

-.AN(
   R¶   R·   R¸   R   R6   R›   RÃ   RË   Rä   RÑ   (    (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRë   Í  s   			?	Rd   c           B` s2   e  Z d  Z d d d d e d d „ Z d „  Z RS(   sU  Gradient wrt. inputs for `AbstractConv2d`.
    Refer to :func:`BaseAbstractConv <theano.tensor.nnet.abstract_conv.BaseAbstractConv>`
    for a more detailed documentation.

    :note: You will not want to use this directly, but rely on
           Theano's automatic differentiation or graph optimization to
           use it as needed.

    R*   i   c         C` sA   t  t |  ƒ j d d d | d | d | d | d | d | ƒ d  S(	   NR‘   i   R   R   R   R   RU   R   (   R¼   Rd   R›   (   R˜   R   R   R   R   RU   R   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR›   ^  s    c   	      C` sú   | d  \ } } | \ } t  |  j |  j |  j |  j |  j |  j ƒ | | | j d ƒ } t |  j |  j |  j |  j |  j |  j ƒ | | ƒ } t	 | | j
 ƒ } | j j | ƒ } t	 | | j
 ƒ } | j j | ƒ } t j j ƒ  ƒ  f } | | f | S(   Ni   iþÿÿÿ(   Rl   R   R   R   R   RU   R   RF   RV   R   R½   R`   RÀ   RB   Ræ   Rç   (	   R˜   RŸ   RÓ   RÕ   RÖ   RÔ   RØ   Rè   Ré   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRÙ   l  s(    		N(   i   i   (   i   i   (   R¶   R·   R¸   R   R6   R›   RÙ   (    (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRd   S  s   	Rj   c           B` s2   e  Z d  Z d d d d e d d „ Z d „  Z RS(   sU  Gradient wrt. inputs for `AbstractConv3d`.
    Refer to :func:`BaseAbstractConv <theano.tensor.nnet.abstract_conv.BaseAbstractConv>`
    for a more detailed documentation.

    :note: You will not want to use this directly, but rely on
           Theano's automatic differentiation or graph optimization to
           use it as needed.

    R*   i   c         C` sA   t  t |  ƒ j d d d | d | d | d | d | d | ƒ d  S(	   NR‘   i   R   R   R   R   RU   R   (   R¼   Rj   R›   (   R˜   R   R   R   R   RU   R   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyR›   ”  s    c   	      C` sú   | d  \ } } | \ } t  |  j |  j |  j |  j |  j |  j ƒ | | | j d ƒ } t |  j |  j |  j |  j |  j |  j ƒ | | ƒ } t	 | | j
 ƒ } | j j | ƒ } t	 | | j
 ƒ } | j j | ƒ } t j j ƒ  ƒ  f } | | f | S(   Ni   iýÿÿÿ(   Rp   R   R   R   R   RU   R   RF   R]   R   R½   R`   RÀ   RB   Ræ   Rç   (	   R˜   RŸ   RÓ   RÕ   RÖ   RÔ   RØ   Rè   Rê   (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRÙ   ¢  s&    	N(   i   i   i   (   i   i   i   (   R¶   R·   R¸   R   R6   R›   RÙ   (    (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyRj   ‰  s   	(   i   i   (   i   i   (   i   i   i   (   i   i   i   (   i   i   (   i   i   (   i   i   i   (   i   i   i   (   i   i   (   i   i   (   i   i   i   (   i   i   i   (D   R¸   t
   __future__R    R   R   t   loggingt   sixR   R   R–   RB   t   theano.tensorR   R   R   R   t   theano.tensor.optR	   t
   theano.gofR
   R   t	   six.movesR   R©   R   R‚   t   scipy.signal.signaltoolsR   R   R   t   scipy.signal.sigtoolsR   R6   R§   t   ImportErrorR:   t   __docformat__t	   getLoggert   _loggerR   R'   R   R3   R/   R5   R4   R?   RK   RN   R\   R^   Ri   Rk   Ro   Rq   Ry   Rt   R   R   R»   RV   R]   RÚ   Rl   Rp   Rë   Rd   Rj   (    (    (    s@   /tmp/pip-build-X4mzal/theano/theano/tensor/nnet/abstract_conv.pyt   <module>   sš   

376	45	<F	*%_ƒƒ{w#tÍs21‚66†6