"""Routines for numerical differentiation."""
from __future__ import division

import numpy as np

from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find
from ._group_columns import group_dense, group_sparse

EPS = np.finfo(np.float64).eps


def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
    """Adjust final difference scheme to the presence of bounds.

    Parameters
    ----------
    x0 : ndarray, shape (n,)
        Point at which we wish to estimate derivative.
    h : ndarray, shape (n,)
        Desired finite difference steps.
    num_steps : int
        Number of `h` steps in one direction required to implement a finite
        difference scheme. For example, 2 means that we need to evaluate
        f(x0 + 2 * h) or f(x0 - 2 * h).
    scheme : {'1-sided', '2-sided'}
        Whether steps in one or both directions are required. In other
        words '1-sided' applies to forward and backward schemes, '2-sided'
        applies to central schemes.
    lb : ndarray, shape (n,)
        Lower bounds on independent variables.
    ub : ndarray, shape (n,)
        Upper bounds on independent variables.

    Returns
    -------
    h_adjusted : ndarray, shape (n,)
        Adjusted step sizes. Step size decreases only if a sign flip or
        switching to a one-sided scheme doesn't allow taking a full step.
    use_one_sided : ndarray of bool, shape (n,)
        Whether to switch to one-sided scheme. Informative only for
        ``scheme='2-sided'``.
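
    Examples
    --------
    An illustrative sketch (representative output; exact formatting depends
    on the numpy version): a one-step '2-sided' scheme at ``x0 = 0.9`` with
    ``h = 0.2`` does not fit under an upper bound of 1.0, so the step is
    flipped towards the unbounded side and a one-sided scheme is selected:

    >>> import numpy as np
    >>> _adjust_scheme_to_bounds(np.array([0.9]), np.array([0.2]), 1,
    ...                          '2-sided', np.array([-np.inf]),
    ...                          np.array([1.0]))
    (array([-0.2]), array([ True], dtype=bool))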
    """
    if scheme == '1-sided':
        use_one_sided = np.ones_like(h, dtype=bool)
    elif scheme == '2-sided':
        h = np.abs(h)
        use_one_sided = np.zeros_like(h, dtype=bool)
    else:
        raise ValueError("`scheme` must be '1-sided' or '2-sided'.")

    if np.all((lb == -np.inf) & (ub == np.inf)):
        return h, use_one_sided

    h_total = h * num_steps
    h_adjusted = h.copy()

    lower_dist = x0 - lb
    upper_dist = ub - x0

    if scheme == '1-sided':
        x = x0 + h_total
        violated = (x < lb) | (x > ub)
        fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
        # Flip steps which violate a bound but fit in the other direction.
        h_adjusted[violated & fitting] *= -1

        # Shrink the remaining steps towards the wider side.
        forward = (upper_dist >= lower_dist) & ~fitting
        h_adjusted[forward] = upper_dist[forward] / num_steps
        backward = (upper_dist < lower_dist) & ~fitting
        h_adjusted[backward] = -lower_dist[backward] / num_steps
    elif scheme == '2-sided':
        central = (lower_dist >= h_total) & (upper_dist >= h_total)

        forward = (upper_dist >= lower_dist) & ~central
        h_adjusted[forward] = np.minimum(
            h[forward], 0.5 * upper_dist[forward] / num_steps)
        use_one_sided[forward] = True

        backward = (upper_dist < lower_dist) & ~central
        h_adjusted[backward] = -np.minimum(
            h[backward], 0.5 * lower_dist[backward] / num_steps)
        use_one_sided[backward] = True

        min_dist = np.minimum(upper_dist, lower_dist) / num_steps
        adjusted_central = ~central & (np.abs(h_adjusted) <= min_dist)
        h_adjusted[adjusted_central] = min_dist[adjusted_central]
        use_one_sided[adjusted_central] = False

    return h_adjusted, use_one_sided


def _compute_absolute_step(rel_step, x0, method):
    if rel_step is None:
        if method == '2-point':
            rel_step = EPS**0.5
        elif method == '3-point':
            rel_step = EPS**(1 / 3)
        elif method == 'cs':
            rel_step = EPS**0.5
        else:
            raise ValueError("`method` must be '2-point' or '3-point'.")
    sign_x0 = (x0 >= 0).astype(float) * 2 - 1
    return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0))


def _prepare_bounds(bounds, x0):
    lb, ub = [np.asarray(b, dtype=float) for b in bounds]
    if lb.ndim == 0:
        lb = np.resize(lb, x0.shape)
    if ub.ndim == 0:
        ub = np.resize(ub, x0.shape)
    return lb, ub


def group_columns(A, order=0):
    """Group columns of a 2-d matrix for sparse finite differencing [1]_.

    Two columns are in the same group only if, in each row, at least one of
    them is zero, i.e., grouped columns have no overlapping non-zero entries.
    A greedy sequential algorithm is used to construct the groups.

    Parameters
    ----------
    A : array_like or sparse matrix, shape (m, n)
        Matrix of which to group columns.
    order : int, iterable of int with shape (n,) or None
        Permutation array which defines the order in which columns are
        enumerated. If int or None, a random permutation is used with
        `order` as the random seed. Default is 0, that is, use a random
        permutation but guarantee repeatability.

    Returns
    -------
    groups : ndarray of int, shape (n,)
        Contains values from 0 to n_groups-1, where n_groups is the number
        of found groups. Each value ``groups[i]`` is the index of the group
        to which the i-th column is assigned. The procedure is helpful only
        if n_groups is significantly less than n.

    References
    ----------
    .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13 (1974), pp. 117-120.
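
    Examples
    --------
    An illustrative sketch (an explicit column order is passed to keep the
    group labels deterministic; with the default ``order=0`` the same
    partition may come out with permuted labels): in a tridiagonal matrix
    the first and the last columns have no row in common, so they can be
    estimated together:

    >>> import numpy as np
    >>> A = np.array([[1, 1, 0, 0],
    ...               [1, 1, 1, 0],
    ...               [0, 1, 1, 1],
    ...               [0, 0, 1, 1]])
    >>> group_columns(A, order=np.arange(4))
    array([0, 1, 2, 0], dtype=int32)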
    """
    if issparse(A):
        A = csc_matrix(A)
    else:
        A = np.atleast_2d(A)
        A = (A != 0).astype(np.int32)

    if A.ndim != 2:
        raise ValueError("`A` must be 2-dimensional.")

    m, n = A.shape

    if order is None or np.isscalar(order):
        rng = np.random.RandomState(order)
        order = rng.permutation(n)
    else:
        order = np.asarray(order)
        if order.shape != (n,):
            raise ValueError("`order` has incorrect shape.")

    A = A[:, order]

    if issparse(A):
        groups = group_sparse(m, n, A.indices, A.indptr)
    else:
        groups = group_dense(m, n, A)

    # Undo the column permutation in the group labels.
    groups[order] = groups.copy()

    return groups


def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None,
                      bounds=(-np.inf, np.inf), sparsity=None, args=(),
                      kwargs={}):
    """Compute finite difference approximation of the derivatives of a
    vector-valued function.

    If a function maps from R^n to R^m, its derivatives form m-by-n matrix
    called the Jacobian, where an element (i, j) is a partial derivative of
    f[i] with respect to x[j].

    Parameters
    ----------
    fun : callable
        Function of which to estimate the derivatives. The argument x
        passed to this function is ndarray of shape (n,) (never a scalar
        even if n=1). It must return 1-d array_like of shape (m,) or a scalar.
    x0 : array_like of shape (n,) or float
        Point at which to estimate the derivatives. Float will be converted
        to a 1-d array.
    method : {'3-point', '2-point'}, optional
        Finite difference method to use:
            - '2-point' - use the first order accuracy forward or backward
                          difference.
            - '3-point' - use central difference in interior points and the
                          second order accuracy forward or backward difference
                          near the boundary.
            - 'cs' - use a complex-step finite difference scheme. This assumes
                     that the user function is real-valued and can be
                     analytically continued to the complex plane. Otherwise,
                     produces bogus results.
    rel_step : None or array_like, optional
        Relative step size to use. The absolute step size is computed as
        ``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to
        fit into the bounds. For ``method='3-point'`` the sign of `h` is
        ignored. If None (default) then step is selected automatically,
        see Notes.
    f0 : None or array_like, optional
        If not None it is assumed to be equal to ``fun(x0)``, in which case
        ``fun(x0)`` is not called. Default is None.
    bounds : tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no bounds.
        Each bound must match the size of `x0` or be a scalar, in the latter
        case the bound will be the same for all variables. Use it to limit the
        range of function evaluation.
    sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
        Defines a sparsity structure of the Jacobian matrix. If the Jacobian
        matrix is known to have only a few non-zero elements in each row,
        then it's possible to estimate several of its columns by a single
        function evaluation [2]_. To perform such economical computations
        two ingredients are required:

        * structure : array_like or sparse matrix of shape (m, n). A zero
          element means that the corresponding element of the Jacobian
          is identically zero.
        * groups : array_like of shape (n,). A column grouping for a given
          sparsity structure, use `group_columns` to obtain it.

        A single array or a sparse matrix is interpreted as a sparsity
        structure, and groups are computed inside the function. A tuple is
        interpreted as (structure, groups). If None (default), a standard
        dense differencing will be used.

        Note that sparse differencing makes sense only for large Jacobian
        matrices where each row contains few non-zero elements; see the
        sparse example in the Examples section below.
    args, kwargs : tuple and dict, optional
        Additional arguments passed to `fun`. Both empty by default.
        The calling signature is ``fun(x, *args, **kwargs)``.

    Returns
    -------
    J : ndarray or csr_matrix
        Finite difference approximation of the Jacobian matrix. If `sparsity`
        is None then ndarray with shape (m, n) is returned. Although if m=1 it
        is returned as a gradient with shape (n,). If `sparsity` is not None,
        csr_matrix with shape (m, n) is returned.

    See Also
    --------
    check_derivative : Check correctness of a function computing derivatives.

    Notes
    -----
    If `rel_step` is not provided, it is assigned ``EPS**(1/s)``, where EPS is
    machine epsilon for float64 numbers, s=2 for '2-point' method and s=3 for
    '3-point' method. Such relative step approximately minimizes a sum of
    truncation and round-off errors, see [1]_.

    A finite difference scheme for '3-point' method is selected automatically.
    The well-known central difference scheme is used for points sufficiently
    far from the boundary, and 3-point forward or backward scheme is used for
    points near the boundary. Both schemes have the second-order accuracy in
    terms of Taylor expansion. Refer to [3]_ for the formulas of 3-point
    forward and backward difference schemes.

    For dense differencing, when m=1 the Jacobian is returned with shape
    (n,); on the other hand, when n=1 the Jacobian is returned with shape
    (m, 1). Our motivation is the following: a) it handles the case of
    gradient computation (m=1) in a conventional way; b) it clearly separates
    these two different cases; c) in all cases np.atleast_2d can be called
    to get a 2-d Jacobian with correct dimensions.
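
    As an illustration of this shape convention, a minimal sketch (central
    differences are exact for quadratics, so the printed values match the
    exact gradient):

    >>> import numpy as np
    >>> approx_derivative(lambda x: x[0]**2 + x[1]**2, np.array([1.0, 2.0]))
    array([ 2.,  4.])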

    References
    ----------
    .. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific
           Computing. 3rd edition", sec. 5.7.

    .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13 (1974), pp. 117-120.

    .. [3] B. Fornberg, "Generation of Finite Difference Formulas on
           Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize import approx_derivative
    >>>
    >>> def f(x, c1, c2):
    ...     return np.array([x[0] * np.sin(c1 * x[1]),
    ...                      x[0] * np.cos(c2 * x[1])])
    ...
    >>> x0 = np.array([1.0, 0.5 * np.pi])
    >>> approx_derivative(f, x0, args=(1, 2))
    array([[ 1.,  0.],
           [-1.,  0.]])

    Bounds can be used to limit the region of function evaluation.
    In the example below we compute left and right derivative at point 1.0.

    >>> def g(x):
    ...     return x**2 if x >= 1 else x
    ...
    >>> x0 = 1.0
    >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
    array([ 1.])
    >>> approx_derivative(g, x0, bounds=(1.0, np.inf))
    array([ 2.])
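
    A sparsity structure lets several columns be estimated from a single
    function evaluation. An illustrative sketch with a banded Jacobian
    (``f_banded``, its 3x4 structure and the grouping are constructed here
    purely for the example):

    >>> def f_banded(x):
    ...     return x[:-1] * x[1:]
    ...
    >>> x0 = np.array([1.0, 2.0, 3.0, 4.0])
    >>> structure = np.array([[1, 1, 0, 0],
    ...                       [0, 1, 1, 0],
    ...                       [0, 0, 1, 1]])
    >>> groups = group_columns(structure)
    >>> J = approx_derivative(f_banded, x0, sparsity=(structure, groups))
    >>> J.toarray()
    array([[ 2.,  1.,  0.,  0.],
           [ 0.,  3.,  2.,  0.],
           [ 0.,  0.,  4.,  3.]])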
    """
    if method not in ['2-point', '3-point', 'cs']:
        raise ValueError("Unknown method '%s'. " % method)

    x0 = np.atleast_1d(x0)
    if x0.ndim > 1:
        raise ValueError("`x0` must have at most 1 dimension.")

    lb, ub = _prepare_bounds(bounds, x0)

    if lb.shape != x0.shape or ub.shape != x0.shape:
        raise ValueError("Inconsistent shapes between bounds and `x0`.")

    def fun_wrapped(x):
        f = np.atleast_1d(fun(x, *args, **kwargs))
        if f.ndim > 1:
            raise RuntimeError("`fun` return value has more than 1 dimension.")
        return f

    if f0 is None:
        f0 = fun_wrapped(x0)
    else:
        f0 = np.atleast_1d(f0)
        if f0.ndim > 1:
            raise ValueError("`f0` passed has more than 1 dimension.")

    if np.any((x0 < lb) | (x0 > ub)):
        raise ValueError("`x0` violates bound constraints.")

    h = _compute_absolute_step(rel_step, x0, method)

    if method == '2-point':
        h, use_one_sided = _adjust_scheme_to_bounds(
            x0, h, 1, '1-sided', lb, ub)
    elif method == '3-point':
        h, use_one_sided = _adjust_scheme_to_bounds(
            x0, h, 1, '2-sided', lb, ub)
    elif method == 'cs':
        use_one_sided = False

    if sparsity is None:
        return _dense_difference(fun_wrapped, x0, f0, h,
                                 use_one_sided, method)
    else:
        if not issparse(sparsity) and len(sparsity) == 2:
            structure, groups = sparsity
        else:
            structure = sparsity
            groups = group_columns(sparsity)

        if issparse(structure):
            structure = csc_matrix(structure)
        else:
            structure = np.atleast_2d(structure)

        groups = np.atleast_1d(groups)
        return _sparse_difference(fun_wrapped, x0, f0, h, use_one_sided,
                                  structure, groups, method)


def _dense_difference(fun, x0, f0, h, use_one_sided, method):
    m = f0.size
    n = x0.size
    J_transposed = np.empty((n, m))
    h_vecs = np.diag(h)

    for i in range(h.size):
        if method == '2-point':
            x = x0 + h_vecs[i]
            dx = x[i] - x0[i]  # Recompute dx as exactly representable number.
            df = fun(x) - f0
        elif method == '3-point' and use_one_sided[i]:
            x1 = x0 + h_vecs[i]
            x2 = x0 + 2 * h_vecs[i]
            dx = x2[i] - x0[i]
            f1 = fun(x1)
            f2 = fun(x2)
            df = -3.0 * f0 + 4 * f1 - f2
        elif method == '3-point' and not use_one_sided[i]:
            x1 = x0 - h_vecs[i]
            x2 = x0 + h_vecs[i]
            dx = x2[i] - x1[i]
            f1 = fun(x1)
            f2 = fun(x2)
            df = f2 - f1
        elif method == 'cs':
            f1 = fun(x0 + h_vecs[i] * 1.j)
            df = f1.imag
            dx = h_vecs[i, i]
        else:
            raise RuntimeError("Never be here.")

        J_transposed[i] = df / dx

    if m == 1:
        J_transposed = np.ravel(J_transposed)

    return J_transposed.T


def _sparse_difference(fun, x0, f0, h, use_one_sided,
                       structure, groups, method):
    m = f0.size
    n = x0.size
    row_indices = []
    col_indices = []
    fractions = []

    n_groups = np.max(groups) + 1
    for group in range(n_groups):
        # Perturb all variables of the current group simultaneously.
        e = np.equal(group, groups)
        h_vec = h * e
        if method == '2-point':
            x = x0 + h_vec
            dx = x - x0
            df = fun(x) - f0
            # The result is written to columns which correspond to the
            # perturbed variables.
            cols, = np.nonzero(e)
            # Find all non-zero elements in the selected columns of the
            # Jacobian.
            i, j, _ = find(structure[:, cols])
            # Restore the full-array column indices.
            j = cols[j]
        elif method == '3-point':
            # Here we do conceptually the same, but separate one-sided
            # and two-sided schemes.
            x1 = x0.copy()
            x2 = x0.copy()

            mask_1 = use_one_sided & e
            x1[mask_1] += h_vec[mask_1]
            x2[mask_1] += 2 * h_vec[mask_1]

            mask_2 = ~use_one_sided & e
            x1[mask_2] -= h_vec[mask_2]
            x2[mask_2] += h_vec[mask_2]

            dx = np.zeros(n)
            dx[mask_1] = x2[mask_1] - x0[mask_1]
            dx[mask_2] = x2[mask_2] - x1[mask_2]

            f1 = fun(x1)
            f2 = fun(x2)

            cols, = np.nonzero(e)
            i, j, _ = find(structure[:, cols])
            j = cols[j]

            mask = use_one_sided[j]
            df = np.empty(m)

            rows = i[mask]
            df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]

            rows = i[~mask]
            df[rows] = f2[rows] - f1[rows]
        elif method == 'cs':
            f1 = fun(x0 + h_vec * 1.j)
            df = f1.imag
            dx = h_vec
            cols, = np.nonzero(e)
            i, j, _ = find(structure[:, cols])
            j = cols[j]
        else:
            raise ValueError("Never be here.")

        # Accumulate the triplets from which a coo_matrix is built at the
        # end.
        row_indices.append(i)
        col_indices.append(j)
        fractions.append(df[i] / dx[j])

    row_indices = np.hstack(row_indices)
    col_indices = np.hstack(col_indices)
    fractions = np.hstack(fractions)
    J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
    return csr_matrix(J)


def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
                     kwargs={}):
    """Check correctness of a function computing derivatives (Jacobian or
    gradient) by comparison with a finite difference approximation.

    Parameters
    ----------
    fun : callable
        Function of which to estimate the derivatives. The argument x
        passed to this function is ndarray of shape (n,) (never a scalar
        even if n=1). It must return 1-d array_like of shape (m,) or a scalar.
    jac : callable
        Function which computes Jacobian matrix of `fun`. It must work with
        argument x the same way as `fun`. The return value must be array_like
        or sparse matrix with an appropriate shape.
    x0 : array_like of shape (n,) or float
        Point at which to estimate the derivatives. Float will be converted
        to 1-d array.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no bounds.
        Each bound must match the size of `x0` or be a scalar, in the latter
        case the bound will be the same for all variables. Use it to limit the
        range of function evaluation.
    args, kwargs : tuple and dict, optional
        Additional arguments passed to `fun` and `jac`. Both empty by default.
        The calling signature is ``fun(x, *args, **kwargs)`` and the same
        for `jac`.

    Returns
    -------
    accuracy : float
        The maximum among all relative errors for elements with absolute values
        higher than 1 and absolute errors for elements with absolute values
        less than or equal to 1. If `accuracy` is on the order of 1e-6 or lower,
        then it is likely that your `jac` implementation is correct.
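
        Equivalently, for a dense Jacobian approximation ``J_diff`` obtained
        by finite differences, the returned value is
        ``np.max(np.abs(jac(x0) - J_diff) / np.maximum(1, np.abs(J_diff)))``.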

    See Also
    --------
    approx_derivative : Compute finite difference approximation of derivative.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize import check_derivative
    >>>
    >>> def f(x, c1, c2):
    ...     return np.array([x[0] * np.sin(c1 * x[1]),
    ...                      x[0] * np.cos(c2 * x[1])])
    ...
    >>> def jac(x, c1, c2):
    ...     return np.array([
    ...         [np.sin(c1 * x[1]),  c1 * x[0] * np.cos(c1 * x[1])],
    ...         [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
    ...     ])
    ...
    >>>
    >>> x0 = np.array([1.0, 0.5 * np.pi])
    >>> check_derivative(f, jac, x0, args=(1, 2))
    2.4492935982947064e-16

    """
    J_to_test = jac(x0, *args, **kwargs)
    if issparse(J_to_test):
        J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
                                   args=args, kwargs=kwargs)
        J_to_test = csr_matrix(J_to_test)
        abs_err = J_to_test - J_diff
        i, j, abs_err_data = find(abs_err)
        J_diff_data = np.asarray(J_diff[i, j]).ravel()
        return np.max(np.abs(abs_err_data) /
                      np.maximum(1, np.abs(J_diff_data)))
    else:
        J_diff = approx_derivative(fun, x0, bounds=bounds,
                                   args=args, kwargs=kwargs)
        abs_err = np.abs(J_to_test - J_diff)
        return np.max(abs_err / np.maximum(1, np.abs(J_diff)))