ó
àÆ÷Xc           @` sÆ  d  d l  m Z m Z m Z d  d l Z d  d l Z d  d l Z d  d l m Z d  d l	 Z	 d  d l
 Z d  d l Z d  d l j Z e e d d d d d d „ Z d „  Z d	 „  Z e d
 d ƒ Z e j d d d d d d d e d d ƒe j d d d d d d e d d ƒe j d d d d d d d d  d d d d ƒe j d d  d d d d! d d  d d d d" ƒe j d# d$ d d d d% d d  d d d d& ƒe j d' d d d d( d d d d d d) ƒe j d* d d d d+ d d d d, ƒe j d- d. d d d d/ d d0 d d d d1 ƒe d2 k rÂe j e j ƒ \ Z Z e e d ƒ rge e j ƒ e j d  ƒ n  e j  s}e d3 ƒ n  e j! d  k r˜e j" Z! n	 e j! Z! e j# d  k r¼e j" Z# n	 e j# Z# e j$ d  k ràe j" Z$ n	 e j$ Z$ e e j% e j  d e! d! e# d% e$ d4 e j& d+ e j' ƒ\ Z( Z) e j% r6qÂe j  rLe e( ƒ qÂe ƒ  e d5 e j& d6 d7 ƒe d8 d6 d7 ƒe d9 e! e# e# e$ f ƒ e ƒ  e d: e( e) f ƒ e ƒ  e d; ƒ n  d S(<   i    (   t   absolute_importt   print_functiont   divisionN(   t   OptionParseriÐ  i
   t   Cc      	   C` sy  | r“t  d ƒ t  d t j j j ƒ t  d t j j ƒ t  d t j j ƒ t  d t j j ƒ t  d ƒ t  d t j	 ƒ t  d t j
 ƒ t  d	 t j ƒ t  d
 ƒ t  d t j d ƒ ƒ t  d t j d ƒ ƒ t  d t j d ƒ ƒ t  ƒ  t  d ƒ t j ƒ  t  d t j j ƒ t  d t j ƒ t  d t j ƒ t j j j d ƒ s]t j j j d ƒ r“t  d ƒ t j t j j j j d f ƒ t  ƒ  q“n  t j t j | | f d t j j d | ƒƒ } t j t j | | f d t j j d | ƒƒ } t j t j | | f d t j j d | ƒƒ }	 t j g  d |	 d |	 d t j | | ƒ f g ƒ}
 t  g  |
 j! j" j# ƒ  D] } | j$ j% j& d k ^ qoƒ rg  t' |
 j( j) |
 j( j* ƒ D]0 \ } } | j$ j% j& d k r¯t+ | d ƒ ^ q¯} t, | ƒ d k sýt- ‚ | d  rd! } q~d" } ne t  g  |
 j! j" j# ƒ  D] } | j$ j% j& d# k ^ q/ƒ r\d$ } n" d% } | t. |
 j! j" j# ƒ  ƒ 7} d  } d& } |
 ƒ  |  rkt+ t d' ƒ oÍt+ t j d( ƒ oÍt/ |	 t j j j0 ƒ } t+ t d) ƒ oñt/ |	 t j1 j2 ƒ } t3 j3 ƒ  } x t4 | ƒ D] } |
 ƒ  qW| r7t j j j5 ƒ  n  | r\|	 j6 d* t7 d+ t7 ƒ j8 ƒ  n  t3 j3 ƒ  } n  | | | f S(,   sn  
    :param execute: If True, execute a Theano function that should call gemm.
    :param verbose: If True, will print some Theano flags and env variables.
    :param M,N,K: The M,N,K size used by gemm.
    :param iters: The number of calls to gemm to do.

    :return: a tuple (execution time,
                      str that represents the implementation used)
    s   Some Theano flags:s       blas.ldflags=s       compiledir=s       floatX=s       device=s   Some OS information:s       sys.platform=s       sys.version=s       sys.prefix=s   Some environment variables:s       MKL_NUM_THREADS=t   MKL_NUM_THREADSs       OMP_NUM_THREADS=t   OMP_NUM_THREADSs       GOTO_NUM_THREADS=t   GOTO_NUM_THREADSsA   Numpy config: (used when the Theano flag "blas.ldflags" is empty)s   Numpy dot module:s   Numpy location:s   Numpy version:t   gpus   nvcc version:s	   --versiont   dtypet   ordert   updatesgš™™™™™Ù?gš™™™™™é?t   Gemmt   cthunki   i    s(   CPU (with direct Theano binding to blas)sP   CPU (without direct Theano binding to blas but with numpy/scipy binding to blas)t   GpuGemmt   GPUs9   ERROR, unable to tell if Theano used the cpu or the gpu:
iÿÿÿÿt   sandboxt   cudat   gpuarrayt   borrowt   return_internal_type(9   t   printt   theanot   configt   blast   ldflagst
   compiledirt   floatXt   devicet   syst   platformt   versiont   prefixt   ost   getenvt   npt   show_configt   dott
   __module__t   __file__t   __version__t
   startswitht   init_gpu_devicet
   subprocesst   callR   R   t   nvcc_compilert	   nvcc_patht   sharedt   onest   functiont   Tt   anyt   makert   fgrapht   toposortt   opt	   __class__t   __name__t   zipt   fnt   nodest   thunkst   hasattrt   lent   AssertionErrort   strt
   isinstancet   CudaNdarraySharedVariableR   t   GpuArraySharedVariablet   timet   ranget   synchronizet	   get_valuet   Truet   sync(   t   executet   verboset   Mt   Nt   Kt   itersR
   t   at   bt   ct   ft   xt   nodet   thunkt   c_implt   implt   t0t   t1RJ   t   sync2t   i(    (    s6   /tmp/pip-build-X4mzal/theano/theano/misc/check_blas.pyRK      s‚    






$$$64%*
		4	c         C` s   t  ƒ  | j S(   N(   RK   t   COMPLETE(   t   statet   channel(    (    s6   /tmp/pip-build-X4mzal/theano/theano/misc/check_blas.pyt
   jobman_jobk   s    c           C` s   t  ƒ  S(   N(   RK   (    (    (    s6   /tmp/pip-build-X4mzal/theano/theano/misc/check_blas.pyt   testp   s    t   usagesq   %prog <options>
Compute time needed to perform BLAS gemm computations between matrices of size (M, N) and (N, K).s   -qs   --quiett   actiont
   store_truet   destt   quiett   defaultt   helps=   If true, do not print the comparison table and config optionss   --print_onlyt
   print_onlys)   If true, do not perform gemm computationss   -Ms   --Mt   storeRM   t   typet   ints   The M size to gemms   -Ns   --NRN   s   The N size to gemms   -Ks   --KRO   s   The K size to gemms   --itert   iters   The number of calls to gemms   --orderR
   sŸ   The numpy memory layout parameter used when creating the numpy.ndarray objects. It accepts 'C' for C memory order and 'F' for Fortran order (for all matrices).s   -Bs   --Bt   Biˆ  s   The M, N, and K for big gemmt   __main__s%  
        Some results that you can compare against. They were 10 executions
        of gemm in float64 with matrices of shape 2000x2000 (M=N=K=2000).
        All memory layout was in C order.

        CPU tested: Xeon E5345(2.33Ghz, 8M L2 cache, 1333Mhz FSB),
                    Xeon E5430(2.66Ghz, 12M L2 cache, 1333Mhz FSB),
                    Xeon E5450(3Ghz, 12M L2 cache, 1333Mhz FSB),
                    Xeon X5560(2.8Ghz, 12M L2 cache, hyper-threads?)
                    Core 2 E8500, Core i7 930(2.8Ghz, hyper-threads enabled),
                    Core i7 950(3.07GHz, hyper-threads enabled)
                    Xeon X5550(2.67GHz, 8M l2 cache?, hyper-threads enabled)


        Libraries tested:
            * numpy with ATLAS from distribution (FC9) package (1 thread)
            * manually compiled numpy and ATLAS with 2 threads
            * goto 1.26 with 1, 2, 4 and 8 threads
            * goto2 1.13 compiled with multiple threads enabled

                          Xeon   Xeon   Xeon  Core2 i7    i7     Xeon   Xeon
        lib/nb threads    E5345  E5430  E5450 E8500 930   950    X5560  X5550

        numpy 1.3.0 blas                                                775.92s
        numpy_FC9_atlas/1 39.2s  35.0s  30.7s 29.6s 21.5s 19.60s
        goto/1            18.7s  16.1s  14.2s 13.7s 16.1s 14.67s
        numpy_MAN_atlas/2 12.0s  11.6s  10.2s  9.2s  9.0s
        goto/2             9.5s   8.1s   7.1s  7.3s  8.1s  7.4s
        goto/4             4.9s   4.4s   3.7s  -     4.1s  3.8s
        goto/8             2.7s   2.4s   2.0s  -     4.1s  3.8s
        openblas/1                                        14.04s
        openblas/2                                         7.16s
        openblas/4                                         3.71s
        openblas/8                                         3.70s
        mkl 11.0.083/1            7.97s
        mkl 10.2.2.025/1                                         13.7s
        mkl 10.2.2.025/2                                          7.6s
        mkl 10.2.2.025/4                                          4.0s
        mkl 10.2.2.025/8                                          2.0s
        goto2 1.13/1                                                     14.37s
        goto2 1.13/2                                                      7.26s
        goto2 1.13/4                                                      3.70s
        goto2 1.13/8                                                      1.94s
        goto2 1.13/16                                                     3.16s

        Test time in float32

        cuda version      6.5    6.0    5.5    5.0    4.2    4.1    4.0    3.2    3.0   # note
        gpu
        K6000/NOECC       0.06s         0.06s
        K40                             0.07s
        K20m/ECC          0.08s 0.08s          0.07s
        K20/NOECC                              0.07s
        M2090                           0.19s
        C2075                                         0.25s
        M2075                                  0.25s
        M2070                                  0.25s         0.27s         0.32s
        M2070-Q                                0.48s         0.27s         0.32s
        M2050(Amazon)                          0.25s
        C1060                                                              0.46s
        K600                            1.04s

        GTX Titan Black                 0.05s
        GTX Titan(D15U-50)              0.06s  0.06s  don't work
        GTX 780                         0.06s
        GTX 980           0.06s
        GTX 970           0.08s
        GTX 680                         0.11s  0.12s  0.154s               0.218s
        GRID K520         0.14s
        GTX 580                         0.16s  0.16s  0.164s               0.203s
        GTX 480                         0.19s  0.19s  0.192s               0.237s 0.27s
        GTX 750 Ti        0.20s
        GTX 470                         0.23s  0.23s  0.238s               0.297s 0.34s
        GTX 660                         0.18s  0.20s  0.23s
        GTX 560                                       0.30s
        GTX 650 Ti                             0.27s
        GTX 765M                 0.27s
        GTX 460                                0.37s                0.45s
        GTX 285                         0.42s         0.452s        0.452s        0.40s # cuda 3.0 seems faster? driver version?
        750M                                   0.49s
        GT 610            2.38s
        GTX 550 Ti                                                  0.57s
        GT 520                                        2.68s                3.06s
        GT 520M                                2.44s                       3.19s        # with bumblebee on Ubuntu 12.04
        GT 220                                                             3.80s
        GT 210                                                      6.35s
        8500 GT                                                                   10.68s

        Results for larger matrices.
        There were 10 executions of gemm in float32
        with matrices of shape 5000x5000 (M=N=K=5000).
        All memory layout was in C order.

        cuda version      7.5    7.0    6.5
        gpu
        M40               0.47s
        k80               0.96s
        K6000/NOECC              0.69s
        K40                             0.88s
        K20m/ECC
        K20/NOECC
        M2090
        C2075
        M2075
        M2070
        M2070-Q
        M2050(Amazon)
        C1060
        K600

        GTX Titan X       0.45s  0.47s
        GTX Titan Black   0.64s  0.64s
        GTX Titan(D15U-50)
        GTX 780
        GTX 980 Ti        0.41s
        GTX 980
        GTX 970           0.66s
        GTX 680                  1.57s
        GRID K520
        GTX 750 Ti        2.01s  2.01s
        GTX 750           2.46s  2.37s
        GTX 660           2.32s  2.32s
        GTX 580           2.42s         2.47s
        GTX 480           2.87s         2.88s
        TX1                      7.6s (float32 storage and computation)
        GT 610                   33.5s
        RP   s   We executedt   endt    s-   calls to gemm with a and b matrices of shapess   (%d, %d) and (%d, %d).s"   Total execution time: %.2fs on %s.s—   Try to run this script a few times. Experience shows that the first time is not as fast as followings calls. The difference is not big, but consistent.(*   t
   __future__R    R   R   R!   R   RE   t   optparseR   R+   t   numpyR#   R   t   theano.tensort   tensorR2   RI   RK   Ra   Rb   t   parsert
   add_optiont   FalseR9   t
   parse_argst   argvt   optionst	   argumentsR>   R   Ri   t   exitRg   RM   Ro   RN   RO   Rj   Rn   R
   t   tRY   (    (    (    s6   /tmp/pip-build-X4mzal/theano/theano/misc/check_blas.pyt   <module>   sŒ   U				~
					