"""Python wrappers around Brain.

This file is MACHINE GENERATED! Do not edit.
"""
from google.protobuf import text_format

from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.ops import op_def_library


def neg_train(w_in, w_out, examples, labels, lr, vocab_count,
              num_negative_samples, name=None):
  r"""Training via negative sampling.

  Args:
    w_in: A `Tensor` of type mutable `float32`. input word embedding.
    w_out: A `Tensor` of type mutable `float32`. output word embedding.
    examples: A `Tensor` of type `int32`. A vector of word ids.
    labels: A `Tensor` of type `int32`. A vector of word ids.
    lr: A `Tensor` of type `float32`.
    vocab_count: A list of `ints`. Count of words in the vocabulary.
    num_negative_samples: An `int`. Number of negative samples per example.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  return _op_def_lib.apply_op("NegTrain", w_in=w_in, w_out=w_out,
                              examples=examples, labels=labels, lr=lr,
                              vocab_count=vocab_count,
                              num_negative_samples=num_negative_samples,
                              name=name)


ops.RegisterShape("NegTrain")(None)
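
# Illustrative usage sketch, not part of the generated wrapper: a trainer such
# as word2vec.py would drive neg_train with its embedding variables and a batch
# produced by skipgram. The names `emb_in`, `emb_out`, `batch_examples`,
# `batch_labels`, `learning_rate`, and `counts` are assumptions about the
# caller's graph, not symbols defined in this module.
#
#   train_op = neg_train(w_in=emb_in, w_out=emb_out,
#                        examples=batch_examples, labels=batch_labels,
#                        lr=learning_rate, vocab_count=counts,
#                        num_negative_samples=25)
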
def skipgram(filename, batch_size, window_size=None, min_count=None,
             subsample=None, name=None):
  r"""Parses a text file and creates a batch of examples.

  Args:
    filename: A `string`. The corpus's text file name.
    batch_size: An `int`. The size of the produced batch.
    window_size: An optional `int`. Defaults to `5`.
      The number of words to predict to the left and right of the target.
    min_count: An optional `int`. Defaults to `5`.
      The minimum number of word occurrences for it to be included in the
      vocabulary.
    subsample: An optional `float`. Defaults to `0.001`.
      Threshold for word occurrence. Words that appear with higher
      frequency will be randomly down-sampled. Set to 0 to disable.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (vocab_word, vocab_freq, words_per_epoch, current_epoch, total_words_processed, examples, labels).
    vocab_word: A `Tensor` of type `string`. A vector of words in the corpus.
    vocab_freq: A `Tensor` of type `int32`. Frequencies of words. Sorted in non-ascending order.
    words_per_epoch: A `Tensor` of type `int64`. Number of words per epoch in the data file.
    current_epoch: A `Tensor` of type `int32`. The current epoch number.
    total_words_processed: A `Tensor` of type `int64`. The total number of words processed so far.
    examples: A `Tensor` of type `int32`. A vector of word ids.
    labels: A `Tensor` of type `int32`. A vector of word ids.
  """
  return _op_def_lib.apply_op("Skipgram", filename=filename,
                              batch_size=batch_size, window_size=window_size,
                              min_count=min_count, subsample=subsample,
                              name=name)


ops.RegisterShape("Skipgram")(None)
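
# Illustrative usage sketch, not part of the generated wrapper: the returned
# `examples` and `labels` tensors yield a fresh batch each time they are
# evaluated, so a caller re-runs them inside its training loop. The corpus
# name "text8" and the explicit session below are assumptions for the example.
#
#   (words, counts, words_per_epoch, epoch, total_processed,
#    examples, labels) = skipgram(filename="text8", batch_size=500,
#                                 window_size=5, min_count=5, subsample=1e-3)
#   with tf.Session() as sess:
#     batch_examples, batch_labels = sess.run([examples, labels])
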
 |  ƒ | S(   N(   R   t   OpListR    t   Merget   _InitOpDefLibraryt   op_list_asciiR   t   register_op_listR   t   OpDefLibraryt   add_op_list(   t   op_listt
   op_def_lib(    (    sj   /tmp/pip-build-UG86a1/tensorflow/tensorflow-0.6.0.data/purelib/tensorflow/models/embedding/gen_word2vec.pyR   I   s    s  op {
  name: "NegTrain"
  input_arg {
    name: "w_in"
    type: DT_FLOAT
    is_ref: true
  }
  input_arg {
    name: "w_out"
    type: DT_FLOAT
    is_ref: true
  }
  input_arg {
    name: "examples"
    type: DT_INT32
  }
  input_arg {
    name: "labels"
    type: DT_INT32
  }
  input_arg {
    name: "lr"
    type: DT_FLOAT
  }
  attr {
    name: "vocab_count"
    type: "list(int)"
  }
  attr {
    name: "num_negative_samples"
    type: "int"
  }
}
op {
  name: "Skipgram"
  output_arg {
    name: "vocab_word"
    type: DT_STRING
  }
  output_arg {
    name: "vocab_freq"
    type: DT_INT32
  }
  output_arg {
    name: "words_per_epoch"
    type: DT_INT64
  }
  output_arg {
    name: "current_epoch"
    type: DT_INT32
  }
  output_arg {
    name: "total_words_processed"
    type: DT_INT64
  }
  output_arg {
    name: "examples"
    type: DT_INT32
  }
  output_arg {
    name: "labels"
    type: DT_INT32
  }
  attr {
    name: "filename"
    type: "string"
  }
  attr {
    name: "batch_size"
    type: "int"
  }
  attr {
    name: "window_size"
    type: "int"
    default_value {
      i: 5
    }
  }
  attr {
    name: "min_count"
    type: "int"
    default_value {
      i: 5
    }
  }
  attr {
    name: "subsample"
    type: "float"
    default_value {
      f: 0.001
    }
  }
}
"""


_op_def_lib = _InitOpDefLibrary()
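
# How the pieces fit together, as a sketch: the ASCII OpDef text above is the
# single description of the two ops. _InitOpDefLibrary() parses it into an
# OpList proto, registers it with the framework, and wraps it in an
# OpDefLibrary so that apply_op("NegTrain", ...) and apply_op("Skipgram", ...)
# can validate arguments and add the corresponding nodes to the default graph.
# Callers normally reach the ops through the module functions; the import path
# below is an assumption based on the build location recorded in this file:
#
#   from tensorflow.models.embedding import gen_word2vec as word2vec
#   outputs = word2vec.skipgram(filename="text8", batch_size=500)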