
Commit dc9616b

Fix typos in string and comments of python and C++ code in models
1 parent 669790a commit dc9616b

File tree

7 files changed, +8 -8 lines changed


tensorflow/models/embedding/word2vec_kernels.cc

Lines changed: 1 addition & 1 deletion
@@ -117,7 +117,7 @@ class SkipgramOp : public OpKernel {
   int32 label_limit_ GUARDED_BY(mu_);
 
   // {example_pos_, label_pos_} is the cursor for the next example.
-  // example_pos_ wrapps around at the end of corpus_. For each
+  // example_pos_ wraps around at the end of corpus_. For each
   // example, we randomly generate [label_pos_, label_limit) for
   // labels.
   void NextExample(int32* example, int32* label) EXCLUSIVE_LOCKS_REQUIRED(mu_) {
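
The comment fixed here describes the skip-gram sampling cursor: example_pos_ walks through corpus_ and wraps around at the end, while each example's labels are drawn from [label_pos_, label_limit_). As a rough Python sketch of that wrap-around idea only (the real kernel is C++ with extra bookkeeping; corpus, window_size, and make_next_example are hypothetical names):

import random

def make_next_example(corpus, window_size):
  """Yield (example, label) pairs, wrapping the cursor at the end of corpus.

  A sketch of the cursor idea, not the SkipgramOp kernel; assumes len(corpus) > 1.
  """
  example_pos = 0
  while True:
    example = corpus[example_pos]
    # Pick a random label position inside the skip window around the cursor.
    lo = max(0, example_pos - window_size)
    hi = min(len(corpus) - 1, example_pos + window_size)
    label_pos = random.randint(lo, hi)
    if label_pos != example_pos:
      yield example, corpus[label_pos]
    # Advance the cursor; wrap around at the end of the corpus.
    example_pos = (example_pos + 1) % len(corpus)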

tensorflow/models/embedding/word2vec_ops.cc

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ w_out: output word embedding.
 examples: A vector of word ids.
 labels: A vector of word ids.
 vocab_count: Count of words in the vocabulary.
-num_negative_samples: Number of negative samples per exaple.
+num_negative_samples: Number of negative samples per example.
 )doc");
 
 } // end namespace tensorflow
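
The num_negative_samples attribute documented here controls how many noise words are drawn per training example for the sampled word2vec loss. As a hedged illustration of the idea only (the real op samples words in proportion to their frequency, not uniformly, and the helper below is hypothetical):

import random

def sample_negatives(vocab_size, true_label, num_negative_samples):
  """Draw noise word ids that differ from the true label (uniform for simplicity)."""
  negatives = []
  while len(negatives) < num_negative_samples:
    candidate = random.randrange(vocab_size)
    if candidate != true_label:
      negatives.append(candidate)
  return negatives

# Example: 5 negative word ids for label 42 from a 10,000-word vocabulary.
print(sample_negatives(10000, 42, 5))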

tensorflow/models/image/alexnet/alexnet_benchmark.py

Lines changed: 1 addition & 1 deletion
@@ -164,7 +164,7 @@ def time_tensorflow_run(session, target, info_string):
 
   Args:
     session: the TensorFlow session to run the computation under.
-    target: the targe Tensor that is passed to the session's run() function.
+    target: the target Tensor that is passed to the session's run() function.
     info_string: a string summarizing this run, to be printed with the stats.
 
   Returns:
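
The docstring being corrected belongs to a helper that runs the target Tensor repeatedly through session.run() and prints timing statistics tagged with info_string. A minimal sketch of that pattern, assuming a TF 1.x-style session object; the step counts and the time_run name are illustrative, not the benchmark's actual constants:

import time

def time_run(session, target, info_string, num_steps=10, num_warmup=2):
  """Run `target` repeatedly via session.run() and print mean/stddev step time."""
  durations = []
  for i in range(num_warmup + num_steps):
    start = time.time()
    session.run(target)
    if i >= num_warmup:  # Discard warm-up iterations from the statistics.
      durations.append(time.time() - start)
  mean = sum(durations) / len(durations)
  var = sum((d - mean) ** 2 for d in durations) / len(durations)
  print('%s: %d steps, %.3f +/- %.3f sec / step' %
        (info_string, num_steps, mean, var ** 0.5))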

tensorflow/models/image/cifar10/cifar10.py

Lines changed: 2 additions & 2 deletions
@@ -301,7 +301,7 @@ def _add_loss_summaries(total_loss):
   losses = tf.get_collection('losses')
   loss_averages_op = loss_averages.apply(losses + [total_loss])
 
-  # Attach a scalar summmary to all individual losses and the total loss; do the
+  # Attach a scalar summary to all individual losses and the total loss; do the
   # same for the averaged version of the losses.
   for l in losses + [total_loss]:
     # Name each loss as '(raw)' and name the moving average version of the loss
@@ -384,5 +384,5 @@ def _progress(count, block_size, total_size):
                                              reporthook=_progress)
     print()
     statinfo = os.stat(filepath)
-    print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
+    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
   tarfile.open(filepath, 'r:gz').extractall(dest_directory)
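
The second hunk sits in the dataset download helper, which passes a _progress reporthook to urllib and reports the downloaded size via os.stat. A rough sketch of that reporthook pattern, assuming Python 3's urllib.request; the URL, file names, and download_with_progress helper are placeholders rather than the CIFAR-10 ones:

import os
import sys
import urllib.request

def download_with_progress(url, filepath):
  """Download `url` to `filepath`, printing percentage progress as it goes."""
  def _progress(count, block_size, total_size):
    percent = 100.0 * count * block_size / total_size
    sys.stdout.write('\r>> Downloading %s %.1f%%' %
                     (os.path.basename(filepath), percent))
    sys.stdout.flush()

  filepath, _ = urllib.request.urlretrieve(url, filepath, reporthook=_progress)
  print()
  statinfo = os.stat(filepath)
  print('Successfully downloaded', os.path.basename(filepath),
        statinfo.st_size, 'bytes.')
  return filepath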

tensorflow/models/image/cifar10/cifar10_multi_gpu_train.py

Lines changed: 1 addition & 1 deletion
@@ -95,7 +95,7 @@ def tower_loss(scope):
   loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
   loss_averages_op = loss_averages.apply(losses + [total_loss])
 
-  # Attach a scalar summmary to all individual losses and the total loss; do the
+  # Attach a scalar summary to all individual losses and the total loss; do the
   # same for the averaged version of the losses.
   for l in losses + [total_loss]:
     # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
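
tower_loss() applies the same moving-average idiom as cifar10.py, and additionally strips the per-GPU 'tower_N/' prefix from each loss name so that all towers report under one tag. Purely as an illustration of that name-stripping step (TOWER_NAME and the helper below are hypothetical stand-ins; the real constant lives elsewhere in the model code):

import re

TOWER_NAME = 'tower'  # Assumed prefix for per-GPU name scopes.

def strip_tower_prefix(op_name):
  """Remove a leading 'tower_<digit>/' scope from an op name."""
  return re.sub('%s_[0-9]*/' % TOWER_NAME, '', op_name)

# Losses computed on GPU 3 then report under the same tag as GPU 0.
print(strip_tower_prefix('tower_3/cross_entropy'))  # -> 'cross_entropy'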

tensorflow/models/rnn/translate/data_utils.py

Lines changed: 1 addition & 1 deletion
@@ -255,7 +255,7 @@ def prepare_wmt_data(data_dir, en_vocabulary_size, fr_vocabulary_size):
       (3) path to the token-ids for English development data-set,
       (4) path to the token-ids for French development data-set,
       (5) path to the English vocabulary file,
-      (6) path to the French vocabluary file.
+      (6) path to the French vocabulary file.
   """
   # Get wmt data to the specified directory.
   train_path = get_wmt_enfr_train_set(data_dir)
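
The docstring lists the 6-tuple of paths returned by prepare_wmt_data: training and development token-id files for both languages, then the two vocabulary files. A hedged usage sketch of unpacking that tuple (the import path, directory, and vocabulary sizes are placeholder assumptions, and running it would actually fetch the WMT data):

# Assumes the translate model package is importable and the WMT data is reachable.
from tensorflow.models.rnn.translate import data_utils

(en_train, fr_train,
 en_dev, fr_dev,
 en_vocab_path, fr_vocab_path) = data_utils.prepare_wmt_data(
     '/tmp/wmt_data', en_vocabulary_size=40000, fr_vocabulary_size=40000)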

tensorflow/models/rnn/translate/seq2seq_model.py

Lines changed: 1 addition & 1 deletion
@@ -184,7 +184,7 @@ def step(self, session, encoder_inputs, decoder_inputs, target_weights,
       average perplexity, and the outputs.
 
     Raises:
-      ValueError: if length of enconder_inputs, decoder_inputs, or
+      ValueError: if length of encoder_inputs, decoder_inputs, or
         target_weights disagrees with bucket size for the specified bucket_id.
     """
     # Check if the sizes match.
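
The corrected docstring documents the ValueError that step() raises when the encoder_inputs, decoder_inputs, or target_weights lengths do not match the sizes of the chosen bucket. A stripped-down sketch of that length check, with the bucket list, helper name, and messages simplified for illustration:

def check_bucket_sizes(encoder_inputs, decoder_inputs, target_weights,
                       buckets, bucket_id):
  """Raise ValueError when input lengths disagree with the chosen bucket."""
  encoder_size, decoder_size = buckets[bucket_id]
  if len(encoder_inputs) != encoder_size:
    raise ValueError('Encoder length %d does not match bucket size %d.' %
                     (len(encoder_inputs), encoder_size))
  if len(decoder_inputs) != decoder_size:
    raise ValueError('Decoder length %d does not match bucket size %d.' %
                     (len(decoder_inputs), decoder_size))
  if len(target_weights) != decoder_size:
    raise ValueError('Weights length %d does not match bucket size %d.' %
                     (len(target_weights), decoder_size))

# Example: bucket 0 expects 5 encoder and 10 decoder steps; these inputs pass.
check_bucket_sizes([0] * 5, [0] * 10, [1.0] * 10, [(5, 10), (10, 15)], bucket_id=0)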
