User:Alexander Roidl/words2vec

From XPUB & Lens-Based wiki
Part of algologs workshop ~
Trained on my reader.


==Graph==
[[File:Screen Shot 2018-03-24 at 11.00.52.png|frameless]]
==Code==
<source lang="python">
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import codecs
import numpy as np
from six.moves import urllib
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
from nltk.tokenize import word_tokenize # Algolit
# Custom Algolit addition
def export(fn, data):
  outputdir = 'output/'
  if not os.path.exists(outputdir):
    os.makedirs(outputdir)
  with open(outputdir+fn,'w+') as output:
    output.write(str(data))
  print('*exported '+fn+'*')
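# Example use (hypothetical file name and data): export('demo.txt', [1, 2, 3])
# writes the string "[1, 2, 3]" to output/demo.txt.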
# Step 1: Download the data.
# url = 'http://mattmahoney.net/dc/'
# def maybe_download(filename, expected_bytes):
#  """Download a file if not present, and make sure it's the right size."""
#  if not os.path.exists(filename):
#    filename, _ = urllib.request.urlretrieve(url + filename, filename)
#  statinfo = os.stat(filename)
#  if statinfo.st_size == expected_bytes:
#    print('Found and verified', filename)
#  else:
#    print(statinfo.st_size)
#    raise Exception(
#        'Failed to verify ' + filename + '. Can you get to it with a browser?')
#  return filename
# filename = maybe_download('text8.zip', 31344016)
# # Read the data into a list of strings.
# def read_data(filename):
#  """Extract the first file enclosed in a zip file as a list of words"""
#  with zipfile.ZipFile(filename) as f:
#    data = tf.compat.as_str(f.read(f.namelist()[0])).split()
#  return data
# words = read_data(filename)
# print('Data size', len(words))
# CUSTOM Algolit addition
trainingset = 'input/text_stripped.txt'
def read_input_text(trainingset):
  words = []
  with open(trainingset, 'r') as source:
    lines = source.readlines()
  for line in lines:
    #line = line.decode('utf8')
    wordlist = word_tokenize(line)
    for word in wordlist:
      words.append(word)
  return words
words = read_input_text(trainingset)
# Algolit logging
export('wordlist-'+str(len(words))+'.txt', words)
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 5000
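# Only the 4,999 most frequent tokens keep an ID of their own; every other
# token is mapped to 'UNK' (and logged as disregarded by the Algolit addition
# below).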
def build_dataset(words):
  count = [['UNK', -1]]
  count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
  dictionary = dict()
  for word, _ in count:
    dictionary[word] = len(dictionary)
  data = list()
  # Custom Algolit addition (logging disregarded words)
  disregarded = list()
  unk_count = 0
  for word in words:
    if word in dictionary:
      index = dictionary[word]
    else:
      index = 0  # dictionary['UNK']
      unk_count += 1
      # Custom Algolit addition
      disregarded.append(word)
    data.append(index)
  count[0][1] = unk_count
  reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
  return data, count, dictionary, reverse_dictionary, disregarded
data, count, dictionary, reverse_dictionary, disregarded = build_dataset(words)
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
# Algolit logging
export('dictionary-'+str(len(reverse_dictionary))+'.txt', reverse_dictionary)
export('counted.txt', collections.Counter(words))
export('disregarded-'+str(len(disregarded))+'.txt', disregarded)
export('data-'+str(len(data))+'.txt', data)
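# At this point data holds the corpus re-encoded as integer IDs, count holds
# the vocabulary frequencies (count[0] tracks how many tokens fell into 'UNK'),
# and reverse_dictionary maps IDs back to words (ID 0 -> 'UNK').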
# Custom Algolit addition: translate the data object back to words
reversed_input = []
for index in data:
  word = reverse_dictionary[index]
  reversed_input.append(word)
reversed_input_fulltext = ' '.join(reversed_input).encode('utf-8')
export('reversed-input-'+str(len(reversed_input))+'.txt', reversed_input_fulltext)
del words  # Hint to reduce memory.
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
  global data_index
  assert batch_size % num_skips == 0
  assert num_skips <= 2 * skip_window
  batch = np.ndarray(shape=(batch_size), dtype=np.int32)
  labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
  span = 2 * skip_window + 1  # [ skip_window target skip_window ]
  buffer = collections.deque(maxlen=span)
  for _ in range(span):
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  for i in range(batch_size // num_skips):
    target = skip_window  # target label at the center of the buffer
    targets_to_avoid = [skip_window]
    for j in range(num_skips):
      while target in targets_to_avoid:
        target = random.randint(0, span - 1)
      targets_to_avoid.append(target)
      batch[i * num_skips + j] = buffer[skip_window]
      labels[i * num_skips + j, 0] = buffer[target]
    buffer.append(data[data_index])
    data_index = (data_index + 1) % len(data)
  return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
# for i in range(8):
# print(batch[i], reverse_dictionary[batch[i]],
# '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
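# Illustrative example (hypothetical word IDs): with skip_window=1 and
# num_skips=2, each centre word is paired with both of its neighbours, so for
# data = [5, 12, 6, 195, ...] the first generated pairs could be
# (12 -> 5), (12 -> 6), (6 -> 12), (6 -> 195), ...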
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128  # Dimension of the embedding vector.
skip_window = 1      # How many words to consider left and right.
num_skips = 2        # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16    # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64    # Number of negative examples to sample.
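# Rough intuition: instead of evaluating a softmax over all 5,000 vocabulary
# entries, the NCE loss below contrasts each true (input, context) pair with
# num_sampled randomly drawn negative words, which keeps each step cheap.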
graph = tf.Graph()
with graph.as_default():
  # Input data.
  train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
  train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
  valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
  # Ops and variables pinned to the CPU because of missing GPU implementation
  with tf.device('/cpu:0'):
    # Look up embeddings for inputs.
    embeddings = tf.Variable(
        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    embed = tf.nn.embedding_lookup(embeddings, train_inputs)
    # Algolit logging
    export('big-random-matrix.txt', embeddings)
    # Construct the variables for the NCE loss
    nce_weights = tf.Variable(
        tf.truncated_normal([vocabulary_size, embedding_size],
                            stddev=1.0 / math.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
  # Compute the average NCE loss for the batch.
  # tf.nce_loss automatically draws a new sample of the negative labels each
  # time we evaluate the loss.
  loss = tf.reduce_mean(
      tf.nn.nce_loss(weights=nce_weights,
                     biases=nce_biases,
                     labels=train_labels,
                     inputs=embed,
                     num_sampled=num_sampled,
                     num_classes=vocabulary_size))
  # Construct the SGD optimizer using a learning rate of 1.0.
  optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
  # Compute the cosine similarity between minibatch examples and all embeddings.
  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
  normalized_embeddings = embeddings / norm
  valid_embeddings = tf.nn.embedding_lookup(
      normalized_embeddings, valid_dataset)
  similarity = tf.matmul(
      valid_embeddings, normalized_embeddings, transpose_b=True)
  # Add variable initializer.
  init = tf.global_variables_initializer()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
  # We must initialize all variables before we use them.
  init.run()
  print("Initialized")
  average_loss = 0
  for step in xrange(num_steps):
    batch_inputs, batch_labels = generate_batch(
        batch_size, num_skips, skip_window)
    feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
    # We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run()
    _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
    average_loss += loss_val
    if step % 2000 == 0:
      if step > 0:
        average_loss /= 2000
      # The average loss is an estimate of the loss over the last 2000 batches.
      print("Average loss at step ", step, ": ", average_loss)
      average_loss = 0
    # Note that this is expensive (~20% slowdown if computed every 500 steps)
    if step % 10000 == 0:
      sim = similarity.eval()
      # CUSTOM Algolit addition
      with codecs.open('output/logfile.txt', 'a+', 'utf-8') as destination:
        destination.write('step: '+str(step)+'\n')
        destination.write('loss value: '+str(average_loss)+'\n')
        for i in xrange(valid_size):
          valid_word = reverse_dictionary[valid_examples[i]]
          top_k = 8  # number of nearest neighbors
          nearest = (-sim[i, :]).argsort()[1:top_k + 1]
          log_str = "Nearest to %s:" % valid_word
          for k in xrange(top_k):
            close_word = reverse_dictionary[nearest[k]]
            log_str = "%s %s," % (log_str, close_word)
          # CUSTOM Algolit addition
          destination.write(log_str+'\n')
          print(log_str)
        # CUSTOM Algolit addition
        destination.write('\n\n')
  final_embeddings = normalized_embeddings.eval()
# Algolit logging
export('final_embeddings-matrix.txt', final_embeddings)
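# final_embeddings is a plain numpy array of shape (vocabulary_size,
# embedding_size), i.e. 5000 x 128, with every row scaled to unit length.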
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='output/graph.eps', format='eps'):
  assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
  plt.figure(figsize=(18, 18))  # in inches
  ax = plt.axes(frameon=False)
  ax.get_xaxis().tick_bottom()
  ax.axes.get_xaxis().set_visible(False)
  ax.axes.get_yaxis().set_visible(False)
  for i, label in enumerate(labels):
    x, y = low_dim_embs[i, :]
    plt.scatter(x, y)
    plt.annotate(label,
                 xy=(x, y),
                 xytext=(5, 2),
                 textcoords='offset points',
                 ha='right',
                 va='bottom')
  plt.savefig(filename)
try:
  from sklearn.manifold import TSNE
  import matplotlib.pyplot as plt
  tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
  plot_only = 500
  low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
  labels = [reverse_dictionary[i] for i in xrange(plot_only)]
  plot_with_labels(low_dim_embs, labels)
except ImportError:
  print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
</source>
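
Once the script has run, <code>final_embeddings</code>, <code>dictionary</code> and <code>reverse_dictionary</code> are still available in the same Python session. A minimal sketch of how the trained matrix could be queried for nearest neighbours (the function name and the query word are hypothetical; only the variable names come from the code above):

<source lang="python">
import numpy as np

def nearest_words(query, dictionary, reverse_dictionary, final_embeddings, top_k=8):
  # Rows of final_embeddings are already L2-normalised, so a dot product with
  # the query row gives the cosine similarity to every word in the vocabulary.
  sims = np.dot(final_embeddings, final_embeddings[dictionary[query]])
  # Sort by descending similarity and skip the first hit: the most similar row
  # is the query word itself.
  nearest = (-sims).argsort()[1:top_k + 1]
  return [reverse_dictionary[i] for i in nearest]

# Hypothetical query -- any word that made it into the 5000-word vocabulary works.
print(nearest_words('reader', dictionary, reverse_dictionary, final_embeddings))
</source>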
