@is1394, forked from bellbind/curve_example.py, created September 1, 2017 02:56
[python3][keras][tensorflow] number recognition with mnist dataset
# simple LSTM model
from keras.models import Sequential
from keras.layers import Activation, Dense, LSTM
model = Sequential()
model.add(LSTM(128, input_shape=(32, 1))) # 32 time steps, 1 value per step
model.add(Dense(1))
model.add(Activation("linear"))
model.compile(optimizer="rmsprop", loss="mse")
# several sine curves (different frequencies) as training data
import numpy as np
for cycle in range(4, 36):
    data = np.sin(np.arange(360) * (np.pi / cycle))
    # chunk into 32-step windows, each paired with the single value that follows
    x_train = np.reshape(
        [data[i:i+32] for i in range(len(data) - 32 - 1)], (-1, 32, 1))
    y_train = np.reshape(
        [data[i+32] for i in range(len(data) - 32 - 1)], (-1, 1))
    model.fit(x_train, y_train, verbose=False)
    pass
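# (optional sanity check, not in the original gist)
# for the last cycle the windows should be x_train (327, 32, 1), y_train (327, 1)
print("x_train:", x_train.shape, "y_train:", y_train.shape)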
# make initial curve data
x = np.sin(np.arange(32) * np.pi / 18 + 3 * np.pi / 4)
# generate subsequent curve
for i in range(300):
    y = model.predict(x[-32:].reshape(1, 32, 1))
    x = np.append(x, y)
    pass
# plot curve
import matplotlib.pyplot as plt
fig = plt.figure()
sub = fig.add_subplot(1, 1, 1)
sub.plot(x[32:])
fig.show()
input("quit to enter> ")
# [define network model]
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
model = Sequential()
# count of convolution filters=32, rows of a filter=3, cols of a filter=3,
# note: input_shape format differs by backend:
# tensorflow: (rows, cols, channels), theano: (channels, rows, cols)
model.add(Convolution2D(32, 3, 3, input_shape=(28, 28, 1)))
model.add(Activation("relu"))
model.add(Convolution2D(64, 3, 3))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation("softmax"))
model.compile(
    optimizer="sgd", loss="categorical_crossentropy", metrics=["accuracy"])
print(model.to_yaml()) # show model
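# (optional, assuming Keras 1.x) confirm which input ordering the backend expects;
# "tf" means (rows, cols, channels), "th" means (channels, rows, cols)
from keras import backend as K
print(K.image_dim_ordering())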
# [prepare dataset]
from keras.datasets import mnist
from keras.utils import np_utils
# mnist dataset: grayscale digit images and their labels
# x: 0-255 of 28x28, y: 0-9, train: 60000 x and y, test: 10000 x and y
(x_train_raw, y_train_raw), (x_test_raw, y_test_raw) = mnist.load_data()
# convert (samples, rows, cols) to (samples, rows, cols, channels)
x_train = x_train_raw.reshape(*x_train_raw.shape, 1).astype("float32") / 255
x_test = x_test_raw.reshape(*x_test_raw.shape, 1).astype("float32") / 255
# convert 0-9 labels to 10-element one-hot arrays: e.g. 2 => [0,0,1,0,0,0,0,0,0,0]
y_train = np_utils.to_categorical(y_train_raw, 10)
y_test = np_utils.to_categorical(y_test_raw, 10)
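# (optional sanity check, not in the original gist)
# expected: x_train (60000, 28, 28, 1), y_train (60000, 10),
#           x_test (10000, 28, 28, 1), y_test (10000, 10)
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)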
# [train by train dataset] this can take around an hour on CPU
history = model.fit(x_train, y_train, validation_data=(x_test, y_test))
# [check by test dataset]
score = model.evaluate(x_test, y_test)
print("[score] {}, accuracy: {}".format(*score))
# [predict by single data]
result = model.predict(x_test[0:1]) # a batch containing a single sample
print("[result] {} may be {}".format(result[0], result[0].argmax()))
print("[answer] {} as {}".format(y_test[0], y_test_raw[0]))
#############################################################################
# [setup keras]
# $ python3 -m venv keras
# $ ./keras/bin/pip install six
# $ ./keras/bin/pip install https://storage.googleapis.com/tensorflow/mac/cpu/tensorflow-0.12.1-py3-none-any.whl
# $ ./keras/bin/pip install keras
# $ ./keras/bin/pip install h5py # for save and load model weight
#
# $ ./keras/bin/python3 mnist_example.py
from keras.models import Sequential
from keras.layers import Activation, Dense
# xor model
model = Sequential()
model.add(Dense(2, input_dim=2, activation="tanh"))
model.add(Dense(1, activation="linear"))
model.compile(optimizer="sgd", loss="mse")
# training data
import numpy as np
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
y = np.logical_xor(x[:, 0], x[:, 1]).astype(np.float32).reshape((-1, 1))
# train
model.fit(x, y, nb_epoch=15000)
print("\n[weight and bias]")
for layer in model.layers:
    print(layer.get_weights())
    pass
# predict
pred_x = x[0:4]
pred_y = model.predict(pred_x)
print("\n[predicate]")
print([pred_x, pred_y]) # => [0, 1, 1, 0] ?
from keras.models import Sequential
from keras.layers import Activation, Dense
# xor model
model = Sequential()
model.add(Dense(4, input_dim=2, activation="relu"))
model.add(Dense(2, activation="softmax"))
model.compile(optimizer="sgd", loss="categorical_crossentropy")
# training data
import numpy as np
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
y1 = np.logical_xor(x[:, 0], x[:, 1])
y0 = np.logical_not(y1)
y = np.array([y0, y1]).T.astype(np.float32)
# train
model.fit(x, y, nb_epoch=15000)
print("\n[weight and bias]")
for layer in model.layers:
    print(layer.get_weights())
    pass
# predict
pred_x = x[0:4]
pred_y = model.predict(pred_x)
print("\n[predicate]")
print([pred_x, pred_y]) # => [[1, 0], [0, 1], [0, 1], [1, 0]]?