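The snippet calls two helpers, prediction and lossFunction, that are not shown in the gist. A minimal sketch of what they might look like, assuming a sigmoid activation and mean squared error loss (inferred from the comments below; the exact definitions in the original may differ):

import numpy as np

def sigmoid(x):
    # Logistic activation, squashing scores to (0, 1). (Assumed helper.)
    return 1 / (1 + np.exp(-x))

def prediction(X, W):
    # Predicted probability of class 1 for a single record or a whole batch.
    # (Assumed to be sigmoid of the dot product, based on how it is called.)
    return sigmoid(np.dot(X, W))

def lossFunction(outputs, y):
    # Mean squared error between predictions and labels.
    # (Assumed, since the gist prints "mean square error".)
    return np.mean((outputs - y) ** 2)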
def updatePerceptron(X, y, W, learn_rate=0.005):
    del_w = np.zeros(W.shape)
    n_records = len(X)
    for i in range(n_records):
        pred = prediction(X[i], W)
        # The error for the probability of class 1 and of class 2 is the same,
        # so we can calculate it from either one.
        error = y[i] - pred
        # Accumulate the gradient descent step: the error times the inputs.
        del_w += error * X[i]
    # Apply the accumulated update once per pass over the data.
    W += learn_rate * del_w
    return W
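Note that del_w accumulates the gradient over every record before the weights are touched, so each call to updatePerceptron performs one batch gradient descent step; trainAlgorithm below repeats that step once per epoch.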
def trainAlgorithm(X, y, learn_rate=0.005, num_epochs=1000):
    n_records, n_features = X.shape
    last_loss = None
    # Initialize weights with a small random spread, scaled by the number of features.
    weights = np.random.normal(scale=1 / n_features ** 0.5, size=n_features)
    for i in range(num_epochs):
        # In each epoch, apply the perceptron step once over the full data set.
        weights = updatePerceptron(X, y, weights, learn_rate)
        # Print the mean squared error on the training set ten times over the run.
        if i % (num_epochs // 10) == 0:
            outputs = prediction(X, weights)
            loss = lossFunction(outputs, y)
            if last_loss and last_loss < loss:
                print("Train loss: ", loss, " WARNING - Loss Increasing")
            else:
                print("Train loss: ", loss)
            last_loss = loss
    return weights
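A minimal usage sketch on synthetic data (the two-cluster data set here is made up purely for illustration; any (n_records, n_features) array with 0/1 labels works):

if __name__ == "__main__":
    np.random.seed(42)
    # Two Gaussian clusters in 2D, labeled 0 and 1.
    X = np.vstack([np.random.normal(-1, 1, size=(50, 2)),
                   np.random.normal(1, 1, size=(50, 2))])
    y = np.concatenate([np.zeros(50), np.ones(50)])
    weights = trainAlgorithm(X, y, learn_rate=0.005, num_epochs=1000)
    print("Learned weights:", weights)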