import numpy as np
from random import choice
from pylab import plot, ylim, show

# Heaviside step: outputs 1 when the weighted sum is non-negative, else 0.
unit_step = lambda x: 0 if x < 0 else 1

# OR-gate training set; the constant third input (1) plays the role of a bias.
training_data = [
    (np.array([0, 0, 1]), 0),
    (np.array([0, 1, 1]), 1),
    (np.array([1, 0, 1]), 1),
    (np.array([1, 1, 1]), 1),
]

w = np.random.rand(3)  # random initial weights in [0, 1), one per input (incl. the bias input)
errors = []
eta = 0.2  # learning rate
n = 100    # number of training iterations

for i in range(n):
    # choice() is not an iterator: each call returns one random (input, label) pair.
    x, expected = choice(training_data)
    result = np.dot(w, x)
    error = expected - unit_step(result)
    errors.append(error)
    w += eta * error * x  # perceptron rule: nudge w toward the true label

for x, _ in training_data:  # the _ discards the label; only the input is needed here
    result = np.dot(x, w)
    # Prints the two real inputs, the raw weighted sum, and the thresholded output.
    print("{}: {} -> {}".format(x[:2], result, unit_step(result)))

ylim([-1, 1])
plot(errors)
show()  # opens the window and renders the error-per-iteration plot

#
# import numpy as np
#
# #In-class assignment 9-5-17
# #Create a vector w such that the unbiased classification
# #function will separate the following data points:
# #Perform perceptron learning on the following dataset. The
# #initial weight vector is initialized to zero.
#
# #ANSWER: classify() thresholds the dot product w @ x plus the bias b,
# #which is a single number, and returns the predicted label +1 or -1.
# def classify(w, b, x):
#     if w @ x + b >= 0:
#         return 1
#     else:
#         return -1
#
# #update() stores classify()'s prediction and corrects w and b on a mistake.
# def update(w, b, x, y):
#     y_p = classify(w, b, x)
#     if y_p != y:
#         #Perceptron rule: move toward the TRUE label y, not the prediction y_p.
#         w += y * x
#         #The bias updates like a weight whose input is always 1. (Everything
#         #past this point was not done in class.)
#         b += y
#     #A correct prediction leaves w and b unchanged, so there is no else branch.
#     #Return w and b as well: the bias is a plain number, so b += y alone
#     #would never reach the caller.
#     return w, b, y_p
#
# #(<0.1, 0.9>, -1)
# #(<0.9, 0.2>, +1)
# #(<0.3, 0.6>, -1)
# #(<0.7, 0.3>, +1)
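
# ----------------------------------------------------------------------
# A minimal sketch of the assignment above, not part of the class notes:
# it trains on the four labelled points with the weight vector and bias
# initialized to zero, as the prompt asks. The helper names classify_pt/
# update_pt, the epoch cap of 10, and the convergence check are my own
# assumptions, not the instructor's.

def classify_pt(w, b, x):
    # Sign of the biased dot product: +1 on or above the boundary, -1 below.
    return 1 if w @ x + b >= 0 else -1

def update_pt(w, b, x, y):
    # Perceptron rule: on a mistake, move w and b toward the true label y.
    if classify_pt(w, b, x) != y:
        w = w + y * x
        b = b + y
    return w, b

data = [
    (np.array([0.1, 0.9]), -1),
    (np.array([0.9, 0.2]), +1),
    (np.array([0.3, 0.6]), -1),
    (np.array([0.7, 0.3]), +1),
]

w_pt = np.zeros(2)
b_pt = 0.0
for epoch in range(10):  # assumed cap; this data separates within a few epochs
    before = (w_pt.copy(), b_pt)
    for x, y in data:
        w_pt, b_pt = update_pt(w_pt, b_pt, x, y)
    if np.array_equal(before[0], w_pt) and before[1] == b_pt:
        break  # a full pass with no updates: every point is classified correctly

print("learned w = {}, b = {}".format(w_pt, b_pt))
for x, y in data:
    print("{} -> {} (true {})".format(x, classify_pt(w_pt, b_pt, x), y))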