"""Train a tiny 2-3-1 feedforward neural network (sigmoid activations)
with plain batch gradient descent / backpropagation on three samples."""
import numpy as np

# Training data: 2 input features per sample, 1 target value per sample.
X = np.array(([2, 9], [1, 5], [3, 6]), dtype='float')
Y = np.array(([92], [86], [89]), dtype='float')

# Normalize: each feature column by its maximum, targets into [0, 1].
X = X / np.amax(X, axis=0)
Y = Y / 100

# Hyperparameters and layer sizes.
epochs = 1000
learning_rate = 0.6
inputLayers = 2    # input features
hiddenLayers = 3   # hidden units
outputLayers = 1   # output units

# Uniform [0, 1) random initialization of weights and biases.
wh = np.random.uniform(size=(inputLayers, hiddenLayers))
bh = np.random.uniform(size=(1, hiddenLayers))
w0 = np.random.uniform(size=(hiddenLayers, outputLayers))
b0 = np.random.uniform(size=(1, outputLayers))


def sigmoid(z):
    """Logistic sigmoid activation, elementwise."""
    return 1 / (1 + np.exp(-z))


def derivative(x):
    """Sigmoid derivative expressed in terms of its output x = sigmoid(z)."""
    return x * (1 - x)


for i in range(epochs):
    # Forward propagation: input -> hidden -> output.
    z_h = np.dot(X, wh) + bh
    sigmoid_h = sigmoid(z_h)
    z_0 = np.dot(sigmoid_h, w0) + b0
    output = sigmoid(z_0)

    # Backward propagation: error deltas at the output and hidden layers
    # (gradient of squared error w.r.t. each layer's pre-activation).
    deltaK = (Y - output) * derivative(output)
    deltaH = deltaK.dot(w0.T) * derivative(sigmoid_h)

    # Parameter updates. FIX: the original never updated the biases
    # bh / b0, so half of the model's parameters were frozen at their
    # random initial values; the bias gradient is the delta summed
    # over the batch axis.
    w0 = w0 + learning_rate * sigmoid_h.T.dot(deltaK)
    b0 = b0 + learning_rate * deltaK.sum(axis=0, keepdims=True)
    wh = wh + learning_rate * X.T.dot(deltaH)
    bh = bh + learning_rate * deltaH.sum(axis=0, keepdims=True)

print(f"Input:\n {X}")
print(f"Actual Output:\n {Y} ")
print(f"Predicted Output:\n {output}")

# SAMPLE OUTPUT (one run; predicted values vary with the random init):
# Input:
# [[0.66666667 1.        ]
#  [0.33333333 0.55555556]
#  [1.         0.66666667]]
# Actual Output:
# [[0.92]
#  [0.86]
#  [0.89]]
# Predicted Output:
# [[0.89561426]
#  [0.87785989]
#  [0.89594741]]