From c1b9e20bb9799201564e44d685ed1afff75e3a5d Mon Sep 17 00:00:00 2001
From: kushagrapandey31111 <160846414+kushagrapandey31111@users.noreply.github.com>
Date: Thu, 22 Feb 2024 16:10:54 +0530
Subject: [PATCH 01/14] Update xor.py

---
 xor.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/xor.py b/xor.py
index 61eb149..11dc273 100644
--- a/xor.py
+++ b/xor.py
@@ -3,7 +3,7 @@
 # XOR Gate #

 def sig(x):
-    return 1 / (1 + np.exp(x))
+    return 1 / (1 + np.exp(-x))

 def sigDeriv(x):
     return x * (1 - x)

From 91caf9bec28108900ae7345953cfa9dd0543b7bb Mon Sep 17 00:00:00 2001
From: kushagrapandey31111 <160846414+kushagrapandey31111@users.noreply.github.com>
Date: Thu, 22 Feb 2024 16:11:14 +0530
Subject: [PATCH 02/14] Update xor.py

---
 xor.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/xor.py b/xor.py
index 11dc273..fd321cd 100644
--- a/xor.py
+++ b/xor.py
@@ -21,7 +21,8 @@ def sigDeriv(x):
 outputBias = np.zeros((1, 1), dtype = float)

 epochs = 50000
-lRate = 1
+lRate = 0.1
+

 for _ in range(epochs):

From 14b0d518b2e68675057fba174a5120ec8d336828 Mon Sep 17 00:00:00 2001
From: kushagrapandey31111 <160846414+kushagrapandey31111@users.noreply.github.com>
Date: Thu, 22 Feb 2024 16:12:25 +0530
Subject: [PATCH 03/14] Update xor.py

---
 xor.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/xor.py b/xor.py
index fd321cd..b558487 100644
--- a/xor.py
+++ b/xor.py
@@ -38,7 +38,7 @@ def sigDeriv(x):

     # back prop
     error = target
-    dPredictedOutput = error
+    dPredictedOutput = error * sigDeriv(predictedOutput)

     errorHiddenLayer = dPredictedOutput.dot(outputWeights.T)
     dHiddenLayer = errorHiddenLayer * sigDeriv(hiddenLayerOutput)

From e3d04c2f495c7274dc3edeba361ebd2981810c50 Mon Sep 17 00:00:00 2001
From: kushagrapandey31111 <160846414+kushagrapandey31111@users.noreply.github.com>
Date: Thu, 22 Feb 2024 16:13:14 +0530
Subject: [PATCH 04/14] Fixes Issue #3 & Issue #4

---
 xor.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/xor.py b/xor.py
index b558487..6ae7175 100644
--- a/xor.py
+++ b/xor.py
@@ -21,8 +21,7 @@ def sigDeriv(x):
 outputBias = np.zeros((1, 1), dtype = float)

 epochs = 50000
-lRate = 0.1
-
+lRate = 1

 for _ in range(epochs):
@@ -37,17 +36,18 @@ def sigDeriv(x):
     predictedOutput = sig(outputLayerActivation)

     # back prop
-    error = target
+    error = target - predictedOutput
     dPredictedOutput = error * sigDeriv(predictedOutput)

     errorHiddenLayer = dPredictedOutput.dot(outputWeights.T)
     dHiddenLayer = errorHiddenLayer * sigDeriv(hiddenLayerOutput)

+
     # updating weights, bias
     outputWeights += hiddenLayerOutput.T.dot(dPredictedOutput) * lRate
-    outputBias += np.sum(dPredictedOutput, axis = 0, keepdims=True)
-    hiddenWeights += input.T.dot(dHiddenLayer)
+    outputBias += np.sum(dPredictedOutput, axis = 0, keepdims=True) * lRate
+    hiddenWeights += input.T.dot(dHiddenLayer) * lRate
     hiddenBias += np.sum(dHiddenLayer, axis = 0, keepdims=True) * lRate

From d8865b8e99f3408662ffede209ce889db8b9f535 Mon Sep 17 00:00:00 2001
From: kushagrapandey31111 <160846414+kushagrapandey31111@users.noreply.github.com>
Date: Thu, 22 Feb 2024 16:14:58 +0530
Subject: [PATCH 05/14] fixes #3 & Issue #4

---
 xor.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/xor.py b/xor.py
index 6ae7175..9053973 100644
--- a/xor.py
+++ b/xor.py
@@ -21,7 +21,7 @@ def sigDeriv(x):
 outputBias = np.zeros((1, 1), dtype = float)

 epochs = 50000
-lRate = 1
+lRate = 0.1

 for _ in range(epochs):
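Taken together, patches 01-05 leave xor.py computing the standard logistic sigmoid, with a derivative written in terms of the sigmoid's output rather than its input. A minimal sketch of the corrected pair (NumPy assumed; the closing check is illustrative only):

    import numpy as np

    def sig(x):
        # logistic sigmoid; the minus sign restored in PATCH 01 is essential
        return 1 / (1 + np.exp(-x))

    def sigDeriv(a):
        # derivative of the sigmoid, expressed via its output a = sig(x)
        return a * (1 - a)

    # sigDeriv is fed already-activated values, which is why xor.py applies
    # it to predictedOutput and hiddenLayerOutput, never to the raw sums
    a = sig(0.5)
    assert np.isclose(sigDeriv(a), sig(0.5) * (1 - sig(0.5)))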
From c5e426cfcdadc09ccd9b1be08f9247fce0c8005b Mon Sep 17 00:00:00 2001
From: piyushdaksh <160640645+piyushdaksh@users.noreply.github.com>
Date: Thu, 22 Feb 2024 16:16:26 +0530
Subject: [PATCH 06/14] Update pca.py

Fixes: Issue #5
---
 pca.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pca.py b/pca.py
index 52f6b40..d363ba6 100644
--- a/pca.py
+++ b/pca.py
@@ -35,6 +35,8 @@

 # Descending sort (eigenvalue, eigenvector) pairs with respect to eigenvalue
+eig_pairs.sort()
+eig_pairs.reverse()



From dd219dbec84349e5e7dca7e935dade58217f2628 Mon Sep 17 00:00:00 2001
From: piyushdaksh <160640645+piyushdaksh@users.noreply.github.com>
Date: Thu, 22 Feb 2024 16:17:20 +0530
Subject: [PATCH 07/14] Update pca.py

Fixes: Issue #6
---
 pca.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/pca.py b/pca.py
index d363ba6..9a51347 100644
--- a/pca.py
+++ b/pca.py
@@ -109,16 +109,17 @@

 # The projected data in 3D will be n x 3 matrix
 Proj_data_3D = np.dot(X_std,P_reduce)
+

 # Visualize data in 3D
 fig = plt.figure()
 ax = fig.add_subplot(111, projection='3d')

 # Scatter plot in 3D (test negative for diabetes)
-negative = ax.scatter(Proj_data_3D[:0,][y == 0], Proj_data_3D[:1,][y == 0], Proj_data_3D[:,2][y == 0], label="No Diabetes")
+negative = ax.scatter(Proj_data_3D[:,0][y == 0], Proj_data_3D[:,1][y == 0], Proj_data_3D[:,2][y == 0], label="No Diabetes")

 # Scatter plot in 3D (test positive for diabetes)
-positive = ax.scatter(Proj_data_3D[:0,][y == 0], Proj_data_3D[:1,][y == 0], Proj_data_3D[:,2][y == 1], color="red", label="Have Diabetes")
+positive = ax.scatter(Proj_data_3D[:,0][y == 1], Proj_data_3D[:,1][y == 1], Proj_data_3D[:,2][y == 1], color="red", label="Have Diabetes")

 ax.set_title('PCA Reduces Data to 3D')
@@ -149,3 +150,6 @@



+
+
+

From e33204ea8050de55dc7c7ef5b9c2af9e005ece8f Mon Sep 17 00:00:00 2001
From: kushagrapandey31111 <160846414+kushagrapandey31111@users.noreply.github.com>
Date: Thu, 22 Feb 2024 16:18:10 +0530
Subject: [PATCH 08/14] Update dig.py

---
 dig.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/dig.py b/dig.py
index bcc6924..6190311 100644
--- a/dig.py
+++ b/dig.py
@@ -1,4 +1,4 @@
-import numpy as np
+zimport numpy as np
 import pandas as pd
 from matplotlib import pyplot as plt

@@ -56,14 +56,15 @@ def ReLU_deriv(Z):
 def backward_prop(Z1, A1, Z2, A2, W1, W2, X, Y):
     one_hot_Y = one_hot(Y)
     dZ2 = A2 - one_hot_Y
-    dW2 = dZ2.dot(A1.T)
-    db2 = np.sum(dZ2)
+    dW2 = 1 / m * dZ2.dot(A1.T)
+    db2 = 1 / m * np.sum(dZ2)
     dZ1 = W2.T.dot(dZ2) * ReLU_deriv(Z1)
-    dW1 = dZ1.dot(X.T)
-    db1 = np.sum(dZ1)
+    dW1 = 1 / m * dZ1.dot(X.T)
+    db1 = 1 / m * np.sum(dZ1)
     return dW1, db1, dW2, db2


+
 def update_params(W1, b1, W2, b2, dW1, db1, dW2, db2, alpha):
     W1 = W1 - alpha * dW1
     b1 = b1 - alpha * db1

From b6ae32f137cc2a5188922a07caf66e1d39136aa2 Mon Sep 17 00:00:00 2001
From: kushagrapandey31111 <160846414+kushagrapandey31111@users.noreply.github.com>
Date: Thu, 22 Feb 2024 16:18:36 +0530
Subject: [PATCH 09/14] Update dig.py

---
 dig.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dig.py b/dig.py
index 6190311..20b04e1 100644
--- a/dig.py
+++ b/dig.py
@@ -20,7 +20,7 @@ def softmax(Z):
     return A

 def one_hot(Y):
-    one_hot_Y = np.ones((Y.size, Y.max() + 1))
+    one_hot_Y = np.zeros((Y.size, Y.max() + 1))
     one_hot_Y[np.arange(Y.size), Y] = 1
     one_hot_Y = one_hot_Y.T
     return one_hot_Y
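A note on PATCH 06: a bare .sort() on (eigenvalue, eigenvector) tuples works only while all eigenvalues are distinct; on a tie, Python falls back to comparing the NumPy eigenvector arrays, which raises a "truth value is ambiguous" error. Sorting on the eigenvalue alone avoids that edge case. A short sketch with stand-in data (the random matrix is hypothetical, not the diabetes set):

    import numpy as np

    X = np.random.rand(20, 3)
    cov = np.cov(X, rowvar=False)
    eigenvalues, eigenvectors = np.linalg.eig(cov)
    eig_pairs = [(eigenvalues[i], eigenvectors[:, i]) for i in range(len(eigenvalues))]
    # keyed descending sort: ties compare nothing but the scalar eigenvalue
    eig_pairs.sort(key=lambda pair: pair[0], reverse=True)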
From cc9c094154a9600e7b4d4a89cc5a5a5e50a7974c Mon Sep 17 00:00:00 2001
From: kushagrapandey31111 <160846414+kushagrapandey31111@users.noreply.github.com>
Date: Thu, 22 Feb 2024 16:19:20 +0530
Subject: [PATCH 10/14] Update dig.py

---
 dig.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dig.py b/dig.py
index 20b04e1..da9ee73 100644
--- a/dig.py
+++ b/dig.py
@@ -43,7 +43,7 @@ def one_hot(Y):

 def forward_prop(W1, b1, W2, b2, X):
     Z1 = W1.dot(X) + b1
-    A1 = Z1
+    A1 = ReLU(Z1)
     Z2 = W2.dot(A1) + b2
     A2 = softmax(Z2)
     return Z1, A1, Z2, A2

From 25fd79c7ef01b9e6be7471e0d7d3dd1bb0446a98 Mon Sep 17 00:00:00 2001
From: piyushdaksh <160640645+piyushdaksh@users.noreply.github.com>
Date: Thu, 22 Feb 2024 16:19:47 +0530
Subject: [PATCH 11/14] Update pca.py

Fixes: Undocumented Issues
---
 pca.py | 27 +++------------------------
 1 file changed, 3 insertions(+), 24 deletions(-)

diff --git a/pca.py b/pca.py
index 9a51347..4752d39 100644
--- a/pca.py
+++ b/pca.py
@@ -3,7 +3,7 @@
 from matplotlib import pyplot as plt
 # from mpl_toolkits.mplot3d import Axes3D

-dataset = pd.read_csv("data/diabetes.csv")
+dataset = pd.read_csv("diabetes.csv")

 # print(dataset.head())
 # print(dataset.describe())
@@ -12,7 +12,7 @@
 y = dataset.iloc[:,8]

 # Standardize feature space mean 0 and variance 1
-X_std = (X+np.mean(X,axis = 0))/np.std(X,axis = 0)
+X_std = (X-np.mean(X,axis = 0))/np.std(X,axis = 0)

@@ -33,14 +33,10 @@
 # Set of (eigenvalue, eigenvector) pairs
 eig_pairs = [(eigenvalues[index], eigenvectors[:,index]) for index in range(len(eigenvalues))]
-
 # Descending sort (eigenvalue, eigenvector) pairs with respect to eigenvalue
 eig_pairs.sort()
 eig_pairs.reverse()
-
-
-
 eigvalues_sort = [eig_pairs[index][0] for index in range(len(eigenvalues))]
 eigvectors_sort = [eig_pairs[index][1] for index in range(len(eigenvalues))]

@@ -73,7 +69,7 @@
 # Keep the first two principal components
 # P_reduce is 8 x 2 matrix
-P_reduce = np.array(eigvectors_sort[0:2]).T
+P_reduce = np.array(eigvectors_sort[0:2]).transpose()

 # The projected data in 2D will be n x 2 matrix
 Proj_data_2D = np.dot(X_std,P_reduce)
@@ -109,7 +105,6 @@
 # The projected data in 3D will be n x 3 matrix
 Proj_data_3D = np.dot(X_std,P_reduce)
-

 # Visualize data in 3D
 fig = plt.figure()
@@ -137,19 +132,3 @@

 plt.show()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
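PATCH 10 restores the hidden layer's nonlinearity: with A1 = Z1, the two weight matrices collapse into a single linear map and no amount of training lets the network separate the digit classes. A quick check of the repaired forward pass under dig.py's layout, using random stand-ins for the MNIST matrices (the explicit axis in softmax is spelled out here for clarity; dig.py's built-in sum over the first axis is equivalent):

    import numpy as np

    def ReLU(Z):
        return np.maximum(Z, 0)

    def softmax(Z):
        # column-wise softmax: each column (one example) sums to 1
        return np.exp(Z) / np.sum(np.exp(Z), axis=0)

    W1, b1 = np.random.rand(10, 784) - 0.5, np.random.rand(10, 1) - 0.5
    W2, b2 = np.random.rand(10, 10) - 0.5, np.random.rand(10, 1) - 0.5
    X = np.random.rand(784, 5)               # 5 fake examples as columns

    Z1 = W1.dot(X) + b1
    A1 = ReLU(Z1)                            # the PATCH 10 fix
    A2 = softmax(W2.dot(A1) + b2)
    assert np.allclose(A2.sum(axis=0), 1.0)  # valid class distributions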
From 2db05643b5debbeb11397b8d762fe2905deb2a9c Mon Sep 17 00:00:00 2001
From: kushagrapandey31111 <160846414+kushagrapandey31111@users.noreply.github.com>
Date: Thu, 22 Feb 2024 16:19:56 +0530
Subject: [PATCH 12/14] Update dig.py

---
 dig.py | 167 +++++++++++++++++++--------------------------------------
 1 file changed, 54 insertions(+), 113 deletions(-)

diff --git a/dig.py b/dig.py
index da9ee73..6ae7175 100644
--- a/dig.py
+++ b/dig.py
@@ -1,125 +1,66 @@
-zimport numpy as np
-import pandas as pd
-from matplotlib import pyplot as plt
+import numpy as np

-data = pd.read_csv('data/train.csv')
+# XOR Gate #

-def init_params():
-    W1 = np.random.rand(10, 784) - 0.5
-    b1 = np.random.rand(10, 1) - 0.5
-    W2 = np.random.rand(10, 10) - 0.5
-    b2 = np.random.rand(10, 1) - 0.5
-    return W1, b1, W2, b2
+def sig(x):
+    return 1 / (1 + np.exp(-x))

-def ReLU(Z):
-    return np.maximum(Z, 0)
+def sigDeriv(x):
+    return x * (1 - x)

-def softmax(Z):
-    A = np.exp(Z) / sum(np.exp(Z))
-    return A
+input = np.array([[0,0],[0,1],[1,0],[1,1]])
+target = np.array([[0],[1],[1],[0]])
+
+inputLayerNeurons, hiddenLayerNeurons, outputLayerNeurons = 2, 2, 1
+
+# Init random weights
+hiddenWeights = np.random.uniform(size=(inputLayerNeurons,hiddenLayerNeurons))
+hiddenBias = np.zeros((1, 2), dtype = float)
+outputWeights = np.random.uniform(size=(hiddenLayerNeurons,outputLayerNeurons))
+outputBias = np.zeros((1, 1), dtype = float)
+
+epochs = 50000
+lRate = 1
+
+
+for _ in range(epochs):
+
+    # forward prop
+    hiddenLayerActivation = np.dot(input, hiddenWeights)
+    hiddenLayerActivation += hiddenBias
+    hiddenLayerOutput = sig(hiddenLayerActivation)
+
+    outputLayerActivation = np.dot(hiddenLayerOutput, outputWeights)
+    outputLayerActivation += outputBias
+    predictedOutput = sig(outputLayerActivation)
+
+    # back prop
+    error = target - predictedOutput
+    dPredictedOutput = error * sigDeriv(predictedOutput)
+
+    errorHiddenLayer = dPredictedOutput.dot(outputWeights.T)
+    dHiddenLayer = errorHiddenLayer * sigDeriv(hiddenLayerOutput)

-def one_hot(Y):
-    one_hot_Y = np.zeros((Y.size, Y.max() + 1))
-    one_hot_Y[np.arange(Y.size), Y] = 1
-    one_hot_Y = one_hot_Y.T
-    return one_hot_Y
-
-
-data = np.array(data)
-m, n = data.shape
-np.random.shuffle(data)
-
-data_dev = data[0:1000].T
-Y_dev = data_dev[0]
-X_dev = data_dev[1:n]
-X_dev = X_dev / 255.0
-
-data_train = data[1000:m].T
-Y_train = data_train[0]
-X_train = data_train[1:n]
-X_train = X_train / 255.0
-_,m_train = X_train.shape
-
-def forward_prop(W1, b1, W2, b2, X):
-    Z1 = W1.dot(X) + b1
-    A1 = ReLU(Z1)
-    Z2 = W2.dot(A1) + b2
-    A2 = softmax(Z2)
-    return Z1, A1, Z2, A2
-
-
-
-def ReLU_deriv(Z):
-    return Z > 0
-
-def backward_prop(Z1, A1, Z2, A2, W1, W2, X, Y):
-    one_hot_Y = one_hot(Y)
-    dZ2 = A2 - one_hot_Y
-    dW2 = 1 / m * dZ2.dot(A1.T)
-    db2 = 1 / m * np.sum(dZ2)
-    dZ1 = W2.T.dot(dZ2) * ReLU_deriv(Z1)
-    dW1 = 1 / m * dZ1.dot(X.T)
-    db1 = 1 / m * np.sum(dZ1)
-    return dW1, db1, dW2, db2
-
-
-
-def update_params(W1, b1, W2, b2, dW1, db1, dW2, db2, alpha):
-    W1 = W1 - alpha * dW1
-    b1 = b1 - alpha * db1
-    W2 = W2 - alpha * dW2
-    b2 = b2 - alpha * db2
-    return W1, b1, W2, b2
-
-def get_predictions(A2):
-    return np.argmax(A2, 0)
-
-def get_accuracy(predictions, Y):
-# print(predictions, Y)
-    return np.sum(predictions == Y) / Y.size
-
-def gradient_descent(X, Y, alpha, iterations):
-    W1, b1, W2, b2 = init_params()
-    for i in range(iterations):
-        Z1, A1, Z2, A2 = forward_prop(W1, b1, W2, b2, X)
-        dW1, db1, dW2, db2 = backward_prop(Z1, A1, Z2, A2, W1, W2, X, Y)
-        W1, b1, W2, b2 = update_params(W1, b1, W2, b2, dW1, db1, dW2, db2, alpha)
-        if i % 10 == 0:
-            print("Iteration: ", i)
-            predictions = get_predictions(A2)
-            print(get_accuracy(predictions, Y))
-    return W1, b1, W2, b2
-
-
-W1, b1, W2, b2 = gradient_descent(X_train, Y_train, 0.001, 50)
-
-
-
-def make_predictions(X, W1, b1, W2, b2):
-    _, _, _, A2 = forward_prop(W1, b1, W2, b2, X)
-    predictions = get_predictions(A2)
-    return predictions
-
-def test_prediction(index, W1, b1, W2, b2):
-    current_image = X_train[:, index, None]
-    prediction = make_predictions(X_train[:, index, None], W1, b1, W2, b2)
-    label = Y_train[index]
-    print("Prediction: ", prediction)
-    print("Label: ", label)
-    current_image = current_image.reshape((28, 28)) * 255
-# plt.gray()
-    plt.imshow(current_image, interpolation='nearest')
-    plt.show()
+    # updating weights, bias
+    outputWeights += hiddenLayerOutput.T.dot(dPredictedOutput) * lRate
+    outputBias += np.sum(dPredictedOutput, axis = 0, keepdims=True) * lRate
+    hiddenWeights += input.T.dot(dHiddenLayer) * lRate
+    hiddenBias += np.sum(dHiddenLayer, axis = 0, keepdims=True) * lRate
+
-test_prediction(0, W1, b1, W2, b2)
-test_prediction(1, W1, b1, W2, b2)
-test_prediction(2, W1, b1, W2, b2)
-test_prediction(3, W1, b1, W2, b2)
+print("Final hidden weights: ", end = '')
+print(*hiddenWeights)
+print("Final hidden bias: ", end = '')
+print(*hiddenBias)
+print("Final output weights: ", end = '')
+print(*outputWeights)
+print("Final output bias: ", end = '')
+print(*outputBias)

-dev_predictions = make_predictions(X_dev, W1, b1, W2, b2)
-print("accuracy:", get_accuracy(dev_predictions, Y_dev))
+print("\nOutput from nn: ", end = '')
+print(*predictedOutput)
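The file mix-up aside (PATCH 13 below reverts it), the script pasted in here is the fully fixed XOR trainer from patches 01-05. A condensed convergence check of the same architecture, with a fixed seed, fewer epochs, and the 0.1 learning rate from PATCH 05 (all three are assumptions made for this sketch):

    import numpy as np

    rng = np.random.default_rng(0)
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
    t = np.array([[0], [1], [1], [0]], dtype=float)

    sig = lambda x: 1 / (1 + np.exp(-x))
    W1, b1 = rng.uniform(size=(2, 2)), np.zeros((1, 2))
    W2, b2 = rng.uniform(size=(2, 1)), np.zeros((1, 1))

    for _ in range(20000):
        h = sig(X @ W1 + b1)                       # forward prop
        y = sig(h @ W2 + b2)
        d2 = (t - y) * y * (1 - y)                 # back prop
        d1 = (d2 @ W2.T) * h * (1 - h)
        W2 += 0.1 * h.T @ d2                       # weight/bias updates
        b2 += 0.1 * d2.sum(axis=0, keepdims=True)
        W1 += 0.1 * X.T @ d1
        b1 += 0.1 * d1.sum(axis=0, keepdims=True)

    # should approach [0, 1, 1, 0]; a poor random init can still stall
    print(np.round(y.ravel(), 3))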
From 8b2ff40218ffeee1effe935ef65c576e341b8b36 Mon Sep 17 00:00:00 2001
From: kushagrapandey31111 <160846414+kushagrapandey31111@users.noreply.github.com>
Date: Thu, 22 Feb 2024 16:20:53 +0530
Subject: [PATCH 13/14] Revert changes and fix unassigned issues

---
 dig.py | 164 ++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 111 insertions(+), 53 deletions(-)

diff --git a/dig.py b/dig.py
index 6ae7175..a1917a7 100644
--- a/dig.py
+++ b/dig.py
@@ -1,66 +1,124 @@
 import numpy as np
+import pandas as pd
+from matplotlib import pyplot as plt

-# XOR Gate #
+data = pd.read_csv('train.csv')

-def sig(x):
-    return 1 / (1 + np.exp(-x))
+def init_params():
+    W1 = np.random.rand(10, 784) - 0.5
+    b1 = np.random.rand(10, 1) - 0.5
+    W2 = np.random.rand(10, 10) - 0.5
+    b2 = np.random.rand(10, 1) - 0.5
+    return W1, b1, W2, b2

-def sigDeriv(x):
-    return x * (1 - x)
+def ReLU(Z):
+    return np.maximum(Z, 0)

-input = np.array([[0,0],[0,1],[1,0],[1,1]])
-target = np.array([[0],[1],[1],[0]])
-
-inputLayerNeurons, hiddenLayerNeurons, outputLayerNeurons = 2, 2, 1
-
-# Init random weights
-hiddenWeights = np.random.uniform(size=(inputLayerNeurons,hiddenLayerNeurons))
-hiddenBias = np.zeros((1, 2), dtype = float)
-outputWeights = np.random.uniform(size=(hiddenLayerNeurons,outputLayerNeurons))
-outputBias = np.zeros((1, 1), dtype = float)
-
-epochs = 50000
-lRate = 1
-
-
-for _ in range(epochs):
-
-    # forward prop
-    hiddenLayerActivation = np.dot(input, hiddenWeights)
-    hiddenLayerActivation += hiddenBias
-    hiddenLayerOutput = sig(hiddenLayerActivation)
-
-    outputLayerActivation = np.dot(hiddenLayerOutput, outputWeights)
-    outputLayerActivation += outputBias
-    predictedOutput = sig(outputLayerActivation)
-
-    # back prop
-    error = target - predictedOutput
-    dPredictedOutput = error * sigDeriv(predictedOutput)
-
-    errorHiddenLayer = dPredictedOutput.dot(outputWeights.T)
-    dHiddenLayer = errorHiddenLayer * sigDeriv(hiddenLayerOutput)
+def softmax(Z):
+    A = np.exp(Z) / sum(np.exp(Z))
+    return A

+def one_hot(Y):
+    one_hot_Y = np.zeros((Y.size, Y.max() + 1))
+    one_hot_Y[np.arange(Y.size), Y] = 1
+    one_hot_Y = one_hot_Y.T
+    return one_hot_Y
+
+
+data = np.array(data)
+m, n = data.shape
+np.random.shuffle(data)
+
+data_dev = data[0:1000].T
+Y_dev = data_dev[0]
+X_dev = data_dev[1:n]
+X_dev = X_dev / 255.0
+
+data_train = data[1000:m].T
+Y_train = data_train[0]
+X_train = data_train[1:n]
+X_train = X_train / 255.0
+_,m_train = X_train.shape
+
+def forward_prop(W1, b1, W2, b2, X):
+    Z1 = W1.dot(X) + b1
+    A1 = ReLU(Z1)
+    Z2 = W2.dot(A1) + b2
+    A2 = softmax(Z2)
+    return Z1, A1, Z2, A2
+
+
+
+def ReLU_deriv(Z):
+    return Z > 0
+
+def backward_prop(Z1, A1, Z2, A2, W1, W2, X, Y):
+    one_hot_Y = one_hot(Y)
+    dZ2 = A2 - one_hot_Y
+    dW2 = 1 / m * dZ2.dot(A1.T)
+    db2 = 1 / m * np.sum(dZ2)
+    dZ1 = W2.T.dot(dZ2) * ReLU_deriv(Z1)
+    dW1 = 1 / m * dZ1.dot(X.T)
+    db1 = 1 / m * np.sum(dZ1)
+    return dW1, db1, dW2, db2
+
+
+def update_params(W1, b1, W2, b2, dW1, db1, dW2, db2, alpha):
+    W1 = W1 - alpha * dW1
+    b1 = b1 - alpha * db1
+    W2 = W2 - alpha * dW2
+    b2 = b2 - alpha * db2
+    return W1, b1, W2, b2
+
+def get_predictions(A2):
+    return np.argmax(A2, 0)
+
+def get_accuracy(predictions, Y):
+# print(predictions, Y)
+    return np.sum(predictions == Y) / Y.size
+
+def gradient_descent(X, Y, alpha, iterations):
+    W1, b1, W2, b2 = init_params()
+    for i in range(iterations):
+        Z1, A1, Z2, A2 = forward_prop(W1, b1, W2, b2, X)
+        dW1, db1, dW2, db2 = backward_prop(Z1, A1, Z2, A2, W1, W2, X, Y)
+        W1, b1, W2, b2 = update_params(W1, b1, W2, b2, dW1, db1, dW2, db2, alpha)
+        if i % 10 == 0:
+            print("Iteration: ", i)
+            predictions = get_predictions(A2)
+            print(get_accuracy(predictions, Y))
+    return W1, b1, W2, b2
+
+
+W1, b1, W2, b2 = gradient_descent(X_train, Y_train, 0.10, 500)
+
+
+
+def make_predictions(X, W1, b1, W2, b2):
+    _, _, _, A2 = forward_prop(W1, b1, W2, b2, X)
+    predictions = get_predictions(A2)
+    return predictions
+
+def test_prediction(index, W1, b1, W2, b2):
+    current_image = X_train[:, index, None]
+    prediction = make_predictions(X_train[:, index, None], W1, b1, W2, b2)
+    label = Y_train[index]
+    print("Prediction: ", prediction)
+    print("Label: ", label)
+    current_image = current_image.reshape((28, 28)) * 255
+# plt.gray()
+    plt.imshow(current_image, interpolation='nearest')
+    plt.show()

-    # updating weights, bias
-    outputWeights += hiddenLayerOutput.T.dot(dPredictedOutput) * lRate
-    outputBias += np.sum(dPredictedOutput, axis = 0, keepdims=True) * lRate
-    hiddenWeights += input.T.dot(dHiddenLayer) * lRate
-    hiddenBias += np.sum(dHiddenLayer, axis = 0, keepdims=True) * lRate
-
-print("Final hidden weights: ", end = '')
-print(*hiddenWeights)
-print("Final hidden bias: ", end = '')
-print(*hiddenBias)
-print("Final output weights: ", end = '')
-print(*outputWeights)
-print("Final output bias: ", end = '')
-print(*outputBias)
+test_prediction(0, W1, b1, W2, b2)
+test_prediction(1, W1, b1, W2, b2)
+test_prediction(2, W1, b1, W2, b2)
+test_prediction(3, W1, b1, W2, b2)

+dev_predictions = make_predictions(X_dev, W1, b1, W2, b2)
+print("accuracy:", get_accuracy(dev_predictions, Y_dev))
-print("\nOutput from nn: ", end = '')
-print(*predictedOutput)
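The restored backward_prop scales every gradient by 1 / m, with m the number of training examples, so the step size no longer grows with the batch: dW2 = 1 / m * dZ2.dot(A1.T) is an average over examples rather than a sum. A small self-check of that property (shapes follow dig.py; the data is random):

    import numpy as np

    m = 4                                  # batch size
    A1 = np.random.rand(10, m)             # hidden activations
    dZ2 = np.random.rand(10, m) - 0.5      # output-layer error
    dW2 = 1 / m * dZ2.dot(A1.T)

    # repeating the batch twice leaves the averaged gradient unchanged
    A1_twice, dZ2_twice = np.tile(A1, 2), np.tile(dZ2, 2)
    dW2_twice = 1 / (2 * m) * dZ2_twice.dot(A1_twice.T)
    assert np.allclose(dW2, dW2_twice)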
From 3fa3dbf3f6233809d08237b5144fd0528acea734 Mon Sep 17 00:00:00 2001
From: kushagrapandey31111 <160846414+kushagrapandey31111@users.noreply.github.com>
Date: Thu, 22 Feb 2024 16:21:52 +0530
Subject: [PATCH 14/14] Update README.md

---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index e615c50..20ef70b 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,5 @@
 # ml
 Welcome to the OpenSourceSprint! This competition provides participants with the opportunity to showcase their machine learning skills by fixing issues in designated repositories on GitHub.
+
+# All the issues have been resolved, and the other secret issues mentioned at the end of every issue have also been resolved