Skip to content

Commit d765bbc

Browse files
Added NN sources
Assignments written in Python
1 parent 0166ceb commit d765bbc

File tree

8 files changed

+419
-0
lines changed

8 files changed

+419
-0
lines changed
Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
10x - 2y - 8z = 3
2+
-21x + 7y + 3z = 5
3+
11x + 12y - 5z = 7
Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
def parse(file_name):
    """Parse a system of linear equations in x, y and z from *file_name*.

    Each line must look like ``10x - 2y - 8z = 3`` (spaces optional, at most
    one term per variable). Blank lines — including the trailing newline most
    editors add — are skipped; the original crashed on them with
    ``int('')``.

    Returns a pair ``(coefficients_matrix, results)`` where
    ``coefficients_matrix`` is a list of ``[x, y, z]`` integer coefficient
    rows and ``results`` is the list of integer right-hand sides.
    """
    with open(file_name) as file:
        lines = file.read().replace(" ", "").split("\n")
    # Skip empty lines so a trailing newline does not produce a bogus equation.
    equations = [line for line in lines if line]
    coefficients_matrix = [[_coefficient(equation.split("=")[0], variable)
                            for variable in "xyz"]
                           for equation in equations]
    results = [int(equation.split("=")[-1]) for equation in equations]
    return coefficients_matrix, results


def _coefficient(terms, variable):
    """Return the signed integer coefficient of *variable* in *terms*.

    *terms* is the space-free left-hand side of an equation, e.g.
    ``"-21x+7y+3z"``. A missing variable contributes 0; a bare variable
    (``"x"`` / ``"+y"``) contributes 1 and ``"-z"`` contributes -1.
    """
    if variable not in terms:
        return 0
    end = terms.index(variable)
    # Walk left over the digits of the coefficient, then over an optional sign.
    start = end
    while start > 0 and terms[start - 1].isdigit():
        start -= 1
    if start > 0 and terms[start - 1] in "+-":
        start -= 1
    text = terms[start:end]
    if text in ("", "+"):
        return 1
    if text == "-":
        return -1
    return int(text)

Neural Networks/Equations/main.py

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
import numpy
2+
3+
from equations_parser import parse
4+
from python_solver import python_solve
5+
from numpy_solver import numpy_solve
6+
7+
8+
def main():
    """Load the equation system, solve it with both solvers, and compare."""
    matrix, constants = parse("equations.txt")
    print("Coefficients:")
    print(numpy.array(matrix))
    print("Results:")
    print(numpy.array(constants))

    print("\nPython solution:")
    plain_solution = python_solve(matrix, constants)
    if plain_solution is None:
        print("The system of equations does not have a unique solution.")
    else:
        print(numpy.array(plain_solution))

    print("\nNumPy solution:")
    vector_solution = numpy_solve(numpy.array(matrix), numpy.array(constants))
    if vector_solution is None:
        print("The system of equations does not have a unique solution.")
    else:
        print(vector_solution)

    # Compare the two answers only when both solvers produced one,
    # rounding to 3 decimals to absorb floating-point noise.
    if plain_solution is not None and vector_solution is not None:
        rounded_plain = numpy.around(numpy.array(plain_solution), 3)
        rounded_vector = numpy.around(vector_solution, 3)
        if numpy.array_equal(rounded_plain, rounded_vector):
            print("\nBoth scripts give the same solution.")


if __name__ == '__main__':
    main()
Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
import numpy
2+
3+
4+
def minor(matrix, row_index, column_index):
    """Return the minor of *matrix*: the submatrix with row *row_index*
    and column *column_index* removed."""
    # numpy.delete does the row/column removal in a single C-level call each,
    # replacing the original per-row concatenate reconstruction.
    return numpy.delete(numpy.delete(matrix, row_index, axis=0), column_index, axis=1)
7+
8+
9+
def minors_matrix(matrix):
    """Return the matrix whose (i, j) entry is the determinant of the
    (i, j) minor of *matrix*."""
    rows = range(len(matrix))
    columns = range(len(matrix[0]))
    determinants = [[numpy.linalg.det(minor(matrix, i, j)) for j in columns]
                    for i in rows]
    return numpy.array([numpy.array(row) for row in determinants])
12+
13+
14+
def cofactors_matrix(matrix):
    """Apply the checkerboard of cofactor signs (-1)**(i + j) to every
    entry of *matrix*."""
    matrix = numpy.asarray(matrix)
    row_indices, column_indices = numpy.indices(matrix.shape)
    # Build the +1/-1 checkerboard as an array and multiply once, replacing
    # the original O(n^2) Python loop with per-element exponentiation.
    signs = numpy.where((row_indices + column_indices) % 2 == 0, 1, -1)
    return signs * matrix
17+
18+
19+
def invert(matrix):
    """Return the inverse of *matrix* via the adjugate, or None if the
    matrix is singular (no unique inverse)."""
    determinant = numpy.linalg.det(matrix)
    # Floating-point determinants of singular matrices come out as tiny
    # non-zero values (~1e-16), so the original exact "== 0" test missed
    # them and divided by near-zero. Tolerance-based check instead.
    if numpy.isclose(determinant, 0):
        return None
    adjunct_matrix = numpy.transpose(cofactors_matrix(minors_matrix(matrix)))
    return adjunct_matrix / determinant
25+
26+
27+
def numpy_solve(coefficients, results):
    """Solve coefficients . x = results by multiplying with the inverse;
    returns None when the system has no unique solution."""
    inverse_matrix = invert(coefficients)
    return None if inverse_matrix is None else numpy.dot(inverse_matrix, results)
Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
def minor(matrix, row_index, column_index):
    """Return *matrix* (list of lists) with row *row_index* and column
    *column_index* deleted."""
    kept_rows = matrix[:row_index] + matrix[row_index + 1:]
    return [row[:column_index] + row[column_index + 1:] for row in kept_rows]
3+
4+
5+
def two_dimensional_determinant(matrix):
    """Determinant of a 2x2 matrix: ad - bc."""
    main_diagonal = matrix[0][0] * matrix[1][1]
    anti_diagonal = matrix[0][1] * matrix[1][0]
    return main_diagonal - anti_diagonal
7+
8+
9+
def minors_matrix(matrix):
    """Matrix of determinants of every 2x2 minor of the 3x3 *matrix*."""
    column_count = len(matrix[0])
    result = []
    for i in range(len(matrix)):
        result.append([two_dimensional_determinant(minor(matrix, i, j))
                       for j in range(column_count)])
    return result
12+
13+
14+
def cofactors_matrix(matrix):
    """Flip the sign of every entry of *matrix* where row + column is odd
    (the checkerboard of cofactor signs)."""
    signed = []
    for i, row in enumerate(matrix):
        signed.append([value if (i + j) % 2 == 0 else -value
                       for j, value in enumerate(row)])
    return signed
17+
18+
19+
def transpose(matrix):
    """Return the transpose of *matrix* as a new list of lists."""
    # zip(*matrix) yields the columns of the matrix as tuples.
    return [list(column) for column in zip(*matrix)]
22+
23+
24+
def three_dimensional_determinant(matrix):
    """Determinant of a 3x3 matrix by cofactor expansion along the first row."""
    return sum((-1) ** j * matrix[0][j] * two_dimensional_determinant(minor(matrix, 0, j))
               for j in range(len(matrix[0])))
29+
30+
31+
def invert(matrix):
    """Inverse of a 3x3 matrix via the adjugate; None when the determinant
    is zero (singular matrix, no unique inverse)."""
    determinant = three_dimensional_determinant(matrix)
    if determinant == 0:
        return None
    # inverse = adjugate / determinant
    adjunct_matrix = transpose(cofactors_matrix(minors_matrix(matrix)))
    return [[entry / determinant for entry in row] for row in adjunct_matrix]
38+
39+
40+
def scalar_product(matrix, array):
    """Multiply *matrix* by the column vector *array*; returns the product
    as a flat list (one dot product per matrix row)."""
    return [sum(row[k] * array[k] for k in range(len(array))) for row in matrix]
45+
46+
47+
def python_solve(coefficients, results):
    """Solve the 3x3 system coefficients . x = results in pure Python;
    returns None when there is no unique solution."""
    inverse_matrix = invert(coefficients)
    return None if inverse_matrix is None else scalar_product(inverse_matrix, results)

Neural Networks/backpropagation.py

Lines changed: 163 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,163 @@
1+
import copy
2+
import gzip
3+
import pickle
4+
from matplotlib import pyplot
5+
import numpy
6+
import time
7+
8+
9+
def get_datasets(file_name):
    """Load the pickled (training, validation, testing) datasets from the
    gzipped file *file_name* and return them as a 3-tuple.

    NOTE(review): pickle.load must only be used on trusted local dataset
    files — never on untrusted input.
    """
    # Context manager guarantees the file is closed even if unpickling
    # raises; the original leaked the handle on that path.
    with gzip.open(file_name, "rb") as file:
        _training_set, _validation_set, _testing_set = pickle.load(file, encoding="latin")
    return _training_set, _validation_set, _testing_set
14+
15+
16+
def add_ones(samples):
    """Append a bias column of ones to the right of the *samples* matrix."""
    row_count = numpy.shape(samples)[0]
    bias_column = numpy.ones((row_count, 1))
    return numpy.hstack((samples, bias_column))
20+
21+
22+
def one_hot_encode(labels):
    """Turn an integer label vector into rows of one-hot indicators."""
    class_count = numpy.max(labels) + 1
    # Indexing the identity matrix by label picks the matching one-hot row.
    return numpy.eye(class_count)[labels]
24+
25+
26+
def create_weights(samples, labels, hidden_neurons=100):
    """Randomly initialize the hidden and output weight matrices.

    Weights are drawn from a standard normal scaled by 1/sqrt(fan-in) to
    keep initial activations in a sane range. Returns a dict with keys
    "hidden" and "output".
    """
    input_size = numpy.shape(samples)[1]
    output_size = numpy.shape(labels)[1]
    hidden = numpy.random.randn(input_size, hidden_neurons) / numpy.sqrt(input_size)
    # +1 accounts for the bias unit appended to the hidden layer.
    output = numpy.random.randn(hidden_neurons + 1, output_size) / numpy.sqrt(hidden_neurons + 1)
    return {"hidden": hidden, "output": output}
32+
33+
34+
def create_batches(samples, labels, batch_size=10):
    """Shuffle *samples* and *labels* together and yield mini-batches.

    Returns an iterator of (sample_batch, label_batch) pairs of
    *batch_size* rows each. Assumes batch_size divides the sample count
    evenly, as the original did.
    """
    sample_number = numpy.shape(samples)[0]
    # Integer division: vsplit needs an integer section count; the original
    # float "/" result is rejected by modern NumPy.
    batch_number = sample_number // batch_size
    # One shared permutation keeps each sample aligned with its label.
    permutation = numpy.random.permutation(sample_number)
    shuffled_samples = samples[permutation, :]
    shuffled_labels = labels[permutation, :]
    return zip(numpy.vsplit(shuffled_samples, batch_number), numpy.vsplit(shuffled_labels, batch_number))
41+
42+
43+
def initialize_gradients(weights):
    """Return zeroed gradient accumulators matching the weight matrix shapes."""
    return {layer: numpy.zeros(numpy.shape(weights[layer]))
            for layer in ("hidden", "output")}
46+
47+
48+
def activate(values):
    """Logistic sigmoid activation: squashes *values* into (0, 1)."""
    exponentials = numpy.exp(-values)
    return numpy.reciprocal(1 + exponentials)
50+
51+
52+
def derive(values):
    """Sigmoid derivative expressed through the sigmoid output: s * (1 - s)."""
    complement = 1 - values
    return values * complement
54+
55+
56+
def softmax(values):
    """Row-wise softmax of the 2-D array *values* (one row per sample)."""
    # Subtract each row's maximum before exponentiating: softmax is
    # invariant under this shift, and it prevents the overflow/NaN the
    # original produced for large logits.
    shifted = values - numpy.max(values, axis=1, keepdims=True)
    exponentials = numpy.exp(shifted)
    return exponentials / numpy.sum(exponentials, axis=1, keepdims=True)
59+
60+
61+
def feed_forward(samples, weights):
    """Run *samples* through the network; returns hidden and output activations."""
    hidden_input = numpy.dot(samples, weights["hidden"])
    hidden_activations = activate(hidden_input)
    # The hidden layer gets a bias column before feeding the output layer.
    output_input = numpy.dot(add_ones(hidden_activations), weights["output"])
    return {"hidden": hidden_activations, "output": softmax(output_input)}
65+
66+
67+
def back_propagate(samples, labels, weights, activations):
    """Compute hidden/output weight gradients from the forward-pass *activations*."""
    # Output-layer error is simply prediction minus target.
    output_errors = activations["output"] - labels
    hidden_with_bias = add_ones(activations["hidden"])
    output_gradients = numpy.dot(hidden_with_bias.T, output_errors)
    # Drop the bias row of the output weights before propagating the error
    # back — the bias unit receives no error signal.
    propagated_errors = numpy.dot(output_errors, weights["output"][:-1, :].T)
    hidden_errors = derive(activations["hidden"]) * propagated_errors
    hidden_gradients = numpy.dot(samples.T, hidden_errors)
    return {"hidden": hidden_gradients, "output": output_gradients}
73+
74+
75+
def train(samples, labels, iterations, batch_size, learning_rate, momentum, regularization):
    """Train the network with mini-batch gradient descent.

    Uses momentum on the accumulated per-sample gradients and L2 weight
    decay scaled by *regularization*. Returns a list with a deep-copied
    snapshot of the weights after every iteration, so the caller can track
    accuracy over time.

    (Removed: dead commented-out timing/cost printing and the unused
    start_time it referenced.)
    """
    sample_number = numpy.shape(samples)[0]
    weights = create_weights(samples, labels)
    iterations_weights = []
    for iteration in range(iterations):
        for sample_batch, label_batch in create_batches(samples, labels, batch_size):
            added_gradients = initialize_gradients(weights)
            # Forward/backward pass per sample, accumulated with momentum.
            for batch in range(batch_size):
                sample = sample_batch[batch:batch + 1, :]
                label = label_batch[batch:batch + 1, :]
                activations = feed_forward(sample, weights)
                gradients = back_propagate(sample, label, weights, activations)
                for layer in ["hidden", "output"]:
                    added_gradients[layer] = momentum * added_gradients[layer] - learning_rate * gradients[layer]
            for layer in ["hidden", "output"]:
                # Weight decay (L2 regularization) then the averaged batch update.
                weights[layer] = (1 - learning_rate * regularization / sample_number) * weights[layer] + \
                    added_gradients[layer] / batch_size
        # Deep copy: the arrays in `weights` are replaced next iteration,
        # but the snapshot must stay frozen at this iteration's values.
        iterations_weights.append(copy.deepcopy(weights))
    return iterations_weights
100+
101+
102+
def compute_cost(labels, activations):
    """Mean binary cross-entropy between one-hot *labels* and *activations*."""
    positive_term = labels * numpy.log(activations)
    negative_term = (1 - labels) * numpy.log(1 - activations)
    return -numpy.mean(positive_term + negative_term)
104+
105+
106+
def classify(samples, weights):
    """Predict a digit for each sample: the index of the strongest output
    activation per row."""
    output_activations = feed_forward(samples, weights)["output"]
    return numpy.argmax(output_activations, axis=1)
109+
110+
111+
def compute_accuracy(samples, labels, weights):
    """Fraction of *samples* whose predicted digit matches *labels*."""
    total, = numpy.shape(labels)
    hits = classify(samples, weights) == labels
    return numpy.sum(hits) / float(total)
116+
117+
118+
def show_accuracies(accuracies):
    """Plot the training/validation/testing accuracy curves per iteration."""
    pyplot.figure()
    handles = []
    for name, color in (("training", "blue"), ("validation", "green"), ("testing", "violet")):
        line, = pyplot.plot(accuracies[name], color=color, label=name)
        handles.append(line)
    pyplot.legend(handles=handles)
    pyplot.xlabel("Iterations")
    pyplot.ylabel("Accuracies")
    pyplot.show()
127+
128+
129+
def test(file_name="mnist.pkl.gz", iterations=30, batch_size=10, learning_rate=0.01, momentum=0.9, regularization=0.1):
    """Train on the MNIST pickle, then report and plot per-iteration accuracies."""
    training_set, validation_set, testing_set = get_datasets(file_name)
    # Pair each dataset's bias-augmented samples with its labels.
    datasets = {
        "training": (add_ones(training_set[0]), training_set[1]),
        "validation": (add_ones(validation_set[0]), validation_set[1]),
        "testing": (add_ones(testing_set[0]), testing_set[1]),
    }
    training_samples, training_labels = datasets["training"]
    iterations_weights = train(training_samples, one_hot_encode(training_labels), iterations, batch_size,
                               learning_rate, momentum, regularization)
    # One accuracy value per iteration snapshot, for each dataset.
    accuracies = {name: [compute_accuracy(samples, labels, weights)
                         for weights in iterations_weights]
                  for name, (samples, labels) in datasets.items()}
    print("{} iterations, {} batch size, {} learning rate, {} momentum, {} regularization:"
          .format(iterations, batch_size, learning_rate, momentum, regularization))
    for name in ("training", "validation", "testing"):
        print("{} {} accuracy".format(max(accuracies[name]), name))
    show_accuracies(accuracies)
150+
151+
152+
def main():
    """Entry point: run the MNIST experiment with the default hyper-parameters.

    Reference results from a previous run:
    30 iterations, 10 batch size, 0.01 learning rate, 0.9 momentum, 0.1 regularization:
    0.9488 training accuracy
    0.952 validation accuracy
    0.9478 testing accuracy
    """
    test()


if __name__ == '__main__':
    main()

Neural Networks/mnist.pkl.gz

15.4 MB
Binary file not shown.

0 commit comments

Comments
 (0)