Commit afbb16d (0 parents)
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Showing 17 changed files with 1,169 additions and 0 deletions.
Large diffs are not rendered by default.
7 binary files not shown.
activations.py
@@ -0,0 +1,10 @@
import numpy as np


def softmax(x):
    '''Softmax activation function (numerically stable, applied row-wise)'''
    # Subtracting the per-row max before exponentiating avoids overflow;
    # normalizing along the last axis gives one distribution per sample.
    exp = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return exp / np.sum(exp, axis=-1, keepdims=True)


def relu(x):
    '''Rectified Linear Unit activation function'''
    return np.maximum(0, x)
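
A quick sanity check of both activations (a minimal sketch; the input values are made up for illustration):

import numpy as np
from activations import softmax, relu

logits = np.array([[1.0, 2.0, 3.0],
                   [0.0, 0.0, 10.0]])  # hypothetical batch of two samples

probs = softmax(logits)
print(probs.sum(axis=1))            # each row sums to 1.0
print(relu(np.array([-1.0, 0.5])))  # negatives clipped to 0 -> [0.  0.5]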
Empty file.
initializers.py
@@ -0,0 +1,5 @@
import numpy as np


def xavier_init(n_input, n_output, shape):
    '''Xavier (Glorot) initialization: zero-mean normal with variance 2/(n_input+n_output)'''
    return np.random.normal(0, np.sqrt(2 / (n_input + n_output)), shape)
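
A minimal sketch of how this initializer is used; the layer sizes below are illustrative:

import numpy as np
from initializers import xavier_init

W = xavier_init(784, 128, (784, 128))  # hypothetical 784-in, 128-out dense layer
print(W.shape)  # (784, 128)
print(W.std())  # close to sqrt(2/(784+128)), about 0.047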
@@ -0,0 +1,166 @@
import numpy as np
from activations import *
from initializers import *


class Conv2d:
    '''Creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs'''

    def __init__(self, kernel_size, num_filters, input_num_filters, padding='same', activation='relu', use_bias=False):
        self.num_filters = num_filters
        self.kernel_size = kernel_size
        self.padding = padding
        self.stride = 1
        self.use_bias = use_bias
        self.activation = activation
        self.weights = xavier_init(input_num_filters, num_filters,
                                   (kernel_size, kernel_size, input_num_filters, num_filters))

    def apply_filter(self, curr_slice, kernel):
        '''
        Apply one filter to the current slice of data
        Arguments:
            curr_slice - current slice of data, shape (kernel_size,kernel_size,N_input_channels)
            kernel - weight array, shape (kernel_size,kernel_size,N_input_channels)
        Returns:
            Z - scalar value, result of convolving the sliding window with a slice of input data
        '''
        assert curr_slice.shape == kernel.shape

        # Element-wise product followed by a sum over all entries.
        res = np.multiply(curr_slice, kernel)
        Z = np.sum(res)
        if self.use_bias:
            # TODO: bias term is not implemented yet
            pass
        return Z

    def zero_pad(self, batch, n):
        '''Pad each image in the batch with n zeros on every side (H and W axes only)'''
        return np.pad(batch, ((0, 0), (n, n), (n, n), (0, 0)))

    def forward(self, batch):
        '''
        Implements the forward propagation for a convolution function
        Arguments:
            batch - current batch of data, shape (N_samples,H,W,N_input_channels)
        Returns: array of feature maps for all samples in batch, shape (N_samples,H_new,W_new,N_channels)
        '''
        n_samples, H, W, _ = batch.shape
        new_H, new_W, pad = 0, 0, 0
        k, s = self.kernel_size, self.stride

        if self.padding == 'same':
            # Assumes an odd kernel size, so the output keeps the input's H and W.
            pad = int((k - 1) / 2)
            new_H, new_W = H, W
        elif self.padding == 'valid':
            pad = 0
            new_H, new_W = H - k + 1, W - k + 1
        else:
            raise KeyError("Unknown padding value {0}".format(self.padding))

        output = np.zeros((n_samples, new_H, new_W, self.num_filters))
        padded_imgs = self.zero_pad(batch, pad)

        for i in range(n_samples):
            img_padded = padded_imgs[i, :, :, :]
            for h in range(new_H):
                for w in range(new_W):
                    for c in range(self.num_filters):
                        # corners of the sliding window:
                        h_start, h_end = h * s, h * s + k
                        w_start, w_end = w * s, w * s + k

                        curr_slice = img_padded[h_start:h_end, w_start:w_end, :]
                        output[i, h, w, c] = self.apply_filter(curr_slice, self.weights[:, :, :, c])

        if self.activation == 'relu':
            output = relu(output)

        return output
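
A shape check for the convolution layer (a sketch; it assumes the class above is in scope, and the batch is random data with illustrative constructor arguments):

import numpy as np

batch = np.random.rand(2, 8, 8, 3)  # hypothetical batch: two 8x8 RGB images

conv_same = Conv2d(kernel_size=3, num_filters=4, input_num_filters=3, padding='same')
print(conv_same.forward(batch).shape)   # (2, 8, 8, 4): 'same' keeps H and W

conv_valid = Conv2d(kernel_size=3, num_filters=4, input_num_filters=3, padding='valid')
print(conv_valid.forward(batch).shape)  # (2, 6, 6, 4): H-k+1 = W-k+1 = 6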
class MaxPool2d:
    '''Downsamples the input representation by taking the maximum value over the sliding window defined by kernel_size'''

    def __init__(self, kernel_size):
        self.kernel_size = kernel_size
        # Stride equals the kernel size, so pooling windows do not overlap.
        self.stride = kernel_size

    def forward(self, batch):
        '''
        Implements the forward propagation for a maximum pooling function
        Arguments:
            batch - current batch of data, shape (N_samples,H,W,N_input_channels)
        Returns: array of downsampled images for all samples in batch, shape (N_samples,H_new,W_new,N_input_channels)
        '''
        n_samples, H, W, n_channels = batch.shape
        k, s = self.kernel_size, self.stride
        new_H, new_W = H // k, W // k

        output = np.zeros((n_samples, new_H, new_W, n_channels))

        for i in range(n_samples):
            curr_img = batch[i, :, :, :]
            for h in range(new_H):
                for w in range(new_W):
                    for c in range(n_channels):
                        # corners of the sliding window:
                        h_start, h_end = h * s, h * s + k
                        w_start, w_end = w * s, w * s + k

                        curr_slice = curr_img[h_start:h_end, w_start:w_end, c]
                        output[i, h, w, c] = np.max(curr_slice)

        return output
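
A small worked example of the pooling (a sketch with made-up data):

import numpy as np

x = np.arange(16, dtype=float).reshape(1, 4, 4, 1)  # one 4x4 single-channel image
pool = MaxPool2d(kernel_size=2)
print(pool.forward(x)[0, :, :, 0])
# [[ 5.  7.]
#  [13. 15.]]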
class Dense:
    '''Dense layer'''

    def __init__(self, n_output, n_input, activation='softmax', use_bias=True):
        self.n_output = n_output
        self.n_input = n_input
        self.activation = activation
        self.use_bias = use_bias
        # Stored as self.w so the attribute does not collide with the
        # read-only `weights` property below.
        self.w = xavier_init(n_input, n_output, (n_input, n_output))
        self.b = np.zeros((1, n_output))

        self.dw, self.db = None, None

        if use_bias:
            self.b = xavier_init(n_input, n_output, (1, n_output))

    @property
    def gradients(self):
        if self.dw is None and self.db is None:
            return None
        return self.dw, self.db

    @property
    def weights(self):
        return self.w, self.b

    def forward(self, batch):
        assert batch.shape[1] == self.n_input
        output = np.dot(batch, self.w) + self.b

        if self.activation == 'softmax':
            return softmax(output)
        return output
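
Putting the three layers together in a forward pass (a minimal sketch; the shapes are illustrative and there is no backward pass yet):

import numpy as np

batch = np.random.rand(2, 8, 8, 3)

conv = Conv2d(kernel_size=3, num_filters=4, input_num_filters=3, padding='same')
pool = MaxPool2d(kernel_size=2)
dense = Dense(n_output=10, n_input=4 * 4 * 4, activation='softmax')

x = pool.forward(conv.forward(batch))  # (2, 4, 4, 4)
x = x.reshape(x.shape[0], -1)          # flatten to (2, 64)
probs = dense.forward(x)
print(probs.shape)        # (2, 10)
print(probs.sum(axis=1))  # each row sums to 1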
@@ -0,0 +1,10 @@
from utils import convert_prob_to_onehot
import numpy as np


def accuracy(y_true, y_hat):
    '''Fraction of samples whose one-hot prediction matches the one-hot label'''
    y_pred = convert_prob_to_onehot(y_hat)
    return (y_pred == y_true).all(axis=1).mean()


def categorical_cross_entropy(y_true, y_hat):
    '''Mean cross-entropy over the batch; predictions are clipped to avoid log(0)'''
    return -np.sum(y_true * np.log(np.clip(y_hat, 1e-20, 1.))) / y_hat.shape[0]
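
A small worked example of both metrics (a sketch; `convert_prob_to_onehot` lives in the repository's utils module, which is not shown in this diff, so the accuracy line assumes it one-hot encodes each row's argmax):

import numpy as np

y_true = np.array([[1., 0., 0.],
                   [0., 1., 0.]])
y_hat = np.array([[0.7, 0.2, 0.1],
                  [0.3, 0.4, 0.3]])

print(categorical_cross_entropy(y_true, y_hat))  # (-log 0.7 - log 0.4)/2, about 0.64
print(accuracy(y_true, y_hat))                   # 1.0 under the argmax assumption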
@@ -0,0 +1,26 @@
import time


class SequentialModel:
    '''Provides training and inference features on this model.'''

    def __init__(self, layers, optimizer):
        self.layers = layers
        self.optimizer = optimizer
        self.train_acc = []
        self.train_loss = []
        self.valid_acc = []
        self.valid_loss = []

    def train(self, X_train, y_train, X_valid, y_valid, epochs=10, batch_size=32):
        '''Train model. Minimal mini-batch wiring; backward and update_weights are still stubs.'''
        for epoch in range(epochs):
            # Iterate over the training set in mini-batches.
            for start in range(0, X_train.shape[0], batch_size):
                batch = X_train[start:start + batch_size]
                activation = self.forward(batch)
                self.backward(activation)
                self.update_weights()

    def forward(self, batch):
        pass

    def backward(self, activation):
        pass

    def update_weights(self):
        pass
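
Assembling the pieces (a sketch; `optimizer=None` because no optimizer class appears in this commit, and training is effectively a no-op while the stubs are unimplemented):

import numpy as np

model = SequentialModel(
    layers=[
        Conv2d(kernel_size=3, num_filters=4, input_num_filters=1, padding='same'),
        MaxPool2d(kernel_size=2),
        Dense(n_output=10, n_input=4 * 14 * 14, activation='softmax'),
    ],
    optimizer=None,  # hypothetical: no optimizer implementation is shown
)

X_train = np.random.rand(64, 28, 28, 1)             # hypothetical MNIST-sized inputs
y_train = np.eye(10)[np.random.randint(0, 10, 64)]  # random one-hot labels
model.train(X_train, y_train, X_valid=None, y_valid=None, epochs=1)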