
New functions
Tushn committed Dec 9, 2016
1 parent e77fa5f commit ac0bce0
Showing 6 changed files with 135 additions and 0 deletions.
14 changes: 14 additions & 0 deletions functions/ann/createmlp.m
@@ -0,0 +1,14 @@
% createmlp - build an MLP: n inputs, hidden-layer sizes p (vector), q outputs
function net = createmlp(n,p,q)
    net{1} = ones(1,n);        % input-layer placeholder (used by usemlp)
    net{2} = rand(n+1,p(1));   % input -> first hidden layer (+1 row for the bias)

    if(length(p)==1)
        i = 1;
    else
        for i = 2:length(p)
            net{i+1} = rand(p(i-1)+1,p(i)); % hidden -> hidden (+1 row for the bias)
        end
    end

    net{length(net)+1} = rand(p(i)+1,q);    % last hidden -> output (+1 row for the bias)
end
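A minimal usage sketch; the layer sizes here are illustrative, not from the commit:

net = createmlp(4, [8 5], 3); % 4 inputs, hidden layers of 8 and 5 neurons, 3 outputs
% net{2} is 5x8 (4 inputs + bias, by 8 neurons); net{4} is 6x3 (5 + bias, by 3 outputs)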
3 changes: 3 additions & 0 deletions functions/ann/logsig.m
@@ -0,0 +1,3 @@
% logsig - logistic sigmoid activation, maps n into (0,1)
function a = logsig(n)
    a = 1 ./ (1 + exp(-n));
end
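Worth noting for the backpropagation code below: the sigmoid's derivative can be computed from its own output, which is exactly what the out.*(1-out) terms in mlp.m rely on.

a = logsig(0.5);
da = a .* (1 - a); % d/dn logsig(n), evaluated from the activation itself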
59 changes: 59 additions & 0 deletions functions/ann/mlp.m
@@ -0,0 +1,59 @@
%% MLP training function
% [net, outnet, erros] = mlp(data, rt, alpha, hl, tries)
% Inputs:
% - data: input samples, one row per sample
% - rt: training targets, one row per sample
% - alpha: learning rate
% - hl: vector of hidden-layer sizes
% - tries: maximum number of training epochs
function [net, outnet, erros] = mlp(data, rt, alpha, hl, tries)
% optional input normalization (kept disabled):
% for i = 1:size(data,2)
%     data(:,i) = data(:,i)/norm(data(:,i));
% end
% rt = rt/norm(rt);

ol = size(rt, 2); % number of output neurons
net = createmlp(length(data(1,:)), hl, ol);
erros = [];
momentum = 1; % momentum = 1 leaves the weight update unscaled (see below)
for cont = 1:tries
    ids = randperm(size(data,1)); % present samples in random order each epoch
    fprintf('epoch %d\n', cont);
    while ~isempty(ids)
        id = ids(1); ids(1) = [];

        [out, outnet] = usemlp(data(id,:), net);
        % error at the output layer
        e = rt(id,:) - out;
        % deltas, propagated backwards; out.*(1-out) is the logsig derivative
        delta = {};
        delta{length(outnet)} = out.*(1-out).*(e);
        for i = length(outnet)-1:-1:1
            delta{i} = (outnet{i}.*(1-outnet{i})).*(delta{i+1}*net{i+1}(1:end-1,:)');
        end

        % adjust the weights: output layer first, then the hidden layers
        net{end} = net{end} + alpha*[outnet{end-1} -1]'*delta{end};
        for i = length(net)-1:-1:2
            w = net{i};
            w = w*momentum + alpha*[outnet{i-1} -1]'*delta{i};
            net{i} = w;
        end
        erro(id,:) = rt(id,:) - usemlp(data(id,:), net);
    end
    erros(cont) = mean(diag(erro'*erro))/size(erro,1); % epoch mean squared error
end

% Evaluate the trained network on the whole data set
oute = [];
outend = {};
for k = 1:size(data, 1)
    [oute(k,:), outend{k}] = usemlp(data(k,:), net);
end
oute % no semicolon: display the final outputs
end
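A training sketch on XOR; the data and hyperparameters below are illustrative assumptions, not part of the commit:

data = [0 0; 0 1; 1 0; 1 1];
rt   = [0; 1; 1; 0];                                % XOR targets
[net, outnet, erros] = mlp(data, rt, 0.5, 4, 2000); % one hidden layer of 4 neurons
plot(erros); xlabel('epoch'); ylabel('MSE');        % inspect convergence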
10 changes: 10 additions & 0 deletions functions/ann/usemlp.m
@@ -0,0 +1,10 @@
% usemlp - forward pass: propagate 'inputs' through the network 'net'
function [out, outnet] = usemlp(inputs, net)
    outnet{1} = (inputs.*net{1}); % input layer (net{1} is all ones)

    for i = 2:length(net)
        % append the bias input (-1) and apply the sigmoid activation
        outnet{i} = logsig([outnet{i-1} -1]*net{i});
    end

    out = outnet{length(outnet)}; % the last layer's activation is the output
end
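A forward-pass sketch with freshly initialized random weights; the sizes and input are arbitrary:

net = createmlp(3, 5, 2);                   % 3 inputs, 5 hidden neurons, 2 outputs
[out, outnet] = usemlp([0.2 0.7 0.1], net);
% out is 1x2; outnet{i} keeps each layer's activations, as backprop needs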
35 changes: 35 additions & 0 deletions functions/perceptron.m
@@ -0,0 +1,35 @@
%% perceptron - neural network
% [w, erro, y_est, errors] = perceptron(x, y, alpha, tries)
% - x: training data, one row per sample
% - y: target outputs
% - alpha: learning rate
% - tries: maximum number of epochs
%
% - w: estimated weights (last entry is the bias)
% - erro: error norm after each epoch
% - y_est: estimated 'y'
% - errors: per-sample errors from the last epoch
%
% Note: this perceptron is not ideal; it does not use a step activation,
% so updates fire whenever the raw error exceeds a fixed threshold.
function [w, erro, y_est, errors] = perceptron(x, y, alpha, tries)
    [sizeY, sizeX] = size(x);
    w = [rand(1,sizeX) 1]';      % random weights plus a bias term
    mat = [x ones(sizeY,1)];     % append a constant input for the bias

    errors = [];
    cont = 1;
    erro(cont) = norm(y - mat*w);
    while(erro(cont) > 0.001 && tries > 0)
        for i = 1:sizeY
            errors(i) = y(i) - mat(i,:)*w;
            if(abs(errors(i)) > 0.01) % step function not used
                w = w + (alpha*errors(i)*mat(i,:))';
            end
        end

        tries = tries - 1;
        cont = cont + 1;
        erro(cont) = norm(errors);
    end
    y_est = mat*w;
end
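An illustrative run on a linearly separable problem; the AND data below is an assumption for the example:

x = [0 0; 0 1; 1 0; 1 1];
y = [0; 0; 0; 1];                                      % logical AND
[w, erro, y_est, errors] = perceptron(x, y, 0.1, 100);
round(y_est)                                           % thresholded estimate of y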
14 changes: 14 additions & 0 deletions functions/rm.m
@@ -0,0 +1,14 @@
%% Linear regression using least squares (LS)
% [b, y_est, erro] = rm(x,y)
% - x: training data, one row per sample
% - y: target outputs
%
% - b: estimated coefficients (last entry is the intercept)
% - y_est: estimated 'y'
% - erro: error, computed as norm(y - y_est)
function [b, y_est, erro] = rm(x, y)
    x(:,size(x,2)+1) = ones(size(y,1),1); % append a column of ones for the intercept
    b = pinv(x'*x)*x'*y;                  % normal equations: b = (x'x)^(-1) x'y
    y_est = x*b;
    erro = norm(y - y_est);
end
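A small fitting sketch on synthetic data, made up for illustration:

x = (1:10)';
y = 3*x + 2 + 0.1*randn(10,1); % noisy line y = 3x + 2
[b, y_est, erro] = rm(x, y);   % expect b(1) near 3 and b(2) near 2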
