Tuesday, May 29, 2018

Recursive and iterative BFS, DFS, and inorder traversal of graphs and binary trees in Python

from collections import defaultdict
import queue

####### DFS RECURSIVE
class node:
    # simple binary tree node (used by the tree traversals below)
    def __init__(self, value):
        self.data = value
        self.left = None
        self.right = None
class DFS:
    def __init__(self):
        self.graph = defaultdict(list)   # adjacency lists

    def dfs_call(self, visited, val):
        # visit val, then recurse into each unvisited neighbour
        if val is None:
            return
        visited[val] = True
        print(val)
        for i in self.graph[val]:
            if i is not None and not visited[i]:
                self.dfs_call(visited, i)

    def dfs_recursive(self, val):
        visited = [False] * len(self.graph)
        self.dfs_call(visited, val)

###### DFS RECURSIVE

####### DFS ITERATIVE for binary tree (preorder)
def dfs_iter(root):
    # an explicit stack replaces the recursion
    stack = [root]
    while stack:
        val = stack.pop()
        print(val.data)
        # push the right child first so the left child is visited first
        if val.right is not None:
            stack.append(val.right)
        if val.left is not None:
            stack.append(val.left)

### DFS ITERATIVE FOR GRAPH
def dfs_iter_graph(g, n):
    # n = number of vertices (avoids shadowing the built-in len)
    visited = [False] * n
    stack = [0]
    visited[0] = True
    while stack:
        val = stack.pop()
        print(val)
        for i in g.graph[val]:
            if i is not None and not visited[i]:
                stack.append(i)
                visited[i] = True


## INORDER RECURSIVE AND ITERATIVE ###########
def inorder(root):
    # left subtree, node, right subtree
    if root:
        inorder(root.left)
        print(root.data)
        inorder(root.right)



def inorder_iter(root):
    # iterative inorder: walk left pushing nodes, then pop, print, and go right
    current = root
    stack = []
    while current is not None or stack:
        if current is not None:
            stack.append(current)
            current = current.left
        else:
            current = stack.pop()
            print(current.data)
            current = current.right

###### BFS WITH QUEUE
def BFS(g, n):
    # breadth-first traversal of the graph, starting from vertex 0
    q = queue.Queue()
    visited = [False] * n
    q.put(0)
    visited[0] = True
    while not q.empty():
        val = q.get()
        print(val)
        for i in g.graph[val]:
            if i is not None and not visited[i]:
                q.put(i)
                visited[i] = True   # mark when enqueued so each vertex is queued only once

root = node(5)
root.left = node(4)
root.right = node(10)
root.left.left = node(3)
root.right.right = node(15)
# inorder_iter(root)
# dfs_iter(root)

g = DFS()   # example graph as adjacency lists; None marks a missing child
g.graph[0].append(1)
g.graph[0].append(2)
g.graph[1].append(3)
g.graph[1].append(4)
g.graph[2].append(5)
g.graph[2].append(6)
g.graph[3].append(None)
g.graph[3].append(None)
g.graph[4].append(None)
g.graph[4].append(None)
g.graph[5].append(None)
g.graph[5].append(None)
g.graph[6].append(None)
g.graph[6].append(None)
# g.dfs_recursive(0)
n = len(g.graph)   # number of vertices
print(n)

# dfs_iter_graph(g, n)
BFS(g, n)
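
For a quick sanity check, these are the visit orders the calls above should print for the example tree and graph built here (worked out by hand, assuming the corrected code above):

# inorder_iter(root)     -> 3 4 5 10 15   (left, node, right)
# dfs_iter(root)         -> 5 4 3 10 15   (preorder)
# g.dfs_recursive(0)     -> 0 1 3 4 2 5 6
# dfs_iter_graph(g, n)   -> 0 2 6 5 1 4 3 (the stack pops the most recently pushed child first)
# BFS(g, n)              -> 0 1 2 3 4 5 6 (level order)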

Saturday, May 19, 2018

Matlab Multilayer Perceptron (MLP) with the backpropagation algorithm to solve a 3-class classification problem.

Specification:

Number of layers: 2 (hidden + output) 
Number of input units: 13 (+ 1 for bias)
Number of hidden units: 8 (+1 for bias) 
Number of output units: 3 
Activation functions: sigmoid for hidden units; softmax for output units 
Initial weights: uniform random numbers between 0 and 1
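
In equation form, the forward pass and weight update that the code below implements can be summarised as follows. This is only a sketch in the code's notation: V is the 8x14 input-to-hidden weight matrix, W the 3x9 hidden-to-output weight matrix, x the bias-augmented input, y the one-hot target, and the symbols $\delta$, $\eta$, $N$ are just shorthand for the error signals, learning_rate and the number of training examples in the listing.

$a = Vx,\quad z = \sigma(a),\quad \hat{y} = \mathrm{softmax}\!\left(W\,[1;z]\right)$

$\delta^{(o)} = \hat{y} - y,\qquad \delta^{(h)}_i = \Big(\textstyle\sum_k \delta^{(o)}_k W_{k,\,i+1}\Big)\, z_i(1-z_i)$

$W \leftarrow W - \tfrac{\eta}{N}\textstyle\sum_m \delta^{(o)}[1;z]^\top,\qquad V \leftarrow V - \tfrac{\eta}{N}\textstyle\sum_m \delta^{(h)} x^\top\quad (N = 178)$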




Code:






%% Laxmi Kadariya
% change the number of iterations and the learning rate where 'iterations' and 'learning_rate' are set below
%%% read the input file and normalize the features
clc
clear all
WineData = importdata("wine_data.txt");
class = WineData(1:end,1);
features = WineData(1:end,2:end);
max_feature = max(features);
features_norm = features./max_feature;
%%% forward pass structure
%%%calculate An
V = rand(8,14);
W = rand(3,9);
length = size(features,1);
learning_rate =0.5;
iterations =5000;
fprintf("following steps are followed\n");
fprintf("1: forward pass to compute an,zn,bn and yn\n");
fprintf("2: compute the error signal.\n");
fprintf("3: Pass error backward to compute error for hidden layer.\n");
fprintf("4: Compute gradient\n");
fprintf("5: Update weight\n");
fprintf("*********************************************\n");
count_original = 0;
fprintf("Total number of instances = %d \n",length);
fprintf("learning rate = %f \n",learning_rate);
fprintf("Total number of iterations = %d \n",iterations);
tic
for iter = 1:iterations
    Vchange = zeros(8,14);
    Wchange = zeros(3,9);
    cost = 0;
    for m = 1:length
        %%% 1: forward pass to compute an, zn, bn and yn
        X = [1,features_norm(m,1:end)].'; % prepend the bias input
        A = V*X;
        z_nobias = sigmoidFunction(A);
        zn = [1,z_nobias.'].'; %% hidden activations with bias
        bn = W*zn;
        ynh = softmaxFunc(bn); %% output probabilities (softmax)
        [maximum,Index] = max(ynh);
        if iter == 1
            if Index == class(m,1)
                count_original = count_original+1;
            end
        end
        output = class(m,1);
        if output == 1
            actual_yn = [1; 0; 0];
        elseif output == 2
            actual_yn = [0; 1; 0];
        else
            actual_yn = [0; 0; 1];
        end
        %% 2: compute the error signal
        error = ynh - actual_yn;
        % cost calculation
        cost = cost + sum(actual_yn.*log(ynh)+(1-actual_yn).*log(1-ynh));
        %% 3: pass the error backward to compute the error for the hidden layer
        for i = 1:8
            del(1,i) = sum(error.*W(:,i+1))*z_nobias(i)*(1-z_nobias(i));
        end
        %% 4: compute the gradient
        Wchange = Wchange + error .* zn';
        Vchange = Vchange + del' .* X';
    end
    %% 5: update the weights (178 = number of training instances)
    W = W - ((learning_rate/178).*Wchange);
    V = V - ((learning_rate/178).*Vchange);
    cost = cost*-1/178;
    if (iter == 1)
        fprintf("cost before training %f\n",cost);
        fprintf("Total number of correctly classified instances before training = %d \n",count_original);
        fprintf("Start Training\n");
    end
end
%%% testing
count = 0;
for k = 1:length
    X = [1,features_norm(k,1:end)].'; % prepend the bias input
    A = V*X;
    z_nobias = sigmoidFunction(A);
    zn = [1,z_nobias.'].';
    bn = W*zn;
    yn = softmaxFunc(bn); %% output probabilities (softmax)
    [maximum,Index] = max(yn);
    classify_output(k,1) = Index;
    if Index == class(k,1)
        count = count+1;
    end
end
time = toc;
fprintf("Total number of correctly classified instances after training = %d \n",count);
fprintf("Cost after Training = %f \n",cost);
fprintf("Total time = %f sec\n",time);
fprintf("*********************************************\n");
function s = softmaxFunc(z)
    % Compute the softmax function
    s = exp(z)/sum(exp(z));
end
function g = sigmoidFunction(z)
    % Compute the sigmoid function
    g = 1.0 ./ (1.0 + exp(-z)); % For Matlab
    % g = 1.0 ./ (1.0 + e.^(-z)); % For Octave, it can use 'exp(1)' or 'e'
end
Output:


following steps are followed
1: forward pass to compute an,zn,bn and yn
2: compute the error signal.
3: Pass error backward to compute error for hidden layer.
4: Compute gradient
5: Update weight
*********************************************
Total number of instances = 178 
learning rate = 0.500000 
Total number of iterations = 5000 
cost before training 1.971846
Total number of correctly classified instances before training = 71 
Start Training
Total number of correctly classified instances after training = 178 
Cost after Training = 0.020612 
Total time = 22.546547 sec
*********************************************

Matlab program implementing the gradient descent algorithm to fit linear regression parameters

Data: fisherIrisSetosaData

Implementation of linear regression to predict sepal length given sepal width.



This program finds the regression parameters w0 and w1. With these parameters, the output for a test instance can be obtained.
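
Concretely, every pass over the N = 50 training points applies the batch gradient-descent update below (a sketch of the update the loop in the code performs, with learning rate $\alpha$ corresponding to alpha in the code):

$w_0 \leftarrow w_0 - \tfrac{\alpha}{N}\textstyle\sum_{i=1}^{N}\big(w_0 + w_1 x_i - y_i\big),\qquad w_1 \leftarrow w_1 - \tfrac{\alpha}{N}\textstyle\sum_{i=1}^{N}\big(w_0 + w_1 x_i - y_i\big)\,x_i$

The loop stops once both parameters change by less than $10^{-6}$ between consecutive iterations.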
Code:



clc
clear all

fisherIrisSetosaData = importdata("fisherIrisSetosaData.txt");

Y = fisherIrisSetosaData(1:end,1);%sepal length
X = fisherIrisSetosaData(1:end,2);%sepal Width

w0_old = 0;
w1_old = 0;
w0diff =1;
w1diff = 1;
alpha = 0.1;
count =0;
tic;
while (w0diff >0.000001 || w1diff >0.000001)
        count = count +1;
        sum_w0 = 0;
        sum_w1 = 0;
        for i = 1:50
            sum_w0 = sum_w0 + ((w0_old + w1_old.*X(i,1)) - Y(i,1));
            sum_w1 = sum_w1 + ((w0_old + w1_old.*X(i,1)) - Y(i,1)).*X(i,1);
        end
       % sum_w0 = sum((w0_old+w1_old.*X)-Y);
       % sum_w1 =sum(((w0_old+w1_old.*X)-Y).*X);
        step_w0 = alpha/50 * sum_w0;   % (alpha/N) times the gradient w.r.t. w0
        step_w1 = alpha/50 * sum_w1;   % (alpha/N) times the gradient w.r.t. w1
        w0_new = w0_old - step_w0;
        w1_new = w1_old - step_w1;
        w0diff = abs(w0_old - w0_new);
        w1diff = abs(w1_old - w1_new);
        w0_old = w0_new;
        w1_old = w1_new;
end

time_descent = toc;


fprintf('Values of parameters w0 and w1 with alpha = %f:\n',alpha)
%disp("value of Parameter w0 and w1 is given as ");
fprintf('W0 = %f\n',w0_old);
fprintf('W1 = %f\n',w1_old);

plot(X,Y,"g*");
hold on
calculated_y = w0_old+w1_old.* X;
plot(X,calculated_y,'r')
legend("real data","calculated line",'Location','NorthWest');

xlabel("SepalWidth");
ylabel("Sepal Length");
title("linear regression with gradient descent");
fprintf("Solving linear function takes %f seconds.\n",time_descent);
fprintf("Total number of Iteration  = %d.\n",count);
disp(count)