
Question

# MLP with backpropagation learning of AND function
from pylab import *

# training vectors (Boolean AND function and constant bias input)
X = array([[0, 0, 1, 1], [0, 1, 0, 1], [1, 1, 1, 1]])
Y = array([0, 0, 0, 1])

# model specifications
Ni = 3; Nh = 3; No = 1

# parameter and array initialization
Ntrials = 2000
h = zeros(Nh); y = zeros(No)
wh = randn(Nh, Ni); wo = randn(No, Nh)
dwh = zeros(wh.shape); dwo = zeros(wo.shape)
dh = zeros(Nh); do = zeros(No)
error = zeros(Ntrials)

for trial in range(Ntrials):
    # pick an example pattern at random
    pat = randint(4); x = X[:, pat]

    # calculate hidden states of sigmoid nodes
    for ih in range(Nh):                # for each hidden node
        sumInput = 0
        for ii in range(Ni):            # loop over input features
            sumInput = sumInput + wh[ih, ii] * x[ii]
        h[ih] = 1 / (1 + exp(-sumInput))

    # calculate output (prediction) with sigmoid nodes
    for io in range(No):                # for each output node
        sumInput = 0
        for ih in range(Nh):            # loop over inputs from hidden layer
            sumInput = sumInput + wo[io, ih] * h[ih]
        y[io] = 1 / (1 + exp(-sumInput))

    # delta term for each layer (derivative of objective-function error)
    for io in range(No):                # for each output node
        do[io] = y[io] * (1 - y[io]) * (Y[pat] - y[io])
    for ih in range(Nh):                # for each hidden node
        sumInput = 0                    # backpropagation starts
        for io in range(No):
            sumInput = sumInput + wo[io, ih] * do[io]
        dh[ih] = h[ih] * (1 - h[ih]) * sumInput

    # update weights with momentum
    for io in range(No):
        for ih in range(Nh):
            dwo[io, ih] = 0.9 * dwo[io, ih] + h[ih] * do[io]
            wo[io, ih] = wo[io, ih] + 0.1 * dwo[io, ih]
    for ih in range(Nh):
        for ii in range(Ni):
            dwh[ih, ii] = 0.9 * dwh[ih, ii] + x[ii] * dh[ih]
            wh[ih, ii] = wh[ih, ii] + 0.1 * dwh[ih, ii]

    # test all patterns and record the summed absolute error per trial
    for pat in range(4):
        x = X[:, pat]
        # calculate prediction
        for ih in range(Nh):            # for each hidden node
            sumInput = 0
            for ii in range(Ni):        # loop over input features
                sumInput = sumInput + wh[ih, ii] * x[ii]
            h[ih] = 1 / (1 + exp(-sumInput))
        for io in range(No):            # for each output node
            sumInput = 0
            for ih in range(Nh):        # loop over inputs from hidden layer
                sumInput = sumInput + wo[io, ih] * h[ih]
            y[io] = 1 / (1 + exp(-sumInput))
            # y[io] = round(y[io])

        error[trial] = error[trial] + abs(y[0] - Y[pat])

plot(error)
show()
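
The nested loops above just compute matrix-vector products, so one training step can be written compactly with NumPy array operations. The following is a minimal sketch, not part of the original listing: the function name train_step is hypothetical, and it assumes the same shapes (wh is Nh x Ni, wo is No x Nh) and the same learning rate 0.1 and momentum 0.9 used above.

import numpy as np

def train_step(x, t, wh, wo, dwh, dwo, lr=0.1, mom=0.9):
    # One backprop step; mirrors the loop version above.
    h = 1 / (1 + np.exp(-wh @ x))          # hidden sigmoid activations
    y = 1 / (1 + np.exp(-wo @ h))          # output sigmoid activations
    do = y * (1 - y) * (t - y)             # output delta
    dh = h * (1 - h) * (wo.T @ do)         # backpropagated hidden delta
    dwo[:] = mom * dwo + np.outer(do, h)   # momentum-smoothed weight changes
    dwh[:] = mom * dwh + np.outer(dh, x)
    wo += lr * dwo                         # apply the updates in place
    wh += lr * dwh
    return y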

1. [50 marks, 40 marks for Grads] Implement a multi-layer perceptron (MLP) by modifying the MLP program from the class to solve the XOR problem, and train it to translate the digital letters given in file pattern1 into the corresponding ASCII representation. Plot a training curve and interpret your results.
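
As a starting point for the XOR part, the four input patterns are unchanged and only the target vector differs from AND: XOR of the two inputs is 0, 1, 1, 0. A minimal sketch of the data change, assuming the rest of the program above stays the same (the hidden layer is what allows the network to learn this non-linearly-separable function, and XOR typically needs more trials to converge than AND):

# training vectors (Boolean XOR function and constant bias input)
X = array([[0, 0, 1, 1], [0, 1, 0, 1], [1, 1, 1, 1]])
Y = array([0, 1, 1, 0])   # XOR targets instead of AND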

Explanation / Answer

public T getRootData() {
    return root.getData();
}

public int getHeight() {
    return root.getHeight();
}

public int getNumberOfNodes() {
    return root.getNumberOfNodes();
}

public boolean isEmpty() {
    return root == null;
}

public void clear() {
    root = null;
}

protected BinaryNode<T> getRootNode() {
    return root;
}

protected void setRootData(T rootData) {
    root.setData(rootData);
}

protected void setRootNode(BinaryNode<T> rootNode) {
    root = rootNode;
}

public Iterator<T> getPreorderIterator() {
    throw new UnsupportedOperationException("Preorder not supported.");
}

public Iterator<T> getInorderIterator() {
    throw new UnsupportedOperationException("Inorder not supported.");
}

public Iterator<T> getPostorderIterator() {
    return new PostorderIterator();
}

public Iterator<T> getLevelorderIterator() {
    throw new UnsupportedOperationException("Level Order not supported.");
}

private class PostorderIterator implements Iterator<T> {

    private Stack<BinaryNode<T>> nodeStack;
    private BinaryNode<T> current;

    public PostorderIterator() {
        nodeStack = new Stack<>();
        current = root;
        populateStack(current);
    }

    // Pushes a node, then its right subtree, then its left subtree, so
    // that pops come back in left-right-root (postorder) order.
    private void populateStack(BinaryNode<T> node) {
        if (node == null) {   // guard: an empty tree leaves the stack empty
            return;
        }
        nodeStack.add(node);
        if (node.hasRightChild()) {
            populateStack(node.getRightChild());
        }
        if (node.hasLeftChild()) {
            populateStack(node.getLeftChild());
        }
    }

    public boolean hasNext() {
        return !nodeStack.isEmpty();
    }

    public T next() {