Error in Naive Bayes classifier - Python

I'm a beginner in machine learning and I'm trying to implement my first Naive Bayes classifier by myself for better understanding. So, I have a dataset from http://archive.ics.uci.edu/ml/datasets/Adult (American census data; the classes are '<=50K' and '>50K').
Here is my Python code:
#!/usr/bin/python
import sys
import csv

words_stats = {} # {'word': {'class1': cnt, 'class2': cnt'}}
words_cnt = 0

targets_stats = {} # {'class1': 3234, 'class2': 884} how many words in each class
class_stats = {} # {'class1': 7896, 'class2': 3034} how many lines in each class
items_cnt = 0

def train(dataset, targets):
    global words_stats, words_cnt, targets_stats, items_cnt, class_stats

    num = len(dataset)
    for item in xrange(num):
        class_stats[targets[item]] = class_stats.get(targets[item], 0) + 1
        for i in xrange(len(dataset[item])):
            word = dataset[item][i]
            if not words_stats.has_key(word):
                words_stats[word] = {}

            tgt = targets[item]

            cnt = words_stats[word].get(tgt, 0)
            words_stats[word][tgt] = cnt + 1

            targets_stats[tgt] = targets_stats.get(tgt, 0) + 1
            words_cnt += 1

    items_cnt = num

def classify(doc, tgt_set):
    global words_stats, words_cnt, targets_stats, items_cnt

    probs = {} # the probability itself P(c|W) = P(W|c) * P(c) / P(W)
    pc = {}    # probability of the class in the document set P(c)
    pwc = {}   # probability of the word set in a particular class P(W|c)
    pw = 1     # probability of the word set in the document set

    for word in doc:
        if word not in words_stats:
            continue # dirty, very dirty

        pw = pw * float(sum(words_stats[word].values())) / words_cnt

    for tgt in tgt_set:
        pc[tgt] = class_stats[tgt] / float(items_cnt)
        for word in doc:
            if word not in words_stats:
                continue # dirty, very dirty

            tgt_wrd_cnt = words_stats[word].get(tgt, 0)
            pwc[tgt] = pwc.get(tgt, 1) * float(tgt_wrd_cnt) / targets_stats[tgt]

        probs[tgt] = (pwc[tgt] * pc[tgt]) / pw

    l = sorted(probs.items(), key = lambda i: i[1], reverse=True)
    print probs
    return l[0][0]

def check_results(dataset, targets):
    num = len(dataset)
    tgt_set = set(targets)
    correct = 0
    incorrect = 0

    for item in xrange(num):
        res = classify(dataset[item], tgt_set)
        if res == targets[item]:
            correct = correct + 1
        else:
            incorrect = incorrect + 1

    print 'correct:', float(correct) / num, ' incorrect:', float(incorrect) / num

def load_data(fil):
    data = []
    tgts = []
    reader = csv.reader(fil)
    for line in reader:
        d = [x.strip() for x in line]
        if '?' in d:
            continue
        if not len(d):
            continue

        data.append(d[:-1])
        tgts.append(d[-1:][0])

    return data, tgts

if __name__ == '__main__':
    if len(sys.argv) < 3:
        print './program train_data.txt test_data.txt'
        sys.exit(1)

    filename = sys.argv[1]
    fil = open(filename, 'r')
    data, tgt = load_data(fil)
    train(data, tgt)

    test_file = open(sys.argv[2], 'r')
    test_data, test_tgt = load_data(test_file)
    check_results(test_data, tgt)
It gives ~61% correct results. When I print the probabilities I get the following:
{'<=50K': 0.07371606889800396, '>50K': 15.325378327213354}
But for a correct classifier I would expect the two probabilities to sum to 1.
At first I thought the problem was float underflow and tried to do all the calculations in logarithms, but the results were similar.
I understand that omitting some words will affect accuracy, but the probabilities are way off.
What am I doing wrong or not understanding?
For your convenience I've uploaded the dataset and the Python script here:
https://dl.dropboxusercontent.com/u/36180992/adult.tar.gz
Thank you for your help.

Naive Bayes doesn't compute a probability directly; rather, it computes a "raw score" for each label that is compared against the other labels' scores in order to classify an instance. This score can easily be converted to a "probability" in the range [0, 1]:
total = sum(probs.itervalues())
for label, score in probs.iteritems():
    probs[label] = score / total
However, keep in mind this still doesn't represent a true probability, as mentioned in this answer:
naive Bayes tends to predict probabilities that are almost always either very close to zero or very close to one.
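
For illustration, here is a minimal sketch (not part of the original answer) of the same kind of classifier computed in log space with add-one smoothing. The dictionary arguments mirror the names used in the question's code (class_stats, words_stats, targets_stats, items_cnt) and are assumptions here; P(W) is dropped because it cancels out in the normalization step:

import math

def classify_log(doc, tgt_set, class_stats, words_stats, targets_stats, items_cnt):
    vocab_size = len(words_stats)
    log_scores = {}
    for tgt in tgt_set:
        score = math.log(class_stats[tgt] / float(items_cnt))   # log P(c)
        for word in doc:
            cnt = words_stats.get(word, {}).get(tgt, 0)
            # Laplace smoothing keeps unseen (word, class) pairs from zeroing the score
            score += math.log((cnt + 1.0) / (targets_stats[tgt] + vocab_size))
        log_scores[tgt] = score

    # Normalize with the log-sum-exp trick so the values sum to 1 without underflow
    m = max(log_scores.values())
    exp_scores = dict((tgt, math.exp(s - m)) for tgt, s in log_scores.items())
    total = sum(exp_scores.values())
    probs = dict((tgt, s / total) for tgt, s in exp_scores.items())
    return max(probs, key=probs.get), probs

With this normalization the two values do sum to 1, although, as the quote above notes, they will usually still sit very close to 0 and 1.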

How to read data in a class to solve a tabu search problem

I'm trying to learn search algorithms in order to prepare for my master's thesis, so I have a TSP problem in which I want to find the best, minimal route to visit all the states. I'm using a .txt file named cities__coordinates.txt that contains the coordinates for every state. To read the data I found this source code, which has a class that reads the data; you'll find it here:
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import copy
import re
import math

class Data():
    '''
    the format of solomon dataset
    '''
    def __init__(self):
        self.customerNum = 0  # the number of customers
        self.nodeNum = 0      # the sum of customers and depots
        self.vehicleNum = 0
        self.capacity = 0
        self.cor_X = []
        self.cor_Y = []
        self.demand = []
        self.readyTime = []
        self.dueTime = []
        self.serviceTime = []
        self.disMatrix = {}

    def read_data(self, path, customerNum, depotNum):
        '''
        function to read solomon data from .txt files, notice that it must be solomon dataset
        INPUT
            # data : class Data
            # path : Data path
            # customerNum : the number of customers
        Output : none
        '''
        self.customerNum = customerNum
        self.nodeNum = customerNum + depotNum
        f = open('cities__coordinates.txt', 'r')
        lines = f.readlines()
        count = 0
        for line in lines:
            count = count + 1
            if(count == 5):
                line = line[:-1].strip()
                str = re.split(r" +", line)
                self.vehicleNum = float(str[0])
                self.capacity = float(str[1])
            elif(count >= 10 and count <= 10 + customerNum):
                line = line[:-1]
                str = re.split(r" +", line)
                self.cor_X.append(float(str[2]))
                self.cor_Y.append(float(str[3]))
                self.demand.append(float(str[4]))
                self.readyTime.append(float(str[5]))
                self.dueTime.append(float(str[6]))
                self.serviceTime.append(float(str[7]))
        # compute the distance matrix
        self.disMatrix = {}
        for i in range(0, self.nodeNum):
            dis_temp = {}
            for j in range(0, self.nodeNum):
                dis_temp[j] = int(math.hypot(self.cor_X[i] - self.cor_X[j], self.cor_Y[i] - self.cor_Y[j]))
            self.disMatrix[i] = dis_temp

    def plot_nodes(self):
        '''
        Description: function to plot
        '''
        Graph = nx.DiGraph()
        nodes_name = [str(x) for x in list(range(self.nodeNum))]
        Graph.add_nodes_from(nodes_name)
        cor_xy = np.array([self.cor_X, self.cor_Y]).T.astype(int)
        pos_location = {nodes_name[i]: x for i, x in enumerate(cor_xy)}
        nodes_color_dict = ['r'] + ['gray'] * (self.nodeNum - 1)
        nx.draw_networkx(Graph, pos_location, node_size=200, node_color=nodes_color_dict, labels=None)
        plt.show(Graph)

    def plot_route(self, route, color='k'):
        Graph = nx.DiGraph()
        nodes_name = [0]
        cor_xy = [[self.cor_X[0], self.cor_Y[0]]]
        edge = []
        edges = [[0, route[0]]]
        for i in route:
            nodes_name.append(i)
            cor_xy.append([self.cor_X[i], self.cor_Y[i]])
            edge.append(i)
            if len(edge) == 2:
                edges.append(copy.deepcopy(edge))
                edge.pop(0)
        edges.append([route[-1], 0])
        Graph.add_nodes_from(nodes_name)
        Graph.add_edges_from(edges)
        pos_location = {nodes_name[i]: x for i, x in enumerate(cor_xy)}
        nodes_color_dict = ['r'] + ['gray'] * (len(route))
        nx.draw_networkx(Graph, pos_location, node_size=200, node_color=nodes_color_dict, edge_color=color, labels=None)
        plt.show(Graph)
So in the read_data function I've changed the path to my .txt file. And here is the code that computes all the distances and runs the tabu search and the rest of it:
from itertools import combinations
import os, sys, copy
import numpy as np
import time
from Datareader import Data
import matplotlib.pyplot as plt

class Tabu():
    def __init__(self, disMatrix, max_iters=200, maxTabuSize=20):
        """parameters definition"""
        self.disMatrix = disMatrix
        self.maxTabuSize = maxTabuSize
        self.max_iters = max_iters
        self.tabu_list = []

    def get_route_distance(self, route):
        '''
        Description: function to calculate the total distance of a route. evaluate function.
        parameters: route : list
        return : total distance : float
        '''
        routes = [0] + route + [0]  # add the start and end point
        total_distance = 0
        for i, n in enumerate(routes):
            if i != 0:
                total_distance = total_distance + self.disMatrix[last_pos][n]
            last_pos = n
        return total_distance

    def exchange(self, s1, s2, arr):
        """
        function to swap the positions of two elements in an arr
        Args: int, int, list
            s1 : target 1
            s2 : target 2
            arr : target array
        Output: list
            current_list : target array
        """
        current_list = copy.deepcopy(arr)
        index1, index2 = current_list.index(s1), current_list.index(s2)  # get indices
        current_list[index1], current_list[index2] = arr[index2], arr[index1]
        return current_list

    def generate_initial_solution(self, num=10, mode='greedy'):
        """
        function to get the initial solution; there are two different ways to generate route_init.
        Args:
            num : int
                the number of points
            mode : string
                "greedy" : advance step by step, choosing the optimal one
                "random" : randomly generate a series of numbers
        Output: list
            s_init : initial solution route_init
        """
        if mode == 'greedy':
            route_init = [0]
            for i in range(num):
                best_distance = 10000000
                for j in range(num + 1):
                    if self.disMatrix[i][j] < best_distance and j not in route_init:
                        best_distance = self.disMatrix[i][j]
                        best_candidate = j
                route_init.append(best_candidate)
            route_init.remove(0)

        if mode == 'random':
            route_init = np.arange(1, num + 1)  # init solution from 1 to num
            np.random.shuffle(route_init)       # shuffle the list randomly

        return list(route_init)

    def tabu_search(self, s_init):
        """tabu search"""
        s_best = s_init
        bestCandidate = copy.deepcopy(s_best)
        routes, temp_tabu = [], []  # init
        routes.append(s_best)
        while(self.max_iters):
            self.max_iters -= 1  # number of iterations
            neighbors = copy.deepcopy(s_best)
            for s in combinations(neighbors, 2):
                sCandidate = self.exchange(s[0], s[1], neighbors)  # exchange numbers to generate candidates
                if s not in self.tabu_list and self.get_route_distance(sCandidate) < self.get_route_distance(bestCandidate):
                    bestCandidate = sCandidate
                    temp_tabu = s
            if self.get_route_distance(bestCandidate) < self.get_route_distance(s_best):  # record the best solution
                s_best = bestCandidate
            if temp_tabu not in self.tabu_list:
                self.tabu_list.append(temp_tabu)
            if len(self.tabu_list) > self.maxTabuSize:
                self.tabu_list.pop(0)
            routes.append(bestCandidate)
        return s_best, routes


if __name__ == "__main__":
    data = Data()
    data.read_data(path='cities__coordinates.txt', customerNum=100, depotNum=1)  # change the path

    """ Tabu :
        disMatrix : the distance matrix from 0 to X, 0 represents the starting and stopping point.
            for example: disMatrix = [[0,3,4,...
                                       1,0,5,...
                                       3,5,0,...]]
            that means the distance from 0 to 0 is 0, from 0 to 1 is 3, ... from 1 to 3 is 5...
        max_iters : maximum iterations
        maxTabuSize : maximum size of the tabu list
    """
    tsp = Tabu(disMatrix=data.disMatrix, max_iters=10, maxTabuSize=10)
    # two different ways to generate the initial solution
    # num : the number of points
    s_init = tsp.generate_initial_solution(num=10, mode='greedy')  # mode = "greedy" or "random"
    print('init route : ', s_init)
    print('init distance : ', tsp.get_route_distance(s_init))

    start = time.time()
    best_route, routes = tsp.tabu_search(s_init)  # tabu search
    end = time.time()

    print('best route : ', best_route)
    print('best distance : ', tsp.get_route_distance(best_route))
    print('the time cost : ', end - start)

    # plot how the result changes with iterations
    results = []
    for i in routes:
        results.append(tsp.get_route_distance(i))
    plt.plot(np.arange(len(results)), results)
    plt.show()
    # plot the route
    data.plot_route(best_route)
When I execute it, it takes a little time and then shows me this error:
Traceback (most recent call last):
  File "C:/Users/malle/OneDrive/Desktop/TS.py", line 100, in <module>
    data.read_data(path='cities__coordinates.txt',customerNum=100,depotNum=1) # change the path
  File "C:/Users/malle/OneDrive/Desktop\Datareader.py", line 49, in read_data
    self.cor_X.append(float(str[2]))
IndexError: list index out of range
Can anyone help resolve this problem, please?
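For context, a small diagnostic sketch (my own illustration, not from the original post): the IndexError means that for some line, re.split(r" +", line) returned fewer tokens than the indices read_data uses (str[2] ... str[7]). Printing the offending lines in the same row window the class reads makes that visible; the column count of 8 and customerNum of 100 are taken from the script above:

import re

with open('cities__coordinates.txt', 'r') as f:
    for count, line in enumerate(f, start=1):
        if 10 <= count <= 10 + 100:                # customerNum = 100, as in the script
            tokens = re.split(r" +", line[:-1])    # split exactly like read_data does
            if len(tokens) < 8:
                print('line', count, 'has only', len(tokens), 'columns:', repr(line))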

Manual kth Nearest Neighbor Euclidean Distance

I have to modify the following code in order to use 1, 3, and 5 neighbors and print the accuracy of each one. I cannot use the sklearn KNeighborsClassifier, so I am stuck because I don't know how to change the number of neighbors k here.
Code:
shuffle_df = dataset.sample(frac=1)
train_size = int(0.75 * len(dataset))
train_set = shuffle_df[:train_size]
test_set = shuffle_df[train_size:]

testarray = []
for w in range(37):
    sepallengthtest = test_set.iloc[w,0]
    sepalwidthtest = test_set.iloc[w,1]
    petallenghttest = test_set.iloc[w,2]
    petalwidthtest = test_set.iloc[w,3]
    classtest = test_set.iloc[w,4]
    trainarry = []
    for m in range(111):
        sepallengthtrain = train_set.iloc[m,0]
        widthtesthtrain = train_set.iloc[m,1]
        petallenghttrain = train_set.iloc[m,2]
        petalwidthtrain = train_set.iloc[m,3]
        classtrain = test_set.iloc[w,4]
        distance = sqrt((sepallengthtest-sepallengthtrain)**2 + (sepalwidthtest-widthtesthtrain)**2 + (petallenghttest-petallenghttrain)**2 + (petalwidthtest-petalwidthtrain)**2)
        if len(trainarry)==0:
            trainarry.append([distance, classtest, classtrain])
        if trainarry[0][0]>distance:
            trainarry.pop(0)
            trainarry.append([distance, classtest, classtrain])
    testarray.append(trainarry)

count = 0
for i in range(37):
    results = testarray[i]
    if results[0][1] == results[0][2]:
        count += 1
print(count)
accuracy = count/len(testarray)
print(f"\n Accuracy{accuracy: .3%}")
Output:
Neighbors: 1 3 5
Success Rate: 86.8% 89.5% 89.5%
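
For what it's worth, here is a minimal sketch (my own, not the original assignment code) of how the inner loop can keep the k nearest training rows and take a majority vote, assuming the same pandas train_set / test_set split as above with the class label in column 4:

from math import sqrt
from collections import Counter

def knn_accuracy(train_set, test_set, k):
    # Illustrative k-NN over the four numeric feature columns
    correct = 0
    for w in range(len(test_set)):
        test_features = test_set.iloc[w, 0:4].values
        true_class = test_set.iloc[w, 4]
        distances = []
        for m in range(len(train_set)):
            train_features = train_set.iloc[m, 0:4].values
            train_class = train_set.iloc[m, 4]          # label taken from the train row
            d = sqrt(sum((test_features - train_features) ** 2))
            distances.append((d, train_class))
        distances.sort(key=lambda pair: pair[0])        # nearest first
        top_k_labels = [label for _, label in distances[:k]]
        predicted = Counter(top_k_labels).most_common(1)[0][0]  # majority vote
        if predicted == true_class:
            correct += 1
    return correct / len(test_set)

# for k in (1, 3, 5):
#     print(k, knn_accuracy(train_set, test_set, k))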

How to fix broken data in feature extraction/pre-processing in speech recognition?

I am very new to machine learning. I stumbled on this source code on GitHub that has no database, so I decided to use my own database. The code recognizes speakers with MFCC and GMM-UBM. But when I try to run it, I get this error: "ValueError: Found array with 1 sample(s) (shape=(1, 13)) while a minimum of 2 is required". It seems like when the code tries to fit the GMM on the 68th dataset, the MFCC shape of that data is broken. I assume something goes wrong in the feature extraction process.
Please help me! Thank you very much.
Here's the code:
import python_speech_features as psf
from sklearn.mixture import GaussianMixture
from sklearn.externals import joblib
from scipy.io import wavfile
from functools import reduce
import numpy as np
from os import listdir
from os.path import isfile, join
import os
import re

DATA_PATH = 'dataCoba'

# Make a list of speakers from the newdata/data folder. The format for the files in the folder is
# name_1.wav for training and name_2.wav for testing
substring = "_2"

onlyfiles = [f for f in listdir(DATA_PATH) if isfile(join(DATA_PATH, f))]
onlyfiles.sort()
onlyones = []
for filename in onlyfiles:
    dups = re.search('[\w]+_2.wav', filename)
    #dups = re.search('[\w].wav', filename)
    if dups is None:
        onlyones.append(''.join(filename.split('_')[0]))
print(onlyones)

SPEAKERS = onlyones
TOTAL_SPEAKERS = len(SPEAKERS)
MODEL_SPEAKERS = len(SPEAKERS)
print(len(SPEAKERS))


class SpeakerRecognition:
    # Create a GMM and UBM model for each speaker. The GMM is modelled after the speaker and UBM for each speaker
    # is modelled after all the other speakers. Likelihood Ratio test is used to verify the speaker
    def setGMMUBM(self, no_components):
        self.GMM = []
        self.UBM = []
        for i in range(MODEL_SPEAKERS):
            self.GMM.append(GaussianMixture(n_components=no_components, covariance_type='diag'))
            self.UBM.append(GaussianMixture(n_components=no_components, covariance_type='diag'))

    # Load in data from .wav files in data/
    # Extract mfcc (first 13 coefficients) from each audio sample
    def load_data(self):
        # training
        self.spk = [wavfile.read(DATA_PATH + '/' + (str(i).replace('.wav', '')) + '_1.wav') for i in SPEAKERS]
        self.spk_mfcc = [psf.mfcc(self.spk[i][1], self.spk[i][0]) for i in range(0, TOTAL_SPEAKERS)]

        # testing
        self.p_spk = [wavfile.read(DATA_PATH + '/' + (str(i).replace('.wav', '')) + '_2.wav') for i in SPEAKERS]
        self.p_spk_mfcc = [psf.mfcc(self.p_spk[i][1], self.p_spk[i][0]) for i in range(0, TOTAL_SPEAKERS)]
        print(self.spk_mfcc)

        for i in range(TOTAL_SPEAKERS):
            self.spk_train_size.append(len(self.spk_mfcc[i]))
            self.spk_start.append(len(self.total_mfcc))
            print("Speaker Number(train) = ", i)
            print("self.spk_mfcc[i] = ", len(self.spk_mfcc[i]))
            for mfcc in self.spk_mfcc[i]:
                self.total_mfcc.append(mfcc)
                self.speaker_label.append(i)
            self.spk_end.append(len(self.total_mfcc))
            print("self.total_mfcc = ", len(self.total_mfcc))
            print("\n")

        for i in range(TOTAL_SPEAKERS):
            #print("self.p_spk_mfcc =", self.p_spk_mfcc)
            self.spk_test_size.append(len(self.p_spk_mfcc[i]))
            self.spk_start.append(len(self.p_total_mfcc))
            print("Speaker Num(test) = ", i)
            print("self.p_spk_mfcc = ", len(self.p_spk_mfcc[i]))
            print("MFCC Shape = ", self.spk_mfcc[i].shape)
            for mfcc in self.p_spk_mfcc[i]:
                self.p_total_mfcc.append(mfcc)
                self.p_speaker_label.append(i)
            self.p_spk_end.append(len(self.p_total_mfcc))
            print("self.total_mfcc = ", len(self.p_total_mfcc))
            print("\n")

    # Gaussian Mixture Model is made of a number of Gaussian distribution components.
    # To model data, a suitable number of gaussian components have to be selected.
    # There is no method for finding this. It is done by trial and error. This runs
    # the program for different values of components and records the accuracy for each one
    def find_best_params(self):
        best_no_components = 1
        maxacc = 0
        for i in range(100, 256):
            self.setGMMUBM(i)
            self.fit_model()
            _, acc, _ = self.predict()
            print("Accuracy for n = {} is {}".format(i, acc))
            if acc > maxacc:
                maxacc = acc
                best_no_components = i
        return best_no_components

    # Fit the GMM UBM models with training data
    # fit = N data points * data dimension
    def fit_model(self):
        for i in range(MODEL_SPEAKERS):
            print("Fit start for {}".format(i))
            self.GMM[i].fit(self.spk_mfcc[i])
            print(self.spk_mfcc[i].shape)
            self.UBM[i].fit(self.total_mfcc[:self.spk_start[i]] + self.total_mfcc[self.spk_end[i]:])
            print("Fit end for {}".format(i))
            joblib.dump(self.UBM[i], 'dumps/new/ubm' + str(i) + '.pkl')
            joblib.dump(self.GMM[i], 'dumps/new/gmm' + str(i) + '.pkl')

    def model(self, no_components=244):
        self.setGMMUBM(no_components)
        self.fit_model()

    # Predict the output for each model for each speaker and produce confusion matrix
    def load_model(self):
        for i in range(0, MODEL_SPEAKERS):
            self.GMM.append(joblib.load('dumps/new/gmm' + str(i) + '.pkl'))
            self.UBM.append(joblib.load('dumps/new/ubm' + str(i) + '.pkl'))

    def predict(self):
        avg_accuracy = 0

        confusion = [[0 for y in range(MODEL_SPEAKERS)] for x in range(TOTAL_SPEAKERS)]

        for i in range(TOTAL_SPEAKERS):
            for j in range(MODEL_SPEAKERS):
                x = self.GMM[j].score_samples(self.p_spk_mfcc[i]) - self.UBM[j].score_samples(self.p_spk_mfcc[i])
                for score in x:
                    if score > 0:
                        confusion[i][j] += 1

        confusion_diag = [confusion[i][i] for i in range(MODEL_SPEAKERS)]

        diag_sum = 0
        for item in confusion_diag:
            diag_sum += item

        remain_sum = 0
        for i in range(MODEL_SPEAKERS):
            for j in range(MODEL_SPEAKERS):
                if i != j:
                    remain_sum += confusion[i][j]

        spk_accuracy = 0
        for i in range(MODEL_SPEAKERS):
            best_guess, _ = max(enumerate(confusion[i]), key=lambda p: p[1])
            print("For Accent {}, best guess is {}".format(SPEAKERS[i], SPEAKERS[best_guess]))
            if i == best_guess:
                spk_accuracy += 1

        #print(MODEL_SPEAKERS)
        spk_accuracy /= MODEL_SPEAKERS
        avg_accuracy = diag_sum / (remain_sum + diag_sum)
        return confusion, avg_accuracy, spk_accuracy

    def __init__(self):
        self.test_spk = []
        self.test_mfcc = []

        # Speaker data and corresponding mfcc
        self.spk = []
        self.spk_mfcc = []

        self.p_spk = []
        self.p_spk_mfcc = []

        # Holds all the training mfccs of all speakers and
        # speaker_label is the speaker label for the corresponding mfcc
        self.total_mfcc = []
        self.speaker_label = []
        self.spk_train_size = []  # Index up to which is training data for that speaker.

        self.p_total_mfcc = []
        self.p_speaker_label = []
        #print(self.p_speaker_label)
        self.spk_test_size = []

        # Since the lengths of all the audio files are different, spk_start and spk_end hold
        self.spk_start = []
        self.spk_end = []

        self.p_spk_start = []
        self.p_spk_end = []

        self.GMM = []
        self.UBM = []

        self.load_data()
        self.cepstral_mean_subtraction()

    # Cepstral Mean Subtraction (Feature Normalization step)
    def cepstral_mean_subtraction(self):
        for i, speaker_mfcc in enumerate(self.spk_mfcc):
            average = reduce(lambda acc, ele: acc + ele, speaker_mfcc)
            average = list(map(lambda x: x / len(speaker_mfcc), average))
            for j, feature_vector in enumerate(speaker_mfcc):
                for k, feature in enumerate(feature_vector):
                    self.spk_mfcc[i][j][k] -= average[k]
        for i, speaker_mfcc in enumerate(self.p_spk_mfcc):
            average = reduce(lambda acc, ele: acc + ele, speaker_mfcc)
            average = list(map(lambda x: x / len(speaker_mfcc), average))
            for j, feature_vector in enumerate(speaker_mfcc):
                for k, feature in enumerate(feature_vector):
                    self.p_spk_mfcc[i][j][k] -= average[k]

    #TBD : Ten fold validation
    def ten_fold():
        #fold_size = 0.1 * self.n
        fold_offset = 0.0
        accuracy_per_fold = 0
        average_accuracy = 0

        for i in range(0, 10):
            print("Fold start is {} and fold end is {} ".format(fold_offset, fold_offset + fold_size))
            #accuracy = self.execute(int(fold_offset), int(fold_offset + fold_size))
            #print("Accuracy is of test {} is : {} ".format(i, accuracy))
            #average_accuracy += accuracy
            #fold_offset += fold_size

        average_accuracy /= 10.0
        print("Average accuracy " + str(100 * average_accuracy))
        return average_accuracy


# Final result is a confusion matrix which represents the accuracy of the fit of the model
if __name__ == '__main__':
    SR = SpeakerRecognition()
    #SR.load_model()
    SR.setGMMUBM(no_components=13)
    #SR.find_best_params()
    SR.fit_model()
    confusion, mfcc_accuracy, spk_accuracy = SR.predict()
    print("Confusion Matrix")
    print(np.matrix(confusion))
    print("Accuracy in predicting speakers : {}".format(spk_accuracy))
    print("Accuracy in testing for MFCC : {}".format(mfcc_accuracy))

unsupported operand type(s) for +=: 'zip' and 'zip'

Thanks for the answer before; I have changed what Alperen suggested, but I have another problem. My code:
import sys
import os
import itertools
import os.path
import random
from PIL import Image
from svmutil import *

DIMENSION = 200
sys.path.append("../train/")
ROOT_DIR = os.path.dirname(os.getcwd()) + "/train"
NEGATIVE = "negative"
POSITIVE = "positive"
CLASSES = [NEGATIVE, POSITIVE]

# libsvm constants
LINEAR = 0
RBF = 2

# Other
USE_LINEAR = False
IS_TUNING = False

def main():
    try:
        train, tune, test = getData(IS_TUNING)
        models = getModels(train)
        results = None
        if IS_TUNING:
            print ("!!! TUNING MODE !!!")
            results = classify(models, tune)
        else:
            results = classify(models, test)

        print
        totalCount = 0
        totalCorrect = 0
        for clazz in CLASSES:
            count, correct = results[clazz]
            totalCount += count
            totalCorrect += correct
            print ("%s %d %d %f") % (clazz, correct, count, (float(correct) / count))
        print ("%s %d %d %f") % ("Overall", totalCorrect, totalCount, (float(totalCorrect) / totalCount))

    except Exception as e:
        print (e)
        return 5

def classify(models, dataSet):
    results = {}
    for trueClazz in CLASSES:
        count = 0
        correct = 0
        for item in dataSet[trueClazz]:
            predClazz, prob = predict(models, item)
            print ("%s,%s,%f") % (trueClazz, predClazz, prob)
            count += 1
            if trueClazz == predClazz: correct += 1
        results[trueClazz] = (count, correct)
    return results

def predict(models, item):
    maxProb = 0.0
    bestClass = ""

    for clazz, model in models.iteritems():
        prob = predictSingle(model, item)
        if prob > maxProb:
            maxProb = prob
            bestClass = clazz

    return (bestClass, maxProb)

def predictSingle(model, item):
    output = svm_predict([0], [item], model, "-q -b 1")
    prob = output[2][0][0]
    return prob

def getModels(trainingData):
    models = {}
    param = getParam(USE_LINEAR)

    for c in CLASSES:
        labels, data = getTrainingData(trainingData, c)
        prob = svm_problem(labels, data)
        m = svm_train(prob, param)
        models[c] = m

    return models

def getTrainingData(trainingData, clazz):
    labeledData = getLabeledDataVector(trainingData, clazz, 1)
    negClasses = [c for c in CLASSES if not c == clazz]

    for c in negClasses:
        ld = getLabeledDataVector(trainingData, c, -1)
        labeledData += ld

    random.shuffle(labeledData)
    unzipped = [list(t) for t in zip(*labeledData)]
    labels, data = unzipped[0], unzipped[1]
    return (labels, data)

def getParam(linear = True):
    param = svm_parameter("-q")
    param.probability = 1
    if(linear):
        param.kernel_type = LINEAR
        param.C = .01
    else:
        param.kernel_type = RBF
        param.C = .01
        param.gamma = .00000001
    return param

def getLabeledDataVector(dataset, clazz, label):
    data = dataset[clazz]
    labels = [label] * len(data)
    output = zip(labels, data)
    return output

def getData(generateTuningData):
    trainingData = {}
    tuneData = {}
    testData = {}

    for clazz in CLASSES:
        (train, tune, test) = buildTrainTestVectors(buildImageList(ROOT_DIR + clazz + "/"), generateTuningData)
        trainingData[clazz] = train
        tuneData[clazz] = tune
        testData[clazz] = test

    return (trainingData, tuneData, testData)

def buildImageList(dirName):
    imgs = [Image.open(dirName + fileName).resize((DIMENSION, DIMENSION)) for fileName in os.listdir(dirName)]
    imgs = [list(itertools.chain.from_iterable(img.getdata())) for img in imgs]
    return imgs

def buildTrainTestVectors(imgs, generateTuningData):
    # 70% for training, 30% for test.
    testSplit = int(.7 * len(imgs))
    baseTraining = imgs[:testSplit]
    test = imgs[testSplit:]

    training = None
    tuning = None
    if generateTuningData:
        # 50% of training for true training, 50% for tuning.
        tuneSplit = int(.5 * len(baseTraining))
        training = baseTraining[:tuneSplit]
        tuning = baseTraining[tuneSplit:]
    else:
        training = baseTraining

    return (training, tuning, test)

if __name__ == "__main__":
    sys.exit(main())
and I got a new error message:
unsupported operand type(s) for +=: 'zip' and 'zip'
What should I do? I have searched every answer but none of them got me to a solution. I'm using this code for my final project at university, so I hope someone can help me with this problem. And thank you for the previous answer.
EDIT:
This line causes the error:
labeledData += ld
The += operand doesn't work on zip objects. You can change the zips to lists:
def getLabeledDataVector(dataset, clazz, label):
    ...
    return list(output)
Also, the unzipped list can be empty; you should fix this line too (thanks to ShadowRanger for the comment):
labels, data = unzipped if unzipped else ([], [])
These changes will probably affect your code's logic. You should fix that on your own.
BEFORE EDIT:
In the getData(generateTuningData) function, the ROOT_DIR + clazz expression causes the error, because ROOT_DIR is None.
sys.path.append doesn't return anything (it returns None).
You need to change your code to:
...
import os.path
...
sys.path.append("../train/")
ROOT_DIR = os.path.dirname(os.getcwd()) + "/train/" # parent directory and "/train/"
...
I assumed ROOT_DIR is your current working directory's parent + "/train/". If it is not, you can fix it.
Also, there may be other problems, but this solves the unsupported operand type(s) error.
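As a quick, self-contained demonstration of the point above (my own example, not from the original answer):

# zip() returns a lazy zip object in Python 3, and zip objects don't support +=:
a = zip([1, 1], ["x", "y"])
b = zip([-1, -1], ["u", "v"])
# a += b   # TypeError: unsupported operand type(s) for +=: 'zip' and 'zip'

# Materializing them as lists makes += plain list concatenation:
a = list(zip([1, 1], ["x", "y"]))
b = list(zip([-1, -1], ["u", "v"]))
a += b
# Unzipping, with a guard for the empty case mentioned above:
labels, data = zip(*a) if a else ([], [])
print(list(labels), list(data))   # [1, 1, -1, -1] ['x', 'y', 'u', 'v']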

Simple Neural Network with MNIST stuck at 17% misclassification error

Hello, I'm putting my code up for inspection because I've been playing with my neural network implementation in Python for a few weeks and I can't seem to get the misclassification error below 17% (sometimes 16%). I've been trying different learning rates and different numbers of hidden neurons and still don't see much improvement. I'm well aware my implementation is a basic, traditional neural network, but I was expecting better results judging by other implementations I've seen on the internet. I hope this is of interest to you; it would be really cool if you could point me toward new ideas about what could be the problem in my code, or maybe you think this is the best I can do with a traditional implementation and I should add something new; that would be cool too.
In any case, here is my code. I hope it is readable enough; I tried to keep it as simple as possible, since that's my way of understanding how neural networks work.
Edit: Perhaps my question is not so clear. Basically, if it's of interest to you, I would like help finding details in my current implementation that could bring the misclassification error below 17%, because apparently that's the best my implementation can do. I would be very thankful for any advice or ideas. I'm deeply interested in this topic, but I'm a beginner and it would be great to get some smart ideas that can help me improve my implementation.
File: mnist_dataset.py - Extract the MNIST data
import numpy as np
from struct import unpack

train_input_file = open("dataset/train-images-idx3-ubyte", "rb")
train_output_file = open("/dataset/train-labels-idx1-ubyte", "rb")
test_input_file = open("dataset/t10k-images-idx3-ubyte", "rb")
test_output_file = open("dataset/t10k-labels-idx1-ubyte", "rb")

def readData(f, labels=False, scale=1):
    header = hex(unpack('>L', np.fromfile(f, dtype=np.int32, count=1)[0])[0])
    num = int(unpack('>L', np.fromfile(f, dtype=np.int32, count=1)[0])[0])
    col = 1
    row = 1
    if labels == False:
        row = int(unpack('>L', np.fromfile(f, dtype=np.int32, count=1)[0])[0])
        col = int(unpack('>L', np.fromfile(f, dtype=np.int32, count=1)[0])[0])
    data = np.zeros((int(num/scale), col*row))
    for i in range(0, int(num/scale), 1):
        data[i] = np.fromfile(f, dtype=np.ubyte, count=col*row)
    return data

def getMNISTData():
    def norm(v):
        return v/255
    train_input = readData(train_input_file, scale=1)/255.0
    train_out = readData(train_output_file, True, scale=1)
    test_input = readData(test_input_file)/255.0
    test_out = readData(test_output_file, True)

    print "Train input: " + str(train_input.shape)
    print "Train output: " + str(train_out.shape)
    print "Test input: " + str(test_input.shape)
    print "Test output: " + str(test_out.shape)

    train_input_file.close()
    train_output_file.close()
    test_input_file.close()
    test_output_file.close()

    return (train_input, train_out, test_input, test_out)
File: NN.py - neural network implementation
import mnist_dataset
import numpy as np
import random
import matplotlib.pyplot as plt

def encode_data_10(v):
    e = (0.0) * np.ones((1, 10), dtype=float)
    e[:, int(v)] = 1.0
    return e.tolist()

def encode_data_1(v):
    n = -1.0 + ((0.2)*v)
    return n

x_train, y_train, x_test, y_test = mnist_dataset.getMNISTData()

learning_rate = 1.0
iter = 3000
sample_size = 30
num_hidden_neurons = 500
num_output_neurons = 10

if num_output_neurons > 1:
    y_train = np.matrix(np.array(map(encode_data_10, y_train)))
    y_test = np.matrix(np.array(map(encode_data_10, y_test)))
else:
    y_train = np.matrix(map(encode_data_1, y_train))
    y_test = np.matrix(map(encode_data_1, y_test))

def getSample(sample_size, x, y):
    r = random.sample(xrange(1, len(y), 1), sample_size)
    x_r = np.zeros((sample_size, x.shape[1]))
    y_r = np.zeros((sample_size, y.shape[1]))
    for i, n in enumerate(r):
        x_r[i] = x[n]
        y_r[i] = y[n]
    return (x_r, y_r)

inputVector, targetVector = getSample(sample_size, x_train, y_train)

hiddenWeights = np.mat(np.random.random((num_hidden_neurons, x_train.shape[1])))
print "W0 shape: " + str(hiddenWeights.shape)
outputWeights = np.mat(np.random.random((num_output_neurons, num_hidden_neurons)))
print "W1 shape: " + str(outputWeights.shape)

def act_func_l1(a):
    return (1.0/(1 + np.exp(-a)))

def der_act_func_l1(a):
    return act_func_l1(a)*(1.0 - act_func_l1(a))

def feedforward(l0):
    global hiddenWeights
    global outputWeights

    Z1 = l0 * hiddenWeights.T
    layer1 = np.matrix(act_func_l1(np.asarray(Z1)))
    Z2 = layer1 * outputWeights.T
    layer2 = act_func_l1(np.asarray(Z2))

    return (layer1, layer2)

def miss(x, y):
    layer1, layer2 = feedforward(x)

    def c(n):
        if n > 0.5:
            return 1.0
        else:
            return 0.0

    layer2 = map(lambda v: map(c, v), layer2)

    def cc(t):
        return np.abs(cmp(np.array(y[t[0]]).tolist()[0], np.array(t[1]).tolist()))

    return (np.sum(map(cc, enumerate(layer2))))

miss_x = np.zeros((iter, 1))
for j in xrange(iter):
    hiddenActualInput = inputVector * hiddenWeights.T
    hiddenOutputVector = np.matrix(act_func_l1(np.asarray(hiddenActualInput)))
    outputActualInput = hiddenOutputVector * outputWeights.T
    outputVector = act_func_l1(np.asarray(outputActualInput))

    layer2_error2 = np.square(outputVector - targetVector)
    print "Error: " + str(np.mean(np.abs(layer2_error2)))

    m = miss(x_test, y_test)
    miss_x[j] = m
    print str(j) + " - Misses (%): " + str(m)
    if m <= 2000:
        learning_rate = 0.05

    outputDelta = np.mat(der_act_func_l1(np.asarray(outputVector))*np.asarray(outputVector - targetVector))
    hiddenDelta = np.mat(der_act_func_l1(np.asarray(hiddenOutputVector)) * np.asarray((outputDelta*outputWeights)))

    hiddenWeights = np.mat(hiddenWeights.T - (learning_rate*np.asarray(inputVector.T*hiddenDelta))).T
    outputWeights = np.mat(outputWeights.T - (learning_rate*np.asarray(hiddenOutputVector.T*outputDelta))).T

    inputVector, targetVector = getSample(sample_size, x_train, y_train)

plt.plot(xrange(iter), miss_x, label='Miss rate(%)')
plt.legend(loc='upper right')
plt.show()
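
One detail worth illustrating (my own suggestion, not something from the post): hiddenWeights and outputWeights above are drawn with np.random.random(), so every weight starts positive in [0, 1). A common alternative for sigmoid units is small zero-centered weights scaled by the fan-in, which keeps the initial activations away from the flat, saturated region of the sigmoid. A sketch:

import numpy as np

def init_weights(n_out, n_in):
    # uniform in [-r, r] with r = 1 / sqrt(fan_in), a common heuristic for sigmoid layers
    r = 1.0 / np.sqrt(n_in)
    return np.mat(np.random.uniform(-r, r, (n_out, n_in)))

# hypothetical drop-in for the two initializations above:
# hiddenWeights = init_weights(num_hidden_neurons, x_train.shape[1])
# outputWeights = init_weights(num_output_neurons, num_hidden_neurons)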
