1 - All Python Codes + Neo4j Samples

This document contains Python code implementing the Naive Bayes algorithm and the K-nearest neighbors algorithm, including code to calculate the conditional probabilities of different attributes given a yes/no classification. Additional code implements perceptrons and uses genetic algorithms to solve the knapsack and N-Queens problems, followed by sample Neo4j Cypher queries.


Naïve Bayes Algorithm

# training data: [age, income, student, credit_rating, class (y/n)]
T = [
    ['<=30', 'high', 'n', 'fair', 'n'],
    ['<=30', 'high', 'n', 'exe', 'n'],
    ['31-40', 'high', 'n', 'fair', 'y'],
    ['>40', 'medium', 'n', 'fair', 'y'],
    ['>40', 'w', 'y', 'fair', 'y'],
    ['>40', 'w', 'y', 'exe', 'n'],
    ['31-40', 'w', 'y', 'exe', 'y'],
    ['<=30', 'medium', 'n', 'fair', 'n'],
    ['<=30', 'w', 'y', 'fair', 'y'],
    ['>40', 'medium', 'y', 'fair', 'y'],
    ['<=30', 'medium', 'y', 'exe', 'y'],
    ['31-40', 'medium', 'n', 'exe', 'y'],
    ['31-40', 'high', 'y', 'fair', 'y'],
    ['>40', 'medium', 'n', 'exe', 'n']
]
y=0
n=0

for record in T:
    y += record[4].count('y')

print('No of Yes: ', y)

for record in T:
    n += record[4].count('n')

print('No of No: ', n, '\n')

x1 = 0
for p1 in T:
    x1 += p1[0].count('<=30') and p1[4].count('y')

y1 = 0
for p1 in T:
    y1 += p1[0].count('<=30') and p1[4].count('n')

print('P(<=30 | yes): ', x1/y)
print('P(<=30 | no): ', y1/n)

x2 = 0
for p1 in T:
    x2 += p1[0].count('31-40') and p1[4].count('y')

y2 = 0
for p1 in T:
    y2 += p1[0].count('31-40') and p1[4].count('n')

print('P(31-40 | yes): ', x2/y)
print('P(31-40 | no): ', y2/n)

x3 = 0
for p1 in T:
    x3 += p1[0].count('>40') and p1[4].count('y')

y3 = 0
for p1 in T:
    y3 += p1[0].count('>40') and p1[4].count('n')

print('P(>40 | yes): ', x3/y)
print('P(>40 | no): ', y3/n, '\n')

x4 = 0
for p1 in T:
    x4 += p1[1].count('high') and p1[4].count('y')

y4 = 0
for p1 in T:
    y4 += p1[1].count('high') and p1[4].count('n')

print('P(high | yes): ', x4/y)
print('P(high | no): ', y4/n)

x5 = 0
for p1 in T:
    x5 += p1[1].count('medium') and p1[4].count('y')

y5 = 0
for p1 in T:
    y5 += p1[1].count('medium') and p1[4].count('n')

print('P(medium | yes): ', x5/y)
print('P(medium | no): ', y5/n)

x6 = 0
for p1 in T:
    x6 += p1[1].count('w') and p1[4].count('y')

y6 = 0
for p1 in T:
    y6 += p1[1].count('w') and p1[4].count('n')

print('P(w | yes): ', x6/y)
print('P(w | no): ', y6/n, '\n')

x7 = 0
for p1 in T:
    x7 += p1[2].count('n') and p1[4].count('y')

y7 = 0
for p1 in T:
    y7 += p1[2].count('n') and p1[4].count('n')

print('P(student=n | yes): ', x7/y)
print('P(student=n | no): ', y7/n)

x8 = 0
for p1 in T:
    x8 += p1[2].count('y') and p1[4].count('y')

y8 = 0
for p1 in T:
    y8 += p1[2].count('y') and p1[4].count('n')

print('P(student=y | yes): ', x8/y)
print('P(student=y | no): ', y8/n, '\n')

x9 = 0
for p1 in T:
    x9 += p1[3].count('fair') and p1[4].count('y')

y9 = 0
for p1 in T:
    y9 += p1[3].count('fair') and p1[4].count('n')

print('P(fair | yes): ', x9/y)
print('P(fair | no): ', y9/n)

x10 = 0
for p1 in T:
    x10 += p1[3].count('exe') and p1[4].count('y')

y10 = 0
for p1 in T:
    y10 += p1[3].count('exe') and p1[4].count('n')

print('P(exe | yes): ', x10/y)
print('P(exe | no): ', y10/n)

# class-conditional likelihoods for the query instance
# X = (age <= 30, income = medium, student = y, credit = fair)
yes = x1/y * x5/y * x8/y * x9/y
nn = y1/n * y5/n * y8/n * y9/n

print('\nfor yes')
print(yes)

print('\nfor no')
print(nn)
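The products above are only the class-conditional likelihoods P(X | class) for the query instance X = (<=30, medium, student=y, fair); a full Naive Bayes decision also multiplies in the class priors P(yes) and P(no). The sketch below is an illustrative, more compact way to do the same computation. It is not part of the original code and assumes the table T defined above.

from collections import Counter

def conditional(T, col, value, label):
    # P(attribute in column col == value | class == label), estimated from T
    label_rows = [row for row in T if row[4] == label]
    return sum(1 for row in label_rows if row[col] == value) / len(label_rows)

def naive_bayes(T, X):
    # return the posterior scores P(class) * product of P(attr | class)
    priors = Counter(row[4] for row in T)
    scores = {}
    for label, count in priors.items():
        score = count / len(T)                          # P(class)
        for col, value in enumerate(X):
            score *= conditional(T, col, value, label)  # P(attr | class)
        scores[label] = score
    return scores

# query instance: age <= 30, medium income, student, fair credit rating
scores = naive_bayes(T, ['<=30', 'medium', 'y', 'fair'])
print(scores, '->', max(scores, key=scores.get))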

KNN IMPLEMENTATION USING sklearn


import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

url = "https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"

# Assign column names to the dataset
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'Class']

# Read dataset into a pandas dataframe
dataset = pd.read_csv(url, names=names)

X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values

from sklearn.model_selection import train_test_split


X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.40)

from sklearn.neighbors import KNeighborsClassifier


classifier = KNeighborsClassifier(n_neighbors=7, metric='euclidean')
classifier.fit(X_train, y_train)

y_pred = classifier.predict(X_test)

from sklearn.metrics import classification_report, confusion_matrix, accuracy_score


print("confusion_matrix \n", confusion_matrix(y_test, y_pred))
print(("accuracy_score ", accuracy_score (y_test, y_pred)))
print("\n",classification_report(y_test, y_pred))

PERCEPTRONS CODE-1
import numpy as np

class Perceptron(object):
    def __init__(self, no_of_inputs, epoch=20, learning_rate=0.01):
        self.epoch = epoch
        self.learning_rate = learning_rate
        self.weights = np.zeros(no_of_inputs + 1)   # weights[0] is the bias

    def predict(self, inputs):
        summation = np.dot(inputs, self.weights[1:]) + self.weights[0]
        if summation >= 0:
            activation = 1
        else:
            activation = 0
        return activation

    def train(self, training_inputs, labels):
        for _ in range(self.epoch):
            for inputs, label in zip(training_inputs, labels):
                prediction = self.predict(inputs)
                self.weights[1:] += self.learning_rate * (label - prediction) * inputs
                self.weights[0] += self.learning_rate * (label - prediction)
                print("learning rate", self.learning_rate, self.weights[1:])

# AND-gate training data
training_inputs = []
training_inputs.append(np.array([1, 1]))
training_inputs.append(np.array([1, 0]))
training_inputs.append(np.array([0, 1]))
training_inputs.append(np.array([0, 0]))

labels = np.array([1, 0, 0, 0])

perceptron = Perceptron(2)
perceptron.train(training_inputs, labels)

inputs = np.array([0.5, 0.8])
print(perceptron.predict(inputs))

inputs = np.array([1.5, 0.5])
print(perceptron.predict(inputs))
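The labels [1, 0, 0, 0] above make the perceptron learn the AND gate. As a quick illustration (not in the original code), the same class learns the OR gate simply by changing the labels:

# OR-gate labels for the same four inputs; reuses training_inputs from above
or_labels = np.array([1, 1, 1, 0])

or_perceptron = Perceptron(2)
or_perceptron.train(training_inputs, or_labels)

for x in training_inputs:
    print(x, '->', or_perceptron.predict(x))   # expected 1, 1, 1, 0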

PERCEPTRONS CODE-2
import numpy as np

class Perceptron(object):
    def __init__(self, no_of_inputs, threshold=100, learning_rate=0.01):
        self.threshold = threshold          # number of training passes
        self.learning_rate = learning_rate
        self.weights = np.zeros(no_of_inputs + 1)

    def predict(self, inputs):
        summation = np.dot(inputs, self.weights[1:]) + self.weights[0]
        if summation > 0:
            activation = 1
        else:
            activation = 0
        return activation

    def train(self, training_inputs, labels):
        for _ in range(self.threshold):
            for inputs, label in zip(training_inputs, labels):
                prediction = self.predict(inputs)
                self.weights[1:] += self.learning_rate * (label - prediction) * inputs
                self.weights[0] += self.learning_rate * (label - prediction)
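This second version defines the class only, with no driver code. A minimal usage sketch (an assumption, not part of the original) trains it on the same AND-gate data used for CODE-1; note that predict() here fires only when the weighted sum is strictly greater than zero.

# hypothetical driver code for CODE-2
training_inputs = [np.array([1, 1]), np.array([1, 0]),
                   np.array([0, 1]), np.array([0, 0])]
labels = np.array([1, 0, 0, 0])   # AND gate

p = Perceptron(2)
p.train(training_inputs, labels)
print(p.predict(np.array([1, 1])))   # expected 1
print(p.predict(np.array([0, 1])))   # expected 0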

KNAPSACK USING GENETIC ALGORITHM


import random
import sys
import operator

class Knapsack(object):

    # initialize variables and lists
    def __init__(self):
        self.C = 0
        self.weights = []
        self.profits = []
        self.opt = []
        self.parents = []
        self.newparents = []
        self.bests = []
        self.best_p = []
        self.iterated = 1
        self.population = 0

    # increase max recursion for long stack (run() recurses once per generation)
    iMaxStackSize = 15000
    sys.setrecursionlimit(iMaxStackSize)

    # create the initial population
    def initialize(self):
        for i in range(self.population):
            parent = []
            for k in range(0, 5):
                k = random.randint(0, 1)
                parent.append(k)
            self.parents.append(parent)

    # set the details of this problem
    def properties(self, weights, profits, opt, C, population):
        self.weights = weights
        self.profits = profits
        self.opt = opt
        self.C = C
        self.population = population
        self.initialize()

    # calculate the fitness of each list (sack)
    def fitness(self, item):
        sum_w = 0
        sum_p = 0

        # accumulate weights and profits of the selected items
        for index, i in enumerate(item):
            if i == 0:
                continue
            else:
                sum_w += self.weights[index]
                sum_p += self.profits[index]

        # return -1 if the capacity is exceeded, the total profit otherwise
        if sum_w > self.C:
            return -1
        else:
            return sum_p

    # evaluate the fitness of the current generation
    def evaluation(self):

        # loop through parents and calculate fitness
        best_pop = self.population // 2
        for i in range(len(self.parents)):
            parent = self.parents[i]
            ft = self.fitness(parent)
            self.bests.append((ft, parent))

        # sort the (fitness, parent) list by fitness, best first
        self.bests.sort(key=operator.itemgetter(0), reverse=True)
        self.best_p = self.bests[:best_pop]
        self.best_p = [x[1] for x in self.best_p]

    # mutate a child: each gene flips with probability 0.5
    def mutation(self, ch):
        for i in range(len(ch)):
            k = random.uniform(0, 1)
            if k > 0.5:
                # if the random float is greater than 0.5, flip 0 to 1 and vice versa
                if ch[i] == 1:
                    ch[i] = 0
                else:
                    ch[i] = 1
        return ch

    # crossover two parents to produce two children by mixing them at a
    # random cut point each time
    def crossover(self, ch1, ch2):
        threshold = random.randint(1, len(ch1)-1)
        tmp1 = ch1[threshold:]
        tmp2 = ch2[threshold:]
        ch1 = ch1[:threshold]
        ch2 = ch2[:threshold]
        ch1.extend(tmp2)
        ch2.extend(tmp1)

        return ch1, ch2

    # run the GA
    def run(self):

        # run the evaluation once
        self.evaluation()
        newparents = []
        pop = len(self.best_p)-1

        # create a list with unique random integers
        sample = random.sample(range(pop), pop)
        for i in range(0, pop):
            # select the random index of best children to randomize the process
            if i < pop-1:
                r1 = self.best_p[i]
                r2 = self.best_p[i+1]
                nchild1, nchild2 = self.crossover(r1, r2)
                newparents.append(nchild1)
                newparents.append(nchild2)
            else:
                r1 = self.best_p[i]
                r2 = self.best_p[0]
                nchild1, nchild2 = self.crossover(r1, r2)
                newparents.append(nchild1)
                newparents.append(nchild2)

        # mutate the new children and potential parents to help reach the
        # global optimum
        for i in range(len(newparents)):
            newparents[i] = self.mutation(newparents[i])

        if self.opt in newparents:
            print("optimal found in {} generations".format(self.iterated))
        else:
            self.iterated += 1
            print("recreate generations for {} time".format(self.iterated))
            self.parents = newparents
            self.bests = []
            self.best_p = []
            self.run()

# properties for this particular problem


weights = [12, 7, 11, 8, 9]
profits = [24, 13, 23, 15, 16]
opt = [0, 1, 1, 1, 0]
C = 26
population = 10

k = Knapsack()
k.properties(weights, profits, opt, C, population)
k.run()
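With only five items, the GA's target can be verified directly. The brute-force check below is an illustrative sanity check (not part of the original code) confirming that opt = [0, 1, 1, 1, 0] is the most profitable selection that fits within the capacity C = 26.

from itertools import product

# enumerate all 2^5 selections that respect the capacity and keep the best
feasible = [sel for sel in product([0, 1], repeat=len(weights))
            if sum(w for w, s in zip(weights, sel) if s) <= C]
best = max(feasible, key=lambda sel: sum(p for p, s in zip(profits, sel) if s))
print(list(best), 'profit =', sum(p for p, s in zip(profits, best) if s))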

QUEENS PROBLEM USING GENETIC ALGORITHM


import random

def random_chromosome(size):  # making random chromosomes
    return [random.randint(1, size) for _ in range(size)]

def fitness(chromosome):
    horizontal_collisions = sum([chromosome.count(queen)-1 for queen in chromosome])/2
    diagonal_collisions = 0

    n = len(chromosome)
    left_diagonal = [0] * 2*n
    right_diagonal = [0] * 2*n
    for i in range(n):
        left_diagonal[i + chromosome[i] - 1] += 1
        right_diagonal[len(chromosome) - i + chromosome[i] - 2] += 1

    diagonal_collisions = 0
    for i in range(2*n-1):
        counter = 0
        if left_diagonal[i] > 1:
            counter += left_diagonal[i]-1
        if right_diagonal[i] > 1:
            counter += right_diagonal[i]-1
        diagonal_collisions += counter / (n-abs(i-n+1))

    return int(maxFitness - (horizontal_collisions + diagonal_collisions))  # e.g. 28 - (2+3) = 23

def probability(chromosome, fitness):
    return fitness(chromosome) / maxFitness

def random_pick(population, probabilities):
    populationWithProbabilty = zip(population, probabilities)
    total = sum(w for c, w in populationWithProbabilty)
    r = random.uniform(0, total)
    upto = 0
    for c, w in zip(population, probabilities):
        if upto + w >= r:
            return c
        upto += w
    assert False, "Shouldn't get here"

def reproduce(x, y):  # doing cross_over between two chromosomes
    n = len(x)
    c = random.randint(0, n - 1)
    return x[0:c] + y[c:n]

def mutate(x):  # randomly changing the value of a random index of a chromosome
    n = len(x)
    c = random.randint(0, n - 1)
    m = random.randint(1, n)
    x[c] = m
    return x

def genetic_queen(population, fitness):
    mutation_probability = 0.03
    new_population = []
    probabilities = [probability(n, fitness) for n in population]
    for i in range(len(population)):
        x = random_pick(population, probabilities)  # picked chromosome 1
        y = random_pick(population, probabilities)  # picked chromosome 2
        child = reproduce(x, y)  # creating a new chromosome from the two picked chromosomes
        if random.random() < mutation_probability:
            child = mutate(child)
        print_chromosome(child)
        new_population.append(child)
        if fitness(child) == maxFitness:
            break
    return new_population

def print_chromosome(chrom):
    print("Chromosome = {}, Fitness = {}".format(str(chrom), fitness(chrom)))

if __name__ == "__main__":
    nq = int(input("Enter Number of Queens: "))  # say N = 8
    maxFitness = (nq*(nq-1))/2  # 8*7/2 = 28
    population = [random_chromosome(nq) for _ in range(100)]

    generation = 1

    while not maxFitness in [fitness(chrom) for chrom in population]:
        print("=== Generation {} ===".format(generation))
        population = genetic_queen(population, fitness)
        print("")
        print("Maximum Fitness = {}".format(max([fitness(n) for n in population])))
        generation += 1

    chrom_out = []
    print("Solved in Generation {}!".format(generation-1))
    for chrom in population:
        if fitness(chrom) == maxFitness:
            print("")
            print("One of the solutions: ")
            chrom_out = chrom
            print_chromosome(chrom)

    board = []

    for x in range(nq):
        board.append(["x"] * nq)

    for i in range(nq):
        board[nq-chrom_out[i]][i] = "Q"

    def print_board(board):
        for row in board:
            print(" ".join(row))

    print()
    print_board(board)
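As an optional check (illustrative, not in the original code), the printed chromosome can be verified independently of the fitness function: queen i sits in column i and row chrom_out[i], so no two queens may share a row or a diagonal. The lines below continue inside the __main__ block above.

    def is_valid_solution(chrom):
        n = len(chrom)
        for i in range(n):
            for j in range(i + 1, n):
                if chrom[i] == chrom[j]:                    # same row
                    return False
                if abs(chrom[i] - chrom[j]) == abs(i - j):  # same diagonal
                    return False
        return True

    print("valid solution:", is_valid_solution(chrom_out))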
Neo4j Sample Codes
MATCH (A:Person)-[*3]->(b) WHERE A.name="Alice" RETURN b

CREATE (ee:Person { name: "Emil", from: "Sweden", salary: "65.5k" }),
  (js:Person { name: "Johan", from: "Sweden", learn: "surfing" }),
  (ir:Person { name: "Ian", from: "England", title: "author" }),
  (rvb:Person { name: "Rik", from: "Belgium", pet: "Orval" }),
  (ally:Person { name: "Allison", from: "California", hobby: "surfing" })
RETURN ee,js,ir,rvb,ally

MATCH (ee:Person) WHERE ee.name='Emil'
MATCH (js:Person) WHERE js.name='Johan'
MATCH (ir:Person) WHERE ir.name='Ian'
MATCH (rvb:Person) WHERE rvb.name='Rik'
MATCH (ally:Person) WHERE ally.name='Allison'

CREATE (ee:Person { name: "Emil", from: "Sweden", salary: "65.5k" }),
  (js:Person { name: "Johan", from: "Sweden", learn: "surfing" }),
  (ir:Person { name: "Ian", from: "England", title: "author" }),
  (rvb:Person { name: "Rik", from: "Belgium", pet: "Orval" }),
  (ally:Person { name: "Allison", from: "California", hobby: "surfing" }),
  (ee)-[:KNOWS {since: 2001}]->(js), (ee)-[:FRIEND {rating: 5}]->(ir),
  (js)-[:KNOWS]->(ir), (js)-[:KNOWS]->(rvb),
  (ir)-[:BOSS]->(js), (ir)-[:FRIEND]->(ally),
  (rvb)-[:BROTHER]->(ally)
RETURN ee,js,ir,rvb,ally

MATCH (js:Person)-[:KNOWS]-()-[:KNOWS]-(surfer)

WHERE js.name = "Johan" AND surfer.hobby = "surfing"


RETURN DISTINCT surfer

MATCH (js:Person)-[:KNOWS]-()-[:FRIEND]-(surfer)

WHERE js.name = "Johan" AND surfer.hobby = "surfing"

RETURN DISTINCT surfer
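These Cypher statements can also be issued from Python with the official neo4j driver. The snippet below is a rough sketch only: the Bolt URI, user name, and password are placeholder values for a local installation, not details taken from this document.

from neo4j import GraphDatabase

# placeholder connection details -- adjust for your own Neo4j instance
driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))

query = """
MATCH (js:Person)-[:KNOWS]-()-[:FRIEND]-(surfer)
WHERE js.name = $name AND surfer.hobby = 'surfing'
RETURN DISTINCT surfer.name AS name
"""

with driver.session() as session:
    for record in session.run(query, name="Johan"):
        print(record["name"])

driver.close()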
