PyGAD Documentation (pygad.readthedocs.io, latest)
function_inputs = [4,-2,3.5,5,-11,-4.7]
desired_output = 44
The fitness function accepts the GA instance, a single solution, and its index, and returns the fitness value (or a list, tuple, or numpy.ndarray of values for multi-objective problems). Assign it to the fitness_function variable used below:
fitness_function = fitness_func
num_generations = 50
num_parents_mating = 4
sol_per_pop = 8
num_genes = len(function_inputs)
init_range_low = -2
init_range_high = 5
parent_selection_type = "sss"
keep_parents = 1
crossover_type = "single_point"
mutation_type = "random"
mutation_percent_genes = 10
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
fitness_func=fitness_function,
sol_per_pop=sol_per_pop,
num_genes=num_genes,
init_range_low=init_range_low,
init_range_high=init_range_high,
parent_selection_type=parent_selection_type,
keep_parents=keep_parents,
crossover_type=crossover_type,
mutation_type=mutation_type,
mutation_percent_genes=mutation_percent_genes)
Call the run() method to start the genetic algorithm:

ga_instance.run()

After run() completes, the best solution can be retrieved and used for prediction:
prediction = numpy.sum(numpy.array(function_inputs)*solution)
print("Predicted output based on the best solution : {prediction}".format(prediction=prediction))
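The snippets above reference a fitness_func that is not shown at this point in the extract. A minimal sketch of such a function for this linear example (the 0.000001 term avoids division by zero) could look like this:

import numpy

def fitness_func(ga_instance, solution, solution_idx):
    # Weighted sum of the inputs using the candidate weights.
    output = numpy.sum(solution * function_inputs)
    # The closer the output is to the desired output, the higher the fitness.
    fitness = 1.0 / (numpy.abs(output - desired_output) + 0.000001)
    return fitness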
PyGAD is organized into the following modules:
pygad: the main module with the pygad.GA class implementing the genetic algorithm.
nn: builds artificial neural networks.
gann: trains neural networks (built with pygad.nn) using the genetic algorithm.
cnn: builds convolutional neural networks.
gacnn: trains convolutional neural networks using the genetic algorithm.
kerasga: trains Keras models using the genetic algorithm.
torchga: trains PyTorch models using the genetic algorithm.
visualize: visualizes the results.
utils: parent selection, crossover, and mutation operators.
helper: helper functions.
If you use PyGAD in your work, please cite the following paper:
@article{gad2023pygad,
title={Pygad: An intuitive genetic algorithm python library},
author={Gad, Ahmed Fawzy},
journal={Multimedia Tools and Applications},
pages={1--14},
year={2023},
publisher={Springer}
}
pygad Module
This section documents the main pygad module and its pygad.GA class. An instance of the pygad.GA class is created by passing the following parameters to its __init__() constructor.
num_generations: Number of generations.
num_parents_mating: Number of solutions to be selected as parents.
fitness_func: A function that accepts 3 parameters (the pygad.GA instance, a single solution, and its index in the population) and returns the fitness value. For multi-objective problems it returns a list, tuple, or numpy.ndarray of fitness values, one per objective.
fitness_batch_size=None: Calculates the fitness in batches. If it is 1 or None (default), the fitness function is called once per solution. If 1 < fitness_batch_size <= sol_per_pop, the solutions are grouped into batches of size fitness_batch_size and the fitness function is called once per batch.
initial_population: A user-defined initial population. Defaults to None, in which case the initial population is created randomly using sol_per_pop and num_genes. If it is not None, sol_per_pop and num_genes are deduced from its shape and need not be passed.
sol_per_pop: Number of solutions (chromosomes) in the population. Has no effect when initial_population is used.
num_genes: Number of genes in each solution. Not needed when initial_population is used.
gene_type=float: Data type of the genes; defaults to float. Accepts int, float, and the NumPy int/uint/float types of 8-64 bits. A list, tuple, or numpy.ndarray assigns a type per gene (e.g. gene_type=[int, float, numpy.int8]). A float type can also carry a precision, e.g. gene_type=[float, 2] rounds the gene to 2 decimal places.
init_range_low=-4: Lower bound of the random range from which the initial gene values are selected; defaults to -4. Has no effect when initial_population is used.
init_range_high=4: Upper bound of that range; defaults to +4. Has no effect when initial_population is used.
parent_selection_type="sss": Parent selection type: sss (steady-state selection), rws (roulette wheel selection), sus (stochastic universal selection), rank (rank selection), random (random selection), or tournament (tournament selection).
keep_parents=-1: Number of parents kept in the current population. -1 (default) keeps all parents, 0 keeps none, and a value greater than 0 keeps that number of parents. It cannot be less than -1 or greater than sol_per_pop. Note that it only takes effect when keep_elitism is 0.
keep_elitism=1: Number of best solutions kept in the next generation (0 <= keep_elitism <= sol_per_pop); defaults to 1. A value K keeps the K best solutions; sol_per_pop keeps the entire population; 0 disables elitism so that keep_parents is used instead.
K_tournament=3: Number of parents taking part in tournament selection; defaults to 3. Only used when parent_selection_type="tournament".
crossover_type="single_point": Crossover operation: single_point (default), two_points, uniform, or scattered. Setting crossover_type=None skips crossover.
crossover_probability=None: Probability with which a parent is selected for the crossover operation.
mutation_type="random": Mutation operation: random (default), swap, inversion, scramble, or adaptive. Setting mutation_type=None skips mutation. Adaptive mutation is discussed in its own section.
mutation_probability=None: Probability of mutating a gene. When mutation_probability is specified, mutation_percent_genes and mutation_num_genes have no effect.
mutation_by_replacement=False: Only works with mutation_type="random". With mutation_by_replacement=True the random value replaces the gene; otherwise it is added to the gene.
mutation_percent_genes="default": Percentage of genes to mutate; "default" means 10%. The value must be >0 and <=100 and is translated into mutation_num_genes. It has no effect when mutation_probability or mutation_num_genes is specified, or when mutation_type is None.
mutation_num_genes=None: Exact number of genes to mutate. If None (default), it is deduced from mutation_percent_genes. It has no effect when mutation_probability is specified or mutation_type is None.
random_mutation_min_val=-1.0: For random mutation, the lower bound of the range from which the random value is sampled; defaults to -1. Has no effect when mutation_type is None.
random_mutation_max_val=1.0: For random mutation, the upper bound of that range; defaults to +1. Has no effect when mutation_type is None.
gene_space=None: Restricts the values of each gene. It can be a flat list, tuple, range, or numpy.ndarray shared by all genes (e.g. gene_space = [0.3, 5.2, -4, 8]) or a nested structure with one space per gene (e.g. gene_space = [[0.4, -5], [0.5, -3.2, 8.2, -9], ...]). An entry of None leaves that gene unrestricted, so its values come from init_range_low/init_range_high and random_mutation_min_val/random_mutation_max_val. An entry may also be a dict such as {'low': 2, 'high': 4} describing a range, optionally with a "step" key in addition to "low" and "high".
on_start=None: Callback called once before the evolution starts.
on_fitness=None: Callback called after the fitness of the population is calculated; it receives the GA instance and the fitness values.
on_parents=None: Callback called after the parents are selected; it receives the GA instance and the selected parents.
on_crossover=None: Callback called after crossover is applied.
on_mutation=None: Callback called after mutation is applied.
on_generation=None: Callback called at the end of each generation. If it returns the string "stop", run() stops.
on_stop=None: Callback called once right before run() completes.
delay_after_gen=0.0: Delay (in seconds) applied after each generation; defaults to 0.0.
save_best_solutions=False: If True, the best solution of each generation is saved in the best_solutions attribute; defaults to False. best_solutions then holds one solution per generation.
save_solutions=False: If True, every solution of every generation is saved in the solutions attribute.
suppress_warnings=False: Whether warning messages are suppressed; defaults to False.
allow_duplicate_genes=True: If True (default), a solution may contain duplicate gene values; if False, duplicates are prevented where possible.
stop_criteria=None: One or more str criteria that stop run() early. Two words are supported: reach and saturate. reach stops run() once the fitness is equal to or greater than the given value (e.g. "reach_40"); saturate stops run() once the fitness does not change for the given number of generations (e.g. "saturate_7").
parallel_processing=None: If None (default), the fitness is calculated sequentially. Otherwise it accepts a list/tuple whose first element is 'process' or 'thread' and whose second element is the number of workers, e.g. parallel_processing=['process', 10]. An int such as parallel_processing=5 uses 5 threads, equivalent to parallel_processing=["thread", 5].
random_seed=None: Seed for the random number generators so that the same results can be reproduced (e.g. random_seed=2). Defaults to None, meaning no seed is used.
logger=None: An instance of logging.Logger used for all output instead of print(). When logger=None, a default logger with a StreamHandler is created.
The user does not need to specify all of these parameters; most have defaults. The fitness_func parameter must always be provided.
Note that init_range_low and init_range_high only affect the initial population, while random_mutation_min_val and random_mutation_max_val control the random mutation.
Both mutation_type and crossover_type can be set to None to skip the corresponding operation.
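As a quick illustration (not taken from the original text), the sketch below builds an instance using only a handful of these parameters and leaves everything else at its default values:

import pygad
import numpy

def fitness_func(ga_instance, solution, solution_idx):
    # Toy fitness: reward gene sums close to 10.
    return -abs(numpy.sum(solution) - 10)

ga_instance = pygad.GA(num_generations=20,
                       num_parents_mating=4,
                       fitness_func=fitness_func,
                       sol_per_pop=8,
                       num_genes=5)
ga_instance.run()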
The pygad.GA class also exposes the plotting methods plot_fitness(), plot_genes(), and plot_new_solution_rate(), and the class attributes supported_int_types, supported_float_types, and supported_int_float_types (the numeric types accepted for genes and fitness values).
After creating an instance of the pygad.GA class, the following instance attributes are available:
generations_completed: Number of completed generations.
population: The current population.
valid_parameters: True when the parameters passed to the GA constructor are valid.
run_completed: True after the run() method completes.
pop_size: The population size.
best_solutions_fitness: Fitness value of the best solution in each generation.
best_solution_generation: Generation at which the best fitness was reached; set after run() completes.
best_solutions: The best solution of each generation; only filled when save_best_solutions=True is passed to the pygad.GA constructor.
last_generation_fitness: Fitness values of the solutions in the last generation.
previous_generation_fitness: Fitness values of the generation before the last one; at the end of each generation, last_generation_fitness is copied into previous_generation_fitness before being updated.
last_generation_parents: Parents selected in the last generation.
last_generation_offspring_crossover: Offspring produced by crossover in the last generation.
last_generation_offspring_mutation: Offspring produced by mutation in the last generation.
gene_type_single: True when gene_type is a single type applied to all genes; False when gene_type is a list, tuple, or numpy.ndarray with one type per gene.
last_generation_parents_indices: Indices of the parents selected in the last generation.
last_generation_elitism: The elitism solutions of the last generation (when keep_elitism is used).
last_generation_elitism_indices: Indices of those elitism solutions (when keep_elitism is used).
logger: The logging.Logger instance used for logging.
gene_space_unpacked: A copy of gene_space with every space unpacked into explicit values, e.g. range(1, 5) becomes [1, 2, 3, 4] and a dict like {'low': 2, 'high': 4} is expanded into values sampled from that range.
pareto_fronts: The pareto fronts found in multi-objective runs; an attribute of the pygad.GA instance.
Attributes whose names start with last_generation_ describe the last completed generation.
The pygad.GA class also has the following methods:
cal_pop_fitness(): Calculates the fitness of the population using fitness_func.
crossover(): Applies the crossover operation selected by crossover_type.
mutation(): Applies the mutation operation selected by mutation_type.
select_parents(): Selects the parents according to parent_selection_type.
adaptive_mutation_population_fitness(): Calculates the fitness values used by adaptive mutation.
summary(): Prints a Keras-like summary of the PyGAD lifecycle.
Methods whose names start with run_ are called inside the run() method:
run_select_parents(call_on_parents=True): Selects the parents and, when call_on_parents is True (the default), calls the on_parents() callback; with call_on_parents=False the callback is skipped. run_select_parents() is called inside run().
run_crossover(): Applies crossover and calls the on_crossover() callback.
run_mutation(): Applies mutation and calls the on_mutation() callback.
run_update_population(): Updates the population attribute after crossover and mutation.
Other methods of the pygad.GA class:
initialize_population(): Creates the initial population as a NumPy array and saves it in the population attribute. It accepts the low and high bounds of the random initialization range, sets the pop_size attribute, and keeps a copy of the created population in the initial_population attribute.
cal_pop_fitness(): Calculates and returns the fitness values of all solutions in the current population. To save time, a previously calculated fitness is reused instead of calling the fitness function again in these cases:
If save_solutions=True and the solution was already explored, its fitness is taken from the saved solutions and solutions_fitness.
If save_solutions is False but keep_elitism is not 0, the fitness of the solutions in last_generation_elitism is copied from previous_generation_fitness.
If both save_solutions is False and keep_elitism is 0 but keep_parents is -1 or greater than 0, the fitness of the solutions in last_generation_parents is copied from previous_generation_fitness.
Otherwise, the fitness is calculated by calling fitness_func. The parallel_processing and fitness_batch_size parameters control how those calls are made.
run(): Runs the genetic algorithm. In every generation it:
1. Calculates the population fitness through cal_pop_fitness(), which calls the fitness_func passed to the pygad.GA constructor.
2. Selects the parents through select_parents() according to parent_selection_type.
3. Applies crossover() and mutation() according to crossover_type and mutation_type.
4. Updates the population attribute and increments generations_completed.
5. Calls the on_generation callback, if one was passed.
After run() completes, best_solution_generation is set and run_completed becomes True.
The pygad.GA class extends the following operator classes, so their methods can be called on a pygad.GA instance directly.
The ParentSelection class in the pygad.utils.parent_selection module implements the parent selection operators. Each method accepts the fitness values of the population and the number of parents to select (num_parents):
steady_state_selection()
rank_selection()
random_selection()
tournament_selection()
roulette_wheel_selection()
stochastic_universal_selection()
nsga2_selection()
tournament_selection_nsga2()
The Crossover class in the pygad.utils.crossover module implements the crossover operators. Each method accepts the selected parents and the offspring_size:
single_point_crossover()
two_points_crossover()
uniform_crossover()
scattered_crossover()
The Mutation class in the pygad.utils.mutation module implements the mutation operators. Each method accepts the offspring to mutate:
random_mutation(): uses mutation_num_genes (or mutation_percent_genes) and the random_mutation_min_val/random_mutation_max_val range from the pygad.GA instance.
swap_mutation()
inversion_mutation()
scramble_mutation()
adaptive_mutation()
best_solution(pop_fitness=None): Returns information about the best solution found. If pop_fitness is None, cal_pop_fitness() is called to calculate the fitness. It returns 3 values:
best_solution: the best solution itself.
best_solution_fitness: its fitness value.
best_match_idx: its index in the population.
plot_fitness(): Creates, shows, and returns a figure of how the fitness values evolve by generation. It replaces the deprecated plot_result() method.
plot_new_solution_rate(): Shows the number of new solutions explored in each generation. It requires save_solutions=True in the pygad.GA constructor.
plot_genes(): Shows how the gene values change across generations; the graph_type parameter selects the kind of graph.
save(filename): Saves the GA instance to a pickled file; filename is the file name without an extension.
Besides the methods of the pygad.GA class, the pygad module itself has the load() function:
pygad.load(filename): Reads a previously saved instance of the pygad.GA class and returns it; filename is the name of the file (without extension) that was passed to save().
Steps to Use pygad
To use the pygad module, follow these steps:
1. Import pygad.
2. Prepare the fitness function (fitness_func).
3. Create an instance of the pygad.GA class.
4. Run the genetic algorithm and inspect the results.
Preparing the fitness_func
The fitness function must be prepared before creating the pygad.GA instance. It is a regular Python function that accepts 3 parameters (the GA instance, a single solution, and its index) and returns a numeric fitness value; for multi-objective problems it may instead return a list, tuple, or numpy.ndarray of fitness values. The returned values must be of a type listed in the supported_int_float_types class attribute of pygad.GA.
The tutorial example maximizes a single-objective linear function: find the weights W1 to W6 whose weighted sum of the 6 inputs is as close as possible to the desired output y=44. The closer a solution's output is to y=44, the higher its fitness.
PyGAD validates the fitness function before running (for example, the number of its parameters is inspected through its __code__ attribute). When the fitness_batch_size parameter is used, the fitness function processes a whole batch of solutions per call, as described later in the Batch Fitness Calculation section.
num_generations = 50
num_parents_mating = 4
fitness_function = fitness_func
sol_per_pop = 8
num_genes = len(function_inputs)
init_range_low = -2
init_range_high = 5
parent_selection_type = "sss"
keep_parents = 1
crossover_type = "single_point"
mutation_type = "random"
mutation_percent_genes = 10
An optional callback can be assigned to the on_generation parameter; it is called after each generation completes. The on_gen() function below prints the number of completed generations (from the generations_completed attribute) and the fitness of the best solution:
def on_gen(ga_instance):
print("Generation : ", ga_instance.generations_completed)
print("Fitness of the best solution :", ga_instance.best_solution()[1])
Assign on_gen() to the on_generation parameter when creating the instance:
ga_instance = pygad.GA(...,
on_generation=on_gen,
...)
Import PyGAD
Import the pygad module:
import pygad
Create an Instance of the pygad.GA Class
Create the instance using the parameters prepared above:
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
fitness_func=fitness_function,
sol_per_pop=sol_per_pop,
num_genes=num_genes,
init_range_low=init_range_low,
init_range_high=init_range_high,
parent_selection_type=parent_selection_type,
keep_parents=keep_parents,
crossover_type=crossover_type,
mutation_type=mutation_type,
mutation_percent_genes=mutation_percent_genes)
Run the Genetic Algorithm
Call the run() method on the pygad.GA instance to start the evolution:
ga_instance.run()
After run() completes, the plot_fitness() method shows how the fitness values change by generation:
ga_instance.plot_fitness()
Information about the best solution is returned by the best_solution() method:
solution, solution_fitness, solution_idx = ga_instance.best_solution()
print(f"Parameters of the best solution : {solution}")
print(f"Fitness value of the best solution = {solution_fitness}")
print(f"Index of the best solution : {solution_idx}")
The best_solution_generation attribute of the pygad.GA instance holds the generation at which the best fitness was reached:
if ga_instance.best_solution_generation != -1:
    print(f"Best fitness value reached after {ga_instance.best_solution_generation} generations.")
After run() completes, the save() method stores the instance in a pickle file (here genetic.pkl):
filename = 'genetic'
ga_instance.save(filename=filename)
Use the pygad.load() function to restore an instance saved with save(); the loaded instance can be inspected or evolved further with run():
loaded_ga_instance = pygad.load(filename=filename)
print(loaded_ga_instance.best_solution())
Lifecycle Callbacks
The pygad.GA class accepts callback functions for every step of the lifecycle (on_start, on_fitness, on_parents, on_crossover, on_mutation, on_generation, and on_stop). Returning the string "stop" from on_generation stops run() early. The example below prints a message from each callback (only two of the callback definitions are shown; the others follow the same pattern):
import pygad
import numpy
function_inputs = [4,-2,3.5,5,-11,-4.7]
desired_output = 44
fitness_function = fitness_func
def on_start(ga_instance):
print("on_start()")
def on_generation(ga_instance):
print("on_generation()")
ga_instance = pygad.GA(num_generations=3,
num_parents_mating=5,
fitness_func=fitness_function,
sol_per_pop=10,
num_genes=len(function_inputs),
on_start=on_start,
on_fitness=on_fitness,
on_parents=on_parents,
on_crossover=on_crossover,
on_mutation=on_mutation,
on_generation=on_generation,
on_stop=on_stop)
ga_instance.run()
Because num_generations=3, the output shows on_start() called once, the per-generation callbacks called 3 times each, and on_stop() called once:
on_start()
on_fitness()
on_parents()
on_crossover()
on_mutation()
on_generation()
on_fitness()
on_parents()
on_crossover()
on_mutation()
on_generation()
on_fitness()
on_parents()
on_crossover()
on_mutation()
on_generation()
on_stop()
Full Example
The complete example below optimizes the linear function using the pygad module:
import pygad
import numpy
"""
Given the following function:
y = f(w1:w6) = w1x1 + w2x2 + w3x3 + w4x4 + w5x5 + w6x6
where (x1,x2,x3,x4,x5,x6)=(4,-2,3.5,5,-11,-4.7) and y=44
What are the best values for the 6 weights (w1 to w6)? We are going to use the genetic algorithm to optimize this function.
"""
last_fitness = 0
def on_generation(ga_instance):
    global last_fitness
    print(f"Generation = {ga_instance.generations_completed}")
    print(f"Fitness = {ga_instance.best_solution(pop_fitness=ga_instance.last_generation_fitness)[1]}")
    print(f"Change = {ga_instance.best_solution(pop_fitness=ga_instance.last_generation_fitness)[1] - last_fitness}")
    last_fitness = ga_instance.best_solution(pop_fitness=ga_instance.last_generation_fitness)[1]
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
sol_per_pop=sol_per_pop,
num_genes=num_genes,
fitness_func=fitness_func,
on_generation=on_generation)
ga_instance.plot_fitness()
solution, solution_fitness, solution_idx = ga_instance.best_solution(pop_fitness=ga_instance.last_generation_fitness)
prediction = numpy.sum(numpy.array(function_inputs)*solution)
print(f"Predicted output based on the best solution : {prediction}")
if ga_instance.best_solution_generation != -1:
    print(f"Best fitness value reached after {ga_instance.best_solution_generation} generations.")
filename = 'genetic'
ga_instance.save(filename=filename)
Multi-Objective Optimization
PyGAD supports multi-objective optimization. The example below optimizes 2 linear functions at once:
Function 1: inputs (x1,x2,x3,x4,x5,x6)=(4,-2,3.5,5,-11,-4.7) with desired output y1=50.
Function 2: inputs (x7,x8,x9,x10,x11,x12)=(-2,0.7,-9,1.4,3,5) with desired output y2=30.
The goal is a single set of weights (w1 to w6) that satisfies both y1 and y2. For multi-objective problems, the fitness function returns a list, tuple, or numpy.ndarray with one fitness value per objective.
import pygad
import numpy
"""
Given these 2 functions:
    y1 = f(w1:w6) = w1x1 + w2x2 + w3x3 + w4x4 + w5x5 + w6x6
    y2 = f(w1:w6) = w1x7 + w2x8 + w3x9 + w4x10 + w5x11 + w6x12
    where (x1,x2,x3,x4,x5,x6)=(4,-2,3.5,5,-11,-4.7) and y=50
    and (x7,x8,x9,x10,x11,x12)=(-2,0.7,-9,1.4,3,5) and y=30
What are the best values for the 6 weights (w1 to w6)? We are going to use the genetic algorithm to optimize these 2 functions.
"""
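# The original fitness function is not shown in this extract. A minimal sketch,
# assuming the names used by the prediction code further below (function_inputs1,
# function_inputs2 and the desired outputs 50 and 30), returns one fitness value
# per objective:
function_inputs1 = [4, -2, 3.5, 5, -11, -4.7]   # Function 1 inputs.
function_inputs2 = [-2, 0.7, -9, 1.4, 3, 5]     # Function 2 inputs.
desired_output1 = 50
desired_output2 = 30

def fitness_func(ga_instance, solution, solution_idx):
    output1 = numpy.sum(solution*function_inputs1)
    output2 = numpy.sum(solution*function_inputs2)
    fitness1 = 1.0 / (numpy.abs(output1 - desired_output1) + 0.000001)
    fitness2 = 1.0 / (numpy.abs(output2 - desired_output2) + 0.000001)
    return [fitness1, fitness2]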
num_generations = 100
num_parents_mating = 10
sol_per_pop = 20
num_genes = len(function_inputs1)
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
sol_per_pop=sol_per_pop,
num_genes=num_genes,
fitness_func=fitness_func,
parent_selection_type='nsga2')
ga_instance.run()
solution, solution_fitness, solution_idx = ga_instance.best_solution(ga_instance.last_generation_fitness)
prediction = numpy.sum(numpy.array(function_inputs1)*solution)
print(f"Predicted output 1 based on the best solution : {prediction}")
prediction = numpy.sum(numpy.array(function_inputs2)*solution)
print(f"Predicted output 2 based on the best solution : {prediction}")
The plot_fitness() method then shows how the fitness evolves by generation.
Reproducing Images
This example uses PyGAD to reproduce an image. First read the target image (fruit.jpg) and normalize its pixel values to the 0-1 range:
import imageio
import numpy
target_im = imageio.imread('fruit.jpg')
target_im = numpy.asarray(target_im/255, dtype=float)
Then represent the image as a 1D vector (chromosome) so it can be optimized with pygad.GA. This is done using the gari module:
import gari
target_chromosome = gari.img2chromosome(target_im)
The gari.img2chromosome() function, defined in the gari module, flattens the image array into a 1D vector:
import numpy
import functools
import operator
def img2chromosome(img_arr):
return numpy.reshape(a=img_arr, newshape=(functools.reduce(operator.mul, img_arr.
,→shape)))
Next prepare the fitness function and create the pygad.GA instance. Because the pixel values are normalized to the 0-1 range, mutation_by_replacement is set to True and both init_range_low/init_range_high and random_mutation_min_val/random_mutation_max_val are set to 0.0 and 1.0, so that the initial population and the mutated genes stay within the valid pixel range.
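The fitness function itself is not included in this extract; a minimal sketch (using the fitness_fun name referenced in the constructor below and the target_chromosome produced above) rewards solutions whose pixel values are close to the target image:

def fitness_fun(ga_instance, solution, solution_idx):
    # Sum of absolute pixel differences; a smaller difference means a higher fitness.
    fitness = numpy.sum(numpy.abs(target_chromosome - solution))
    fitness = numpy.sum(target_chromosome) - fitness
    return fitness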
import pygad
ga_instance = pygad.GA(num_generations=20000,
num_parents_mating=10,
fitness_func=fitness_fun,
sol_per_pop=20,
num_genes=target_im.size,
init_range_low=0.0,
init_range_high=1.0,
mutation_percent_genes=0.01,
mutation_type="random",
mutation_by_replacement=True,
random_mutation_min_val=0.0,
random_mutation_max_val=1.0)
Call run() to start reproducing the image:
ga_instance.run()
After run() completes, plot_fitness() shows how the fitness value evolves by generation:
ga_instance.plot_fitness()
if ga_instance.best_solution_generation != -1:
    print(f"Best fitness value reached after {ga_instance.best_solution_generation} generations.")
Multi-Objective Support in pygad.GA
Multi-objective optimization changes 2 things in the pygad.GA class (single-objective problems keep working unchanged):
The fitness function may return a list, tuple, or numpy.ndarray of fitness values, one per objective.
Two NSGA-II based parent selection types are available: nsga2 and tournament_nsga2.
The same two-function example is used: function 1 has inputs (x1,x2,x3,x4,x5,x6)=(4,-2,3.5,5,-11,-4.7) with y1=50, function 2 has inputs (x7,x8,x9,x10,x11,x12)=(-2,0.7,-9,1.4,3,5) with y2=30, and the goal is a single set of weights w1 to w6 that satisfies both objectives.
import pygad
import numpy
"""
Given these 2 functions:
y1 = f(w1:w6) = w1x1 + w2x2 + w3x3 + w4x4 + w5x5 + w6x6
y2 = f(w1:w6) = w1x7 + w2x8 + w3x9 + w4x10 + w5x11 + w6x12
where (x1,x2,x3,x4,x5,x6)=(4,-2,3.5,5,-11,-4.7) and y=50
and (x7,x8,x9,x10,x11,x12)=(-2,0.7,-9,1.4,3,5) and y=30
What are the best values for the 6 weights (w1 to w6)? We are going to use the genetic algorithm to optimize these 2 functions.
"""
num_generations = 100
num_parents_mating = 10
sol_per_pop = 20
num_genes = len(function_inputs1)
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
sol_per_pop=sol_per_pop,
num_genes=num_genes,
fitness_func=fitness_func,
parent_selection_type='nsga2')
ga_instance.run()
prediction = numpy.sum(numpy.array(function_inputs1)*solution)
print(f"Predicted output 1 based on the best solution : {prediction}")
prediction = numpy.sum(numpy.array(function_inputs2)*solution)
print(f"Predicted output 2 based on the best solution : {prediction}")
The plot_fitness() method then shows the fitness of each objective across generations.
Limit the Gene Value Space (gene_space)
In some problems, each gene must be selected from a fixed set of values rather than a continuous range. The gene_space parameter restricts the values each gene can take. Suppose the problem has 3 genes with the following allowed values:
Gene 1: [0.4, 12, -5, 21.2]
Gene 2: [-2, 0.3]
Gene 3: [1.2, 63.2, 7.4]
Then gene_space is a nested list with one entry per gene:
gene_space = [[0.4, 12, -5, 21.2],
[-2, 0.3],
[1.2, 63.2, 7.4]]
gene_space
{'low': 1,
'high': 5}
'low''high'
() ().
gene_space = [{'low': 1, 'high': 5}, {'low': 0.3, 'high': 1.4}, {'low': -0.2, 'high': 4.5}]
In general, each entry of gene_space can be one of the following:
A single value (int, float, or a NumPy numeric type): the gene is fixed to that value.
A list, tuple, numpy.ndarray, range, numpy.arange(), or numpy.linspace: the gene value is selected from the given values.
A dict with 'low' and 'high' keys (and optionally 'step'): the gene value is sampled between 'low' and 'high'; 'step' restricts the sampled values to a discrete grid.
None: the gene is unrestricted, so its value comes from the ranges defined by init_range_low/init_range_high (initial population) and random_mutation_min_val/random_mutation_max_val (mutation). If gene_space itself is None (the default), this applies to all genes.
When gene_space is a nested structure, each gene uses its own space, e.g. [[0.4, -5], [0.5, -3.2, 8.8, -9]]. When gene_space is a single flat structure, all genes share the same space, e.g.:
gene_space = numpy.arange(15)
When gene_space is used, the initial population is created from the specified spaces (unless an explicit initial_population is passed), and both crossover and mutation keep every gene inside its space.
Note that a flat gene_space of ints or floats lists the exact allowed values, not a range: gene_space=[1, 2, 3] allows only the values 1, 2, and 3, and [1, 5] allows only 1 and 5 (not the values between them). When a gene's space is None, the gene is unrestricted, and mutation adds a random value from the range random_mutation_min_val to random_mutation_max_val to the current gene value (so a gene holding -0.5 receives a random offset rather than being replaced).
The same applies when the gene space is a dict such as {'low': 1, 'high': 5}: mutation adds a random value sampled between random_mutation_min_val and random_mutation_max_val to the current gene value instead of replacing it. For example, with random_mutation_min_val=-1 and random_mutation_max_val=1, if the sampled value is 0.3 and the current gene value is 1.5, the new value is 1.5+0.3=1.8.
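As a quick illustration (not taken from the original text), here is a small pygad.GA call that combines the gene_space forms described above: a fixed list for the first gene, a continuous range for the second, and a stepped range for the third:

import pygad
import numpy

def fitness_func(ga_instance, solution, solution_idx):
    # Toy fitness: prefer larger gene sums.
    return numpy.sum(solution)

ga_instance = pygad.GA(num_generations=10,
                       num_parents_mating=2,
                       sol_per_pop=6,
                       num_genes=3,
                       fitness_func=fitness_func,
                       # Gene 0: fixed list, gene 1: continuous range, gene 2: discrete grid.
                       gene_space=[[0.4, 12, -5, 21.2],
                                   {'low': 1, 'high': 5},
                                   {'low': 0, 'high': 10, 'step': 2}])
ga_instance.run()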
Stop run() Early (stop_criteria)
There are 2 ways to stop the run() method before all num_generations generations of the pygad.GA instance complete. The first is to return the string "stop" from the function assigned to the on_generation parameter. The callback below stops the run once the best fitness reaches 70:
def func_generation(ga_instance):
if ga_instance.best_solution()[1] >= 70:
return "stop"
The second way is the stop_criteria parameter of the pygad.GA constructor. It accepts a string (str), or a list of strings, of the form "word_num" where word is one of 2 criteria:
reach: stop run() once the fitness is equal to or greater than the given value, e.g. "reach_40".
saturate: stop run() once the fitness does not change for the given number of generations, e.g. "saturate_7".
The next example stops once the fitness reaches 127.4 or saturates for 15 generations:
import pygad
import numpy
def fitness_func(ga_instance, solution, solution_idx):
    output = numpy.sum(solution * equation_inputs)
    fitness = 1.0 / (numpy.abs(output - desired_output) + 0.000001)
    return fitness
ga_instance = pygad.GA(num_generations=200,
sol_per_pop=10,
num_parents_mating=4,
num_genes=len(equation_inputs),
fitness_func=fitness_func,
stop_criteria=["reach_127.4", "saturate_15"])
ga_instance.run()
print(f"Number of generations passed is {ga_instance.generations_completed}")
Elitism Selection (keep_elitism)
The keep_elitism parameter keeps the best solutions (the elitism) of the current generation in the next generation. It defaults to 1, meaning only the single best solution is kept. The example below keeps the 2 best solutions by passing keep_elitism=2 to the pygad.GA constructor:
import numpy
import pygad
function_inputs = [4,-2,3.5,5,-11,-4.7]
desired_output = 44
ga_instance = pygad.GA(num_generations=2,
num_parents_mating=3,
fitness_func=fitness_func,
num_genes=6,
sol_per_pop=5,
keep_elitism=2)
ga_instance.run()
The value passed to keep_elitism must satisfy 2 conditions:
It must be >= 0.
It must be <= sol_per_pop.
If keep_elitism equals sol_per_pop, the entire population is copied from one generation to the next, so no evolution happens at all, as in this example:
...
ga_instance = pygad.GA(...,
sol_per_pop=5,
keep_elitism=5)
ga_instance.run()
Note that keep_elitism takes precedence over the older keep_parents parameter: as long as keep_elitism is not 0, keep_parents has no effect. Set keep_elitism=0 to make keep_parents work again.
Random Seed (random_seed)
The random_seed parameter seeds the random number generators used by PyGAD so that the same results are reproduced across runs. It defaults to None, meaning no seed is used. The example below sets random_seed=2:
import numpy
import pygad
function_inputs = [4,-2,3.5,5,-11,-4.7]
desired_output = 44
ga_instance = pygad.GA(num_generations=2,
num_parents_mating=3,
fitness_func=fitness_func,
sol_per_pop=5,
num_genes=6,
random_seed=2)
ga_instance.run()
best_solution, best_solution_fitness, best_match_idx = ga_instance.best_solution()
print(best_solution)
print(best_solution_fitness)
Continue without Losing Progress
Calling run() again on the same instance (or on a re-loaded instance) continues the evolution from where it stopped, because these attributes are not reset between calls:
self.best_solutions
self.best_solutions_fitness
self.solutions
self.solutions_fitness
The example below runs the GA, plots the fitness, and saves the instance with save():
import pygad
ga_instance = pygad.GA(...)
ga_instance.run()
ga_instance.plot_fitness()
ga_instance.save("pygad_GA")
Then the instance is loaded back with load() and run() is called again to continue the evolution:
import pygad
loaded_ga_instance = pygad.load("pygad_GA")
loaded_ga_instance.run()
loaded_ga_instance.plot_fitness()
The plot_fitness() figure of the loaded instance then includes the history of the first run. Note that self.best_solutions and self.best_solutions_fitness are only populated when save_best_solutions=True, and self.solutions and self.solutions_fitness only when save_solutions=True.
The state carried between runs also involves these attributes: population, num_offspring, num_parents_mating, fitness_func, sol_per_pop (the population size), the last_generation_* attributes (last_generation_fitness, last_generation_parents and last_generation_parents_indices, and, when keep_elitism != 0, last_generation_elitism and last_generation_elitism_indices), keep_elitism, and pop_size.
Prevent Duplicates in Gene Values (allow_duplicate_genes)
By default allow_duplicate_genes=True, so a solution may contain duplicate gene values. Set allow_duplicate_genes=False to prevent duplicates inside the same solution. The example below prints the population after each generation; with allow_duplicate_genes=False no solution has duplicate values:
import pygad
def on_generation(ga):
print("Generation", ga.generations_completed)
print(ga.population)
ga_instance = pygad.GA(num_generations=5,
sol_per_pop=5,
num_genes=4,
mutation_num_genes=3,
random_mutation_min_val=-5,
random_mutation_max_val=5,
num_parents_mating=2,
fitness_func=fitness_func,
gene_type=int,
on_generation=on_generation,
allow_duplicate_genes=False)
ga_instance.run()
Generation 1
[[ 2 -2 -3 3]
[ 0 1 2 3]
[ 5 -3 6 3]
[-3 1 -2 4]
[-1 0 -2 3]]
Generation 2
[[-1 0 -2 3]
[-3 1 -2 4]
[ 0 -3 -2 6]
[-3 0 -2 3]
[ 1 -4 2 4]]
Generation 3
[[ 1 -4 2 4]
[-3 0 -2 3]
[ 4 0 -2 1]
[-4 0 -2 -3]
[-4 2 0 3]]
Generation 4
[[-4 2 0 3]
[-4 0 -2 -3]
[-2 5 4 -3]
[-1 2 -4 4]
[-4 2 0 -3]]
Generation 5
[[-4 2 0 -3]
[-1 2 -4 4]
[ 3 4 -4 0]
[-1 0 2 -2]
[-4 2 -1 1]]
allow_duplicate_genes also works when gene_space is used; the duplicates are then resolved using unused values from the gene's space:
import pygad
def on_generation(ga):
print("Generation", ga.generations_completed)
print(ga.population)
ga_instance = pygad.GA(num_generations=1,
sol_per_pop=5,
num_genes=4,
num_parents_mating=2,
fitness_func=fitness_func,
gene_type=int,
gene_space=[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3,
,→ 4]],
on_generation=on_generation,
allow_duplicate_genes=False)
ga_instance.run()
Generation 1
[[2 3 1 4]
[2 3 1 4]
[2 4 1 3]
[2 3 1 4]
[1 3 2 4]]
Generation 2
[[1 3 2 4]
[2 3 1 4]
[1 3 2 4]
[2 3 4 1]
[1 3 4 2]]
Generation 3
[[1 3 4 2]
[2 3 4 1]
[1 3 4 2]
[3 1 4 2]
[3 2 4 1]]
Generation 4
[[3 2 4 1]
[3 1 4 2]
[3 2 4 1]
[1 2 4 3]
[1 3 4 2]]
Generation 5
[[1 3 4 2]
[1 2 4 3]
[2 1 4 3]
[1 2 4 3]
[1 2 4 3]]
Resolving duplicates is not always possible. If 2 genes with the spaces [3, 4] and [4, 5] both hold the value 4, the duplicate can be resolved because the combined space [3, 4, 4, 5] still contains unused values (the first gene can move to 3 or the second to 5). But consider this case:
Gene space: [[0, 1],
             [1, 2],
             [2, 3],
             [3, 4]]
Solution: [1, 2, 2, 3]
The duplicated value 2 cannot be resolved: moving either of the 2 genes to the other value in its space ([1, 2] -> 1 or [2, 3] -> 3) only creates a new duplicate with a neighbouring gene, so the solution keeps its duplicate values.
User-Defined Data Types (gene_type)
The gene_type parameter controls the data type of the genes. It defaults to float, so every gene is a floating-point number. To force all genes to be integers, pass gene_type=int. The supported types are Python's int and float plus the NumPy integer, unsigned-integer, and float types of 8 to 64 bits. The next example forces the genes to be of type int:
import pygad
import numpy
ga_instance = pygad.GA(num_generations=10,
sol_per_pop=5,
num_parents_mating=2,
num_genes=len(equation_inputs),
fitness_func=fitness_func,
gene_type=int)
print("Initial Population")
print(ga_instance.initial_population)
ga_instance.run()
print("Final Population")
print(ga_instance.population)
Initial Population
[[ 1 -1 2 0 -3]
[ 0 -2 0 -3 -1]
[ 0 -1 -1 2 0]
[-2 3 -2 3 3]
[ 0 0 2 -2 -2]]
Final Population
[[ 1 -1 2 2 0]
[ 1 -1 2 2 0]
[ 1 -1 2 2 0]
[ 1 -1 2 2 0]
[ 1 -1 2 2 0]]
When the type is float, a precision (number of decimal places) can be given as a second element. For example, gene_type=[float, 3] makes every gene a float rounded to 3 decimal places:
import pygad
import numpy
def fitness_func(ga_instance, solution, solution_idx):
    output = numpy.sum(solution * equation_inputs)
    fitness = 1.0 / (numpy.abs(output - desired_output) + 0.000001)
    return fitness
ga_instance = pygad.GA(num_generations=10,
sol_per_pop=5,
num_parents_mating=2,
num_genes=len(equation_inputs),
fitness_func=fitness_func,
gene_type=[float, 3])
print("Initial Population")
print(ga_instance.initial_population)
ga_instance.run()
print("Final Population")
print(ga_instance.population)
Note how every gene value in both the initial and final populations is rounded to 3 decimal places, as requested by gene_type:
Initial Population
[[-2.417 -0.487 3.623 2.457 -2.362]
[-1.231 0.079 -1.63 1.629 -2.637]
[ 0.692 -2.098 0.705 0.914 -3.633]
[ 2.637 -1.339 -1.107 -0.781 -3.896]
[-1.495 1.378 -1.026 3.522 2.379]]
Final Population
[[ 1.714 -1.024 3.623 3.185 -2.362]
[ 0.692 -1.024 3.623 3.185 -2.362]
[ 0.692 -1.024 3.623 3.375 -2.362]
[ 0.692 -1.024 4.041 3.185 -2.362]
[ 1.714 -0.644 3.623 3.185 -2.362]]
The gene_type parameter also accepts a list, tuple, or numpy.ndarray that specifies a data type for each individual gene:
import pygad
import numpy
ga_instance = pygad.GA(num_generations=10,
sol_per_pop=5,
num_parents_mating=2,
num_genes=len(equation_inputs),
fitness_func=fitness_func,
gene_type=[int, float, numpy.float16, numpy.int8, float])
print("Initial Population")
print(ga_instance.initial_population)
ga_instance.run()
print("Final Population")
print(ga_instance.population)
Initial Population
[[0 0.8615522360026828 0.7021484375 -2 3.5301821368185866]
[-3 2.648189378595294 -3.830078125 1 -0.9586271572917742]
[3 3.7729827570110714 1.2529296875 -3 1.395741994211889]
[0 1.0490687178053282 1.51953125 -2 0.7243617940450235]
[0 -0.6550158436937226 -2.861328125 -2 1.8212734549263097]]
Final Population
[[3 3.7729827570110714 2.055 0 0.7243617940450235]
[3 3.7729827570110714 1.458 0 -0.14638754050305036]
[3 3.7729827570110714 1.458 0 0.0869406120516778]
[3 3.7729827570110714 1.458 0 0.7243617940450235]
[3 3.7729827570110714 1.458 0 -0.14638754050305036]]
The precision can also be set per gene by using a [type, precision] pair; precision is only supported for float types:
import pygad
import numpy
ga_instance = pygad.GA(num_generations=10,
sol_per_pop=5,
num_parents_mating=2,
num_genes=len(equation_inputs),
fitness_func=fitness_func,
gene_type=[int, [float, 2], numpy.float16, numpy.int8, [float, 1]])
print("Initial Population")
print(ga_instance.initial_population)
ga_instance.run()
print("Final Population")
print(ga_instance.population)
Initial Population
[[-2 -1.22 1.716796875 -1 0.2]
[-1 -1.58 -3.091796875 0 -1.3]
[3 3.35 -0.107421875 1 -3.3]
[-2 -3.58 -1.779296875 0 0.6]
[2 -3.73 2.65234375 3 -0.5]]
Final Population
[[2 -4.22 3.47 3 -1.3]
[2 -3.73 3.47 3 -1.3]
[2 -4.22 3.47 2 -1.3]
[2 -4.58 3.47 3 -1.3]
[2 -3.73 3.47 3 -1.3]]
Parallel Processing in PyGAD
The parallel_processing parameter of the pygad.GA constructor lets the fitness be calculated in parallel using threads or processes:
import pygad
...
ga_instance = pygad.GA(...,
parallel_processing=...)
...
The parallel_processing parameter accepts one of the following:
None: (default) no parallel processing is used.
An int: the number of threads to use.
A list or tuple of 2 elements: the first element is either 'process' or 'thread' and the second is the number of processes or threads. If that number is 0, it is the same as parallel_processing=None (no parallelism); if it is None, the default worker count of the concurrent.futures module is used.
Some examples:
parallel_processing=4: use 4 threads.
parallel_processing=["thread", 5]: use 5 threads; equivalent to parallel_processing=5.
parallel_processing=["process", 8]: use 8 processes.
parallel_processing=["process", 0]: use 0 processes; same as parallel_processing=None.
The first example uses a slow fitness function (for example one that loops for a long time) and keeps parallel_processing=None in the pygad.GA constructor, so no parallelism is used:
import pygad
import time
ga_instance = pygad.GA(num_generations=9999,
num_parents_mating=3,
sol_per_pop=5,
num_genes=10,
fitness_func=fitness_func,
suppress_warnings=True,
parallel_processing=None)
if __name__ == '__main__':
t1 = time.time()
ga_instance.run()
t2 = time.time()
print("Time is", t2-t1)
Without parallelism this run takes roughly 1.5 seconds. Now let's use 5 threads instead:
...
ga_instance = pygad.GA(...,
parallel_processing=5)
...
Threads may not help for CPU-bound fitness functions, so the next example uses 5 processes instead (here with num_generations=99):
...
ga_instance = pygad.GA(num_generations=99,
...,
parallel_processing=["process", 5])
...
import pygad
import time
ga_instance = pygad.GA(num_generations=5,
num_parents_mating=3,
sol_per_pop=5,
num_genes=10,
fitness_func=fitness_func,
suppress_warnings=True,
parallel_processing=None)
if __name__ == '__main__':
t1 = time.time()
ga_instance.run()
t2 = time.time()
print("Time is", t2-t1)
...
ga_instance = pygad.GA(...,
parallel_processing=["process", 10])
...
...
ga_instance = pygad.GA(...,
parallel_processing=["thread", 10])
...
The summary() Method
The summary() method prints a Keras-like summary of the PyGAD lifecycle. It accepts the following optional parameters:
line_length=70: length of a printed line.
fill_character=" ": the character used to fill a line.
line_character="-": the character of the single separator lines.
line_character2="=": the character of the double separator lines.
columns_equal_len=False: whether the lengths of the printed columns are equal.
print_step_parameters=True: whether the parameters of each lifecycle step are printed; pass print_step_parameters=False to hide them.
print_parameters_summary=True: whether a summary of the pygad.GA parameters is printed at the end of the lifecycle summary.
import pygad
import numpy
function_inputs = [4,-2,3.5,5,-11,-4.7]
desired_output = 44
def on_gen(ga):
pass
ga_instance = pygad.GA(num_generations=100,
num_parents_mating=10,
sol_per_pop=20,
num_genes=len(function_inputs),
on_crossover=on_crossover_callback,
on_generation=on_gen,
parallel_processing=2,
stop_criteria="reach_10",
fitness_batch_size=4,
crossover_probability=0.4,
fitness_func=genetic_fitness)
Calling summary() on this instance prints the full lifecycle, including the registered on_crossover_callback() and on_gen() callbacks:
ga_instance.summary()
----------------------------------------------------------------------
PyGAD Lifecycle
======================================================================
Step Handler Output Shape
======================================================================
Fitness Function genetic_fitness() (1)
Fitness batch size: 4
----------------------------------------------------------------------
Parent Selection steady_state_selection() (10, 6)
Number of Parents: 10
----------------------------------------------------------------------
Crossover single_point_crossover() (10, 6)
Crossover probability: 0.4
----------------------------------------------------------------------
On Crossover on_crossover_callback() None
----------------------------------------------------------------------
Mutation random_mutation() (10, 6)
Mutation Genes: 1
Random Mutation Range: (-1.0, 1.0)
Mutation by Replacement: False
Allow Duplicated Genes: True
----------------------------------------------------------------------
On Generation on_gen() None
Stop Criteria: [['reach', 10.0]]
----------------------------------------------------------------------
======================================================================
Population Size: (20, 6)
Number of Generations: 100
Initial Population Range: (-4, 4)
Keep Elitism: 1
Gene DType: [<class 'float'>, None]
Parallel Processing: ['thread', 2]
Save Best Solutions: False
Save Solutions: False
======================================================================
Passing print_step_parameters=False and print_parameters_summary=False hides the per-step parameters and the final parameters summary:
ga_instance.summary(print_step_parameters=False,
print_parameters_summary=False)
----------------------------------------------------------------------
PyGAD Lifecycle
======================================================================
Step Handler Output Shape
======================================================================
Fitness Function genetic_fitness() (1)
----------------------------------------------------------------------
Parent Selection steady_state_selection() (10, 6)
----------------------------------------------------------------------
Crossover single_point_crossover() (10, 6)
----------------------------------------------------------------------
On Crossover on_crossover_callback() None
----------------------------------------------------------------------
Mutation random_mutation() (10, 6)
----------------------------------------------------------------------
On Generation on_gen() None
----------------------------------------------------------------------
======================================================================
Logging Outputs
Instead of using print() for its messages, PyGAD uses the logging module. Pass a logging.Logger instance to the logger parameter of the pygad.GA constructor to control where the messages go:
import logging
logger = ...
ga_instance = pygad.GA(...,
logger=logger,
...)
If logger=None (the default), a logger that behaves just like print() is created and the messages appear on the console. The logging module gives full control through Handlers (where the log is written: console, file, ...) and Formatters (how each message is formatted). The examples below create loggers for the most common cases, starting with a console logger:
import logging
# Create a logger
logger = logging.getLogger(__name__)
# Set the logger level to debug so that all the messages are printed.
logger.setLevel(logging.DEBUG)
# Create a formatter
formatter = logging.Formatter('%(message)s')
# Create a stream (console) handler and attach the formatter to it.
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
With this Formatter only the message text is printed. The logger is then used through the usual logging methods:
logger.debug('Debug message.')
logger.info('Info message.')
logger.warning('Warn message.')
logger.error('Error message.')
logger.critical('Critical message.')
The output is exactly what print() would have produced:
Debug message.
Info message.
Warn message.
Error message.
Critical message.
If the same logger is created more than once (for example when re-running a script), clear its existing handlers first so that messages are not duplicated:
logger.handlers.clear()
The next logger writes the messages to a file named logfile.txt:
import logging

level = logging.DEBUG
name = 'logfile.txt'

logger = logging.getLogger(name)
logger.setLevel(level)
# Clear any handlers left over from a previous run of this script.
logger.handlers.clear()

file_handler = logging.FileHandler(name, 'a+', 'utf-8')
file_handler.setLevel(logging.DEBUG)
file_format = logging.Formatter('%(asctime)s %(levelname)s: %(message)s - %(pathname)s:%(lineno)d',
                                datefmt='%Y-%m-%d %H:%M:%S')
file_handler.setFormatter(file_format)
logger.addHandler(file_handler)
import logging
level = logging.DEBUG
name = 'logfile.txt'
logger = logging.getLogger(name)
logger.setLevel(level)
file_handler = logging.FileHandler(name,'a+','utf-8')
file_handler.setLevel(logging.DEBUG)
file_format = logging.Formatter('%(asctime)s %(levelname)s: %(message)s - %(pathname)s:%(lineno)d', datefmt='%Y-%m-%d %H:%M:%S')
file_handler.setFormatter(file_format)
logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_format = logging.Formatter('%(message)s')
console_handler.setFormatter(console_format)
logger.addHandler(console_handler)
This logger prints each message to the console and also writes it to logfile.txt. As before, clear the handlers if the logger is recreated:
logger.handlers.clear()
The complete example below passes the logger to the pygad.GA constructor; inside the on_generation callback the messages are emitted through ga_instance.logger:
import logging
import pygad
import numpy
level = logging.DEBUG
name = 'logfile.txt'
logger = logging.getLogger(name)
logger.setLevel(level)
file_handler = logging.FileHandler(name,'a+','utf-8')
file_handler.setLevel(logging.DEBUG)
file_format = logging.Formatter('%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
file_handler.setFormatter(file_format)
logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_format = logging.Formatter('%(message)s')
console_handler.setFormatter(console_format)
logger.addHandler(console_handler)
def on_generation(ga_instance):
ga_instance.logger.info(f"Generation = {ga_instance.generations_completed}")
    ga_instance.logger.info(f"Fitness = {ga_instance.best_solution(pop_fitness=ga_instance.last_generation_fitness)[1]}")
ga_instance = pygad.GA(num_generations=10,
sol_per_pop=40,
num_parents_mating=2,
keep_parents=2,
num_genes=len(equation_inputs),
fitness_func=fitness_func,
on_generation=on_generation,
logger=logger)
ga_instance.run()
logger.handlers.clear()
Reuse of Fitness Values
Some features make PyGAD reuse previously calculated fitness values instead of calling the fitness function again. To force the fitness function to be called for every solution in every generation, disable them all:
keep_elitism=0
keep_parents=0
save_solutions=False
save_best_solutions=False
as in the following call:
import pygad
...
ga_instance = pygad.GA(...,
keep_elitism=0,
keep_parents=0,
save_solutions=False,
save_best_solutions=False,
...)
The reuse happens inside the cal_pop_fitness() method of the pygad.GA class. A solution's fitness is copied rather than recalculated in these cases:
save_solutions=True: the solution was already explored and exists in the solutions attribute of the pygad.GA instance.
save_best_solutions=True: the solution is one of the saved best solutions.
keep_elitism > 0: the solution is one of the elitism solutions kept from the previous generation.
keep_parents equal to -1 or greater than 0: the solution is one of the parents kept from the previous generation.
For example, with the default keep_elitism=1:
ga_instance = pygad.GA(...,
keep_elitism=1,
...)
only the fitness of the single best solution is reused each generation. With keep_elitism=2, the fitness of the 2 best solutions is reused, saving 2 fitness calls per generation (and so on for larger values). To call the fitness function for every solution, set keep_elitism, keep_parents, save_solutions, and save_best_solutions as follows:
ga_instance = pygad.GA(...,
keep_elitism=0,
keep_parents=0,
save_solutions=False,
save_best_solutions=False,
...)
Batch Fitness Calculation (fitness_batch_size)
The fitness_batch_size parameter calculates the fitness in batches:
If fitness_batch_size is 1 or None (the default), the fitness function is called once for each individual solution.
If 1 < fitness_batch_size <= sol_per_pop, the solutions are grouped into batches of size fitness_batch_size and the fitness function is called once per batch; it then receives the batch of solutions and their indices and must return one fitness value per solution in the batch.
The first example keeps fitness_batch_size=None, so the fitness function fitness_func is called once per solution; a global counter records how many times it is called:
import pygad
import numpy
function_inputs = [4,-2,3.5,5,-11,-4.7]
desired_output = 44
number_of_calls = 0
ga_instance = pygad.GA(num_generations=5,
num_parents_mating=10,
sol_per_pop=20,
fitness_func=fitness_func,
fitness_batch_size=None,
# fitness_batch_size=1,
num_genes=len(function_inputs),
keep_elitism=0,
keep_parents=0)
ga_instance.run()
print(number_of_calls)
The fitness function was called 120 times: 20 solutions per population times 6 populations (the initial population plus 5 generations), with keep_elitism=0 and keep_parents=0 so no fitness value is reused. Next, fitness_batch_size=4 is used, so the fitness function receives a batch of 4 solutions and their 4 indices at a time. This is what one batch looks like:
solutions:
[[ 3.1129432 -0.69123589 1.93792414 2.23772968 -1.54616001 -0.53930799]
[ 3.38508121 0.19890812 1.93792414 2.23095014 -3.08955597 3.10194128]
[ 2.37079504 -0.88819803 2.97545704 1.41742256 -3.95594055 2.45028256]
[ 2.52860734 -0.94178795 2.97545704 0.84131987 -3.78447118 2.41008358]]
solutions_indices:
[16, 17, 18, 19]
With 20 solutions and batches of 4, the fitness function is called 20/4 = 5 times per population. Over 5 generations that is 5*5 = 25 calls, plus 5 calls for the initial population, giving 5*5 + 5 = 30 calls in total.
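The batch fitness function used below (fitness_func_batch) is not shown in this extract. A minimal sketch matching the batch behaviour described above receives a 2D array of solutions plus their indices and returns one fitness value per solution:

def fitness_func_batch(ga_instance, solutions, solutions_indices):
    global number_of_calls
    number_of_calls = number_of_calls + 1
    batch_fitness = []
    for solution in solutions:
        output = numpy.sum(solution * function_inputs)
        batch_fitness.append(1.0 / (numpy.abs(output - desired_output) + 0.000001))
    return batch_fitness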
import pygad
import numpy
function_inputs = [4,-2,3.5,5,-11,-4.7]
desired_output = 44
number_of_calls = 0
ga_instance = pygad.GA(num_generations=5,
num_parents_mating=10,
sol_per_pop=20,
fitness_func=fitness_func_batch,
fitness_batch_size=4,
num_genes=len(function_inputs),
keep_elitism=0,
keep_parents=0)
ga_instance.run()
print(number_of_calls)
The counter confirms 30 calls, which is 120 - 30 = 90 fewer calls than the single-solution case.
Use Functions and Methods to Build the Fitness and Callbacks
The fitness function (fitness_func) and all the lifecycle callbacks (on_start, on_fitness, on_parents, on_crossover, on_mutation, on_generation, on_stop) can be either plain functions or class methods. The first example assigns plain functions to the pygad.GA parameters (only two of the callback definitions are shown; the others follow the same pattern):
import pygad
import numpy
def on_start(ga_instanse):
print("on_start")
def on_generation(ga_instanse):
print("on_generation\n")
ga_instance = pygad.GA(num_generations=5,
num_parents_mating=4,
sol_per_pop=10,
num_genes=2,
on_start=on_start,
on_fitness=on_fitness,
on_parents=on_parents,
on_crossover=on_crossover,
on_mutation=on_mutation,
on_generation=on_generation,
on_stop=on_stop,
fitness_func=fitness_func)
ga_instance.run()
The second example defines the fitness function and the callbacks as methods of a class named Test. Each method accepts self as its first parameter in addition to the parameters expected by pygad.GA:
import pygad
import numpy
class Test:
def fitness_func(self, ga_instanse, solution, solution_idx):
return numpy.random.rand()
ga_instance = pygad.GA(num_generations=5,
num_parents_mating=4,
sol_per_pop=10,
num_genes=2,
on_start=Test().on_start,
on_fitness=Test().on_fitness,
on_parents=Test().on_parents,
on_crossover=Test().on_crossover,
on_mutation=Test().on_mutation,
on_generation=Test().on_generation,
on_stop=Test().on_stop,
fitness_func=Test().fitness_func)
ga_instance.run()
pygad.utils Module
The pygad.utils module has 4 submodules:
crossover: has the Crossover class.
mutation: has the Mutation class.
parent_selection: has the ParentSelection class.
nsga2: has the NSGA2 class (used for multi-objective optimization).
The pygad.GA class extends all of these classes, so their operators are available directly through pygad.GA instances.
pygad.utils.crossover Submodule
The pygad.utils.crossover submodule has the Crossover class with these crossover methods:
single_point_crossover()
two_points_crossover()
uniform_crossover()
scattered_crossover()
Each method accepts the selected parents and offspring_size (the shape of the offspring to produce).
pygad.utils.mutation Submodule
The pygad.utils.mutation submodule has the Mutation class with these mutation methods:
random_mutation()
swap_mutation()
inversion_mutation()
scramble_mutation()
adaptive_mutation()
Each method accepts the offspring to mutate.
Adaptive Mutation
With standard (non-adaptive) mutation, all solutions are mutated with the same rate regardless of their quality, so a high-quality solution can be disrupted as much as a low-quality one. Adaptive mutation adapts the mutation rate to the quality of each solution:
It calculates the average fitness of the population (f_avg).
For each solution it compares the solution's fitness (f) to the average.
If f < f_avg, the solution is considered low quality, so a larger mutation rate is applied to change it more.
If f > f_avg (or f = f_avg), the solution is considered high quality, so a smaller mutation rate is applied to preserve it.
To use adaptive mutation in PyGAD, set mutation_type="adaptive" in the pygad.GA constructor and assign exactly 2 values to one of mutation_probability, mutation_num_genes, or mutation_percent_genes, as a list, tuple, or numpy.ndarray. The first value is the mutation rate for low-quality solutions and the second (usually smaller) value is the rate for high-quality solutions. Examples:
# mutation_probability
mutation_probability = [0.25, 0.1]
mutation_probability = (0.35, 0.17)
mutation_probability = numpy.array([0.15, 0.05])
# mutation_num_genes
mutation_num_genes = [4, 2]
mutation_num_genes = (3, 1)
mutation_num_genes = numpy.array([7, 2])
# mutation_percent_genes
mutation_percent_genes = [25, 12]
mutation_percent_genes = (15, 8)
mutation_percent_genes = numpy.array([21, 13])
import pygad
import numpy
function_inputs = [4, -2, 3.5, 5, -11, -4.7]
desired_output = 44

def fitness_func(ga_instance, solution, solution_idx):
    output = numpy.sum(solution*function_inputs)
    # The value 0.000001 is used to avoid the Inf value when the denominator numpy.abs(output - desired_output) is 0.0.
    fitness = 1.0 / (numpy.abs(output - desired_output) + 0.000001)
    return fitness

# Creating an instance of the GA class inside the ga module. Some parameters are initialized within the constructor.
ga_instance = pygad.GA(num_generations=200,
fitness_func=fitness_func,
num_parents_mating=10,
sol_per_pop=20,
num_genes=len(function_inputs),
mutation_type="adaptive",
mutation_num_genes=(3, 1))
pygad.utils.parent_selection Submodule
The pygad.utils.parent_selection submodule has the ParentSelection class with these parent selection methods:
steady_state_selection()
roulette_wheel_selection()
stochastic_universal_selection()
rank_selection()
random_selection()
tournament_selection()
nsga2_selection()
tournament_nsga2_selection()
Each method accepts the fitness values of the current population and the number of parents to select (num_parents).
pygad.utils.nsga2 Submodule
The pygad.utils.nsga2 submodule has the NSGA2 class with the helper methods used by the NSGA-II multi-objective algorithm:
non_dominated_sorting()
get_non_dominated_set()
crowding_distance()
sort_solutions_nsga2()
User-Defined Crossover, Mutation, and Parent Selection Operators
Besides the built-in operator names, the crossover_type, mutation_type, and parent_selection_type parameters of the pygad.GA constructor also accept user-defined functions. The next example first shows a regular run with the built-in operators:
import pygad
import numpy
equation_inputs = [4,-2,3.5]
desired_output = 44
ga_instance = pygad.GA(num_generations=10,
sol_per_pop=5,
num_parents_mating=2,
num_genes=len(equation_inputs),
fitness_func=fitness_func)
ga_instance.run()
ga_instance.plot_fitness()
To use a custom crossover operator, define a function that accepts 3 parameters: the selected parents, the offspring_size tuple (number of offspring, number of genes), and the pygad.GA instance. Through the instance it can read attributes such as population, gene_type, and gene_space. It must return the produced offspring as a NumPy array. Here is a custom single-point crossover:
def crossover_func(parents, offspring_size, ga_instance):
    offspring = []
    idx = 0
    while len(offspring) != offspring_size[0]:
        parent1 = parents[idx % parents.shape[0], :].copy()
        parent2 = parents[(idx + 1) % parents.shape[0], :].copy()
        random_split_point = numpy.random.choice(range(offspring_size[1]))
        parent1[random_split_point:] = parent2[random_split_point:]
        offspring.append(parent1)
        idx += 1
    return numpy.array(offspring)
Assign this function to the crossover_type parameter of the pygad.GA constructor:
ga_instance = pygad.GA(num_generations=10,
sol_per_pop=5,
num_parents_mating=2,
num_genes=len(equation_inputs),
fitness_func=fitness_func,
crossover_type=crossover_func)
Similarly, a custom mutation function accepts 2 parameters (the offspring produced by crossover and the pygad.GA instance, which again exposes population, gene_type, gene_space, ...) and returns the mutated offspring:
def mutation_func(offspring, ga_instance):
    ...
    return offspring
Assign it to the mutation_type parameter:
ga_instance = pygad.GA(num_generations=10,
sol_per_pop=5,
num_parents_mating=2,
num_genes=len(equation_inputs),
fitness_func=fitness_func,
crossover_type=crossover_func,
mutation_type=mutation_func)
Note that a custom mutation function is itself responsible for honouring the relevant parameters; PyGAD does not enforce them for user-defined operators:
gene_type
gene_space
mutation_percent_genes, mutation_probability, mutation_num_genes
mutation_by_replacement
random_mutation_min_val, random_mutation_max_val
allow_duplicate_genes
Finally, a custom parent selection function accepts 3 parameters: the fitness values of the population, the number of parents to select, and the pygad.GA instance (with access to population, gene_type, gene_space, ...). It must return 2 outputs: the selected parents as a numpy.ndarray of shape (number of parents, num_genes) and the indices of the selected parents:
def parent_selection_func(fitness, num_parents, ga_instance):
...
return parents, fitness_sorted[:num_parents]
The function must return exactly num_parents parents. Assign it to the parent_selection_type parameter:
ga_instance = pygad.GA(num_generations=10,
sol_per_pop=5,
num_parents_mating=2,
num_genes=len(equation_inputs),
fitness_func=fitness_func,
crossover_type=crossover_func,
mutation_type=mutation_func,
parent_selection_type=parent_selection_func)
import pygad
import numpy
equation_inputs = [4,-2,3.5]
desired_output = 44
def fitness_func(ga_instance, solution, solution_idx):
    output = numpy.sum(solution * equation_inputs)
    fitness = 1.0 / (numpy.abs(output - desired_output) + 0.000001)
    return fitness

def parent_selection_func(fitness, num_parents, ga_instance):
    fitness_sorted = sorted(range(len(fitness)), key=lambda k: fitness[k])
    fitness_sorted.reverse()
    parents = numpy.empty((num_parents, ga_instance.population.shape[1]))
    for parent_num in range(num_parents):
        parents[parent_num, :] = ga_instance.population[fitness_sorted[parent_num], :].copy()
    return parents, numpy.array(fitness_sorted[:num_parents])

def crossover_func(parents, offspring_size, ga_instance):
    offspring = []
    idx = 0
    while len(offspring) != offspring_size[0]:
        parent1 = parents[idx % parents.shape[0], :].copy()
        parent2 = parents[(idx + 1) % parents.shape[0], :].copy()
        random_split_point = numpy.random.choice(range(offspring_size[1]))
        parent1[random_split_point:] = parent2[random_split_point:]
        offspring.append(parent1)
        idx += 1
    return numpy.array(offspring)

def mutation_func(offspring, ga_instance):
    for chromosome_idx in range(offspring.shape[0]):
        random_gene_idx = numpy.random.choice(range(offspring.shape[1]))
        offspring[chromosome_idx, random_gene_idx] += numpy.random.random()
    return offspring
ga_instance = pygad.GA(num_generations=10,
sol_per_pop=5,
num_parents_mating=2,
num_genes=len(equation_inputs),
fitness_func=fitness_func,
crossover_type=crossover_func,
mutation_type=mutation_func,
parent_selection_type=parent_selection_func)
ga_instance.run()
ga_instance.plot_fitness()
import pygad
import numpy
equation_inputs = [4,-2,3.5]
desired_output = 44
class Test:
    def fitness_func(self, ga_instance, solution, solution_idx):
        output = numpy.sum(solution * equation_inputs)
        fitness = 1.0 / (numpy.abs(output - desired_output) + 0.000001)
        return fitness

    def parent_selection_func(self, fitness, num_parents, ga_instance):
        fitness_sorted = sorted(range(len(fitness)), key=lambda k: fitness[k])
        fitness_sorted.reverse()
        parents = numpy.empty((num_parents, ga_instance.population.shape[1]))
        for parent_num in range(num_parents):
            parents[parent_num, :] = ga_instance.population[fitness_sorted[parent_num], :].copy()
        return parents, numpy.array(fitness_sorted[:num_parents])

    def crossover_func(self, parents, offspring_size, ga_instance):
        offspring = []
        idx = 0
        while len(offspring) != offspring_size[0]:
            parent1 = parents[idx % parents.shape[0], :].copy()
            parent2 = parents[(idx + 1) % parents.shape[0], :].copy()
            random_split_point = numpy.random.choice(range(offspring_size[1]))
            parent1[random_split_point:] = parent2[random_split_point:]
            offspring.append(parent1)
            idx += 1
        return numpy.array(offspring)

    def mutation_func(self, offspring, ga_instance):
        for chromosome_idx in range(offspring.shape[0]):
            random_gene_idx = numpy.random.choice(range(offspring.shape[1]))
            offspring[chromosome_idx, random_gene_idx] += numpy.random.random()
        return offspring
ga_instance = pygad.GA(num_generations=10,
sol_per_pop=5,
num_parents_mating=2,
num_genes=len(equation_inputs),
fitness_func=Test().fitness_func,
parent_selection_type=Test().parent_selection_func,
crossover_type=Test().crossover_func,
mutation_type=Test().mutation_func)
ga_instance.run()
ga_instance.plot_fitness()
pygad.visualize Module
The pygad.visualize module (extended by pygad.GA) provides the visualization methods plot_fitness(), plot_genes(), and plot_new_solution_rate(). The example below, which passes save_solutions=True so that all explored solutions are kept in the solutions attribute, is used to demonstrate them:
import pygad
import numpy
ga_instance = pygad.GA(num_generations=10,
sol_per_pop=10,
num_parents_mating=5,
num_genes=len(equation_inputs),
fitness_func=fitness_func,
gene_space=[range(1, 10), range(10, 20), range(15, 30),␣
,→range(20, 40), range(25, 50), range(10, 30), range(20, 50)],
gene_type=int,
save_solutions=True)
ga_instance.run()
Fitness Plot (plot_fitness())
The plot_fitness() method shows the fitness value of the best solution in each generation. It creates, shows, and returns the figure and accepts the following optional parameters:
title: title of the figure.
xlabel: label of the X axis.
ylabel: label of the Y axis.
linewidth: width of the curve; defaults to 3.
font_size: font size of the titles and labels; defaults to 14.
plot_type: type of the plot, "plot" (default), "scatter", or "bar".
color: color of the plot; defaults to "#64f20c".
label: label of the plot (useful for multi-objective runs); defaults to None.
save_dir: directory in which to save the figure; if not specified, the figure is not saved.
With the default plot_type="plot", a continuous line connects the fitness values:
ga_instance.plot_fitness()
# ga_instance.plot_fitness(plot_type="plot")
With plot_type="scatter", the fitness values are drawn as points (linewidth then controls the point size):
ga_instance.plot_fitness(plot_type="scatter")
With plot_type="bar", a bar chart is drawn:
ga_instance.plot_fitness(plot_type="bar")
New Solution Rate (plot_new_solution_rate())
The plot_new_solution_rate() method shows the number of new solutions explored in each generation (solutions that did not appear in any previous generation). It requires save_solutions=True in the pygad.GA constructor. It accepts the same kinds of parameters as plot_fitness(), including plot_type:
title, xlabel, ylabel
linewidth: defaults to 3.
font_size: defaults to 14.
plot_type: "plot" (default), "scatter", or "bar".
color: defaults to "#3870FF".
save_dir: directory in which to save the figure.
With the default plot_type="plot":
ga_instance.plot_new_solution_rate()
# ga_instance.plot_new_solution_rate(plot_type="plot")
The figure shows that the number of new solutions equals sol_per_pop (from the pygad.GA constructor) in the first generation and then drops as previously explored solutions reappear.
With plot_type="scatter":
ga_instance.plot_new_solution_rate(plot_type="scatter")
With plot_type="bar":
ga_instance.plot_new_solution_rate(plot_type="bar")
Genes Plot (plot_genes())
The plot_genes() method shows how the value of each gene changes across generations. It accepts these optional parameters:
title, xlabel, ylabel
linewidth: defaults to 3.
font_size: defaults to 14.
plot_type: "plot" (default), "scatter", or "bar".
graph_type: "plot" (default), "boxplot", or "histogram".
fill_color: fill color of the boxplot and histogram graphs; defaults to "#3870FF" (not used when graph_type="plot").
color: color of the plot; defaults to "#3870FF".
solutions: whether the plot is built from "all" explored solutions or only the "best" solutions.
save_dir: directory in which to save the figure.
Two parameters deserve attention:
graph_type selects the kind of graph (regular plot, boxplot, or histogram); plot_type only matters when graph_type="plot".
solutions selects which solutions are used: solutions="all" raises an error when save_solutions=False in the pygad.GA constructor, and solutions="best" raises an error when save_best_solutions=False.
With graph_type="plot" and plot_type="plot" (the defaults), a regular line plot is created for each gene. All of the following calls are equivalent:
ga_instance.plot_genes()
ga_instance.plot_genes(graph_type="plot")
ga_instance.plot_genes(plot_type="plot")
ga_instance.plot_genes(graph_type="plot",
plot_type="plot")
To plot all explored solutions instead of only the best ones, pass solutions="all":
ga_instance.plot_genes(solutions="all")
ga_instance.plot_genes(graph_type="plot",
solutions="all")
ga_instance.plot_genes(plot_type="plot",
solutions="all")
ga_instance.plot_genes(graph_type="plot",
plot_type="plot",
solutions="all")
With plot_type="scatter", plot_genes() draws the gene values as points:
ga_instance.plot_genes(plot_type="scatter")
ga_instance.plot_genes(graph_type="plot",
plot_type="scatter",
solutions='all')
With plot_type="bar":
ga_instance.plot_genes(plot_type="bar")
ga_instance.plot_genes(graph_type="plot",
plot_type="bar",
solutions='all')
With graph_type="boxplot", a box-and-whisker plot is created for each gene; the plot_type parameter is then ignored. The next calls to plot_genes() use solutions="all":
ga_instance.plot_genes(graph_type="boxplot")
ga_instance.plot_genes(graph_type="boxplot",
solutions='all')
With graph_type="histogram", a histogram is created for each gene. As with graph_type="boxplot", the plot_type parameter is ignored. The next calls to plot_genes() use solutions="all":
ga_instance.plot_genes(graph_type="histogram")
ga_instance.plot_genes(graph_type="histogram",
solutions='all')
All of the previous plots can also be built from only the best solutions by passing solutions="best".
pygad.helper Module
The pygad.helper module has the unique submodule with the Unique class. Its methods are used internally to resolve duplicate gene values:
solve_duplicate_genes_randomly()
solve_duplicate_genes_by_space()
unique_int_gene_from_range()
unique_genes_by_space()
unique_gene_by_space() (called by unique_genes_by_space() for a single gene)
pygad.nn Module
The pygad.nn module builds artificial neural networks for classification and regression (the problem type is selected via the problem_type parameter of pygad.nn.train() and pygad.nn.predict()). It supports two layer types: the input layer (pygad.nn.InputLayer class) and the dense, fully connected layer (pygad.nn.DenseLayer class).
pygad.nn.InputLayer Class
The pygad.nn.InputLayer class creates the input layer of the network.
Its constructor accepts a single parameter, num_neurons, the number of neurons in the input layer:
input_layer = pygad.nn.InputLayer(num_neurons=20)
The number of input neurons is then available through the num_neurons attribute of the pygad.nn.InputLayer instance:
num_input_neurons = input_layer.num_neurons
pygad.nn.DenseLayer Class
The pygad.nn.DenseLayer class creates a dense (fully connected) layer. Its constructor accepts:
num_neurons: number of neurons in the layer.
previous_layer: a reference to the previous layer in the network (stored in the previous_layer attribute).
activation_function: name of the activation function as a string: "sigmoid" (default), "relu", "softmax" (usually for the output layer), or "None" (no activation, used for the output layer of regression problems).
Each dense layer also has an initial_weights attribute holding its initial weights and a trained_weights attribute that starts as a copy of initial_weights.
The next example creates a dense layer whose previous_layer is the input_layer created above:
dense_layer = pygad.nn.DenseLayer(num_neurons=12,
previous_layer=input_layer,
activation_function="relu")
num_dense_neurons = dense_layer.num_neurons
dense_initail_weights = dense_layer.initial_weights
Because dense_layer keeps a reference to its previous layer, the attributes of the input layer are reachable through it:
input_layer = dense_layer.previous_layer
num_input_neurons = input_layer.num_neurons
Another dense layer can be stacked on top of dense_layer by passing it to the previous_layer parameter:
dense_layer2 = pygad.nn.DenseLayer(num_neurons=5,
previous_layer=dense_layer,
activation_function="relu")
dense_layer2 now references dense_layer through its previous_layer attribute:
dense_layer = dense_layer2.previous_layer
dense_layer_neurons = dense_layer.num_neurons
And through dense_layer, the input layer is reachable from dense_layer2 as well:
dense_layer = dense_layer2.previous_layer
input_layer = dense_layer.previous_layer
num_input_neurons = input_layer.num_neurons
In this way, the whole network can be traversed backwards starting from the last layer (dense_layer2 here) by repeatedly following the previous_layer attribute of each pygad.nn.DenseLayer until the input layer (which has no previous_layer) is reached. This is usually done with a while loop:
layer = dense_layer2
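# A sketch of the backward traversal (an assumption: only the input layer lacks
# the previous_layer attribute, so the loop stops there).
while hasattr(layer, "previous_layer"):
    print("Number of neurons =", layer.num_neurons)
    # Move one layer back towards the input layer.
    layer = layer.previous_layer
print("Number of input neurons =", layer.num_neurons)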
Functions in the pygad.nn Module
pygad.nn.layers_weights(last_layer, initial=True): Returns the weights of all layers by traversing the network backwards from last_layer using a while loop over previous_layer. If initial is True (default), each layer's initial_weights are returned; otherwise its trained_weights.
pygad.nn.layers_weights_as_vector(last_layer, initial=True): Like layers_weights(), but flattens the weights of every layer into a single 1D vector. The initial parameter selects initial_weights (True) or trained_weights (False).
pygad.nn.layers_weights_as_matrix(last_layer, vector_weights): The opposite of layers_weights_as_vector(); restores the weights from the 1D vector_weights back into per-layer matrices while traversing previous_layer references.
pygad.nn.layers_activations(last_layer): Returns the activation_function of each layer, collected while traversing the previous_layer references.
pygad.nn.sigmoid(sop), pygad.nn.relu(sop), pygad.nn.softmax(sop): Apply the corresponding activation function to the sum of products (sop).
pygad.nn.train(num_epochs, last_layer, data_inputs, data_outputs, problem_type, learning_rate): Trains the network for num_epochs epochs; problem_type is either "classification" or "regression".
pygad.nn.update_weights(weights, network_error, learning_rate): Updates the weights using the network error and the learning rate.
pygad.nn.update_layers_trained_weights(last_layer, final_weights): Sets the trained_weights attribute of every layer to the corresponding matrix in final_weights, traversing the network backwards from last_layer.
pygad.nn.predict(last_layer, data_inputs, problem_type): Returns the predictions of the trained network for data_inputs; problem_type is either "classification" or "regression".
The pygad.nn module also has 2 array helper functions:
pygad.nn.to_vector(array): converts the given array into a 1D vector.
pygad.nn.to_array(vector, shape): converts the given 1D vector back into an array of the given shape.
The activation functions pygad.nn.sigmoid(), pygad.nn.relu(), and pygad.nn.softmax() were listed above.
Steps to Use pygad.nn
The steps to build and train a network with the pygad.nn module are:
Read and prepare the training data.
Build the network architecture (an input layer followed by one or more dense layers).
Train the network using pygad.nn.train().
Make predictions using pygad.nn.predict().
The classification example below starts by extracting color-histogram features from the fruit images:
import numpy
import skimage.io, skimage.color, skimage.feature
import os
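# NOTE: the class folder names and array sizes below are illustrative placeholders
# (not from the original document); adjust them to the actual dataset.
fruits = ["apple", "raspberry", "mango", "lemon"]          # class folder names (placeholder)
num_samples = 1962                                          # total number of images (placeholder)
dataset_features = numpy.zeros(shape=(num_samples, 360))    # one 360-bin hue histogram per image
outputs = numpy.zeros(shape=(num_samples))                   # numeric class label per image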
idx = 0
class_label = 0
for fruit_dir in fruits:
curr_dir = os.path.join(os.path.sep, fruit_dir)
all_imgs = os.listdir(os.getcwd()+curr_dir)
for img_file in all_imgs:
if img_file.endswith(".jpg"): # Ensures reading only JPG files.
fruit_data = skimage.io.imread(fname=os.path.sep.join([os.getcwd(), curr_dir, img_file]), as_gray=False)
fruit_data_hsv = skimage.color.rgb2hsv(rgb=fruit_data)
hist = numpy.histogram(a=fruit_data_hsv[:, :, 0], bins=360)
dataset_features[idx, :] = hist[0]
outputs[idx] = class_label
idx = idx + 1
class_label = class_label + 1
After preparing the features (data_inputs) and outputs, build the network, starting with the input layer using the pygad.nn.InputLayer class:
import pygad.nn
num_inputs = data_inputs.shape[1]
input_layer = pygad.nn.InputLayer(num_inputs)
After stacking the hidden and output dense layers (not shown in this extract), train the network by calling pygad.nn.train() and passing the last layer of the architecture:
pygad.nn.train(num_epochs=10,
last_layer=output_layer,
data_inputs=data_inputs,
data_outputs=data_outputs,
learning_rate=0.01)
Finally, use pygad.nn.predict() to get the predictions of the trained network.
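A minimal sketch of that call (assuming output_layer is the last layer of the architecture built above and data_inputs holds the samples to classify):

predictions = pygad.nn.predict(last_layer=output_layer,
                               data_inputs=data_inputs)
print("Predictions:", predictions)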
Complete examples follow; to train the networks using the genetic algorithm instead, see the pygad.gann module in the next section. The first example builds a small classification network with a single hidden layer of 2 neurons:
import numpy
import pygad.nn
HL1_neurons = 2
import numpy
import pygad.nn
# Reading the data features. Check the 'extract_features.py' script for extracting the features & preparing the outputs of the dataset.
# Optional step for filtering the features using the standard deviation.
features_STDs = numpy.std(a=data_inputs, axis=0)
data_inputs = data_inputs[:, features_STDs > 50]
# Reading the data outputs. Check the 'extract_features.py' script for extracting the features & preparing the outputs of the dataset.
HL1_neurons = 150
HL2_neurons = 60
For regression problems, pass problem_type="regression" to both pygad.nn.train() and pygad.nn.predict():
pygad.nn.train(...,
               problem_type="regression")
predictions = pygad.nn.predict(...,
                               problem_type="regression")
and use the "None" activation function for the output layer. (The same applies when training with the pygad.gann module.) The next regression example predicts fish weight:
import numpy
import pygad.nn
HL1_neurons = 2
It uses the Fish Market dataset from Kaggle (https://www.kaggle.com/aungpyaeap/fish-market), which can be downloaded from https://www.kaggle.com/aungpyaeap/fish-market/download.
Read the CSV file using the pandas read_csv() function:
data = numpy.array(pandas.read_csv("Fish.csv"))
As described above, the output layer uses the "None" activation function and problem_type="regression" is passed to both pygad.nn.train() and pygad.nn.predict(). The complete example follows:
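A minimal sketch of how the inputs and outputs could be sliced from the loaded array (the column layout is an assumption about the Fish Market CSV: column 0 holds the species name, column 1 the weight to predict, and the remaining columns the measurements used as features):

data_inputs = numpy.asarray(data[:, 2:], dtype=numpy.float32)   # numeric measurements as network inputs
data_outputs = numpy.asarray(data[:, 1], dtype=numpy.float32)   # fish weight, the regression target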
data = numpy.array(pandas.read_csv("Fish.csv"))
HL1_neurons = 2
pygad.gann Module
The pygad.gann module trains neural networks built with the pygad.nn module using the genetic algorithm of the pygad module.
pygad.gann.GANN Class
The pygad.gann module has the pygad.gann.GANN class, whose __init__() constructor creates a population of neural networks that all share the same architecture. The constructor of the pygad.gann.GANN class accepts the following parameters:
num_solutions: Number of solutions (networks) in the population.
num_neurons_input: Number of neurons in the input layer.
num_neurons_output: Number of neurons in the output layer.
num_neurons_hidden_layers=[]: A list holding the number of neurons (an int) in each hidden layer; defaults to [] (no hidden layers). For example, num_neurons_hidden_layers=[10] creates one hidden layer of 10 neurons, and num_neurons_hidden_layers=[10, 5] creates two hidden layers of 10 and 5 neurons.
output_activation="softmax": Activation function of the output layer; defaults to "softmax".
hidden_activations="relu": Activation function(s) of the hidden layer(s); defaults to "relu". It can be a single string (applied to every hidden layer) or a list with one activation per hidden layer, in which case its length must equal the length of num_neurons_hidden_layers.
The pygad.gann.GANN constructor validates these parameters using the pygad.gann.validate_network_parameters() function. The pygad.gann.GANN class then has the following attributes and methods:
parameters_validated: True when the parameters were validated successfully, otherwise False.
population_networks: A list holding the networks of the population (a reference to the last layer of each network).
create_population(): Creates the population of networks by calling the pygad.gann.create_network() function once per solution (all networks share the architecture defined by the pygad.gann.GANN parameters) and stores the result in the population_networks attribute.
update_population_trained_weights(population_trained_weights): Updates the trained_weights attribute of the layers of every network in the population using the weight matrices passed in population_trained_weights.
Functions in the pygad.gann Module
Besides the class, the pygad.gann module has these functions:
pygad.gann.validate_network_parameters(): Validates the parameters passed to the pygad.gann.GANN constructor (for example, num_solutions must not be None, and a list passed to hidden_activations must have the same length as num_neurons_hidden_layers) and returns them after validation.
pygad.gann.create_network(): Creates a single network (an input layer followed by the hidden and output dense layers) from the validated parameters and returns a reference to its last layer. It is called by create_population() once per solution in the pygad.gann.GANN population; if parameters_validated is False, validate_network_parameters() is called first.
pygad.gann.population_as_vectors(population_networks): Converts the population of networks into a 2D matrix in which each row is the 1D weight vector of one network. This is the representation needed as the initial population of the genetic algorithm.
pygad.gann.population_as_matrices(population_networks, population_vectors): The opposite of population_as_vectors(); restores the per-layer weight matrices of every network from the rows of population_vectors.
Steps to Train a Network Using pygad.gann
1. Prepare the training data.
2. Create an instance of the pygad.gann.GANN class.
3. Fetch the population weights as vectors.
4. Prepare the fitness function and the generation callback.
5. Create an instance of the pygad.GA class, run it, and inspect the results.
Preparing the training data: if the data inputs array has, for example, the shape (200, 50) (200 samples with 50 features each), then the number of input neurons (num_inputs) is 50. The XOR example below uses these outputs:
data_outputs = numpy.array([0,
1,
1,
0])
num_inputs = data_inputs.shape[1]
The outputs hold one integer class label per sample. For N classes, the labels must start at 0 and end at N-1; the number of classes is stored in num_classes and determines the number of output neurons.
Create an instance of the pygad.gann.GANN class. The num_solutions parameter sets the number of networks (solutions) in the population; all networks share the same architecture (here one hidden layer of 2 neurons and 2 output neurons):
import pygad.gann
import pygad.nn
num_solutions = 6
GANN_instance = pygad.gann.GANN(num_solutions=num_solutions,
num_neurons_input=num_inputs,
num_neurons_hidden_layers=[2],
num_neurons_output=2,
hidden_activations=["relu"],
output_activation="softmax")
Because the genetic algorithm evolves 1D chromosomes, fetch the population weights as vectors using the pygad.gann.population_as_vectors() function:
population_vectors = pygad.gann.population_as_vectors(population_networks=GANN_
,→instance.population_networks)
Next prepare the fitness function. It uses pygad.nn.predict(), which relies on each network's trained_weights, and scores a solution by its classification accuracy:
def fitness_func(ga_instance, solution, sol_idx):
    global GANN_instance, data_inputs, data_outputs

    predictions = pygad.nn.predict(last_layer=GANN_instance.population_networks[sol_idx],
                                   data_inputs=data_inputs)
correct_predictions = numpy.where(predictions == data_outputs)[0].size
solution_fitness = (correct_predictions/data_outputs.size)*100
return solution_fitness
Because pygad.nn.predict() uses the networks' trained_weights, these weights must be updated after every generation. This is done in the callback assigned to the on_generation parameter of pygad.GA: it converts the evolved 1D vectors back into weight matrices using pygad.gann.population_as_matrices() and then calls the update_population_trained_weights() method of the pygad.gann.GANN instance to copy the matrices into the networks' trained_weights attributes:
def callback_generation(ga_instance):
global GANN_instance
    population_matrices = pygad.gann.population_as_matrices(population_networks=GANN_instance.population_networks, population_vectors=ga_instance.population)
    GANN_instance.update_population_trained_weights(population_trained_weights=population_matrices)
print(f"Generation = {ga_instance.generations_completed}")
print(f"Fitness = {ga_instance.best_solution()[1]}")
Next prepare the remaining parameters and create the pygad.GA instance, passing the population vectors as its initial_population:
initial_population = population_vectors.copy()
num_parents_mating = 4
num_generations = 500
mutation_percent_genes = 5
parent_selection_type = "sss"
crossover_type = "single_point"
mutation_type = "random"
keep_parents = 1
init_range_low = -2
init_range_high = 5
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
initial_population=initial_population,
fitness_func=fitness_func,
mutation_percent_genes=mutation_percent_genes,
init_range_low=init_range_low,
init_range_high=init_range_high,
parent_selection_type=parent_selection_type,
crossover_type=crossover_type,
mutation_type=mutation_type,
keep_parents=keep_parents,
on_generation=callback_generation)
Run the Genetic Algorithm
Call the run() method of the pygad.GA instance to evolve the networks' weights for num_generations generations:
ga_instance.run()
After run() completes, plot_fitness() shows how the fitness changes by generation:
ga_instance.plot_fitness()
Information about the best solution is returned by the best_solution() method of pygad.GA, and the best_solution_generation attribute holds the generation at which the best fitness was first reached:
if ga_instance.best_solution_generation != -1:
    print(f"Best fitness value reached after {ga_instance.best_solution_generation} generations.")
Use pygad.nn.predict() with the best network to make predictions on the training data:
predictions = pygad.nn.predict(last_layer=GANN_instance.population_networks[solution_idx], data_inputs=data_inputs)
predictions = pygad.nn.predict(last_layer=GANN_instance.population_networks[sol_idx],
                               data_inputs=data_inputs)
correct_predictions = numpy.where(predictions == data_outputs)[0].size
solution_fitness = (correct_predictions/data_outputs.size)*100
return solution_fitness
def callback_generation(ga_instance):
global GANN_instance, last_fitness
    population_matrices = pygad.gann.population_as_matrices(population_networks=GANN_instance.population_networks,
                                                            population_vectors=ga_instance.population)
    GANN_instance.update_population_trained_weights(population_trained_weights=population_matrices)
print(f"Generation = {ga_instance.generations_completed}")
print(f"Fitness = {ga_instance.best_solution()[1]}")
print(f"Change = {ga_instance.best_solution()[1] - last_fitness}")
last_fitness = ga_instance.best_solution()[1].copy()
# The length of the input vector for each sample (i.e. number of neurons in the input layer).
num_inputs = data_inputs.shape[1]
# The number of neurons in the output layer (i.e. number of classes).
num_classes = 2
# Create the initial population of the genetic algorithm as 1D vectors of the networks' weights.
# population_networks does not hold the numerical weights; it holds a reference to the last layer of each network (i.e. solution) in the population.
population_vectors = pygad.gann.population_as_vectors(population_networks=GANN_instance.population_networks)
# 2) Assign valid integer values to the sol_per_pop and num_genes parameters. If the initial_population parameter exists, then the sol_per_pop and num_genes parameters are useless.
initial_population = population_vectors.copy()
init_range_low = -2
init_range_high = 5
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
initial_population=initial_population,
fitness_func=fitness_func,
mutation_percent_genes=mutation_percent_genes,
init_range_low=init_range_low,
init_range_high=init_range_high,
parent_selection_type=parent_selection_type,
crossover_type=crossover_type,
mutation_type=mutation_type,
keep_parents=keep_parents,
suppress_warnings=True,
on_generation=callback_generation)
ga_instance.run()
# After the generations complete, some plots are showed that summarize how the outputs/fitness values evolve over generations.
ga_instance.plot_fitness()
if ga_instance.best_solution_generation != -1:
    print(f"Best fitness value reached after {ga_instance.best_solution_generation} generations.")
predictions = pygad.nn.predict(last_layer=GANN_instance.population_networks[solution_idx],
                               data_inputs=data_inputs)
print(f"Predictions of the trained network : {predictions}")
The same steps work for classification problems with more than 2 classes; only the data and the num_neurons_output parameter of the pygad.gann.GANN constructor change. The next complete example uses pygad.nn and pygad.gann for a 4-class problem:
import numpy
import pygad
import pygad.nn
import pygad.gann
predictions = pygad.nn.predict(last_layer=GANN_instance.population_networks[sol_
,→ idx],
data_inputs=data_inputs)
correct_predictions = numpy.where(predictions == data_outputs)[0].size
solution_fitness = (correct_predictions/data_outputs.size)*100
return solution_fitness
def callback_generation(ga_instance):
global GANN_instance, last_fitness
population_matrices = pygad.gann.population_as_matrices(population_networks=GANN_
,→instance.population_networks,
population_vectors=ga_
,→instance.population)
GANN_instance.update_population_trained_weights(population_trained_
,→ weights=population_matrices)
print(f"Generation = {ga_instance.generations_completed}")
print(f"Fitness = {ga_instance.best_solution()[1]}")
print(f"Change = {ga_instance.best_solution()[1] - last_fitness}")
last_fitness = ga_instance.best_solution()[1].copy()
# Optional step of filtering the input data using the standard deviation.
features_STDs = numpy.std(a=data_inputs, axis=0)
data_inputs = data_inputs[:, features_STDs>50]
# The length of the input vector for each sample (i.e. number of neurons in the input␣
,→layer).
num_inputs = data_inputs.shape[1]
# The number of neurons in the output layer (i.e. number of classes).
num_classes = 4
# population does not hold the numerical weights of the network instead it holds a␣
,→list of references to each last layer of each network (i.e. solution) in the␣
population_vectors = pygad.gann.population_as_vectors(population_networks=GANN_
,→instance.population_networks)
,→population.
# 2) Assign valid integer values to the sol_per_pop and num_genes parameters. If the␣
,→initial_population parameter exists, then the sol_per_pop and num_genes parameters␣
,→are useless.
initial_population = population_vectors.copy()
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
initial_population=initial_population,
fitness_func=fitness_func,
mutation_percent_genes=mutation_percent_genes,
parent_selection_type=parent_selection_type,
crossover_type=crossover_type,
mutation_type=mutation_type,
keep_parents=keep_parents,
on_generation=callback_generation)
ga_instance.run()
# After the generations complete, some plots are showed that summarize how the␣
,→outputs/fitness values evolve over generations.
ga_instance.plot_fitness()
# Returning the details of the best solution.
solution, solution_fitness, solution_idx = ga_instance.best_solution()
print(f"Parameters of the best solution : {solution}")
print(f"Fitness value of the best solution = {solution_fitness}")
print(f"Index of the best solution : {solution_idx}")
if ga_instance.best_solution_generation != -1:
    print(f"Best fitness value reached after {ga_instance.best_solution_generation} generations.")
predictions = pygad.nn.predict(last_layer=GANN_instance.population_networks[solution_idx],
                               data_inputs=data_inputs)
print(f"Predictions of the trained network : {predictions}")
Regression with pygad.gann
For regression problems, two changes are needed. First, use the "None" activation function for the output layer when creating the pygad.gann.GANN instance:
GANN_instance = pygad.gann.GANN(...,
                                output_activation="None")
Second, pass problem_type="regression" to pygad.nn.predict() wherever predictions are made, for example inside the fitness function:
predictions = pygad.nn.predict(...,
                               problem_type="regression")
The fitness is then based on an error measure instead of classification accuracy, for example the inverse of the mean absolute error:
solution_fitness = 1.0/numpy.mean(numpy.abs(predictions - data_outputs))
return solution_fitness
The complete regression example follows:
import numpy
import pygad
import pygad.nn
import pygad.gann
def fitness_func(ga_instance, solution, sol_idx):
    global GANN_instance, data_inputs, data_outputs

    predictions = pygad.nn.predict(last_layer=GANN_instance.population_networks[sol_idx],
                                   data_inputs=data_inputs,
                                   problem_type="regression")
solution_fitness = 1.0/numpy.mean(numpy.abs(predictions - data_outputs))
return solution_fitness
def callback_generation(ga_instance):
    global GANN_instance, last_fitness

    population_matrices = pygad.gann.population_as_matrices(population_networks=GANN_instance.population_networks,
                                                            population_vectors=ga_instance.population)
    GANN_instance.update_population_trained_weights(population_trained_weights=population_matrices)

    print(f"Generation = {ga_instance.generations_completed}")
    print(f"Fitness = {ga_instance.best_solution(pop_fitness=ga_instance.last_generation_fitness)[1]}")
    print(f"Change = {ga_instance.best_solution(pop_fitness=ga_instance.last_generation_fitness)[1] - last_fitness}")

    last_fitness = ga_instance.best_solution(pop_fitness=ga_instance.last_generation_fitness)[1].copy()
# The length of the input vector for each sample (i.e. number of neurons in the input layer).
num_inputs = data_inputs.shape[1]

# The population does not hold the numerical weights of the networks. Instead, it holds
# a list of references to the last layer of each network (i.e. solution) in the population.
population_vectors = pygad.gann.population_as_vectors(population_networks=GANN_instance.population_networks)

# 2) Assign valid integer values to the sol_per_pop and num_genes parameters. If the
#    initial_population parameter exists, then the sol_per_pop and num_genes parameters are useless.
initial_population = population_vectors.copy()
init_range_low = -1
init_range_high = 1
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
initial_population=initial_population,
fitness_func=fitness_func,
mutation_percent_genes=mutation_percent_genes,
init_range_low=init_range_low,
init_range_high=init_range_high,
parent_selection_type=parent_selection_type,
crossover_type=crossover_type,
mutation_type=mutation_type,
keep_parents=keep_parents,
on_generation=callback_generation)
ga_instance.run()
# After the generations complete, a plot is shown that summarizes how the
# outputs/fitness values evolve over the generations.
ga_instance.plot_fitness()
if ga_instance.best_solution_generation != -1:
    print(f"Best fitness value reached after {ga_instance.best_solution_generation} generations.")

# Predicting the outputs of the data using the best solution.
predictions = pygad.nn.predict(last_layer=GANN_instance.population_networks[solution_idx],
                               data_inputs=data_inputs,
                               problem_type="regression")
print(f"Predictions of the trained network : {predictions}")
The next example trains a regression network on a dataset read from the Fish.csv file using pandas:

data = numpy.array(pandas.read_csv("Fish.csv"))

As in the previous example, the output layer activation is set to the string "None" and the problem_type parameter of the pygad.nn.train() and pygad.nn.predict() functions is set to "regression". The inputs and outputs are split from the loaded array as shown in the sketch below.
import numpy
import pygad
import pygad.nn
import pygad.gann
import pandas
predictions = pygad.nn.predict(last_layer=GANN_instance.population_networks[sol_idx],
                               data_inputs=data_inputs,
                               problem_type="regression")
solution_fitness = 1.0/numpy.mean(numpy.abs(predictions - data_outputs))
return solution_fitness
def callback_generation(ga_instance):
    global GANN_instance, last_fitness

    population_matrices = pygad.gann.population_as_matrices(population_networks=GANN_instance.population_networks,
                                                            population_vectors=ga_instance.population)
    GANN_instance.update_population_trained_weights(population_trained_weights=population_matrices)

    print(f"Generation = {ga_instance.generations_completed}")
    print(f"Fitness = {ga_instance.best_solution(pop_fitness=ga_instance.last_generation_fitness)[1]}")
    print(f"Change = {ga_instance.best_solution(pop_fitness=ga_instance.last_generation_fitness)[1] - last_fitness}")

    last_fitness = ga_instance.best_solution(pop_fitness=ga_instance.last_generation_fitness)[1].copy()
data = numpy.array(pandas.read_csv("../data/Fish.csv"))
# The length of the input vector for each sample (i.e. number of neurons in the input layer).
num_inputs = data_inputs.shape[1]

# The population does not hold the numerical weights of the networks. Instead, it holds
# a list of references to the last layer of each network (i.e. solution) in the population.
population_vectors = pygad.gann.population_as_vectors(population_networks=GANN_instance.population_networks)

# 2) Assign valid integer values to the sol_per_pop and num_genes parameters. If the
#    initial_population parameter exists, then the sol_per_pop and num_genes parameters are useless.
initial_population = population_vectors.copy()
mutation_type = "random" # Type of the mutation operator.
init_range_low = -1
init_range_high = 1
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
initial_population=initial_population,
fitness_func=fitness_func,
mutation_percent_genes=mutation_percent_genes,
init_range_low=init_range_low,
init_range_high=init_range_high,
parent_selection_type=parent_selection_type,
crossover_type=crossover_type,
mutation_type=mutation_type,
keep_parents=keep_parents,
on_generation=callback_generation)
ga_instance.run()
# After the generations complete, a plot is shown that summarizes how the
# outputs/fitness values evolve over the generations.
ga_instance.plot_fitness()
if ga_instance.best_solution_generation != -1:
    print(f"Best fitness value reached after {ga_instance.best_solution_generation} generations.")

# Predicting the outputs of the data using the best solution.
predictions = pygad.nn.predict(last_layer=GANN_instance.population_networks[solution_idx],
                               data_inputs=data_inputs,
                               problem_type="regression")
print(f"Predictions of the trained network : {predictions}")
The pygad.cnn module builds convolutional neural networks out of the following layer classes:

pygad.cnn.Input2D
pygad.cnn.Conv2D
pygad.cnn.MaxPooling2D
pygad.cnn.AveragePooling2D
pygad.cnn.Flatten
pygad.cnn.ReLU
pygad.cnn.Sigmoid
pygad.cnn.Dense (fully connected layer)

Except for the input layer, every layer shares these attributes:

previous_layer: a reference to the layer that precedes it in the architecture.
layer_input_size
layer_output_size
layer_output: the output of the layer, initialized to None until a sample is propagated through the network.
pygad.cnn.Input2D

The pygad.cnn.Input2D class represents the input layer. Its constructor accepts a single parameter, input_shape, which is the shape of the network input (for example, the shape of an image). An Input2D instance has the input_shape and layer_output_size attributes; for an input of shape (50, 50, 3), both hold that shape:

input_layer = pygad.cnn.Input2D(input_shape=(50, 50, 3))
input_shape = input_layer.input_shape
layer_output_size = input_layer.layer_output_size
pygad.cnn.Conv2D

The pygad.cnn.Conv2D class builds a convolution layer. Its constructor accepts the following parameters:

num_filters: the number of convolution filters.
kernel_size: the size of each (square) filter.
previous_layer: a reference to the preceding layer; as in the pygad.nn module, this is a mandatory parameter.
activation_function=None: a string naming the activation applied after the convolution; None (the default) means no activation, and "relu" and "sigmoid" are supported.

Besides the attributes shared by all layers, a Conv2D instance also has:

filter_bank_size: the shape of the filter bank.
initial_weights: the initial weights of the filters.
trained_weights: the trained weights, initialized to the same values as initial_weights.
layer_input_size
layer_output_size
layer_output

The next example creates a convolution layer whose previous_layer is the input_layer created above:

conv_layer = pygad.cnn.Conv2D(num_filters=2,
                              kernel_size=3,
                              previous_layer=input_layer,
                              activation_function=None)

Because every layer keeps a reference to the layer before it, attributes of earlier layers can be reached through the previous_layer chain:

filter_bank_size = conv_layer.filter_bank_size
conv_initial_weights = conv_layer.initial_weights

input_layer = conv_layer.previous_layer
input_shape = input_layer.input_shape
A second convolution layer can use the first one as its previous_layer and apply the ReLU activation function:

conv_layer2 = pygad.cnn.Conv2D(num_filters=2,
                               kernel_size=3,
                               previous_layer=conv_layer,
                               activation_function="relu")

Because conv_layer2 refers to conv_layer through its previous_layer attribute, the earlier layers stay reachable:

conv_layer = conv_layer2.previous_layer
filter_bank_size = conv_layer.filter_bank_size

conv_layer = conv_layer2.previous_layer
input_layer = conv_layer.previous_layer
input_shape = input_layer.input_shape
pygad.cnn.MaxPooling2D

The pygad.cnn.MaxPooling2D class builds a max pooling layer. Its constructor accepts:

pool_size: the size of the pooling window.
previous_layer: a reference to the preceding layer.
stride=2: the stride of the pooling window.

In addition to the shared attributes, it has layer_input_size, layer_output_size, and layer_output.

pygad.cnn.AveragePooling2D

The pygad.cnn.AveragePooling2D class is identical to pygad.cnn.MaxPooling2D except that it applies average pooling instead of max pooling.

pygad.cnn.Flatten

The pygad.cnn.Flatten class flattens the output of its previous_layer into a vector. Its constructor accepts only the previous_layer parameter, and it exposes the previous_layer, layer_input_size, layer_output_size, and layer_output attributes.
pygad.cnn.ReLU

The pygad.cnn.ReLU class applies the rectified linear unit activation to the output of its previous_layer. Its constructor accepts only the previous_layer parameter, and it exposes the previous_layer, layer_input_size, layer_output_size, and layer_output attributes.

pygad.cnn.Sigmoid

The pygad.cnn.Sigmoid class is identical to pygad.cnn.ReLU except that it applies the sigmoid function.
pygad.cnn.Dense

The pygad.cnn.Dense class builds a dense (fully connected) layer. Its constructor accepts:

num_neurons: the number of neurons in the layer.
previous_layer: a reference to the preceding layer.
activation_function="sigmoid": a string naming the activation function; "sigmoid" is the default, and "relu" and "softmax" are also supported.

In addition, a Dense instance has the initial_weights, trained_weights (initialized to initial_weights), layer_input_size, layer_output_size, and layer_output attributes. A short construction sketch follows this description.
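As a quick illustration of the parameters just listed, the following sketch builds a tiny layer chain ending in a 4-class softmax output layer. The layer sizes are arbitrary assumptions; the structure mirrors the full examples later in this section.

import pygad.cnn

# Minimal chain: input -> convolution -> average pooling -> flatten -> dense output.
input_layer = pygad.cnn.Input2D(input_shape=(32, 32, 1))
conv_layer = pygad.cnn.Conv2D(num_filters=1,
                              kernel_size=3,
                              previous_layer=input_layer,
                              activation_function="relu")
pooling_layer = pygad.cnn.AveragePooling2D(pool_size=2,
                                           previous_layer=conv_layer,
                                           stride=2)
flatten_layer = pygad.cnn.Flatten(previous_layer=pooling_layer)
dense_layer = pygad.cnn.Dense(num_neurons=4,
                              previous_layer=flatten_layer,
                              activation_function="softmax")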
pygad.cnn.Model

The pygad.cnn.Model class ties the layers together into a trainable model. Its constructor accepts:

last_layer: a reference to the last layer in the architecture (e.g. the output dense layer).
epochs=10: the number of training epochs.
learning_rate=0.01: the learning rate.

The full architecture is stored in the network_layers attribute, which the get_layers() method fills by following the previous_layer references starting from the last layer. The main methods of pygad.cnn.Model are:

get_layers(): returns the list of layers in the network.
train(): trains the model; it accepts the train_inputs and train_outputs parameters.
feed_sample(): feeds a single sample through the network.
update_weights(): updates the network weights.
predict(): predicts the outputs of the samples passed in the data_inputs parameter.
summary(): prints a summary of the model architecture.

The supported activation functions are implemented by the pygad.cnn.sigmoid(), pygad.cnn.relu(), and softmax via pygad.cnn.softmax() functions.
The next example uses the pygad.cnn module end to end: it builds the layers, creates a model, trains it, and makes predictions. The architecture starts with a pygad.cnn.Input2D layer whose shape is taken from the training data:
import pygad.cnn
sample_shape = train_inputs.shape[1:]
input_layer = pygad.cnn.Input2D(input_shape=sample_shape)
conv_layer1 = pygad.cnn.Conv2D(num_filters=2,
kernel_size=3,
previous_layer=input_layer,
activation_function=None)
relu_layer1 = pygad.cnn.Sigmoid(previous_layer=conv_layer1)
average_pooling_layer = pygad.cnn.AveragePooling2D(pool_size=2,
previous_layer=relu_layer1,
stride=2)
conv_layer2 = pygad.cnn.Conv2D(num_filters=3,
kernel_size=3,
previous_layer=average_pooling_layer,
activation_function=None)
relu_layer2 = pygad.cnn.ReLU(previous_layer=conv_layer2)
max_pooling_layer = pygad.cnn.MaxPooling2D(pool_size=2,
previous_layer=relu_layer2,
stride=2)
conv_layer3 = pygad.cnn.Conv2D(num_filters=1,
kernel_size=3,
previous_layer=max_pooling_layer,
activation_function=None)
relu_layer3 = pygad.cnn.ReLU(previous_layer=conv_layer3)
pooling_layer = pygad.cnn.AveragePooling2D(pool_size=2,
previous_layer=relu_layer3,
stride=2)
flatten_layer = pygad.cnn.Flatten(previous_layer=pooling_layer)
dense_layer1 = pygad.cnn.Dense(num_neurons=100,
previous_layer=flatten_layer,
activation_function="relu")
dense_layer2 = pygad.cnn.Dense(num_neurons=4,
previous_layer=dense_layer1,
activation_function="softmax")
With the layers in place, a pygad.cnn.Model instance ties them together:
model = pygad.cnn.Model(last_layer=dense_layer2,
epochs=5,
learning_rate=0.01)
The summary() method prints a summary of the pygad.cnn.Model architecture:
model.summary()
----------Network Architecture----------
<class 'pygad.cnn.Conv2D'>
<class 'pygad.cnn.Sigmoid'>
<class 'pygad.cnn.AveragePooling2D'>
<class 'pygad.cnn.Conv2D'>
<class 'pygad.cnn.ReLU'>
<class 'pygad.cnn.MaxPooling2D'>
<class 'pygad.cnn.Conv2D'>
<class 'pygad.cnn.ReLU'>
<class 'pygad.cnn.AveragePooling2D'>
<class 'pygad.cnn.Flatten'>
<class 'pygad.cnn.Dense'>
<class 'pygad.cnn.Dense'>
----------------------------------------
The model is trained by calling its train() method:
model.train(train_inputs=train_inputs,
train_outputs=train_outputs)
After training, the predict() method returns the model's predictions, from which the classification accuracy can be computed:
predictions = model.predict(data_inputs=train_inputs)
num_wrong = numpy.where(predictions != train_outputs)[0]
num_correct = train_outputs.size - num_wrong.size
accuracy = 100 * (num_correct/train_outputs.size)
print(f"Number of correct classifications : {num_correct}.")
print(f"Number of wrong classifications : {num_wrong.size}.")
print(f"Classification accuracy : {accuracy}.")
The complete code of the pygad.cnn example is listed below. The pygad.gacnn module, discussed next, trains such networks using the genetic algorithm.
import numpy
import pygad.cnn
"""
Convolutional neural network implementation using NumPy
A tutorial that helps to get started (Building Convolutional Neural Network using␣
,→NumPy from Scratch) available in these links:
https://www.linkedin.com/pulse/building-convolutional-neural-network-using-numpy-
,→from-ahmed-gad
https://towardsdatascience.com/building-convolutional-neural-network-using-numpy-
,→from-scratch-b30aac50e50a
https://www.kdnuggets.com/2018/04/building-convolutional-neural-network-numpy-
,→scratch.html
train_inputs = numpy.load("dataset_inputs.npy")
train_outputs = numpy.load("dataset_outputs.npy")
sample_shape = train_inputs.shape[1:]
num_classes = 4
input_layer = pygad.cnn.Input2D(input_shape=sample_shape)
conv_layer1 = pygad.cnn.Conv2D(num_filters=2,
kernel_size=3,
previous_layer=input_layer,
activation_function=None)
relu_layer1 = pygad.cnn.Sigmoid(previous_layer=conv_layer1)
average_pooling_layer = pygad.cnn.AveragePooling2D(pool_size=2,
previous_layer=relu_layer1,
stride=2)
conv_layer2 = pygad.cnn.Conv2D(num_filters=3,
kernel_size=3,
previous_layer=average_pooling_layer,
activation_function=None)
relu_layer2 = pygad.cnn.ReLU(previous_layer=conv_layer2)
max_pooling_layer = pygad.cnn.MaxPooling2D(pool_size=2,
previous_layer=relu_layer2,
stride=2)
conv_layer3 = pygad.cnn.Conv2D(num_filters=1,
kernel_size=3,
previous_layer=max_pooling_layer,
activation_function=None)
relu_layer3 = pygad.cnn.ReLU(previous_layer=conv_layer3)
pooling_layer = pygad.cnn.AveragePooling2D(pool_size=2,
previous_layer=relu_layer3,
stride=2)
flatten_layer = pygad.cnn.Flatten(previous_layer=pooling_layer)
dense_layer1 = pygad.cnn.Dense(num_neurons=100,
previous_layer=flatten_layer,
activation_function="relu")
dense_layer2 = pygad.cnn.Dense(num_neurons=num_classes,
previous_layer=dense_layer1,
activation_function="softmax")
model = pygad.cnn.Model(last_layer=dense_layer2,
epochs=1,
learning_rate=0.01)
model.summary()
model.train(train_inputs=train_inputs,
train_outputs=train_outputs)
predictions = model.predict(data_inputs=train_inputs)
print(predictions)
The pygad.gacnn module trains convolutional neural networks built with pygad.cnn using the genetic algorithm provided by pygad.
pygad.gacnn.GACNN

The pygad.gacnn module has a class named GACNN. Its constructor (__init__()) accepts the following parameters:

model: the pygad.cnn.Model instance whose architecture is replicated across the population.
num_solutions: the number of solutions (networks) in the population.

Creating a pygad.gacnn.GACNN instance builds the population, which is stored in the population_networks attribute as a list of pygad.cnn.Model references.

The pygad.gacnn.GACNN class exposes the following methods:

create_population(): creates the population of networks (all sharing the architecture of the model passed to the constructor) and saves it in the population_networks attribute.

update_population_trained_weights(): updates the trained_weights attribute of the layers of each network (built by pygad.cnn) according to the weights passed in the population_trained_weights parameter, which holds the weights of all networks in matrix form.
Besides the GACNN class, the pygad.gacnn module offers helper functions for converting the population between the matrix form used by the networks and the vector form used by the genetic algorithm.

pygad.gacnn.population_as_vectors(): accepts the population_networks parameter, a list of pygad.cnn.Model references, and returns the population with each solution's weights flattened into a single vector (i.e. a chromosome), which is the representation pygad.GA works with.

pygad.gacnn.population_as_matrices(): does the opposite conversion. It accepts the population_networks parameter (the list of pygad.cnn.Model references) and the population_vectors parameter (the population as vectors) and returns the weights of each solution restored to their matrix form, as shown in the sketch below.
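A minimal sketch of how these two helpers are typically used together, assuming a GACNN_instance and a pygad.GA instance named ga_instance already exist (both names are taken from the examples later in this section):

import pygad.gacnn

# Flatten every network's weight matrices into one vector per solution so that
# pygad.GA can use them as the initial population.
population_vectors = pygad.gacnn.population_as_vectors(
    population_networks=GACNN_instance.population_networks)

# ... after pygad.GA evolves ga_instance.population ...

# Convert the evolved vectors back to per-layer weight matrices and push them
# into the networks' trained_weights attributes.
population_matrices = pygad.gacnn.population_as_matrices(
    population_networks=GACNN_instance.population_networks,
    population_vectors=ga_instance.population)
GACNN_instance.update_population_trained_weights(
    population_trained_weights=population_matrices)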
The typical workflow is: read the training data, build a pygad.cnn model, wrap it in a pygad.gacnn.GACNN instance, convert the population to vectors, and then create and run a pygad.GA instance. These steps are walked through below.
import numpy
train_inputs = numpy.load("dataset_inputs.npy")
train_outputs = numpy.load("dataset_outputs.npy")
The class labels must be integers starting at 0, so for N classes the outputs range from 0 to N-1.
import pygad.cnn
flatten_layer = pygad.cnn.Flatten(previous_layer=average_pooling_layer)
dense_layer = pygad.cnn.Dense(num_neurons=4,
previous_layer=flatten_layer,
activation_function="softmax")
After the layers are created, a pygad.cnn.Model instance is built:
model = pygad.cnn.Model(last_layer=dense_layer,
epochs=5,
learning_rate=0.01)
The summary() method shows a summary of the pygad.cnn.Model architecture:
model.summary()
----------Network Architecture----------
<class 'cnn.Conv2D'>
<class 'cnn.AveragePooling2D'>
<class 'cnn.Flatten'>
<class 'cnn.Dense'>
----------------------------------------
Next, a pygad.gacnn.GACNN instance is created. It replicates the model across the population, whose size is given by the num_solutions parameter:
import pygad.gacnn
GACNN_instance = pygad.gacnn.GACNN(model=model,
num_solutions=4)
After the pygad.gacnn.GACNN instance is created, the population's weights are converted into vectors using the pygad.gacnn.population_as_vectors() function so they can serve as the genetic algorithm's initial population:
population_vectors = pygad.gacnn.population_as_vectors(population_networks=GACNN_instance.population_networks)

initial_population = population_vectors.copy()
The fitness function predicts the outputs using each solution's network and uses the classification accuracy as the fitness value. The predict() method relies on the trained_weights attribute of each layer, which is why those weights must be kept in sync with the GA population:
predictions = GACNN_instance.population_networks[sol_idx].predict(data_inputs=data_inputs)
correct_predictions = numpy.where(predictions == data_outputs)[0].size
solution_fitness = (correct_predictions/data_outputs.size)*100
return solution_fitness
Prediction relies on the trained_weights attribute of each layer, but pygad.GA evolves the population as vectors. Therefore, an on_generation callback is passed to pygad.GA so that, after every generation, the evolved vectors are converted back to matrices using pygad.gacnn.population_as_matrices() and copied into the networks' trained_weights via the update_population_trained_weights() method:
def callback_generation(ga_instance):
    global GACNN_instance, last_fitness

    population_matrices = pygad.gacnn.population_as_matrices(population_networks=GACNN_instance.population_networks,
                                                             population_vectors=ga_instance.population)
    GACNN_instance.update_population_trained_weights(population_trained_weights=population_matrices)

    print(f"Generation = {ga_instance.generations_completed}")
Next, an instance of the pygad.GA class is created with the parameters prepared above:
import pygad
num_parents_mating = 4
num_generations = 10
mutation_percent_genes = 5
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
initial_population=initial_population,
fitness_func=fitness_func,
mutation_percent_genes=mutation_percent_genes,
on_generation=callback_generation)
Calling the run() method of the pygad.GA instance starts the evolution, which lasts for num_generations generations:
ga_instance.run()
After run() completes, the plot_fitness() method shows how the fitness values evolved:
ga_instance.plot_fitness()
Information about the best solution is returned by the best_solution() method of the pygad.GA instance:
...
Fitness value of the best solution = 83.75
Index of the best solution : 0
Best fitness value reached after 4 generations.
Finally, the predictions of the best network can be obtained using its predict() method:

predictions = GACNN_instance.population_networks[solution_idx].predict(data_inputs=data_inputs)
import numpy
import pygad.cnn
import pygad.gacnn
import pygad
"""
Convolutional neural network implementation using NumPy
A tutorial that helps to get started (Building Convolutional Neural Network using␣
,→NumPy from Scratch) available in these links:
https://www.linkedin.com/pulse/building-convolutional-neural-network-using-numpy-
,→from-ahmed-gad
https://towardsdatascience.com/building-convolutional-neural-network-using-numpy-
,→from-scratch-b30aac50e50a
https://www.kdnuggets.com/2018/04/building-convolutional-neural-network-numpy-
,→scratch.html
predictions = GACNN_instance.population_networks[sol_idx].predict(data_inputs=data_inputs)
correct_predictions = numpy.where(predictions == data_outputs)[0].size
solution_fitness = (correct_predictions/data_outputs.size)*100
return solution_fitness
def callback_generation(ga_instance):
    global GACNN_instance, last_fitness

    population_matrices = pygad.gacnn.population_as_matrices(population_networks=GACNN_instance.population_networks,
                                                             population_vectors=ga_instance.population)
    GACNN_instance.update_population_trained_weights(population_trained_weights=population_matrices)

    print(f"Generation = {ga_instance.generations_completed}")
print(f"Fitness = {ga_instance.best_solutions_fitness}")
data_inputs = numpy.load("dataset_inputs.npy")
data_outputs = numpy.load("dataset_outputs.npy")
sample_shape = data_inputs.shape[1:]
num_classes = 4
data_inputs = data_inputs
data_outputs = data_outputs
input_layer = pygad.cnn.Input2D(input_shape=sample_shape)
conv_layer1 = pygad.cnn.Conv2D(num_filters=2,
kernel_size=3,
previous_layer=input_layer,
activation_function="relu")
average_pooling_layer = pygad.cnn.AveragePooling2D(pool_size=5,
previous_layer=conv_layer1,
stride=3)
flatten_layer = pygad.cnn.Flatten(previous_layer=average_pooling_layer)
dense_layer2 = pygad.cnn.Dense(num_neurons=num_classes,
previous_layer=flatten_layer,
activation_function="softmax")
model = pygad.cnn.Model(last_layer=dense_layer2,
epochs=1,
learning_rate=0.01)
model.summary()
GACNN_instance = pygad.gacnn.GACNN(model=model,
num_solutions=4)
# GACNN_instance.update_population_trained_weights(population_trained_weights=population_matrices)
# The population does not hold the numerical weights of the networks. Instead, it holds
# a list of references to the last layer of each network (i.e. solution) in the population.
population_vectors = pygad.gacnn.population_as_vectors(population_networks=GACNN_instance.population_networks)

# 2) Assign valid integer values to the sol_per_pop and num_genes parameters. If the
#    initial_population parameter exists, then the sol_per_pop and num_genes parameters are useless.
initial_population = population_vectors.copy()
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
initial_population=initial_population,
fitness_func=fitness_func,
mutation_percent_genes=mutation_percent_genes,
on_generation=callback_generation)
ga_instance.run()
# After the generations complete, a plot is shown that summarizes how the
# outputs/fitness values evolve over the generations.
ga_instance.plot_fitness()
if ga_instance.best_solution_generation != -1:
print(f"Best fitness value reached after {ga_instance.best_solution_generation}␣
,→generations.")
The pygad.kerasga module trains Keras models using the genetic algorithm. It has the following contents:
KerasGA
model_weights_as_vector()
model_weights_as_matrix()
predict()
The KerasGA class creates an initial population of Keras model weights that can be passed to pygad.GA as its initial_population. The Keras model can be built with either the Sequential or the Functional API:
import tensorflow.keras
input_layer = tensorflow.keras.layers.Input(3)
dense_layer1 = tensorflow.keras.layers.Dense(5, activation="relu")
output_layer = tensorflow.keras.layers.Dense(1, activation="linear")
model = tensorflow.keras.Sequential()
model.add(input_layer)
model.add(dense_layer1)
model.add(output_layer)
input_layer = tensorflow.keras.layers.Input(3)
dense_layer1 = tensorflow.keras.layers.Dense(5, activation="relu")(input_layer)
output_layer = tensorflow.keras.layers.Dense(1, activation="linear")(dense_layer1)
pygad.kerasga.KerasGA

The pygad.kerasga module has a class named KerasGA. Its constructor (__init__()) accepts the following parameters:

model: the Keras model.
num_solutions: the number of solutions in the population, where each solution holds a different set of weights for the model.

A pygad.kerasga.KerasGA instance has the following attributes:

model
num_solutions
population_weights: a nested list holding the population's weights.

Methods of the KerasGA class:

create_population(): creates the initial population of the model's weights and saves it in the population_weights attribute.
Besides the KerasGA class, the pygad.kerasga module offers these helper functions:

pygad.kerasga.model_weights_as_vector(): accepts the model parameter and returns a vector holding all of the model's weights. Only the weights of trainable layers are included; layers created with trainable=False are skipped.

pygad.kerasga.model_weights_as_matrix(): does the reverse. It accepts the model and weights_vector parameters and returns the weights restored to the per-layer array shapes that the Keras model expects, as sketched below.
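A minimal sketch of how model_weights_as_matrix() is typically paired with Keras's set_weights() to load an evolved solution back into the model; the model and solution variables are assumed to exist as in the examples below.

import pygad.kerasga

# Reshape the flat solution vector into the per-layer arrays Keras expects,
# then load them into the model.
best_weights = pygad.kerasga.model_weights_as_matrix(model=model,
                                                     weights_vector=solution)
model.set_weights(best_weights)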
pygad.kerasga.predict(): makes predictions for some data using a solution's weights. It accepts the following parameters:

model: the Keras model.
solution: the solution (weights vector) evolved by the genetic algorithm.
data: the input data.
batch_size=None: the number of samples per batch.
verbose=None: the verbosity mode.
steps=None: the number of prediction steps.

The batch_size, verbose, and steps parameters are used when the Keras predictions are made. A usage sketch follows.
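A short sketch of calling pygad.kerasga.predict() with an explicit batch size; model, solution, and data_inputs are assumed to be defined as in the examples below.

import pygad.kerasga

# Predict outputs for data_inputs using the weights encoded in `solution`,
# processing 2 samples per batch.
predictions = pygad.kerasga.predict(model=model,
                                    solution=solution,
                                    data=data_inputs,
                                    batch_size=2)
print(predictions)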
The examples below use the pygad.kerasga module. The first one trains a regression model:
import tensorflow.keras
import pygad.kerasga
import numpy
import pygad
predictions = pygad.kerasga.predict(model=model,
solution=solution,
data=data_inputs)
mae = tensorflow.keras.losses.MeanAbsoluteError()
abs_error = mae(data_outputs, predictions).numpy() + 0.00000001
solution_fitness = 1.0/abs_error
return solution_fitness
def on_generation(ga_instance):
print(f"Generation = {ga_instance.generations_completed}")
print(f"Fitness = {ga_instance.best_solution()[1]}")
input_layer = tensorflow.keras.layers.Input(3)
dense_layer1 = tensorflow.keras.layers.Dense(5, activation="relu")(input_layer)
output_layer = tensorflow.keras.layers.Dense(1, activation="linear")(dense_layer1)
keras_ga = pygad.kerasga.KerasGA(model=model,
num_solutions=10)
# Data inputs
data_inputs = numpy.array([[0.02, 0.1, 0.15],
[0.7, 0.6, 0.8],
[1.5, 1.2, 1.7],
[3.2, 2.9, 3.1]])
# Data outputs
data_outputs = numpy.array([[0.1],
[0.6],
[1.3],
[2.5]])
# Prepare the PyGAD parameters. Check the documentation for more information:
# https://pygad.readthedocs.io/en/latest/pygad.html#pygad-ga-class
initial_population = keras_ga.population_weights # Initial population of network weights
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
initial_population=initial_population,
fitness_func=fitness_func,
on_generation=on_generation)
ga_instance.run()
# After the generations complete, a plot is shown that summarizes how the
# outputs/fitness values evolve over the generations.
mae = tensorflow.keras.losses.MeanAbsoluteError()
abs_error = mae(data_outputs, predictions).numpy()
print(f"Absolute Error : {abs_error}")
import tensorflow.keras
input_layer = tensorflow.keras.layers.Input(3)
dense_layer1 = tensorflow.keras.layers.Dense(5, activation="relu")(input_layer)
output_layer = tensorflow.keras.layers.Dense(1, activation="linear")(dense_layer1)
input_layer = tensorflow.keras.layers.Input(3)
dense_layer1 = tensorflow.keras.layers.Dense(5, activation="relu")
output_layer = tensorflow.keras.layers.Dense(1, activation="linear")
model = tensorflow.keras.Sequential()
model.add(input_layer)
model.add(dense_layer1)
model.add(output_layer)
The next step is to create a pygad.kerasga.KerasGA instance, which builds the initial population of the model's weights:
import pygad.kerasga
keras_ga = pygad.kerasga.KerasGA(model=model,
num_solutions=10)
import numpy
# Data inputs
data_inputs = numpy.array([[0.02, 0.1, 0.15],
[0.7, 0.6, 0.8],
[1.5, 1.2, 1.7],
[3.2, 2.9, 3.1]])
# Data outputs
data_outputs = numpy.array([[0.1],
[0.6],
[1.3],
[2.5]])
The fitness function uses the pygad.kerasga.predict() function to make predictions based on the current solution's weights, then computes the fitness as the inverse of the mean absolute error:
predictions = pygad.kerasga.predict(model=model,
solution=solution,
data=data_inputs)
mae = tensorflow.keras.losses.MeanAbsoluteError()
abs_error = mae(data_outputs, predictions).numpy() + 0.00000001
solution_fitness = 1.0/abs_error
return solution_fitness
Next, the pygad.GA instance is created; the population weights prepared by KerasGA are passed as initial_population:

# Prepare the PyGAD parameters. Check the documentation for more information:
# https://pygad.readthedocs.io/en/latest/pygad.html#pygad-ga-class
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
initial_population=initial_population,
fitness_func=fitness_func,
on_generation=on_generation)
Calling run() starts the genetic algorithm:
ga_instance.run()
After run() completes, the plot_fitness() method can visualize the fitness evolution. The best solution's weights are then used with pygad.kerasga.predict() to make predictions:

# Fetch the parameters of the best solution.
solution, solution_fitness, solution_idx = ga_instance.best_solution()
predictions = pygad.kerasga.predict(model=model,
solution=solution,
data=data_inputs)
print(f"Predictions : \n{predictions}")
Predictions :
[[0.09935353]
[0.63082725]
[1.2765523 ]
[2.4999595 ]]
mae = tensorflow.keras.losses.MeanAbsoluteError()
abs_error = mae(data_outputs, predictions).numpy()
print(f"Absolute Error : {abs_error}")
import tensorflow.keras
import pygad.kerasga
import numpy
import pygad
predictions = pygad.kerasga.predict(model=model,
solution=solution,
data=data_inputs)
bce = tensorflow.keras.losses.BinaryCrossentropy()
solution_fitness = 1.0 / (bce(data_outputs, predictions).numpy() + 0.00000001)
return solution_fitness
def on_generation(ga_instance):
print(f"Generation = {ga_instance.generations_completed}")
print(f"Fitness = {ga_instance.best_solution()[1]}")
keras_ga = pygad.kerasga.KerasGA(model=model,
num_solutions=10)
# Prepare the PyGAD parameters. Check the documentation for more information:
# https://pygad.readthedocs.io/en/latest/pygad.html#pygad-ga-class

# After the generations complete, a plot is shown that summarizes how the
# outputs/fitness values evolve over the generations.
bce = tensorflow.keras.losses.BinaryCrossentropy()
solution_fitness = 1.0 / (bce(data_outputs, predictions).numpy() + 0.00000001)
For example, a fitness value of 739.24 corresponds to a binary cross-entropy of 0.0013527311.
Predictions :
[[9.9694413e-01 3.0558957e-03]
[5.0176249e-04 9.9949825e-01]
[1.8470541e-03 9.9815291e-01]
[9.9999976e-01 2.0538971e-07]]
Accuracy : 1.0
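The accuracy shown above can be computed from the predictions with a Keras metric; a minimal sketch, with data_outputs and predictions as defined in the example:

import tensorflow.keras

# Binary classification accuracy of the evolved model's predictions.
ba = tensorflow.keras.metrics.BinaryAccuracy()
ba.update_state(data_outputs, predictions)
accuracy = ba.result().numpy()
print(f"Accuracy : {accuracy}")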
import tensorflow.keras
import pygad.kerasga
import numpy
import pygad
predictions = pygad.kerasga.predict(model=model,
solution=solution,
data=data_inputs)
cce = tensorflow.keras.losses.CategoricalCrossentropy()
solution_fitness = 1.0 / (cce(data_outputs, predictions).numpy() + 0.00000001)
return solution_fitness
def on_generation(ga_instance):
print(f"Generation = {ga_instance.generations_completed}")
print(f"Fitness = {ga_instance.best_solution()[1]}")
keras_ga = pygad.kerasga.KerasGA(model=model,
num_solutions=10)
# Data inputs
data_inputs = numpy.load("../data/dataset_features.npy")
# Data outputs
data_outputs = numpy.load("../data/outputs.npy")
data_outputs = tensorflow.keras.utils.to_categorical(data_outputs)
# Prepare the PyGAD parameters. Check the documentation for more information:
# https://pygad.readthedocs.io/en/latest/pygad.html#pygad-ga-class

# After the generations complete, a plot is shown that summarizes how the
# outputs/fitness values evolve over the generations.
cce = tensorflow.keras.losses.CategoricalCrossentropy()
solution_fitness = 1.0 / (cce(data_outputs, predictions).numpy() + 0.00000001)
The inputs are features extracted from images of shape (100, 100, 3). Because categorical cross-entropy is used, the class labels are one-hot encoded with tensorflow.keras.utils.to_categorical():
import numpy
data_inputs = numpy.load("../data/dataset_features.npy")
data_outputs = numpy.load("../data/outputs.npy")
data_outputs = tensorflow.keras.utils.to_categorical(data_outputs)
Fitness value of the best solution = 4.197464252185969
Index of the best solution : 0
Categorical Crossentropy : 0.23823906
Accuracy : 0.9852192
import tensorflow.keras
import pygad.kerasga
import numpy
import pygad
predictions = pygad.kerasga.predict(model=model,
solution=solution,
data=data_inputs)
cce = tensorflow.keras.losses.CategoricalCrossentropy()
solution_fitness = 1.0 / (cce(data_outputs, predictions).numpy() + 0.00000001)
return solution_fitness
def on_generation(ga_instance):
print(f"Generation = {ga_instance.generations_completed}")
print(f"Fitness = {ga_instance.best_solution()[1]}")
keras_ga = pygad.kerasga.KerasGA(model=model,
num_solutions=10)
# Data inputs
data_inputs = numpy.load("../data/dataset_inputs.npy")
# Data outputs
data_outputs = numpy.load("../data/dataset_outputs.npy")
data_outputs = tensorflow.keras.utils.to_categorical(data_outputs)
# Prepare the PyGAD parameters. Check the documentation for more information:
# https://pygad.readthedocs.io/en/latest/pygad.html#pygad-ga-class

# After the generations complete, a plot is shown that summarizes how the
# outputs/fitness values evolve over the generations.
import numpy
data_inputs = numpy.load("../data/dataset_inputs.npy")
data_outputs = numpy.load("../data/dataset_outputs.npy")
data_outputs = tensorflow.keras.utils.to_categorical(data_outputs)
Fitness value of the best solution = 2.7462310258668805
Index of the best solution : 0
Categorical Crossentropy : 0.3641354
Accuracy : 0.75
The next example feeds image data through a tensorflow.keras.preprocessing.image.ImageDataGenerator instead of NumPy arrays:
import tensorflow as tf
import tensorflow.keras
import pygad.kerasga
import pygad
predictions = pygad.kerasga.predict(model=model,
solution=solution,
data=train_generator)
cce = tensorflow.keras.losses.CategoricalCrossentropy()
solution_fitness = 1.0 / (cce(data_outputs, predictions).numpy() + 0.00000001)
return solution_fitness
def on_generation(ga_instance):
print("Generation = {ga_instance.generations_completed}")
print("Fitness = {ga_instance.best_solution(ga_instance.last_generation_
,→fitness)[1]}")
num_classes = 2
img_size = 224
# Create a simple CNN. This does not guarantee high classification accuracy.
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Input(shape=(img_size, img_size, 3)))
model.add(tf.keras.layers.Conv2D(32, (3,3), activation="relu", padding="same"))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(rate=0.2))
model.add(tf.keras.layers.Dense(num_classes, activation="softmax"))
keras_ga = pygad.kerasga.KerasGA(model=model,
num_solutions=10)
data_generator = tf.keras.preprocessing.image.ImageDataGenerator()
train_generator = data_generator.flow_from_directory(dataset_path,
class_mode='categorical',
target_size=(224, 224),
batch_size=32,
shuffle=False)
# train_generator.class_indices
data_outputs = tf.keras.utils.to_categorical(train_generator.labels)
# Check the documentation for more information about the parameters:
# https://pygad.readthedocs.io/en/latest/pygad.html#pygad-ga-class

# After the generations complete, a plot is shown that summarizes how the
# outputs/fitness values evolve over the generations.
ga_instance.plot_fitness(title="PyGAD & Keras - Iteration vs. Fitness", linewidth=4)
predictions = pygad.kerasga.predict(model=model,
solution=solution,
data=train_generator)
# print(f"Predictions : \n{predictions}")
The pygad.torchga module trains PyTorch models using the genetic algorithm. It has the following contents:
TorchGA
model_weights_as_vector()
model_weights_as_dict()
predict()
The TorchGA class creates an initial population of PyTorch model weights that can be passed to pygad.GA as its initial_population. A PyTorch model can be created, for example, with torch.nn.Sequential:
import torch
input_layer = torch.nn.Linear(3, 5)
relu_layer = torch.nn.ReLU()
output_layer = torch.nn.Linear(5, 1)
model = torch.nn.Sequential(input_layer,
relu_layer,
output_layer)
pygad.torchga.TorchGA

The pygad.torchga module has a class named TorchGA. Its constructor (__init__()) accepts the following parameters:

model: the PyTorch model.
num_solutions: the number of solutions in the population, where each solution holds a different set of weights for the model.

A pygad.torchga.TorchGA instance has the following attributes:

model
num_solutions
population_weights: a nested list holding the population's weights.

Methods of the TorchGA class:

create_population(): creates the initial population of the model's weights and saves it in the population_weights attribute.
Besides the TorchGA class, the pygad.torchga module offers these helper functions:

pygad.torchga.model_weights_as_vector(): accepts the model parameter and returns a vector holding all of the model's weights.

pygad.torchga.model_weights_as_dict(): accepts the model and weights_vector parameters and returns the weights restored to a dictionary in the same format returned by the model's state_dict() method, so it can be loaded back with load_state_dict(); a sketch follows.
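A minimal sketch of restoring an evolved solution into a PyTorch model via load_state_dict(); model and solution are assumed to exist as in the examples below.

import pygad.torchga

# Convert the flat solution vector back into a state_dict-style dictionary
# and load it into the model.
best_weights = pygad.torchga.model_weights_as_dict(model=model,
                                                   weights_vector=solution)
model.load_state_dict(best_weights)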
pygad.torchga.predict(): makes predictions for some data using a solution's weights. It accepts the following parameters:

model: the PyTorch model.
solution: the solution (weights vector) evolved by the genetic algorithm.
data: the input data.
import torch
import pygad.torchga
import pygad

predictions = pygad.torchga.predict(model=model,
                                    solution=solution,
                                    data=data_inputs)
return solution_fitness
def on_generation(ga_instance):
print(f"Generation = {ga_instance.generations_completed}")
print(f"Fitness = {ga_instance.best_solution()[1]}")
model = torch.nn.Sequential(input_layer,
relu_layer,
output_layer)
# print(model)
torch_ga = pygad.torchga.TorchGA(model=model,
                                 num_solutions=10)
loss_function = torch.nn.L1Loss()
# Data inputs
data_inputs = torch.tensor([[0.02, 0.1, 0.15],
[0.7, 0.6, 0.8],
[1.5, 1.2, 1.7],
[3.2, 2.9, 3.1]])
# Data outputs
data_outputs = torch.tensor([[0.1],
[0.6],
[1.3],
[2.5]])
# Prepare the PyGAD parameters. Check the documentation for more information:
# https://pygad.readthedocs.io/en/latest/pygad.html#pygad-ga-class
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
initial_population=initial_population,
fitness_func=fitness_func,
on_generation=on_generation)
ga_instance.run()
# After the generations complete, a plot is shown that summarizes how the
# outputs/fitness values evolve over the generations.
import torch
input_layer = torch.nn.Linear(3, 5)
relu_layer = torch.nn.ReLU()
output_layer = torch.nn.Linear(5, 1)
model = torch.nn.Sequential(input_layer,
relu_layer,
output_layer)
The next step is to create a pygad.torchga.TorchGA instance, which builds the initial population of the model's weights:

import pygad.torchga

torch_ga = pygad.torchga.TorchGA(model=model,
                                 num_solutions=10)
import numpy
# Data inputs
data_inputs = numpy.array([[0.02, 0.1, 0.15],
[0.7, 0.6, 0.8],
[1.5, 1.2, 1.7],
[3.2, 2.9, 3.1]])
# Data outputs
data_outputs = numpy.array([[0.1],
[0.6],
[1.3],
[2.5]])
The fitness function uses the mean absolute error as the loss, implemented by torch.nn.L1Loss():
loss_function = torch.nn.L1Loss()
predictions = pygad.torchga.predict(model=model,
solution=solution,
data=data_inputs)
return solution_fitness
Next, the pygad.GA instance is created; the population weights prepared by TorchGA are passed as initial_population:

# Prepare the PyGAD parameters. Check the documentation for more information:
# https://pygad.readthedocs.io/en/latest/pygad.html#pygad-ga-class
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
initial_population=initial_population,
fitness_func=fitness_func,
on_generation=on_generation)
Calling run() starts the evolution:

ga_instance.run()

After run() completes, plot_fitness() can visualize the fitness evolution, best_solution() returns the best weights vector, and model_weights_as_dict() can restore it into the model. The predictions of the best solution are obtained as follows:
predictions = pygad.torchga.predict(model=model,
solution=solution,
data=data_inputs)
print("Predictions : \n", predictions.detach().numpy())
Predictions :
[[0.08401088]
[0.60939324]
[1.3010881 ]
[2.5010352 ]]
The next example trains a binary classification model:

import torch
import pygad.torchga
import pygad
predictions = pygad.torchga.predict(model=model,
solution=solution,
data=data_inputs)
return solution_fitness
def on_generation(ga_instance):
print(f"Generation = {ga_instance.generations_completed}")
print(f"Fitness = {ga_instance.best_solution()[1]}")
model = torch.nn.Sequential(input_layer,
relu_layer,
dense_layer,
output_layer)
# print(model)
torch_ga = pygad.torchga.TorchGA(model=model,
                                 num_solutions=10)
loss_function = torch.nn.BCELoss()
# Prepare the PyGAD parameters. Check the documentation for more information:
# https://pygad.readthedocs.io/en/latest/pygad.html#pygad-ga-class

# After the generations complete, a plot is shown that summarizes how the
# outputs/fitness values evolve over the generations.
model = torch.nn.Sequential(input_layer,
relu_layer,
dense_layer,
output_layer)
loss_function = torch.nn.BCELoss()
Because the fitness is computed as 1.0/(loss + 0.00000001), a loss of 0.0 gives the maximum possible fitness of 100000000.0:

Fitness value of the best solution = 100000000.0
Predictions :
[[1.0000000e+00 1.3627675e-10]
[3.8521746e-09 1.0000000e+00]
[4.2789325e-10 1.0000000e+00]
[1.0000000e+00 3.3668417e-09]]
Accuracy : 1.0
The next example trains a classification model with more than two classes using the cross-entropy loss:

import torch
import pygad.torchga
import pygad
import numpy
predictions = pygad.torchga.predict(model=model,
solution=solution,
data=data_inputs)
return solution_fitness
def on_generation(ga_instance):
print(f"Generation = {ga_instance.generations_completed}")
print(f"Fitness = {ga_instance.best_solution()[1]}")
model = torch.nn.Sequential(input_layer,
relu_layer,
dense_layer,
output_layer)
torch_ga = pygad.torchga.TorchGA(model=model,
                                 num_solutions=10)
loss_function = torch.nn.CrossEntropyLoss()
# Data inputs
data_inputs = torch.from_numpy(numpy.load("dataset_features.npy")).float()
# Data outputs
data_outputs = torch.from_numpy(numpy.load("outputs.npy")).long()
# The next 2 lines are equivalent to this Keras function for 1-hot encoding: tensorflow.keras.utils.to_categorical(data_outputs)
# temp_outs[numpy.arange(data_outputs.shape[0]), numpy.uint8(data_outputs)] = 1
# Prepare the PyGAD parameters. Check the documentation for more information:
# https://pygad.readthedocs.io/en/latest/pygad.html#pygad-ga-class

# After the generations complete, a plot is shown that summarizes how the
# outputs/fitness values evolve over the generations.
The loss used is the cross-entropy loss:

loss_function = torch.nn.CrossEntropyLoss()

The inputs are features extracted from images of shape (100, 100, 3):
import numpy
data_inputs = numpy.load("dataset_features.npy")
data_outputs = numpy.load("outputs.npy")
The next example trains a convolutional neural network built with PyTorch:

import torch
import pygad.torchga
import pygad
import numpy
predictions = pygad.torchga.predict(model=model,
solution=solution,
data=data_inputs)
return solution_fitness
def on_generation(ga_instance):
print(f"Generation = {ga_instance.generations_completed}")
print(f"Fitness = {ga_instance.best_solution()[1]}")
flatten_layer1 = torch.nn.Flatten()
# The value 768 is pre-computed by tracing the sizes of the layers' outputs.
dense_layer1 = torch.nn.Linear(in_features=768, out_features=15)
relu_layer3 = torch.nn.ReLU()
model = torch.nn.Sequential(input_layer,
relu_layer1,
max_pool1,
conv_layer2,
relu_layer2,
flatten_layer1,
dense_layer1,
relu_layer3,
dense_layer2,
output_layer)
torch_ga = pygad.torchga.TorchGA(model=model,
                                 num_solutions=10)
loss_function = torch.nn.CrossEntropyLoss()
# Data inputs
data_inputs = torch.from_numpy(numpy.load("dataset_inputs.npy")).float()
data_inputs = data_inputs.reshape((data_inputs.shape[0], data_inputs.shape[3], data_
,→inputs.shape[1], data_inputs.shape[2]))
# Data outputs
data_outputs = torch.from_numpy(numpy.load("dataset_outputs.npy")).long()
# Prepare the PyGAD parameters. Check the documentation for more information:
# https://pygad.readthedocs.io/en/latest/pygad.html#pygad-ga-class

# After the generations complete, a plot is shown that summarizes how the
# outputs/fitness values evolve over the generations.
flatten_layer1 = torch.nn.Flatten()
# The value 768 is pre-computed by tracing the sizes of the layers' outputs.
dense_layer1 = torch.nn.Linear(in_features=768, out_features=15)
relu_layer3 = torch.nn.ReLU()
model = torch.nn.Sequential(input_layer,
relu_layer1,
max_pool1,
conv_layer2,
relu_layer2,
flatten_layer1,
dense_layer1,
relu_layer3,
dense_layer2,
output_layer)
import numpy
data_inputs = numpy.load("dataset_inputs.npy")
data_outputs = numpy.load("dataset_outputs.npy")
Fitness value of the best solution = 1.3009520689219258
Index of the best solution : 0
Crossentropy : 0.7686678
Accuracy : 0.975
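The accuracy reported above can be computed from the raw predictions; a minimal sketch, assuming predictions holds the per-class scores returned by pygad.torchga.predict() and data_outputs holds the integer class labels:

import torch

# Pick the class with the highest score for each sample and compare with the labels.
predicted_labels = torch.argmax(predictions, dim=1)
accuracy = (predicted_labels == data_outputs).float().mean()
print(f"Accuracy : {accuracy.item()}")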
fitness_func
(init_range_low init_range_high)
__code__
sol_idx
The initial_population parameter is supported; when it is None, the population is created from sol_per_pop and num_genes, and when it is given, sol_per_pop and num_genes may be left as None. The callback_generation parameter and the best_solution() method are available. The best_solution_generation attribute is set after run() completes, the best_solution_fitness attribute was renamed to best_solutions_fitness, and the generations_completed attribute holds the number of completed generations (initially 0).
The mutation_by_replacement parameter works only with mutation_type="random". When mutation_by_replacement=True, the random value replaces the current gene value; when it is False (the default), the random value is added to the gene value. The mutation_type and crossover_type parameters can also be assigned None to skip the corresponding operation.
The pygad.cnn and pygad.gacnn modules are supported.
The plot_result() method accepts the title, xlabel, and ylabel parameters to customize the plot.
pygad.nn
The pygad.nn.predict_outputs() function was renamed to pygad.nn.predict(), and pygad.nn.train_network() was renamed to pygad.nn.train().
The delay_after_gen parameter specifies an optional delay (in seconds) after each generation; it defaults to 0.0. If the callback_generation function returns the string "stop", the run() method stops before completing all num_generations generations. The following callback_generation stops the evolution once the best fitness reaches 70:
def func_generation(ga_instance):
if ga_instance.best_solution()[1] >= 70:
return "stop"
The crossover_probability and mutation_probability parameters are added to the pygad.GA constructor: crossover_probability is the probability of applying crossover to a selected parent, and mutation_probability is the probability of mutating a gene. The linewidth parameter is added to the plot_result() method to control the line width of the plot.
The gene_space parameter is added to the pygad.GA constructor to restrict the values each gene may take. For more information, check https://pygad.readthedocs.io/en/latest/pygad_more.html#more-about-the-gene-space-parameter.
initial_population
gene_typeintfloatgene_spaceNone
on_starton_fitnesson_parentson_crossoveron_mutationon_generationon_stop
learning_ratepygad.nn.train()
problem_typepygad.nn.train()pygad.nn.predict()
"None""sigmoid""relu""softmax""None"
pygad.nn
Regression is supported in pygad.nn by setting the problem_type parameter of pygad.nn.train() and pygad.nn.predict() to "regression"; with the "None" output activation, the outputs can range from -infinity to +infinity.
pygad.nn
pygad.gann
Regression is supported in pygad.gann by setting the problem_type parameter of pygad.nn.train() and pygad.nn.predict() to "regression" and passing output_activation="None" to the pygad.gann.GANN constructor.
pygad.gann
problem_type"classification"(). ().
problem_typeregression
kerasga
crossover_probability
best_solutions_fitness
The save_best_solutions parameter defaults to False; when it is True, the best solution of each generation is saved in the best_solutions attribute.
crossover_type"scattered"
gene_space
(gene_type, crossover_probability, mutation_probability, delay_after_gen)
intfloatnumpy.intnumpy.int8numpy.int16numpy.int32numpy.int64numpy.float
numpy.float16numpy.float32numpy.float64
pygad.torchga
“” ():
run()best_solution_fitness
parent_selection_typesss(), keep_parents
mutation_percent_genes"default"mutation_percent_genes"default"
mutation_percent_genes>0<=100
warningsprint()
boolsuppress_warningspygad.GAFalse
adaptive_mutation_population_fitness()
best_solution()pop_fitnessNonecal_pop_fitness()
save_best_solutions=True
() gene_space[0, 1]
The attributes last_generation_fitness, last_generation_parents, last_generation_offspring_crossover, and last_generation_offspring_mutation hold the outputs of the last completed generation and can be inspected, for example, inside the on_generation() callback.
initial_populationinitial_populationgene_typeinitial_population((1, 1),
(3, 3), (5, 5), (7, 7))intgene_typefloatintintinitial_populationgene_type
[]
(),
The bool allow_duplicate_genes parameter defaults to True; setting it to False prevents a solution from containing duplicate gene values.
last_generation_fitnesslast_generation_fitness
Nonecrossover_typemutation_type
The gene_type parameter also accepts a list/tuple/numpy.ndarray specifying a type for each individual gene. For more information, check https://pygad.readthedocs.io/en/latest/pygad_more.html#more-about-the-gene-type-parameter. A bool attribute named gene_type_single is added to the pygad.GA class; it is True when a single type is assigned to gene_type and False when gene_type is a list/tuple/numpy.ndarray of per-gene types.
Fixes are applied to how mutation_by_replacement interacts with gene_space entries that are None (e.g. gene_space=[None, [5, 6]]): when a gene's space is None, a random value is generated for it while respecting the gene_type parameter. When save_best_solutions=True, the best solution of each generation is appended to the best_solutions attribute. The last_generation_parents_indices attribute holds the indices of the parents selected in the last generation, alongside last_generation_fitness. None is also supported inside nested gene_space lists (e.g. gene_space=[[1, 2, 3], [5, 6, None]]), where it is replaced by a random value.
gene_space"step""low""high"{"low": 0, "high": 30, "step": 2}() “‘<https://pygad.
readthedocs.io/en/latest/pygad_more.html#more-about-the-gene-space-parameter>‘
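A minimal sketch of restricting genes to even values using the dict form with a step; whether the upper bound itself can be drawn depends on the arange-style semantics, so treat the exact bound as an assumption.

import numpy

# Dict form with a step: gene values are drawn from an evenly stepped range, i.e. 0, 2, 4, ...
gene_space = {"low": 0, "high": 30, "step": 2}

# Roughly the same constraint expressed as an explicit list of allowed values.
gene_space_as_list = list(numpy.arange(0, 30, 2))

Either form can then be passed as the gene_space argument of the pygad.GA constructor.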
A predict() function is added to both the pygad.kerasga and pygad.torchga modules.
The stop_criteria parameter accepts a str or a list of strings with two supported forms: "reach" and "saturate". "reach" stops run() when the fitness reaches a given value (e.g. "reach_40" stops once the fitness is at least 40), and "saturate" stops run() when the fitness stops improving for a given number of generations (e.g. "saturate_7" stops if the fitness does not change for 7 consecutive generations). A short sketch follows.
The bool save_solutions parameter of pygad.GA defaults to False; when it is True, all explored solutions are saved in the solutions attribute.
The plot_result() method was renamed to plot_fitness(). The plot_fitness() method of pygad.GA accepts the parameters font_size=14, save_dir=None, color="#3870FF", and plot_type="plot": font_size sets the font size of the figure text, save_dir (when not None) is the path where the figure is saved, color sets the plot color, and plot_type selects the kind of plot, which can be "plot" (the default), "scatter", or "bar". The default title of plot_fitness() changed from "PyGAD - Generation vs. Fitness" to "PyGAD - Iteration vs. Fitness". A usage sketch follows.
The plot_new_solution_rate() method shows the number of new solutions explored in each generation; it accepts the same parameters as plot_fitness() and requires that the pygad.GA instance was created with save_solutions=True. The plot_genes() method plots the values of the genes; in addition to the plot_fitness() parameters it accepts graph_type, fill_color, and solutions. graph_type can be "plot" (the default), "boxplot", or "histogram"; fill_color is used when graph_type is "boxplot" or "histogram"; and solutions selects whether "all" solutions or only the "best" solutions are plotted.
In addition to float, the gene_type parameter now accepts a list/tuple/numpy.ndarray of the form [float, 2], where the second element is the number of decimal places (precision); for example, a gene value of 0.1234 is rounded to 0.12. For more information, check https://pygad.readthedocs.io/en/latest/pygad_more.html#more-about-the-gene-type-parameter. A short sketch follows.
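A minimal sketch of limiting all float genes to 2 decimal places; the fitness function is a placeholder assumption, and the two-argument signature matches older PyGAD releases.

import numpy
import pygad

def fitness_func(solution, solution_idx):
    # Placeholder fitness: prefer gene sums close to 10.
    return -abs(numpy.sum(solution) - 10)

ga_instance = pygad.GA(num_generations=10,
                       num_parents_mating=2,
                       sol_per_pop=6,
                       num_genes=3,
                       fitness_func=fitness_func,
                       gene_type=[float, 2])  # all genes are floats rounded to 2 decimals
ga_instance.run()
print(ga_instance.population)  # every gene has at most 2 decimal places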
keep_parents
The kerasga and torchga modules are supported, and a user-defined function can be assigned to the mutation_type, crossover_type, and parent_selection_type parameters of the pygad.GA class to implement custom operators.
The on_generation callback can be used to update a tqdm progress bar while the generations run:
import pygad
import numpy
import tqdm
equation_inputs = [4,-2,3.5]
desired_output = 44
num_generations = 10000
with tqdm.tqdm(total=num_generations) as pbar:
    ga_instance = pygad.GA(num_generations=num_generations,
                           sol_per_pop=5,
                           num_parents_mating=2,
                           num_genes=len(equation_inputs),
                           fitness_func=fitness_func,
                           on_generation=lambda _: pbar.update(1))
    ga_instance.run()

ga_instance.plot_result()
However, calling the save() method on this instance fails because the lambda assigned to on_generation cannot be pickled:
ga_instance.save("test")
To be able to save the instance, assign a regular function (here on_generation_progress()) to the on_generation parameter instead of the lambda:
import pygad
import numpy
import tqdm
equation_inputs = [4,-2,3.5]
desired_output = 44
def on_generation_progress(ga):
pbar.update(1)
num_generations = 100
with tqdm.tqdm(total=num_generations) as pbar:
    ga_instance = pygad.GA(num_generations=num_generations,
                           sol_per_pop=5,
                           num_parents_mating=2,
                           num_genes=len(equation_inputs),
                           fitness_func=fitness_func,
                           on_generation=on_generation_progress)
    ga_instance.run()

ga_instance.plot_result()

ga_instance.save("test")
The solutions and solutions_fitness attributes hold the explored solutions and their fitness values when save_solutions=True; the behaviour of the (solutions, solutions_fitness, best_solutions, best_solutions_fitness) attributes across repeated calls to run() was also adjusted. A fix related to adaptive mutation (mutation_type="adaptive") is tracked at https://github.com/ahmedfgad/GeneticAlgorithmPython/issues/65.
previous_generation_fitnesspygad.GAlast_generation_fitness
cal_pop_fitness()’ previous_generation_fitness’ ()
gene_space[(), ]’ ()
allow_duplicate_genes(mutation_type=None).
tournament_selection()
save_solutions=True
Later releases add the parallel_processing parameter to the pygad.GA class, a run_completed attribute that stays False until the run() method completes, changes to how the self.best_solutions, self.best_solutions_fitness, self.solutions, and self.solutions_fitness attributes behave across repeated run() calls, support for crossover_type=None, the keep_elitism parameter with the last_generation_elitism attribute, the random_seed parameter, and updates to pygad.TorchGA.