import pygad.kerasga
import numpy
import pygad
+import gc
+

def fitness_func(ga_instanse, solution, sol_idx):
    global data_inputs, data_outputs, keras_ga, model
@@ -11,27 +13,33 @@ def fitness_func(ga_instanse, solution, sol_idx):
                                        data=data_inputs)

    cce = tensorflow.keras.losses.CategoricalCrossentropy()
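    # Note (editor): the fitness below is the reciprocal of the categorical
    # cross-entropy loss plus a small epsilon (1e-8) that guards against
    # division by zero, so a lower loss means a higher fitness (e.g. a loss
    # of 0.5 maps to a fitness of about 2.0); maximizing fitness minimizes the loss.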
-    solution_fitness = 1.0 / (cce(data_outputs, predictions).numpy() + 0.00000001)
+    solution_fitness = 1.0 / \
+        (cce(data_outputs, predictions).numpy() + 0.00000001)

    return solution_fitness

+
def on_generation(ga_instance):
    print(f"Generation = {ga_instance.generations_completed}")
    print(f"Fitness = {ga_instance.best_solution()[1]}")
+    gc.collect()  # Can be useful in notebooks (ipynb) to free memory and keep usage from growing across generations.
+

# Build the keras model using the functional API.
input_layer = tensorflow.keras.layers.Input(shape=(100, 100, 3))
conv_layer1 = tensorflow.keras.layers.Conv2D(filters=5,
                                             kernel_size=7,
                                             activation="relu")(input_layer)
-max_pool1 = tensorflow.keras.layers.MaxPooling2D(pool_size=(5,5),
+max_pool1 = tensorflow.keras.layers.MaxPooling2D(pool_size=(5, 5),
                                                  strides=5)(conv_layer1)
conv_layer2 = tensorflow.keras.layers.Conv2D(filters=3,
                                             kernel_size=3,
                                             activation="relu")(max_pool1)
-flatten_layer  = tensorflow.keras.layers.Flatten()(conv_layer2)
-dense_layer = tensorflow.keras.layers.Dense(15, activation="relu")(flatten_layer)
-output_layer = tensorflow.keras.layers.Dense(4, activation="softmax")(dense_layer)
+flatten_layer = tensorflow.keras.layers.Flatten()(conv_layer2)
+dense_layer = tensorflow.keras.layers.Dense(
+    15, activation="relu")(flatten_layer)
+output_layer = tensorflow.keras.layers.Dense(
+    4, activation="softmax")(dense_layer)

model = tensorflow.keras.Model(inputs=input_layer, outputs=output_layer)
@@ -47,13 +55,15 @@ def on_generation(ga_instance):
data_outputs = tensorflow.keras.utils.to_categorical(data_outputs)

# Prepare the PyGAD parameters. Check the documentation for more information: https://pygad.readthedocs.io/en/latest/README_pygad_ReadTheDocs.html#pygad-ga-class
-num_generations = 200 # Number of generations.
-num_parents_mating = 5 # Number of solutions to be selected as parents in the mating pool.
-initial_population = keras_ga.population_weights # Initial population of network weights.
+num_generations = 200  # Number of generations.
+# Number of solutions to be selected as parents in the mating pool.
+num_parents_mating = 5
+# Initial population of network weights.
+initial_population = keras_ga.population_weights
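# Note (editor): keras_ga is presumably a pygad.kerasga.KerasGA instance built
# from `model` earlier in the file (outside the hunks shown here); its
# population_weights attribute holds the flattened network weights of each
# solution in the initial population.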

# Create an instance of the pygad.GA class
-ga_instance = pygad.GA(num_generations=num_generations,
-                       num_parents_mating=num_parents_mating,
+ga_instance = pygad.GA(num_generations=num_generations,
+                       num_parents_mating=num_parents_mating,
                       initial_population=initial_population,
                       fitness_func=fitness_func,
                       on_generation=on_generation)
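# Note (editor): when run() is called below, PyGAD evaluates fitness_func for
# every solution in each generation and calls on_generation once per generation,
# for num_generations generations in total.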
@@ -62,7 +72,8 @@ def on_generation(ga_instance):
ga_instance.run()

# After the generations complete, some plots are shown that summarize how the outputs/fitness values evolve over generations.
-ga_instance.plot_fitness(title="PyGAD & Keras - Iteration vs. Fitness", linewidth=4)
+ga_instance.plot_fitness(
+    title="PyGAD & Keras - Iteration vs. Fitness", linewidth=4)

# Returning the details of the best solution.
solution, solution_fitness, solution_idx = ga_instance.best_solution()
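# A minimal sketch (editor's addition, not part of this commit) of how the best
# solution is typically mapped back onto the Keras model, assuming the `model`,
# `solution` and `data_inputs` objects defined above and PyGAD's
# pygad.kerasga.model_weights_as_matrix() helper:
best_solution_weights = pygad.kerasga.model_weights_as_matrix(model=model,
                                                              weights_vector=solution)
model.set_weights(best_solution_weights)  # Load the evolved weights into the model.
predictions = model.predict(data_inputs)  # Predict with the best evolved weights.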