image_classification_CNN.py
import tensorflow.keras
import pygad.kerasga
import numpy
import pygad
import gc


def fitness_func(ga_instance, solution, sol_idx):
    global data_inputs, data_outputs, keras_ga, model

    predictions = pygad.kerasga.predict(model=model,
                                        solution=solution,
                                        data=data_inputs)

    # PyGAD maximizes fitness, so use the inverse of the categorical
    # crossentropy loss (a small epsilon avoids division by zero).
    cce = tensorflow.keras.losses.CategoricalCrossentropy()
    solution_fitness = 1.0 / (cce(data_outputs, predictions).numpy() + 0.00000001)

    return solution_fitness


def on_generation(ga_instance):
    print(f"Generation = {ga_instance.generations_completed}")
    print(f"Fitness = {ga_instance.best_solution()[1]}")
    # Freeing unused objects helps keep memory from growing in notebooks (ipynb).
    gc.collect()

# Build the keras model using the functional API.
input_layer = tensorflow.keras.layers.Input(shape=(100, 100, 3))
conv_layer1 = tensorflow.keras.layers.Conv2D(filters=5,
                                             kernel_size=7,
                                             activation="relu")(input_layer)
max_pool1 = tensorflow.keras.layers.MaxPooling2D(pool_size=(5, 5),
                                                 strides=5)(conv_layer1)
conv_layer2 = tensorflow.keras.layers.Conv2D(filters=3,
                                             kernel_size=3,
                                             activation="relu")(max_pool1)
flatten_layer = tensorflow.keras.layers.Flatten()(conv_layer2)
dense_layer = tensorflow.keras.layers.Dense(15, activation="relu")(flatten_layer)
output_layer = tensorflow.keras.layers.Dense(4, activation="softmax")(dense_layer)

model = tensorflow.keras.Model(inputs=input_layer, outputs=output_layer)
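
# Optional (not in the original script): print a layer-by-layer overview of the
# architecture before its weights are handed over to the genetic algorithm.
model.summary()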

# Create an instance of the pygad.kerasga.KerasGA class to build the initial population.
keras_ga = pygad.kerasga.KerasGA(model=model, num_solutions=10)
# Data inputs
data_inputs = numpy.load("../data/dataset_inputs.npy")
# Data outputs
data_outputs = numpy.load("../data/dataset_outputs.npy")
data_outputs = tensorflow.keras.utils.to_categorical(data_outputs)
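
# Assumption based on the model above: data_inputs should have shape
# (num_samples, 100, 100, 3) to match the Input layer, and data_outputs should
# hold integer class labels in 0-3 before the one-hot encoding, matching the
# 4-unit softmax output.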
# Prepare the PyGAD parameters. Check the documentation for more information: https://pygad.readthedocs.io/en/latest/README_pygad_ReadTheDocs.html#pygad-ga-class
num_generations = 200 # Number of generations.
# Number of solutions to be selected as parents in the mating pool.
num_parents_mating = 5
# Initial population of network weights.
initial_population = keras_ga.population_weights

# Create an instance of the pygad.GA class.
ga_instance = pygad.GA(num_generations=num_generations,
                       num_parents_mating=num_parents_mating,
                       initial_population=initial_population,
                       fitness_func=fitness_func,
                       on_generation=on_generation)
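# The remaining GA operators (parent selection, crossover, mutation) keep their
# PyGAD defaults here; they can be tuned through additional pygad.GA constructor
# arguments described in the documentation linked above.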

# Start the genetic algorithm evolution.
ga_instance.run()

# After the generations complete, a plot is shown summarizing how the fitness value evolved over the generations.
ga_instance.plot_fitness(title="PyGAD & Keras - Iteration vs. Fitness", linewidth=4)

# Return the details of the best solution.
solution, solution_fitness, solution_idx = ga_instance.best_solution()
print(f"Fitness value of the best solution = {solution_fitness}")
print(f"Index of the best solution = {solution_idx}")

predictions = pygad.kerasga.predict(model=model,
                                    solution=solution,
                                    data=data_inputs)
# print("Predictions : \n", predictions)
# Calculate the categorical crossentropy for the trained model.
cce = tensorflow.keras.losses.CategoricalCrossentropy()
print(f"Categorical Crossentropy : {cce(data_outputs, predictions).numpy()}")
# Calculate the classification accuracy for the trained model.
ca = tensorflow.keras.metrics.CategoricalAccuracy()
ca.update_state(data_outputs, predictions)
accuracy = ca.result().numpy()
print(f"Accuracy : {accuracy}")
# model.compile(optimizer="Adam", loss="mse", metrics=["mae"])
# _ = model.fit(x, y, verbose=0)
# r = model.predict(data_inputs)
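
# A minimal follow-up sketch (not part of the original script): load the best
# evolved weight vector back into the Keras model so it can be saved or reused
# for plain Keras inference. pygad.kerasga.model_weights_as_matrix() converts
# the flat GA solution into per-layer weight arrays; the output filename below
# is only an illustrative placeholder.
best_weights = pygad.kerasga.model_weights_as_matrix(model=model,
                                                     weights_vector=solution)
model.set_weights(best_weights)
model.save("image_classification_CNN_ga.keras")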