main.py
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator

data_dir = 'DATASET'
# Define parameters
batch_size = 32
img_height = 224
img_width = 224

# Stop training early once both accuracy and validation accuracy exceed 98%
class MyCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        if logs.get('acc', 0) > 0.98 and logs.get('val_acc', 0) > 0.98:
            print("\nAccuracy and validation accuracy > 98%, stopping training.")
            self.model.stop_training = True

callbacks = MyCallback()

# Use ImageDataGenerator to preprocess and augment the data
train_datagen = ImageDataGenerator(
    rescale=1/255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    validation_split=0.2  # Split the data into training and validation sets
)
# Note: the validation subset below is drawn from train_datagen, so validation images
# are also augmented; an optional rescale-only validation generator is sketched next.
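# Sketch (assumption, not part of the original pipeline): a rescale-only validation
# path; if used, it would replace the validation_generator built from train_datagen
# below. Both generators must use the same validation_split so the subsets line up.
# validation_datagen = ImageDataGenerator(
#     rescale=1/255,
#     validation_split=0.2
# )
# validation_generator = validation_datagen.flow_from_directory(
#     data_dir,
#     target_size=(img_height, img_width),
#     batch_size=batch_size,
#     class_mode='categorical',
#     subset='validation'
# )
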
train_generator = train_datagen.flow_from_directory(
    data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical',
    subset='training'  # Specify that this is the training set
)
validation_generator = train_datagen.flow_from_directory(
    data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical',
    subset='validation'  # Specify that this is the validation set
)
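
# Optional sanity check (assumes DATASET has one sub-folder per class): the model
# below ends in a 7-unit softmax, so the generators should report 7 classes.
print(train_generator.class_indices)
assert train_generator.num_classes == 7, "Expected 7 class folders under DATASET"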

# Building the model
model = tf.keras.models.Sequential([
    # Input is a 224x224 image with 3 color channels (RGB)
    # First convolution
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(224, 224, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    # Second convolution
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    # Third convolution
    tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    # Fourth convolution
    tf.keras.layers.Conv2D(256, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    # Flatten the feature maps to feed into the dense classifier
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.5),
    # 512-neuron hidden layer
    tf.keras.layers.Dense(512, activation='relu'),
    # 7 output classes with softmax
    tf.keras.layers.Dense(7, activation='softmax')
])

# Print the model summary
model.summary()

model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['acc']
)
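
# Optional sketch (not in the original script): with epochs=500, checkpointing and
# early stopping are common safeguards; 'best_model.h5' is a hypothetical filename.
# checkpoint = tf.keras.callbacks.ModelCheckpoint(
#     'best_model.h5', monitor='val_acc', save_best_only=True
# )
# early_stop = tf.keras.callbacks.EarlyStopping(
#     monitor='val_acc', patience=10, restore_best_weights=True
# )
# # If used, pass them alongside the existing callback:
# # callbacks=[callbacks, checkpoint, early_stop]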

model.fit(
    train_generator,
    epochs=500,
    validation_data=validation_generator,
    callbacks=[callbacks]
)
model.save('model.h5')
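
# Minimal inference sketch (assumptions: training has finished, 'model.h5' was saved
# above, and 'test.jpg' is a hypothetical sample image path).
# import numpy as np
# from tensorflow.keras.models import load_model
# from tensorflow.keras.utils import load_img, img_to_array
#
# loaded_model = load_model('model.h5')
# img = load_img('test.jpg', target_size=(img_height, img_width))
# x = img_to_array(img) / 255.0   # apply the same rescaling used during training
# x = np.expand_dims(x, axis=0)   # add the batch dimension
# probs = loaded_model.predict(x)[0]
# class_names = sorted(train_generator.class_indices, key=train_generator.class_indices.get)
# print(class_names[int(probs.argmax())])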