-
Notifications
You must be signed in to change notification settings - Fork 3
/
Copy pathmodelGenerator.py
91 lines (65 loc) · 2.58 KB
/
modelGenerator.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn

# joblib has been a standalone package since scikit-learn 0.23;
# `sklearn.externals.joblib` no longer exists there. Prefer the
# standalone module and fall back for legacy sklearn installs.
try:
    import joblib
except ImportError:
    from sklearn.externals import joblib
def simplify_ages(df):
    """Bucket the 'Age' column into labeled age-range categories.

    Missing ages are filled with -0.5 so they fall into the 'Unknown'
    bin. Mutates df in place and returns it for chaining.
    """
    df['Age'] = df['Age'].fillna(-0.5)
    edges = (-1, 0, 5, 12, 18, 25, 35, 60, 120)
    labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student',
              'Young Adult', 'Adult', 'Senior']
    df['Age'] = pd.cut(df['Age'], edges, labels=labels)
    return df
def transform_features(df):
    """Apply all feature-engineering steps (currently just age bucketing)."""
    return simplify_ages(df)
#Normalize#########################################################
from sklearn import preprocessing
def encode_features(df_train):
    """Label-encode the categorical 'Age' column in place; return the frame."""
    for col in ['Age']:
        encoder = preprocessing.LabelEncoder().fit(df_train[col])
        df_train[col] = encoder.transform(df_train[col])
    return df_train
def to_str(var):
return str(list(np.reshape(np.asarray(var), (1, np.size(var)))[0]))[1:-1]
#Train################################################################
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import make_scorer, accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split

# Report the sklearn version: joblib pickles are not guaranteed to
# load across sklearn versions, so knowing what produced the model matters.
sklearnv = 'sklearn version = ' + sklearn.__version__
print(sklearnv)

data_train = pd.read_csv('./csv_training_data/Final_TrainingSet.csv')
data_train = transform_features(data_train)
data_train = encode_features(data_train)

# Features / target split: 'Disease' is the label column.
X_all = data_train.drop(['Disease'], axis=1)
y_all = data_train['Disease']

# Hold out 20% for evaluation; fixed seed keeps the split reproducible.
num_test = 0.20
X_train, X_test, y_train, y_test = train_test_split(
    X_all, y_all, test_size=num_test, random_state=23)

# Choose the type of classifier.
clf = RandomForestClassifier()

# Parameter grid for the search.
# NOTE: 'auto' was removed from max_features in scikit-learn 1.3
# (it was an alias for 'sqrt' in classifiers), so only 'log2' and
# 'sqrt' are searched — the effective search space is unchanged.
parameters = {'n_estimators': [4, 6, 9],
              'max_features': ['log2', 'sqrt'],
              'criterion': ['entropy', 'gini'],
              'max_depth': [2, 3, 5, 10],
              'min_samples_split': [2, 3, 5],
              'min_samples_leaf': [1, 5, 8]
              }

# Compare parameter combinations by plain accuracy.
acc_scorer = make_scorer(accuracy_score)

# Run the grid search and keep the best estimator it found.
grid_obj = GridSearchCV(clf, parameters, scoring=acc_scorer)
grid_obj = grid_obj.fit(X_train, y_train)
clf = grid_obj.best_estimator_

# Refit the best configuration and evaluate on the hold-out set.
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
print("ACCURACY = ", accuracy_score(y_test, predictions))

# Persist the trained model to disk for the serving side to load.
filename = 'eye_finalized_model.sav'
joblib.dump(clf, filename)