Commit 1038f423 authored by paul_pvc

Added Gabor filter with mean and variance; update soon

parent 2bcaecc8
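
The change, in short: each greyscale image is filtered with a small Gabor bank (two orientations at a fixed frequency), and the mean and variance of the real and imaginary responses are kept as extra features alongside the existing histogram and GLCM data. A minimal sketch of the idea, assuming scikit-image and a greyscale image array (the helper name gabor_mean_var is illustrative; the committed version is get_gabor_filters in the diff below):

import numpy as np
from skimage.filters import gabor

def gabor_mean_var(image_arr, frequency=0.2, thetas=(0, np.pi / 2)):
    # Filter at each orientation and keep 4 statistics per theta.
    features = []
    for theta in thetas:
        real, imag = gabor(image_arr, frequency=frequency, theta=theta)
        features += [real.mean(), real.var(), imag.mean(), imag.var()]
    return features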
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPClassifier
from xgboost import XGBClassifier
@@ -10,12 +12,31 @@ import TP
path1_t = "./Init/Mer"
path2_t = "./Init/Ailleurs"
"""S = TP.buildSampleFromPath(path1_t, path2_t)
classifier, S_test, y_test, S_train, y_train = TP.fitFromHisto(S, SVC())
TP.predictFromHisto(S, classifier)"""
"""print()
print("Erreur empirique щ(ºДºщ):", TP.computeError(S_train), "erreurs")
print("Erreur réelle ( ͡° _ʖ ͡°):", TP.computeError(S_test), "erreurs")
print("Taux de réussite (╯°□°)╯︵ ┻━┻ : ", TP.computeScore(S_test), "%")"""
svc_params = {"kernel": ('linear', 'rbf'), "C": [1, 10]}
xgb_params = {"n_estimators": [1, 10], "max_depth": [0, 10], "max_leaves": [0, 10],
              "grow_policy": ("depthwise", "lossguide"), "learning_rate": [0.01, 0.2],
              "booster": ("gbtree", "gblinear", "dart")}
rand_forest_params = {"n_estimators": [100, 200], "criterion": ("gini", "entropy", "log_loss")}
knn_params = {"n_neighbors": [1, 10], "weights": ("uniform", "distance"), "algorithm": ("auto", "ball_tree", "kd_tree", "brute"),
              "leaf_size": [15, 45], "p": [1., 3.]}
result = GridSearchCV(SVC(), svc_params)
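# GridSearchCV is not fitted here; when `result` is passed to cross_val_score
# (inside TP.get_cross_val_score below), each outer fold re-runs the grid
# search on its own training split, giving a nested cross-validation.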
S = TP.buildSampleFromPath(path1_t, path2_t)
classifier, S_test, y_test, S_train, y_train = TP.fitFromHisto(S, XGBClassifier())
TP.predictFromHisto(S, classifier)
print()
#classifier, S_test, y_test, S_train, y_train = TP.fitFromHisto(S, result)
"""TP.predictFromHisto(S, classifier)
print("Erreur empirique щ(ºДºщ):", TP.computeError(S_train), "erreurs")
print("Erreur réelle ( ͡° _ʖ ͡°):", TP.computeError(S_test), "erreurs")
print("Taux de réussite (╯°□°)╯︵ ┻━┻ : ", TP.computeScore(S_test), "%")
print(TP.get_cross_val_score(classifier, S_train, S_test, y_train, y_test))
print("Taux de réussite (╯°□°)╯︵ ┻━┻ : ", TP.computeScore(S_test), "%")"""
print("Taux de réussite en cross validation SVC: ", TP.get_cross_val_score(result, S), "%")
#print("Taux de réussite en cross validation XGBOOST: ", TP.get_cross_val_score(XGBClassifier(), S_train, S_test, y_train, y_test), "%")
#print("Taux de réussite en cross validation randomForest: ", TP.get_cross_val_score(GridSearchCV(RandomForestClassifier(), rand_forest_params), S_train, S_test, y_train, y_test), "%")
#print("Taux de réussite en cross validation KNeighbors: ", TP.get_cross_val_score(GridSearchCV(KNeighborsClassifier(), knn_params), S_train, S_test, y_train, y_test), "%")
#TP.computePredictionFile(classifier, TP.fetch_images_to_dict("./Init/Data CC2"))
\ No newline at end of file
@@ -7,6 +7,7 @@ from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from skimage.feature import graycomatrix, graycoprops
from sklearn.model_selection import cross_val_score
from skimage.filters import gabor
import math
from sklearn.naive_bayes import GaussianNB
@@ -66,7 +67,7 @@ def compute_glcm_caracteristics(image_gl):
"""
image_arr= np.array(image_gl)
#print(image_arr.shape)
glcm = graycomatrix(image_arr, distances=[1], angles=[0], levels=256,
glcm = graycomatrix(image_arr, distances=[10], angles=[3], levels=256,
symmetric=True, normed=True)
return [graycoprops(glcm, 'dissimilarity')[0, 0], graycoprops(glcm, 'correlation')[0, 0], graycoprops(glcm, 'contrast')[0, 0],
graycoprops(glcm, 'energy')[0, 0], graycoprops(glcm, 'homogeneity')[0, 0]]
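# NB: graycomatrix takes `angles` in radians, so angles=[3] above is 3 rad
# (~172°), not an orientation index; if 135° was intended, 3 * np.pi / 4 is
# the usual value.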
@@ -126,6 +127,7 @@ def computeDict(image_path, path, y_true_value, max_size: tuple):
"X_glcm_data": compute_glcm_caracteristics(image_gl),
#"X_splitted_histo": compute_4_histos(resized),
#"X_splitted_glcm": compute_4_glcm(image_gl),
"gabor_features": get_gabor_filters(image_gl),
"y_true_class": y_true_value,
"y_predicted_class": None}
@@ -149,9 +151,25 @@ def computeHisto(image: PIL.Image.Image):
"""
return image.histogram()

def get_gabor_filters(image):
    image_arr = np.asarray(image)
    #print(image_arr.shape, image_arr)
    #frequencies = [0.2]
    thetas = [0, np.pi / 2]
    features = []
    for theta in thetas:
        filt_real, filt_imag = gabor(image_arr, frequency=0.2, theta=theta)
        features.append(filt_real.mean())  # mean of the real response
        features.append(filt_real.var())   # variance of the real response
        features.append(filt_imag.mean())  # mean of the imaginary response
        features.append(filt_imag.var())   # variance of the imaginary response
    return np.array(features).tolist()
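# With two orientations this yields 8 Gabor features per image: the mean and
# variance of the real and imaginary responses for each theta.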

def extract_relevant_data(l: dict) -> list:
-    return l["X_histo"] + l["X_glcm_data"]
+    return l["X_histo"] + l["gabor_features"] + l["X_glcm_data"]
    # 78% success rate with l["X_histo"] + l["X_glcm_data"] alone

def fitFromHisto(S, algo):
@@ -254,10 +272,15 @@ def computePredictionFile(classifier, images_test=None):
    file.close()

-def get_cross_val_score(classifier, S_train, S_test, y_train, y_test):
+def get_cross_val_score(classifier, S):
    df = pd.DataFrame(S)
    y = np.array(df["y_true_class"])
    # NB: the split plus the concatenations below amount to a shuffle of S;
    # cross_val_score then re-partitions X itself.
    S_train, S_test, y_train, y_test = train_test_split(S, y, test_size=0.2)
    X_train = np.array([np.array(extract_relevant_data(l)) for l in S_train])
    X_test = np.array([np.array(extract_relevant_data(l)) for l in S_test])
    X = np.concatenate([X_train, X_test])
    y = np.concatenate([y_train, y_test])
-    scores = cross_val_score(classifier, X, y, cv=20)
-    return np.mean(scores)
\ No newline at end of file
+    scores = cross_val_score(classifier, X, y, cv=10)
+    return np.mean(scores) * 100
\ No newline at end of file