Commit 44116aed authored by Gabriel Kirsten

Merge branch 'master' of http://git.inovisao.ucdb.br/inovisao/pynovisao into feature/pseudo-label

parents 3b33e917 c8a8ce6c
*.pyc
data/*
+models_checkpoints/*
+venv/*
!data/demo.jpg
!data/pynovisao.png
!data/demo/.gitignore
......
@@ -17,16 +17,16 @@ fi
echo "[PYNOVISAO INSTALLER] Updating apt-get..."
sudo apt-get -qq update
echo "[PYNOVISAO INSTALLER] Installing prerequisites..."
-sudo apt-get -qq -y install libfreetype6-dev tk tk-dev python-pip default-jre default-jdk weka weka-doc python-tk python-matplotlib python-opencv
+sudo apt-get -qq -y install libfreetype6-dev tk tk-dev python-pip openjdk-8-jre openjdk-8-jdk weka weka-doc python-tk python-matplotlib
source ~/.bashrc
echo "[PYNOVISAO INSTALLER] Upgrading pip..."
-pip install --upgrade pip --quiet
+sudo pip install --upgrade pip --quiet
# Numpy must be installed before installing javabridge
echo "[PYNOVISAO INSTALLER] Installing numpy..."
-pip install numpy --quiet
+sudo pip install numpy==1.14.5 --quiet
echo "[PYNOVISAO INSTALLER] Installing libraries..."
-pip install -r requeriments.txt --quiet
+sudo pip install -r requeriments.txt --quiet
printf "${YELLOW} \n======== WARNING ========\n"
printf "The Keras is necessary so that it is possible to use CNN. It is recommended to install the version for GPU processing (if available) but it is also possible to use CPU processing.\n"
......
# Título: Pynovisao
-## Autores (ordem alfabética): Adair da Silva Oliveira Junior, Alessandro dos Santos Ferreira, Diego André Sant'Ana(diegoandresantana@gmail.com),Everton Castelão Tetila(evertontetila@gmail.com), Gabriel Kirsten Menezes(gabriel.kirsten@hotmail.com), Hemerson Pistori (pistori@ucdb.br), Nícolas Alessandro de Souza Belete(nicolas.belete@gmail.com)
+## Autores (ordem alfabética): Adair da Silva Oliveira Junior, Alessandro dos Santos Ferreira, Diego André Sant'Ana(diegoandresantana@gmail.com), Diogo Nunes Gonçalves(dnunesgoncalves@gmail.com), Everton Castelão Tetila(evertontetila@gmail.com), Felipe Silveira(eng.fe.silveira@gmail.com), Gabriel Kirsten Menezes(gabriel.kirsten@hotmail.com), Gilberto Astolfi(gilbertoastolfi@gmail.com), Hemerson Pistori (pistori@ucdb.br), Nícolas Alessandro de Souza Belete(nicolas.belete@gmail.com)
## Resumo:
......
@@ -7,15 +7,16 @@ sympy
nose
networkx
scikit-image
-javabridge
+javabridge==1.0.18
python-weka-wrapper
cycler==0.10
cython
h5py
scikit-learn
-tensorflow-gpu
statistics
pandas_ml
pyxdg
opencv-contrib-python
python-interface
+tqdm
+sklearn
@@ -11,8 +11,8 @@
import io
import itertools
import os
-import threading
+import multiprocessing
+from multiprocessing import Process, Manager
from interface.interface import InterfaceException as IException
from util.file_utils import File
@@ -23,7 +23,9 @@ from extractor import Extractor
from tqdm import tqdm
import sys
+if not sys.warnoptions:
+    import warnings
+    warnings.simplefilter("ignore")
class FeatureExtractor(object):
    """Handle the feature extraction."""
@@ -40,13 +42,11 @@ class FeatureExtractor(object):
        self.tkParent=tkParent
    def extract_all(self, dataset, output_file=None, dirs=None, overwrite=True):
-        self.labels = []
-        self.types = []
-        self.data = []
+        self.data = Manager().list()  # a Manager list is required: a plain list is not shared with multiprocessing.Process workers
        self.threads = []
-        self.labels = []
+        self.labels = Manager().list()
-        self.types = []
+        self.types = Manager().list()
        """Runs the feature extraction algorithms on all images of dataset.
        Parameters
@@ -101,6 +101,7 @@ class FeatureExtractor(object):
        with tqdm(total=len(self.threads)) as pbar:
            for t in self.threads:
                t.start()
+                pbar.update(1)
            pbar.close()
        self.print_console("Waiting for workers to finish extracting attributes from images!")
@@ -108,7 +109,6 @@ class FeatureExtractor(object):
            for t in self.threads:
                t.join()
                ppbar.update(1)
            ppbar.close()
        self.print_console("The process was completed with "+str(len(self.threads))+" images!")
        if len(self.data) == 0:
@@ -116,7 +116,7 @@ class FeatureExtractor(object):
        # Save the output file in ARFF format
        # self._save_output(File.get_filename(dataset), classes, self.labels, self.types, self.data, output_file)
-        self._save_output(File.get_filename(dataset), classes, self.labels, self.types, self.data, output_file)
+        self._save_output(File.get_filename(dataset), classes, self.labels[0], self.types[0], self.data, output_file)
        end_time = TimeUtils.get_time()
        return output_file, (end_time - start_time)
@@ -130,9 +130,8 @@ class FeatureExtractor(object):
            for item in items :
                if item.startswith('.'):
                    continue
-                #th = threading.Thread(target=self.sub_job_extractor,args=(item, dataset, cl, classes))
-                th = threading.Thread(target=self.sub_job_extractor,args=(item, dataset, cl, classes))
+                th = multiprocessing.Process(target=self.sub_job_extractor,args=(item, dataset, cl, classes))
                self.threads.append(th)
@@ -149,14 +148,17 @@ class FeatureExtractor(object):
        if len(self.data) > 0:
            values = list(
                itertools.chain.from_iterable(zip(*([extractor().run(image) for extractor in self.extractors]))[2]))
            self.data.append(values + [cl if cl in classes else classes[0]])
        else:
-            self.labels, self.types, values = [list(itertools.chain.from_iterable(ret))
+            labs, tys, values = [list(itertools.chain.from_iterable(ret))
                                               for ret in
                                               zip(*(extractor().run(image) for extractor in self.extractors))]
+            self.labels.append(labs)
+            self.types.append(tys)
            self.data.append(values + [cl if cl in classes else classes[0]])
    def extract_one_file(self, dataset, image_path, output_file=None):
        """Runs the feature extraction algorithms on specific image.
......
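The change above from threading.Thread to multiprocessing.Process is what forces self.data, self.labels and self.types to become Manager().list() proxies: each child process works on its own copy of a plain Python list, so appends made by the workers would never reach the parent. A minimal stand-alone sketch (not Pynovisao code) of the difference:

# Hypothetical sketch: an append to a plain list inside a child Process is lost,
# while a multiprocessing.Manager list proxy is shared with the parent process.
from multiprocessing import Process, Manager

def worker(shared, plain):
    shared.append("feature-row")  # stored by the manager process, visible to the parent
    plain.append("feature-row")   # modifies only the child's private copy

if __name__ == "__main__":
    manager = Manager()
    shared = manager.list()
    plain = []
    p = Process(target=worker, args=(shared, plain))
    p.start()
    p.join()
    print(len(shared), len(plain))  # expected: 1 0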
@@ -8,6 +8,11 @@
Name: hog.py
Author: Alessandro dos Santos Ferreira ( santosferreira.alessandro@gmail.com )
+Changed the parameter visualise to visualize because the old spelling is deprecated
+Date: 02/01/2019
+Author: Diego Andre Sant Ana
"""
from skimage import feature
@@ -36,13 +41,13 @@ class HOG(Extractor):
        features : tuple
            Returns a tuple containing a list of labels, type and values for each feature extracted.
        """
-        image_grayscale = ImageUtils.image_grayscale(image, bgr = True)
+        image_grayscale = ImageUtils.image_grayscale(image, bgr=True)
        image_128x128 = ImageUtils.image_resize(image_grayscale, 128, 128)
        values, _ = feature.hog(image_128x128, orientations=8, pixels_per_cell=(32, 32),
                                cells_per_block=(1, 1), visualise=True)
-        labels = [m+n for m,n in zip(['hog_'] * len(values),map(str,range(0,len(values))))]
+        labels = [m + n for m, n in zip(['hog_'] * len(values), map(str, range(0, len(values))))]
        types = [Extractor.NUMERIC] * len(labels)
        return labels, types, list(values)
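The docstring note above mentions renaming visualise to visualize, although the call in this hunk still uses the old spelling. A hypothetical stand-alone version of the same call with the newer keyword (accepted by scikit-image 0.15 and later), using a placeholder 128x128 grayscale array instead of the resized input image:

# Hypothetical example, not repository code: same HOG parameters as in the extractor,
# but with the renamed visualize keyword of newer scikit-image releases.
import numpy as np
from skimage import feature

image_128x128 = np.random.randint(0, 255, (128, 128)).astype(np.uint8)  # stand-in for the resized grayscale image
values, hog_image = feature.hog(image_128x128, orientations=8, pixels_per_cell=(32, 32),
                                cells_per_block=(1, 1), visualize=True)
labels = ['hog_' + str(i) for i in range(len(values))]  # same labelling scheme as the extractor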
@@ -16,7 +16,7 @@ import cv2
from util.utils import ImageUtils
from skimage.measure import regionprops, moments, moments_central
from skimage.morphology import label
+import numpy as np
from extractor import Extractor
class RawCentralMoments(Extractor):
@@ -54,7 +54,7 @@ class RawCentralMoments(Extractor):
        row = m[0, 1] / m[0, 0]
        col = m[1, 0] / m[0, 0]
-        mu = measure.moments_central(image_binary, row, col)
+        mu = measure.moments_central(image_binary, center=(row, col), order=3)
        values_mu = [mu[p, q] for (p, q) in self._moments_order]
        labels_mu = [M+str(p)+str(q) for M,(p,q) in zip(['Mu_'] * len(self._moments_order), self._moments_order)]
@@ -104,8 +104,9 @@ class HuMoments(Extractor):
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        values_hu= cv2.HuMoments(cv2.moments(image)).flatten()
+        values_hu = list(values_hu)
+        values_hu= np.nan_to_num(values_hu)
        labels_hu = [m+n for m,n in zip(['Hu_'] * len(values_hu),map(str,range(0,len(values_hu))))]
        labels = labels_hu
......
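For reference on the np.nan_to_num guard added to HuMoments above: it replaces NaN with 0 and positive or negative infinity with the largest finite floats, which keeps the feature vector purely numeric for the ARFF output. A small illustration with made-up values (not repository code):

# np.nan_to_num maps NaN -> 0.0 and +/-inf to the largest finite float64 values.
import numpy as np

values_hu = [1.5e-3, float("nan"), float("inf"), -float("inf")]
print(np.nan_to_num(values_hu))
# approximately: [1.5e-03  0.0  1.79769313e+308  -1.79769313e+308]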
@@ -5,7 +5,7 @@
Name: pynovisao.py
Author: Alessandro dos Santos Ferreira ( santosferreira.alessandro@gmail.com )
"""
+import gc
from collections import OrderedDict
import numpy as np
import os
@@ -30,8 +30,10 @@ from util.file_utils import File
from util.utils import TimeUtils
from util.utils import MetricUtils
from util.x11_colors import X11Colors
+import multiprocessing
+from multiprocessing import Process, Manager
+import threading
+from tqdm import tqdm
class Act(object):
    """Store all actions of Pynovisao."""
@@ -199,7 +201,7 @@ class Act(object):
            If there's no image opened.
        """
        if self._const_image is None:
-            raise IException("Image not found")
+            raise IException("Image not found! Open an image to test, select in the menu the option File>Open Image!")
        if self.tk.close_image():
            self.tk.write_log("Closing image...")
@@ -386,7 +388,7 @@ class Act(object):
            If there's no image opened.
        """
        if self._const_image is None:
-            raise IException("Image not found")
+            raise IException("Image not found! Open an image to test, select in the menu the option File>Open Image!")
        self.tk.write_log("Running %s...", self.segmenter.get_name())
@@ -421,7 +423,7 @@ class Act(object):
                                if new_config[extractor].value == True ]
        if len(self.extractors) == 0:
-            raise IException("Please select at least one extractor")
+            raise IException("Please select an extractor from the menu under Features Extraction> Select extractors! ")
        self.tk.append_log("\nConfig updated:\n%s",
                '\n'.join(["%s: %s" % (new_config[extractor].label, "on" if new_config[extractor].value==True else "off")
@@ -459,7 +461,7 @@ class Act(object):
            The user must install the required dependencies to classifiers.
        """
        if self.classifier is None:
-            raise IException("Classifier not found!")
+            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")
        title = "Choosing a classifier"
        self.tk.write_log(title)
@@ -488,7 +490,7 @@ class Act(object):
            The user must install the required dependencies to classifiers.
        """
        if self.classifier is None:
-            raise IException("Classifier not found!")
+            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")
        title = "Configuring %s" % self.classifier.get_name()
        self.tk.write_log(title)
@@ -518,10 +520,10 @@ class Act(object):
            If there's no image opened.
        """
        if self.classifier is None:
-            raise IException("Classifier not found!")
+            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")
        if self._const_image is None:
-            raise IException("Image not found")
+            raise IException("Image not found! Open an image to test, select in the menu the option File>Open Image!")
        self.tk.write_log("Running %s...", self.classifier.get_name())
        self.tk.append_log("\n%s", str(self.classifier.get_summary_config()))
@@ -547,13 +549,18 @@ class Act(object):
        self.tk.append_log("Generating test images... (%0.3f seconds)", (TimeUtils.get_time() - start_time))
        len_segments = {}
+        print("Wait to complete processes all images!")
+        with tqdm(total=len(list_segments)) as pppbar:
            for idx_segment in list_segments:
                segment, size_segment, idx_segment = self.segmenter.get_segment(self, idx_segment=idx_segment)[:-1]
                # Problem here! Dataset removed.
                filepath = File.save_only_class_image(segment, self.dataset, tmp, self._image_name, idx_segment)
                len_segments[idx_segment] = size_segment
+                pppbar.update(1)
+            pppbar.close()
+        gc.collect()
        # Perform the feature extraction of all segments in image ( not applied to ConvNets ).
        if self.classifier.must_extract_features():
            self.tk.append_log("Running extractors on test images... (%0.3f seconds)", (TimeUtils.get_time() - start_time))
@@ -701,7 +708,7 @@ class Act(object):
            The user must install the required dependencies to classifiers.
        """
        if self.classifier is None:
-            raise IException("Classifier not found!")
+            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")
        if self.classifier.must_train():
            self.tk.write_log("Creating training data...")
@@ -726,7 +733,7 @@ class Act(object):
            The user must install the required dependencies to classifiers.
        """
        if self.classifier is None:
-            raise IException("Classifier not found!")
+            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")
        if self.tk.ask_ok_cancel("Experimenter All", "This may take several minutes to complete. Are you sure?"):
            if self.classifier.must_train():
@@ -744,7 +751,7 @@ class Act(object):
    def about(self):
-        self.tk.show_info("Pynovisao\n\nVersion 1.0.0\n\nAuthors:\nAlessandro Ferreira\nHemerson Pistori")
+        self.tk.show_info("Pynovisao\n\nVersion 1.0.0\n\nAuthors:\nAdair da Silva Oliveira Junior\nAlessandro dos Santos Ferreira\nDiego Andre Sant Ana\nDiogo Nunes Goncalves\nEverton Castelao Tetila\nFelipe Silveira\nGabriel Kirsten Menezes\nGilberto Astolfi\nHemerson Pistori\nNicolas Alessandro de Souza Belete")
    def func_not_available(self):
@@ -855,7 +862,7 @@ class Act(object):
    def run_classifier_folder(self, foldername=None):
        if self.classifier is None:
-            raise IException("Classifier not found!")
+            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")
        if foldername is None:
            foldername = self.tk.utils.ask_directory()
@@ -951,6 +958,7 @@ class Act(object):
        np.savetxt(f, all_frequency_weighted_IU, fmt='%.5f')
        f.close()
    def run_grafic_confusion_matrix(self):
        '''
        Generate a graphical confusion matrix where images are classified and, according to the classification, go to the right or wrong folder.
@@ -1036,15 +1044,21 @@ class Act(object):
        self.tk.write_log(header_output_middle + 'Initializing...')
        total = str(len(images))
+        # threads are created by an internal function inside this method; it cannot be changed to Process because each process would need its own JVM instance
+        total = str(len(images))
+        print("Waiting finish classification!")
        for i, image_path in enumerate(images):
-            original_name=reduce(lambda a,b:a+b, image_path)
+            original_name = reduce(lambda a, b: a + b, image_path)
-            real_class_path=matrix_path+human+image_path[1]
+            real_class_path = matrix_path + human + image_path[1]
-            predicted=self.classifier.single_classify(original_name, folder, self.extractors, classes)
+            predicted = self.classifier.single_classify(original_name, folder, self.extractors, classes)
            message = header_output_middle + str(i + 1) + ' of ' + total + ' images classifield.'
            self.tk.write_log(message)
-            predicted_class_path = real_class_path+computer+predicted
+            predicted_class_path = real_class_path + computer + predicted
-            predicted_name=predicted_class_path+image_path[2]
+            predicted_name = predicted_class_path + image_path[2]
            symlink(original_name, predicted_name)
        message = header_output + 'Saved in ' + matrix_path
        self.tk.write_log(message)
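The loop above is what the run_grafic_confusion_matrix docstring describes: every test image is symlinked into a directory path built from its real class and its predicted class, so misclassifications can be inspected directly in a file browser. A simplified, hypothetical sketch of that layout (the helper name and arguments are illustrative, not Pynovisao API):

# Hypothetical helper: link one classified image under <matrix_path>/<real class>/<predicted class>/.
# A symlink keeps the original file untouched while the folder tree mirrors the confusion matrix.
import os

def link_prediction(matrix_path, real_class, predicted_class, original_path, image_name):
    target_dir = os.path.join(matrix_path, real_class, predicted_class)
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    os.symlink(original_path, os.path.join(target_dir, image_name))

# e.g. link_prediction("/tmp/matrix", "weed", "soil", "/data/demo/img_042.png", "img_042.png")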
#!/bin/bash
#
# Script - convert tif to png
#
# Name: script_convertall.sh
# Author: Gabriel Kirsten Menezes (gabriel.kirsten@hotmail.com)
#
echo "[SCRIPT CONVERT ALL] Initializing..."
dir_train="../../data/demo_split/train"
dir_validation="../../data/demo_split/validation"
echo "[SCRIPT CONVERT ALL] Converting train..."
for dir_class in `ls $dir_train`;
do
echo "[SCRIPT CONVERT ALL] Converting class -" $dir_class;
convert $dir_train/$dir_class/* $dir_train/$dir_class/$dir_class.png
echo "[SCRIPT CONVERT ALL] Removing all .tif files in $dir_class ..."
rm $dir_train/$dir_class/*.tif
done
echo "[SCRIPT CONVERT ALL] Converting validation..."
for dir_class in `ls $dir_validation`;
do
echo "[SCRIPT CONVERT ALL] Converting class -" $dir_class;
convert $dir_validation/$dir_class/* $dir_validation/$dir_class/$dir_class.png
echo "[SCRIPT CONVERT ALL] Removing all .tif files in $dir_class ..."
rm $dir_validation/$dir_class/*.tif
done
echo "[SCRIPT CONVERT ALL] OK! DONE."