Commit 40c0028c authored by Diogo Nunes Gonçalves's avatar Diogo Nunes Gonçalves

Functions cnn_keras and segnet_keras added

parent 1896c8e3
File mode changed from 100644 to 100755
......@@ -10,3 +10,4 @@ javabridge
python-weka-wrapper
cycler
cython
h5py
......@@ -18,9 +18,18 @@ except Exception as e:
CNNKeras = None
print e.message
try:
from .segnet_keras import SEGNETKeras
except Exception as e:
SEGNETKeras = None
print e.message
__all__ = ["classifier",
"cnn_caffe",
"cnn_keras",
"segnet_keras",
"weka_classifiers"
]
......@@ -33,7 +42,9 @@ _classifier_list = OrderedDict( [
["cnn_caffe", Config("Invalid" if CNNCaffe is None else CNNCaffe.__name__,
WekaClassifiers is None and CNNCaffe is not None, bool, meta=CNNCaffe, hidden=CNNCaffe is None)],
["cnn_keras", Config("Invalid" if CNNKeras is None else CNNKeras.__name__,
CNNKeras is not None, bool, meta=CNNKeras, hidden=CNNKeras is None)],
CNNKeras is not None, bool, meta=CNNKeras, hidden=CNNKeras is None)],
["segnet_keras", Config("Invalid" if SEGNETKeras is None else SEGNETKeras.__name__,
SEGNETKeras is not None, bool, meta=SEGNETKeras, hidden=SEGNETKeras is None)],
["weka_classifiers", Config("Invalid" if WekaClassifiers is None else WekaClassifiers.__name__,
WekaClassifiers is not None, bool, meta=WekaClassifiers, hidden=WekaClassifiers is None)]
] )
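Each back-end above is wrapped in a try/except import and registered with a Config whose hidden flag tracks whether the import succeeded, so a missing dependency (Keras, Weka) simply disappears from the classifier menu instead of breaking the package. A minimal sketch of how one more optional back-end would plug into the same registry (UNetKeras is a hypothetical name used only for illustration):

try:
    from .unet_keras import UNetKeras          # hypothetical optional module, illustration only
except Exception as e:
    UNetKeras = None
    print e.message

_classifier_list["unet_keras"] = Config(
    "Invalid" if UNetKeras is None else UNetKeras.__name__,
    UNetKeras is not None, bool, meta=UNetKeras, hidden=UNetKeras is None)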
......@@ -44,4 +55,5 @@ def get_classifier_config():
def set_classifier_config(configs):
_classifier_list["cnn_caffe"] = Config.nvl_config(configs["cnn_caffe"], _classifier_list["cnn_caffe"])
_classifier_list["cnn_keras"] = Config.nvl_config(configs["cnn_keras"], _classifier_list["cnn_keras"])
_classifier_list["segnet_keras"] = Config.nvl_config(configs["segnet_keras"], _classifier_list["segnet_keras"])
_classifier_list["weka_classifiers"] = Config.nvl_config(configs["weka_classifiers"], _classifier_list["weka_classifiers"])
\ No newline at end of file
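For reference, a minimal sketch of the round trip a configuration dialog would perform with these two helpers; the .value attribute is an assumption here, mirroring how Config entries are read elsewhere in this commit:

configs = get_classifier_config()            # current OrderedDict of Config entries
configs["segnet_keras"].value = True         # assumed attribute; e.g. the user enables the SegNet back-end
set_classifier_config(configs)               # nvl_config keeps the previous entry where no new value is given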
......@@ -90,7 +90,7 @@ class Classifier(object):
pass
@abstractmethod
def classify(self, dataset, test_dir = None, test_data = None):
def classify(self, dataset, test_dir = None, test_data = None, image = None):
"""Perform the classification.
Implement this method to extend this class with a new classifier algorithm.
"""
......
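The extra image argument added to classify() lets pixel-wise back-ends such as SEGNETKeras receive the whole frame, while superpixel-based back-ends may ignore it. A minimal sketch of a subclass honouring the new signature (the class body is illustrative only; the other abstract methods are omitted):

class MyClassifier(Classifier):

    def classify(self, dataset, test_dir = None, test_data = None, image = None):
        # Return either a list with one predicted label per segment found in
        # dataset/test_dir, or a 2-D array with one class index per pixel of
        # `image`; Act.run_classifier handles both result shapes.
        return []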
......@@ -116,7 +116,7 @@ class CNNCaffe(Classifier):
return summary
def classify(self, dataset, test_dir, test_data):
def classify(self, dataset, test_dir, test_data, image):
"""Perform the classification.
Parameters
......
......@@ -136,7 +136,7 @@ class WekaClassifiers(Classifier):
self.classifier.build_classifier(self.data)
def classify(self, dataset, test_dir, test_data):
def classify(self, dataset, test_dir, test_data, image):
"""Perform the classification.
Parameters
......
......@@ -71,6 +71,9 @@ if __name__ == "__main__":
tk.add_command("Configure", act.config_segmenter, 'g')
tk.add_separator()
tk.add_command("Execute", act.run_segmenter, 'S')
tk.add_separator()
tk.add_command("Assign using labeled image", act.assign_using_labeled_image, 'l')
tk.add_command("Execute folder", act.run_segmenter_folder)
tk.add_menu("Feature Extraction")
tk.add_command("Select extractors", act.select_extractors, 'e')
......@@ -86,6 +89,7 @@ if __name__ == "__main__":
tk.add_menu("Classification")
tk.add_command("Load h5 weight (only for CNNs)", act.open_weight)
tk.add_command("Execute", act.run_classifier, 'C')
tk.add_command("Execute folder", act.run_classifier_folder)
tk.add_menu("Experimenter")
tk.add_check_button("Ground Truth", act.toggle_ground_truth, default_state = False)
......
......@@ -10,9 +10,12 @@
from collections import OrderedDict
import numpy as np
import os
import interface
import types
import cv2
from interface.interface import InterfaceException as IException
from PIL import Image
import segmentation
import extraction
......@@ -22,8 +25,10 @@ from classification import Classifier
import util
from util.config import Config
from util.file_utils import File as f
from util.file_utils import File
from util.utils import TimeUtils
from util.utils import MetricUtils
from util.x11_colors import X11Colors
class Act(object):
......@@ -55,6 +60,7 @@ class Act(object):
self._image = None
self._const_image = None
self._mask_image = None
self._image_name = None
self._init_dataset(args["dataset"])
......@@ -80,7 +86,7 @@ class Act(object):
directory = directory[:-1]
self.dataset = directory
f.create_dir(self.dataset)
File.create_dir(self.dataset)
def _init_classes(self, classes = None, colors = None):
"""Initialize the classes of dataset.
......@@ -94,9 +100,20 @@ class Act(object):
List of colors representing the color of each class, in the same order. If not informed, a color is chosen at random.
"""
self.classes = []
classes = sorted(f.list_dirs(self.dataset)) if classes is None else classes.split()
colors = [] if colors is None else colors.split()
dataset_description_path = File.make_path(self.dataset, '.dataset_description.txt')
if os.path.exists(dataset_description_path):
colors = []
classes = []
file = open(dataset_description_path, "r")
for line in file:
class_info = line.replace("\n", "").split(",")
classes.append(class_info[0])
colors.append(class_info[1])
else:
classes = sorted(File.list_dirs(self.dataset)) if classes is None else classes.split()
colors = [] if colors is None else colors.split()
if(len(classes) > 0):
for i in range(0, len(classes)):
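_init_classes now prefers a .dataset_description.txt file at the dataset root over the directory listing; each line stores a class name and its color separated by a comma. An illustrative file (class names and colors here are assumptions, not part of the commit):

Soil,Orange
Soybean,Green
Weed,Yellow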
......@@ -104,7 +121,7 @@ class Act(object):
else:
self.add_class(dialog = False, color='Green')
self.add_class(dialog = False, color='Yellow')
self._current_class = 0
......@@ -137,7 +154,7 @@ class Act(object):
self._gt_segments[idx_segment] = self.classes[self._current_class]["name"].value
elif self._dataset_generator == True:
filepath = f.save_class_image(segment, self.dataset, self.classes[self._current_class]["name"].value, self._image_name, idx_segment)
filepath = File.save_class_image(segment, self.dataset, self.classes[self._current_class]["name"].value, self._image_name, idx_segment)
if filepath:
self.tk.append_log("\nSegment saved in %s", filepath)
......@@ -145,8 +162,8 @@ class Act(object):
imagename = self.tk.utils.ask_image_name()
if imagename:
self._image = f.open_image(imagename)
self._image_name = f.get_filename(imagename)
self._image = File.open_image(imagename)
self._image_name = File.get_filename(imagename)
self.tk.write_log("Opening %s...", self._image_name)
self.tk.add_image(self._image, self._image_name, onclick)
......@@ -160,6 +177,7 @@ class Act(object):
def open_weight(self):
"""Open a new weight."""
self.weight_path = self.tk.utils.ask_weight_name()
self.classifier.weight_path = self.weight_path
def restore_image(self):
"""Refresh the image and clean the segmentation.
......@@ -356,7 +374,7 @@ class Act(object):
self.tk.dialogue_config(title, current_config, process_config)
def run_segmenter(self):
def run_segmenter(self, refresh_image=True):
"""Do the segmentation of image, using the current segmenter.
Raises
......@@ -375,7 +393,8 @@ class Act(object):
self._gt_segments = [None]*(max(self.segmenter.get_list_segments())+1)
self.tk.refresh_image(self._image)
if refresh_image:
self.tk.refresh_image(self._image)
def select_extractors(self):
......@@ -439,6 +458,7 @@ class Act(object):
self.tk.write_log(title)
current_config = classification.get_classifier_config()
def process_config():
"""Update the current classifier."""
......@@ -513,7 +533,7 @@ class Act(object):
# New and optimized classification
tmp = ".tmp"
f.remove_dir(f.make_path(self.dataset, tmp))
File.remove_dir(File.make_path(self.dataset, tmp))
self.tk.append_log("Generating test images... (%0.3f seconds)", (TimeUtils.get_time() - start_time))
......@@ -521,7 +541,7 @@ class Act(object):
for idx_segment in list_segments:
segment, size_segment, idx_segment = self.segmenter.get_segment(self, idx_segment=idx_segment)[:-1]
filepath = f.save_class_image(segment, self.dataset, tmp, self._image_name, idx_segment)
filepath = File.save_class_image(segment, self.dataset, tmp, self._image_name, idx_segment)
len_segments[idx_segment] = size_segment
# Perform the feature extraction of all segments in image ( not applied to ConvNets ).
......@@ -532,32 +552,52 @@ class Act(object):
self.tk.append_log("Running classifier on test data... (%0.3f seconds)", (TimeUtils.get_time() - start_time))
# Get the label corresponding to predict class for each segment of image.
labels = self.classifier.classify(self.dataset, test_dir=tmp, test_data="test.arff")
f.remove_dir(f.make_path(self.dataset, tmp))
self.tk.append_log("Painting segments... (%0.3f seconds)", (TimeUtils.get_time() - start_time))
# If ground truth mode, show alternative results
if self._ground_truth == True:
return self._show_ground_truth(list_segments, len_segments, labels, start_time)
labels = self.classifier.classify(self.dataset, test_dir=tmp, test_data="test.arff", image=self._const_image)
File.remove_dir(File.make_path(self.dataset, tmp))
# Create a popup with results of classification.
popup_info = "%s\n" % str(self.classifier.get_summary_config())
len_total = sum([len_segments[idx] for idx in len_segments])
popup_info += "%-16s%-16s%0.2f%%\n" % ("Total", str(len_total), (len_total*100.0)/len_total)
# Paint the image.
for cl in self.classes:
idx_segment = [ list_segments[idx] for idx in range(0, len(labels)) if cl["name"].value == labels[idx]]
if len(idx_segment) > 0:
self._image, _ = self.segmenter.paint_segment(self._image, cl["color"].value, idx_segment=idx_segment, border=False)
len_classes = sum([len_segments[idx] for idx in idx_segment])
popup_info += "%-16s%-16s%0.2f%%\n" % (cl["name"].value, str(len_classes), (len_classes*100.0)/len_total)
# Result is the class for each superpixel
if type(labels) is types.ListType:
self.tk.append_log("Painting segments... (%0.3f seconds)", (TimeUtils.get_time() - start_time))
# If ground truth mode, show alternative results
if self._ground_truth == True:
return self._show_ground_truth(list_segments, len_segments, labels, start_time)
self.tk.refresh_image(self._image)
self.tk.popup(popup_info)
# Create a popup with results of classification.
popup_info = "%s\n" % str(self.classifier.get_summary_config())
len_total = sum([len_segments[idx] for idx in len_segments])
popup_info += "%-16s%-16s%0.2f%%\n" % ("Total", str(len_total), (len_total*100.0)/len_total)
# Paint the image.
self._mask_image = np.zeros(self._const_image.shape[:-1], dtype="uint8")
height, width, channels = self._image.shape
self.class_color = np.zeros((height,width,3), np.uint8)
for (c, cl) in enumerate(self.classes):
idx_segment = [ list_segments[idx] for idx in range(0, len(labels)) if cl["name"].value == labels[idx]]
if len(idx_segment) > 0:
self._image, _ = self.segmenter.paint_segment(self._image, cl["color"].value, idx_segment=idx_segment, border=False)
for idx in idx_segment:
self._mask_image[self.segmenter._segments == idx] = c
self.class_color[self.segmenter._segments == idx] = X11Colors.get_color(cl["color"].value)
len_classes = sum([len_segments[idx] for idx in idx_segment])
popup_info += "%-16s%-16s%0.2f%%\n" % (cl["name"].value, str(len_classes), (len_classes*100.0)/len_total)
self.tk.refresh_image(self._image)
self.tk.popup(popup_info)
else:
# Result is an image
self._mask_image = labels
height, width, channels = self._image.shape
self.class_color = np.zeros((height,width,3), np.uint8)
for (c, cl) in enumerate(self.classes):
self.class_color[labels == c] = X11Colors.get_color(cl["color"].value)
self._image = cv2.addWeighted(self._const_image, 0.7, self.class_color, 0.3, 0)
self.tk.refresh_image(self._image)
end_time = TimeUtils.get_time()
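In the per-pixel branch above, labels is a 2-D array of class indices rather than a list of per-segment labels. The toy snippet below mirrors that branch under assumed values, showing how such a mask becomes the blended overlay:

import numpy as np
import cv2

mask = np.array([[0, 0, 1],
                 [2, 1, 1],
                 [2, 2, 0]], dtype="uint8")                    # one class index per pixel
palette = {0: (0, 128, 0), 1: (0, 255, 255), 2: (255, 0, 0)}   # illustrative BGR colors

frame = np.full((3, 3, 3), 200, dtype="uint8")                 # stand-in for self._const_image
class_color = np.zeros_like(frame)
for c, bgr in palette.items():
    class_color[mask == c] = bgr                               # same masking as class_color above
blended = cv2.addWeighted(frame, 0.7, class_color, 0.3, 0)     # 70% original + 30% class colors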
......@@ -698,3 +738,203 @@ class Act(object):
"""Use this method to bind menu options not available."""
self.tk.write_log("This functionality is not available right now.")
def assign_using_labeled_image(self, imagename = None, refresh_image=True):
"""Open a new image.
Parameters
----------
imagename : string, optional, default = None
Filepath of the labeled image. If not informed, a dialog is opened to choose one.
"""
if len(self.segmenter.get_list_segments()) == 0:
self.tk.write_log("Error: Image not segmented")
return
if self._image is None:
self.tk.write_log("Error: Open the image to be targeted")
return
if imagename is None:
imagename = self.tk.utils.ask_image_name()
if imagename:
self._image_gt = File.open_image_lut(imagename)
self._image_gt_name = File.get_filename(imagename)
self.tk.write_log("Opening %s...", self._image_gt_name)
qtd_classes = len(self.classes)
qtd_superpixel = len(self.segmenter.get_list_segments())
tam_gt = self._image_gt.shape
tam_im = self._image.shape
if len(tam_gt) > 2:
self.tk.write_log("Color image is not supported. You must open a gray-scale image")
return
if tam_gt[0] != tam_im[0] or tam_gt[1] != tam_im[1]:
self.tk.write_log("Images with different sizes")
return
#hist_classes_superpixels = np.zeros((qtd_superpixel, qtd_classes), np.int)
#for i in range(0, tam_gt[0]):
# for j in range(0, tam_gt[1]):
# class_pixel = self._image_gt[i,j]
# if class_pixel > qtd_classes:
# self.tk.write_log("There is no class for the pixel [%d,%d] = %d on the image", i, j, class_pixel)
# else:
# #segment, size_segment, idx_segment, run_time = self.segmenter.get_segment(px = j, py = i)
# idx_segment = self.segmenter._segments[i, j]
# hist_classes_superpixels[idx_segment, class_pixel] = hist_classes_superpixels[idx_segment, class_pixel] + 1
# if i % 10 == 0:
# self.tk.write_log("Annotating row %d of %d", i, tam_gt[0])
qtd_bad_superpixels = 0
for idx_segment in range(0, qtd_superpixel):
hist_classes_superpixels = np.histogram(self._image_gt[self.segmenter._segments == idx_segment], bins=range(0,len(self.classes)+1))[0]
idx_class = np.argmax(hist_classes_superpixels)
sum_vector = np.sum(hist_classes_superpixels)
if refresh_image:
self._image, run_time = self.segmenter.paint_segment(self._image, self.classes[idx_class]["color"].value, idx_segment = [idx_segment])
#self.tk.append_log("posicao maior = %x -- soma vetor %d", x, sum_vector)
if float(hist_classes_superpixels[idx_class]) / sum_vector < 0.5:
qtd_bad_superpixels = qtd_bad_superpixels + 1
if self._ground_truth == True:
self._gt_segments[idx_segment] = self.classes[self._current_class]["name"].value
elif self._dataset_generator == True:
if idx_segment % 10 == 0:
self.tk.write_log("Saving %d of %d", (idx_segment+1), qtd_superpixel)
segment, size_segment, idx_segment, run_time = self.segmenter.get_segment(idx_segment = idx_segment)
filepath = File.save_class_image(segment, self.dataset, self.classes[idx_class]["name"].value, self._image_name, idx_segment)
if filepath:
self.tk.append_log("\nSegment saved in %s", filepath)
self.tk.refresh_image(self._image)
self.tk.write_log("%d bad annotated superpixels of %d superpixel (%0.2f)", qtd_bad_superpixels, qtd_superpixel, (float(qtd_bad_superpixels)/qtd_superpixel)*100)
def run_segmenter_folder(self, foldername=None):
if foldername is None:
foldername = self.tk.utils.ask_directory()
valid_images_extension = ['.jpg', '.png', '.gif', '.jpeg', '.tif']
fileimages = [name for name in os.listdir(foldername)
if os.path.splitext(name)[-1].lower() in valid_images_extension]
for (i,file) in enumerate(fileimages):
path_file = os.path.join(foldername, file)
self.open_image(path_file)
self.run_segmenter(refresh_image=False)
label_image = (os.path.splitext(file)[-2] + '_json')
self.assign_using_labeled_image(os.path.join(foldername, label_image, 'label.png'), refresh_image=False)
self.tk.write_log("%d of %d images", i, len(fileimages))
def run_classifier_folder(self, foldername=None):
if self.classifier is None:
raise IException("Classifier not found!")
if foldername is None:
foldername = self.tk.utils.ask_directory()
valid_images_extension = ['.jpg', '.png', '.gif', '.jpeg', '.tif']
fileimages = [name for name in os.listdir(foldername)
if os.path.splitext(name)[-1].lower() in valid_images_extension]
fileimages.sort()
all_accuracy = []
all_IoU = []
all_frequency_weighted_IU = []
for file in fileimages:
path_file = os.path.join(foldername, file)
self.open_image(path_file)
self.run_classifier()
label_image = os.path.join(foldername, (os.path.splitext(file)[-2] + '_json'), 'label.png')
self._image_gt = File.open_image_lut(label_image)
self._image_gt_name = File.get_filename(label_image)
tam_gt = self._image_gt.shape
tam_im = self._mask_image.shape
if len(tam_gt) > 2:
self.tk.write_log("Color image is not supported. You must open a gray-scale image")
return
if tam_gt[0] != tam_im[0] or tam_gt[1] != tam_im[1]:
self.tk.write_log("Images with different sizes")
return
confusion_matrix = MetricUtils.confusion_matrix(self._mask_image, self._image_gt)
[mean_accuracy, accuracy] = MetricUtils.mean_accuracy(self._mask_image, self._image_gt)
[mean_IoU, IoU] = MetricUtils.mean_IU(self._mask_image, self._image_gt)
frequency_weighted_IU = MetricUtils.frequency_weighted_IU(self._mask_image, self._image_gt)
print('Confusion Matrix')
print(confusion_matrix)
print('Mean Pixel Accuracy')
print(mean_accuracy)
print('Pixel accuracy per class')
print(accuracy)
print('Mean Intersection over Union')
print(mean_IoU)
print('Intersection over Union per class')
print(IoU)
print('Frequency Weighted IU')
print(frequency_weighted_IU)
all_accuracy.append(accuracy)
all_IoU.append(IoU)
all_frequency_weighted_IU.append(frequency_weighted_IU)
if not os.path.exists("../models_results/"):
os.makedirs("../models_results/")
path = File.make_path("../models_results/" + file + ".txt")
path_img = File.make_path("../models_results/" + file + "_seg1.tif")
path_img2 = File.make_path("../models_results/" + file + "_seg2.tif")
img = Image.fromarray(self._image)
img.save(path_img)
img = Image.fromarray(self.class_color)
img.save(path_img2)
f=open(path,'ab')
np.savetxt(f, ['Confusion matrix'], fmt='%s')
np.savetxt(f, confusion_matrix, fmt='%.5f')
np.savetxt(f, ['\nAccuracy'], fmt='%s')
np.savetxt(f, accuracy, fmt='%.5f')
np.savetxt(f, ['\nIntersection over union'], fmt='%s')
np.savetxt(f, IoU, fmt='%.5f')
np.savetxt(f, ['\nFrequency weighted intersection over union'], fmt='%s')
np.savetxt(f, [frequency_weighted_IU], fmt='%.5f')
f.close()
path = File.make_path("../models_results/all_metrics.txt")
f=open(path,'ab')
np.savetxt(f, ['All Accuracy'], fmt='%s')
np.savetxt(f, all_accuracy, fmt='%.5f')
np.savetxt(f, ['\nAll IoU'], fmt='%s')
np.savetxt(f, all_IoU, fmt='%.5f')
np.savetxt(f, ['\nAll Frequency Weighted IU'], fmt='%s')
np.savetxt(f, all_frequency_weighted_IU, fmt='%.5f')
f.close()
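For every processed image, run_classifier_folder leaves its results under ../models_results/ (created if missing, relative to the working directory). For an input named field01.jpg the files written above would be (the image name is illustrative):

../models_results/field01.jpg_seg1.tif    # self._image: frame with painted/blended segments
../models_results/field01.jpg_seg2.tif    # self.class_color: pure class-color map
../models_results/field01.jpg.txt         # confusion matrix, per-class accuracy, IoU, frequency weighted IU
../models_results/all_metrics.txt         # metrics accumulated over all processed images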
......@@ -66,6 +66,7 @@ class Segmenter(object):
Implement this method to extend this class with a new segmenter algorithm.
"""
pass
@abstractmethod
def run(self, image):
......
......@@ -14,6 +14,8 @@ import os
import shutil
from skimage.util import img_as_float
from scipy import ndimage
from PIL import Image
import numpy as np
class File(object):
"""Set of utilities to handle files."""
......@@ -86,6 +88,33 @@ class File(object):
#return img_as_float(image)
return image
@staticmethod
def open_image_lut(filepath):
"""Open a image.
Parameters
----------
filepath : string
Filepath of a file.
Returns
-------
image : numpy array
Return the opened image as a numpy array; indexed (palette) images keep their per-pixel label indices.
Raises
------
IOError
Error opening the image.
"""
image = Image.open(filepath)
if image is None:
raise IOError('Image not opened')
return np.array(image)
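# Note: PIL keeps indexed ("P" mode) PNGs as their palette indices, so for a labelled
# image np.array() yields a 2-D array with one class index per pixel rather than a
# decoded 3-channel picture. A quick illustrative check (the path is hypothetical):
#
#   labels = File.open_image_lut("dataset/field01_json/label.png")
#   labels.shape        # (height, width) -- no color channel
#   np.unique(labels)   # the class indices present in the annotation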
@staticmethod
def save_image(image, directory, filename, ext = '.tif'):
"""Save a image.
......
......@@ -10,6 +10,8 @@
import cv2
import random
import time
from sklearn.metrics import confusion_matrix
import numpy as np
class ColorUtils(object):
"""Set of utilities to manipulate colors."""
......@@ -192,3 +194,178 @@ class TimeUtils(object):
Returns the time as a floating point number expressed in seconds since the epoch, in UTC.
"""
return time.time()
class MetricUtils(object):
"""Calculate segmentation metrics."""
@staticmethod
def confusion_matrix(eval_segm, gt_segm):
return confusion_matrix(gt_segm.flatten(), eval_segm.flatten())
@staticmethod
def pixel_accuracy(eval_segm, gt_segm):
"""sum_i(n_ii) / sum_i(t_i)"""
MetricUtils.check_size(eval_segm, gt_segm)
cl, n_cl = MetricUtils.extract_classes(gt_segm)
eval_mask, gt_mask = MetricUtils.extract_both_masks(eval_segm, gt_segm, cl, n_cl)
sum_n_ii = 0
sum_t_i = 0
for i, c in enumerate(cl):
curr_eval_mask = eval_mask[i, :, :]
curr_gt_mask = gt_mask[i, :, :]
sum_n_ii += np.sum(np.logical_and(curr_eval_mask, curr_gt_mask))
sum_t_i += np.sum(curr_gt_mask)
if (sum_t_i == 0):
pixel_accuracy_ = 0
else:
pixel_accuracy_ = float(sum_n_ii) / sum_t_i
return pixel_accuracy_
@staticmethod
def mean_accuracy(eval_segm, gt_segm):
"""(1/n_cl) sum_i(n_ii/t_i)"""
MetricUtils.check_size(eval_segm, gt_segm)
cl, n_cl = MetricUtils.extract_classes(gt_segm)
eval_mask, gt_mask = MetricUtils.extract_both_masks(eval_segm, gt_segm, cl, n_cl)
accuracy = list([0]) * n_cl
for i, c in enumerate(cl):
curr_eval_mask = eval_mask[i, :, :]
curr_gt_mask = gt_mask[i, :, :]
n_ii = np.sum(np.logical_and(curr_eval_mask, curr_gt_mask))
t_i = np.sum(curr_gt_mask)
if (t_i != 0):
accuracy[i] = float(n_ii) / t_i
mean_accuracy_ = np.mean(accuracy)
return mean_accuracy_, accuracy
@staticmethod
def mean_IU(eval_segm, gt_segm):
"""(1/n_cl) * sum_i(n_ii / (t_i + sum_j(n_ji) - n_ii))"""
MetricUtils.check_size(eval_segm, gt_segm)
cl, n_cl = MetricUtils.union_classes(eval_segm, gt_segm)
_, n_cl_gt = MetricUtils.extract_classes(gt_segm)
eval_mask, gt_mask = MetricUtils.extract_both_masks(eval_segm, gt_segm, cl, n_cl)
IU = list([0]) * n_cl
for i, c in enumerate(cl):
curr_eval_mask = eval_mask[i, :, :]
curr_gt_mask = gt_mask[i, :, :]
if (np.sum(curr_eval_mask) == 0) or (np.sum(curr_gt_mask) == 0):
continue
n_ii = np.sum(np.logical_and(curr_eval_mask, curr_gt_mask))
t_i = np.sum(curr_gt_mask)
n_ij = np.sum(curr_eval_mask)
IU[i] = float(n_ii) / (t_i + n_ij - n_ii)
mean_IU_ = np.sum(IU) / n_cl_gt
return mean_IU_, IU
@staticmethod
def frequency_weighted_IU(eval_segm, gt_segm):
"""sum_k(t_k)^(-1) * sum_i((t_i*n_ii)/(t_i + sum_j(n_ji) - n_ii))"""
MetricUtils.check_size(eval_segm, gt_segm)
cl, n_cl = MetricUtils.union_classes(eval_segm, gt_segm)
eval_mask, gt_mask = MetricUtils.extract_both_masks(eval_segm, gt_segm, cl, n_cl)
frequency_weighted_IU_ = list([0]) * n_cl
for i, c in enumerate(cl):
curr_eval_mask = eval_mask[i, :, :]
curr_gt_mask = gt_mask[i, :, :]
if (np.sum(curr_eval_mask) == 0) or (np.sum(curr_gt_mask) == 0):
continue
n_ii = np.sum(np.logical_and(curr_eval_mask, curr_gt_mask))
t_i = np.sum(curr_gt_mask)
n_ij = np.sum(curr_eval_mask)
frequency_weighted_IU_[i] = float(t_i * n_ii) / (t_i + n_ij - n_ii)
sum_k_t_k = MetricUtils.get_pixel_area(eval_segm)
frequency_weighted_IU_ = np.sum(frequency_weighted_IU_) / sum_k_t_k
return frequency_weighted_IU_
@staticmethod
def get_pixel_area(segm):
"""Auxiliary functions used during evaluation."""
return segm.shape[0] * segm.shape[1]
@staticmethod
def extract_both_masks(eval_segm, gt_segm, cl, n_cl):
eval_mask = MetricUtils.extract_masks(eval_segm, cl, n_cl)
gt_mask = MetricUtils.extract_masks(gt_segm, cl, n_cl)
return eval_mask, gt_mask
@staticmethod
def extract_classes(segm):
cl = np.unique(segm)
n_cl = len(cl)
return cl, n_cl
@staticmethod
def union_classes(eval_segm, gt_segm):
eval_cl, _ = MetricUtils.extract_classes(eval_segm)
gt_cl, _ = MetricUtils.extract_classes(gt_segm)
cl = np.union1d(eval_cl, gt_cl)
n_cl = len(cl)
return cl, n_cl
@staticmethod
def extract_masks(segm, cl, n_cl):
h, w = MetricUtils.segm_size(segm)
masks = np.zeros((n_cl, h, w))
for i, c in enumerate(cl):
masks[i, :, :] = segm == c
return masks
@staticmethod
def segm_size(segm):
try:
height = segm.shape[0]
width = segm.shape[1]
except IndexError:
raise
return height, width
@staticmethod
def check_size(eval_segm, gt_segm):
h_e, w_e = MetricUtils.segm_size(eval_segm)
h_g, w_g = MetricUtils.segm_size(gt_segm)
if (h_e != h_g) or (w_e != w_g):
raise EvalSegErr("DiffDim: Different dimensions of matrices!")
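A self-contained toy run of these metrics on a 2x2 prediction/ground-truth pair; the numbers in the comments follow the formulas quoted in the docstrings above, and the arrays are chosen only for illustration:

import numpy as np
from util.utils import MetricUtils

gt   = np.array([[0, 0],
                 [1, 1]])        # toy ground truth, for illustration
pred = np.array([[0, 1],
                 [1, 1]])        # toy prediction, for illustration

print(MetricUtils.confusion_matrix(pred, gt))       # [[1 1] [0 2]]
print(MetricUtils.pixel_accuracy(pred, gt))         # (1 + 2) / (2 + 2) = 0.75
print(MetricUtils.mean_accuracy(pred, gt)[0])       # (1/2 + 2/2) / 2 = 0.75
print(MetricUtils.mean_IU(pred, gt)[0])             # (1/2 + 2/3) / 2 ~= 0.583
print(MetricUtils.frequency_weighted_IU(pred, gt))  # (2*1/2 + 2*2/3) / 4 ~= 0.583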
......@@ -206,6 +206,7 @@ class X11Colors(object):
idx = X11Colors.search_color(name)
return _x11_colors.values()[idx]
@staticmethod
def get_color_bgr(name):
......