Commit f2276820 authored by Diego André Sant'Ana

Fix problems in the code caused by the Python version

parent d5869921
@@ -256,41 +256,40 @@ class FeatureExtractor(object):
        output_file : string
            Path to output file.
        """
        if version_info[0] >= 3:
            arff = open(output_file, 'wb')

            arff.write(bytes(str("%s %s\n\n" % ('@relation', relation)), 'utf-8'))

            for label, t in zip(labels, types):
                arff.write(bytes(str("%s %s %s\n" % ('@attribute', label, t)), 'utf-8'))

            arff.write(bytes(str("%s %s {%s}\n\n" % ('@attribute', 'classe', ', '.join(classes))), 'utf-8'))
            arff.write(bytes(str('@data\n\n'), 'utf-8'))

            for instance in data:
                instance = map(str, instance)
                line = ",".join(instance)
                arff.write(bytes(str(line + "\n"), 'utf-8'))

            arff.close()
        else:
            arff = open(output_file, 'wb')

            arff.write("%s %s\n\n" % ('@relation', relation))

            for label, t in zip(labels, types):
                arff.write("%s %s %s\n" % ('@attribute', label, t))

            arff.write("%s %s {%s}\n\n" % ('@attribute', 'classe', ', '.join(classes)))
            arff.write('@data\n\n')

            for instance in data:
                instance = map(str, instance)
                line = ",".join(instance)
                arff.write(line + "\n")

            arff.close()

    #method to equalize size of images
    def equalize_size_image(self, image):
...
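The Python 2/3 branch above is needed because a file opened in 'wb' mode only accepts bytes on Python 3, so every string has to be encoded by hand. For comparison, a minimal sketch (not this project's code; the helper name write_arff is illustrative) that avoids the branch by opening the file in text mode with an explicit encoding, so the same write calls work on both interpreter versions:

    import io

    def write_arff(output_file, relation, labels, types, classes, data):
        # Text mode with an explicit encoding accepts unicode text on both
        # Python 2 and Python 3, so no manual bytes() encoding is needed.
        with io.open(output_file, 'w', encoding='utf-8') as arff:
            arff.write(u"%s %s\n\n" % ('@relation', relation))
            for label, t in zip(labels, types):
                arff.write(u"%s %s %s\n" % ('@attribute', label, t))
            arff.write(u"%s %s {%s}\n\n" % ('@attribute', 'classe', ', '.join(classes)))
            arff.write(u'@data\n\n')
            for instance in data:
                arff.write(u",".join(map(str, instance)) + u"\n")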
@@ -17,7 +17,7 @@ from util.utils import ImageUtils
from skimage.measure import regionprops, moments, moments_central
from skimage.morphology import label
import numpy as np
-from extractor import Extractor
+from .extractor import Extractor
import math

class RawCentralMoments(Extractor):
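The one-line change in this hunk is a Python 3 fix: implicit relative imports were removed, so a module inside a package must name its sibling explicitly. A short sketch of the equivalent spellings (the package name shown is hypothetical):

    # Explicit relative import: works on Python 3 (and on Python 2 with
    # 'from __future__ import absolute_import') when this module lives in a package.
    from .extractor import Extractor

    # Equivalent absolute form, assuming the package is named 'features' (hypothetical):
    # from features.extractor import Extractor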
@@ -30,44 +30,44 @@ class RawCentralMoments(Extractor):
    def run(self, image):
        """Calculate raw and central set of image moments of order 1 and 2.

        Parameters
        ----------
        image : opencv image
            image to be analyzed.

        Returns
        -------
        features : tuple
            Returns a tuple containing a list of labels, type and values for each feature extracted.
        """
        #raw_moments = moments(image)

        image_binary = ImageUtils.image_binary(image, bgr = True)

        m = measure.moments(image_binary)
        m = np.nan_to_num(m)

        values_m = [m[p, q] for (p, q) in self._moments_order]
        labels_m = [M+str(p)+str(q) for M,(p,q) in zip(['M_'] * len(self._moments_order), self._moments_order)]

        row = m[0, 1] / m[0, 0]
        col = m[1, 0] / m[0, 0]

        mu = measure.moments_central(image_binary, center=(row, col), order=3)
        mu = np.nan_to_num(mu)

        values_mu = [mu[p, q] for (p, q) in self._moments_order]
        labels_mu = [M+str(p)+str(q) for M,(p,q) in zip(['Mu_'] * len(self._moments_order), self._moments_order)]

        labels = labels_m + labels_mu
        types = [Extractor.NUMERIC] * len(labels)
        values = values_m + values_mu

        return labels, types, values


class HuMoments(Extractor):
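The run() method above computes raw moments with skimage, derives a centroid, and then computes central moments around it, with np.nan_to_num guarding against degenerate segments. A minimal standalone sketch of that pipeline, assuming a recent scikit-image where moments_central accepts a center keyword and moments are indexed as M[row_order, col_order]; the orders tuple below is illustrative, not the project's _moments_order:

    import numpy as np
    from skimage import measure

    def raw_and_central_moments(binary_image, orders=((1, 1), (1, 2), (2, 1), (2, 2))):
        m = np.nan_to_num(measure.moments(binary_image, order=3))   # raw moments
        row = m[1, 0] / m[0, 0]   # centroid, per current skimage indexing convention
        col = m[0, 1] / m[0, 0]
        mu = np.nan_to_num(measure.moments_central(binary_image, center=(row, col), order=3))
        values_m = [m[p, q] for (p, q) in orders]
        values_mu = [mu[p, q] for (p, q) in orders]
        return values_m, values_mu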
@@ -78,44 +78,44 @@ class HuMoments(Extractor):
    def run(self, image):
        """Calculate Hu's set of image moments.

        Parameters
        ----------
        image : opencv image
            Image to be analyzed.

        Returns
        -------
        features : tuple
            Returns a tuple containing a list of labels, type and values for each feature extracted.
        """
        """image_binary = ImageUtils.image_binary(image, bgr = True)

        m = measure.moments(image_binary)

        row = m[0, 1] / m[0, 0]
        col = m[1, 0] / m[0, 0]
        mu = measure.moments_central(image_binary, row, col)

        nu = measure.moments_normalized(mu)
        hu = measure.moments_hu(nu)

        values_hu = list(hu)"""

        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        values_hu = cv2.HuMoments(cv2.moments(image)).flatten()
        values_hu = np.nan_to_num(values_hu)
        values_hu = list(values_hu)

        labels_hu = [m+n for m,n in zip(['Hu_'] * len(values_hu), map(str, range(0, len(values_hu))))]

        labels = labels_hu
        types = [Extractor.NUMERIC] * len(labels)
        values = values_hu

        return labels, types, values
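The code path above delegates Hu moments to OpenCV instead of skimage. A minimal usage sketch of that path in isolation (the input file name is hypothetical): cv2.moments expects a single-channel image, and cv2.HuMoments returns a (7, 1) array that is flattened into seven features.

    import cv2
    import numpy as np

    image = cv2.imread('segment.png')               # hypothetical input image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # Hu moments need one channel
    hu = np.nan_to_num(cv2.HuMoments(cv2.moments(gray)).flatten())
    labels = ['Hu_%d' % i for i in range(len(hu))]
    print(list(zip(labels, hu)))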
@@ -566,7 +566,7 @@ class Act(object):
                print("Wait to complete processes all images!")
                with tqdm(total=len(list_segments)) as pppbar:
                    for idx_segment in list_segments:
-                        segment, size_segment, idx_segment = self.segmenter.get_segment(self, idx_segment=idx_segment)[:-1]
+                        segment, size_segment, xml_file, idx_segment = self.segmenter.get_segment(self, idx_segment=idx_segment)[:-1]
                        # Problem here! Dataset removed.
                        filepath = File.save_only_class_image(segment, self.dataset, tmp, self._image_name, idx_segment)
                        len_segments[idx_segment] = size_segment
...
@@ -16,7 +16,7 @@ from skimage.util import img_as_float, img_as_ubyte
from util.config import Config
from util.utils import TimeUtils
from util.x11_colors import X11Colors
from pascal_voc_writer import Writer as wr
from abc import ABCMeta, abstractmethod

class SkimageSegmenter(object):
@@ -117,8 +117,9 @@ class SkimageSegmenter(object):
            # Get the rectangle that encompasses the contour
            x,y,w,h = cv2.boundingRect(max_contour)

            # Create the object for this segment in the .XML file
-            xml_file.addObject(name_segment, x, y, x+w, y+h)
+            if xml_file is not None:
+                xml_file.addObject(name_segment, x, y, x+w, y+h)

            segment = segment[y:y+h, x:x+w]
            end_time = TimeUtils.get_time()
...
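The guard added in the last hunk only calls addObject when a Pascal VOC writer was actually created, so segmentation can run without producing an annotation. For reference, a minimal sketch of how the pascal_voc_writer Writer that this module references is typically used; the image path, size, and class name are illustrative:

    from pascal_voc_writer import Writer

    xml_file = Writer('image.png', 640, 480)            # image path, width, height
    x, y, w, h = 10, 20, 50, 40                         # e.g. from cv2.boundingRect
    if xml_file is not None:                            # same guard as in the diff
        xml_file.addObject('leaf', x, y, x + w, y + h)  # name, xmin, ymin, xmax, ymax
    xml_file.save('image.xml')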