inovisao / pynovisao / Commits

Commit 7c3986ee
Authored Mar 08, 2019 by Geazy Menezes

Merge branch 'DesempenhoNovosProcessadores_CorrecaoMensagens_HiddenWarnings'

Parents: 25e00437, 6232bfa2
Changes: 6 changed files with 132 additions and 73 deletions (+132 / -73)
.gitignore                              +2   -0
src/extraction/feature_extraction.py    +18  -16
src/extraction/hog.py                   +10  -5
src/extraction/image_moments.py         +5   -4
src/pynovisao.py                        +62  -48
src/util/split_convertall.sh            +35  -0
.gitignore  (view file @ 7c3986ee)

 *.pyc
 data/*
+models_checkpoints/*
+venv/*
 !data/demo.jpg
 !data/pynovisao.png
 !data/demo/.gitignore
...
src/extraction/feature_extraction.py  (view file @ 7c3986ee)

...
@@ -11,8 +11,8 @@
 import io
 import itertools
 import os
-import threading
+import multiprocessing
+from multiprocessing import Process, Manager
 from interface.interface import InterfaceException as IException
 from util.file_utils import File
...
@@ -23,7 +23,9 @@ from extractor import Extractor
 from tqdm import tqdm
 import sys
+if not sys.warnoptions:
+    import warnings
+    warnings.simplefilter("ignore")

 class FeatureExtractor(object):
     """Handle the feature extraction."""
...
@@ -40,13 +42,11 @@ class FeatureExtractor(object):
         self.tkParent=tkParent

     def extract_all(self, dataset, output_file = None, dirs = None, overwrite = True):
-        self.labels = []
-        self.types = []
-        self.data = []
+        self.data = Manager().list() #is a necessary because have a problem with use Process and normaly declaration
         self.threads = []
+        self.labels = Manager().list()
+        self.types = Manager().list()
         """Runs the feature extraction algorithms on all images of dataset.

         Parameters
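The switch from plain lists to Manager().list() matters because, unlike threads, multiprocessing.Process workers run in separate address spaces: appends to an ordinary list inside a child process never reach the parent. A minimal sketch of the pattern, assuming nothing beyond the standard library:

    from multiprocessing import Manager, Process

    def worker(shared, i):
        # Appends go through the manager proxy, so the parent process sees them.
        shared.append(i * i)

    if __name__ == "__main__":
        results = Manager().list()
        procs = [Process(target=worker, args=(results, i)) for i in range(4)]
        for p in procs:
            p.start()
        for p in procs:
            p.join()
        print(list(results))  # e.g. [0, 1, 4, 9] (order may vary)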
...
@@ -101,6 +101,7 @@ class FeatureExtractor(object):
         with tqdm(total=len(self.threads)) as pbar:
             for t in self.threads:
                 t.start()
+                pbar.update(1)
             pbar.close()

         self.print_console("Waiting for workers to finish extracting attributes from images!")
...
@@ -108,7 +109,6 @@ class FeatureExtractor(object):
         for t in self.threads:
             t.join()
             ppbar.update(1)
         ppbar.close()
         self.print_console("The process was completed with " + str(len(self.threads)) + " images!")
         if len(self.data) == 0:
...
@@ -116,7 +116,7 @@ class FeatureExtractor(object):
         # Save the output file in ARFF format
         # self._save_output(File.get_filename(dataset), classes, self.labels, self.types, self.data, output_file)
-        self._save_output(File.get_filename(dataset), classes, self.labels, self.types, self.data, output_file)
+        self._save_output(File.get_filename(dataset), classes, self.labels[0], self.types[0], self.data, output_file)

         end_time = TimeUtils.get_time()

         return output_file, (end_time - start_time)
...
@@ -130,9 +130,8 @@ class FeatureExtractor(object):
             for item in items:
                 if item.startswith('.'):
                     continue
-                th = threading.Thread(target=self.sub_job_extractor, args=(item, dataset, cl, classes))
+                #th = threading.Thread(target=self.sub_job_extractor,args=(item, dataset, cl, classes))
+                th = multiprocessing.Process(target=self.sub_job_extractor, args=(item, dataset, cl, classes))
                 self.threads.append(th)
...
@@ -149,14 +148,17 @@ class FeatureExtractor(object):
         if len(self.data) > 0:
             values = list(
                 itertools.chain.from_iterable(zip(*([extractor().run(image) for extractor in self.extractors]))[2]))
             self.data.append(values + [cl if cl in classes else classes[0]])
         else:
-            self.labels, self.types, values = [list(itertools.chain.from_iterable(ret))
+            labs, tys, values = [list(itertools.chain.from_iterable(ret))
                                                for ret in
                                                zip(*(extractor().run(image) for extractor in self.extractors))]
+            self.labels.append(labs)
+            self.types.append(tys)
             self.data.append(values + [cl if cl in classes else classes[0]])

     def extract_one_file(self, dataset, image_path, output_file = None):
         """Runs the feature extraction algorithms on specific image.
...
src/extraction/hog.py  (view file @ 7c3986ee)

...
@@ -8,6 +8,11 @@
     Name: hog.py
     Author: Alessandro dos Santos Ferreira ( santosferreira.alessandro@gmail.com )
+
+    Change parameter Visualise for Visualize because is deprecaded
+    Date:02/01/2019
+    Author: Diego Andre Sant Ana
 """
 from skimage import feature
...
@@ -36,13 +41,13 @@ class HOG(Extractor):
         features : tuple
             Returns a tuple containing a list of labels, type and values for each feature extracted.
         """
         image_grayscale = ImageUtils.image_grayscale(image, bgr=True)
         image_128x128 = ImageUtils.image_resize(image_grayscale, 128, 128)

         values, _ = feature.hog(image_128x128, orientations=8, pixels_per_cell=(32, 32),
                                 cells_per_block=(1, 1), visualise=True)

         labels = [m + n for m, n in zip(['hog_'] * len(values), map(str, range(0, len(values))))]
         types = [Extractor.NUMERIC] * len(labels)

         return labels, types, list(values)
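The new docstring entry refers to the visualise/visualize rename in scikit-image; on recent releases the call above would use the visualize keyword. A hedged sketch of the updated call, using a random 128x128 array as a stand-in for the project's ImageUtils pipeline:

    import numpy as np
    from skimage import feature

    image_128x128 = np.random.rand(128, 128)  # stand-in grayscale image
    values, hog_image = feature.hog(image_128x128, orientations=8,
                                    pixels_per_cell=(32, 32),
                                    cells_per_block=(1, 1),
                                    visualize=True)  # 'visualise' is deprecated in newer releases
    print(len(values))  # 8 orientations x 4 x 4 cells = 128 features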
src/extraction/image_moments.py  (view file @ 7c3986ee)

...
@@ -16,7 +16,7 @@ import cv2
 from util.utils import ImageUtils
 from skimage.measure import regionprops, moments, moments_central
 from skimage.morphology import label
+import numpy as np
 from extractor import Extractor

 class RawCentralMoments(Extractor):
...
@@ -53,8 +53,8 @@ class RawCentralMoments(Extractor):
         row = m[0, 1] / m[0, 0]
         col = m[1, 0] / m[0, 0]

-        mu = measure.moments_central(image_binary, row, col)
+        mu = measure.moments_central(image_binary, center=(row, col), order=3)

         values_mu = [mu[p, q] for (p, q) in self._moments_order]
         labels_mu = [M + str(p) + str(q) for M, (p, q) in zip(['Mu_'] * len(self._moments_order), self._moments_order)]
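The positional (row, col) arguments are replaced by the keyword form that current scikit-image expects. A small self-contained sketch, assuming a recent scikit-image and mirroring the centroid computation above on a toy binary image:

    import numpy as np
    from skimage import measure

    image_binary = np.zeros((64, 64), dtype=np.uint8)
    image_binary[16:48, 16:48] = 1  # toy binary blob

    m = measure.moments(image_binary, order=3)
    row = m[0, 1] / m[0, 0]
    col = m[1, 0] / m[0, 0]
    # Newer API: centroid passed as center=, maximum moment order as order=.
    mu = measure.moments_central(image_binary, center=(row, col), order=3)
    print(mu.shape)  # (4, 4): central moments up to order 3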
...
@@ -104,8 +104,9 @@ class HuMoments(Extractor):
         image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

         values_hu = cv2.HuMoments(cv2.moments(image)).flatten()
-        values_hu = list(values_hu)
+        values_hu = np.nan_to_num(values_hu)

         labels_hu = [m + n for m, n in zip(['Hu_'] * len(values_hu), map(str, range(0, len(values_hu))))]
         labels = labels_hu
...
src/pynovisao.py  (view file @ 7c3986ee)

...
@@ -5,7 +5,7 @@
     Name: pynovisao.py
     Author: Alessandro dos Santos Ferreira ( santosferreira.alessandro@gmail.com )
 """
+import gc
 from collections import OrderedDict
 import numpy as np
 import os
...
@@ -30,8 +30,10 @@ from util.file_utils import File
 from util.utils import TimeUtils
 from util.utils import MetricUtils
 from util.x11_colors import X11Colors
+import multiprocessing
+from multiprocessing import Process, Manager
+import threading
+from tqdm import tqdm

 class Act(object):
     """Store all actions of Pynovisao."""
...
@@ -199,7 +201,7 @@ class Act(object):
             If there's no image opened.
         """
         if self._const_image is None:
-            raise IException("Image not found")
+            raise IException("Image not found! Open an image to test, select in the menu the option File>Open Image!")

         if self.tk.close_image():
             self.tk.write_log("Closing image...")
...
@@ -386,7 +388,7 @@ class Act(object):
             If there's no image opened.
         """
         if self._const_image is None:
-            raise IException("Image not found")
+            raise IException("Image not found! Open an image to test, select in the menu the option File>Open Image!")

         self.tk.write_log("Running %s...", self.segmenter.get_name())
...
@@ -421,7 +423,7 @@ class Act(object):
                                     if new_config[extractor].value == True]

         if len(self.extractors) == 0:
-            raise IException("Please select at least one extractor")
+            raise IException("Please select an extractor from the menu under Features Extraction> Select extractors!")

         self.tk.append_log("\nConfig updated:\n%s",
                 '\n'.join(["%s: %s" % (new_config[extractor].label, "on" if new_config[extractor].value == True else "off")
...
@@ -459,7 +461,7 @@ class Act(object):
             The user must install the required dependencies to classifiers.
         """
         if self.classifier is None:
-            raise IException("Classifier not found!")
+            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")

         title = "Choosing a classifier"
         self.tk.write_log(title)
...
@@ -488,7 +490,7 @@ class Act(object):
             The user must install the required dependencies to classifiers.
         """
         if self.classifier is None:
-            raise IException("Classifier not found!")
+            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")

         title = "Configuring %s" % self.classifier.get_name()
         self.tk.write_log(title)
...
@@ -509,7 +511,7 @@ class Act(object):
     def run_classifier(self):
         """Run the classifier on the current image.
         As result, paint the image with color corresponding to predicted class of all segment.

         Raises
         ------
         IException 'You must install python-weka-wrapper'
...
@@ -518,48 +520,53 @@ class Act(object):
             If there's no image opened.
         """
         if self.classifier is None:
-            raise IException("Classifier not found!")
+            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")

         if self._const_image is None:
-            raise IException("Image not found")
+            raise IException("Image not found! Open an image to test, select in the menu the option File>Open Image!")

         self.tk.write_log("Running %s...", self.classifier.get_name())
         self.tk.append_log("\n%s", str(self.classifier.get_summary_config()))
         #self.classifier.set
         start_time = TimeUtils.get_time()

         # Perform a segmentation, if needed.
         list_segments = self.segmenter.get_list_segments()
         if len(list_segments) == 0:
             self.tk.append_log("Running %s... (%0.3f seconds)", self.segmenter.get_name(), (TimeUtils.get_time() - start_time))

             self._image, _ = self.segmenter.run(self._const_image)
             self.tk.refresh_image(self._image)
             list_segments = self.segmenter.get_list_segments()
             self._gt_segments = [None]*(max(list_segments)+1)

         # New and optimized classification
         tmp = ".tmp"
         File.remove_dir(File.make_path(self.dataset, tmp))
         self.tk.append_log("Generating test images... (%0.3f seconds)", (TimeUtils.get_time() - start_time))

         len_segments = {}
-        for idx_segment in list_segments:
-            segment, size_segment, idx_segment = self.segmenter.get_segment(self, idx_segment=idx_segment)[:-1]
-            # Problem here! Dataset removed.
-            filepath = File.save_only_class_image(segment, self.dataset, tmp, self._image_name, idx_segment)
-            len_segments[idx_segment] = size_segment
+        print("Wait to complete processes all images!")
+        with tqdm(total=len(list_segments)) as pppbar:
+            for idx_segment in list_segments:
+                segment, size_segment, idx_segment = self.segmenter.get_segment(self, idx_segment=idx_segment)[:-1]
+                # Problem here! Dataset removed.
+                filepath = File.save_only_class_image(segment, self.dataset, tmp, self._image_name, idx_segment)
+                len_segments[idx_segment] = size_segment
+                pppbar.update(1)
+            pppbar.close()
+        gc.collect()

         # Perform the feature extraction of all segments in image ( not applied to ConvNets ).
         if self.classifier.must_extract_features():
             self.tk.append_log("Running extractors on test images... (%0.3f seconds)", (TimeUtils.get_time() - start_time))
             fextractor = FeatureExtractor(self.extractors)
             output_file, _ = fextractor.extract_all(self.dataset, "test", dirs=[tmp])

         self.tk.append_log("Running classifier on test data... (%0.3f seconds)", (TimeUtils.get_time() - start_time))

         # Get the label corresponding to predict class for each segment of image.
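The rewritten loop above wraps the per-segment work in a tqdm progress bar and frees memory once the temporary test images have been written. A reduced sketch of that pattern with hypothetical helper callables (get_segment and save_segment are placeholders, not the project's API):

    import gc
    from tqdm import tqdm

    def save_segments(list_segments, get_segment, save_segment):
        """Save every segment while reporting progress, then release buffers."""
        len_segments = {}
        with tqdm(total=len(list_segments)) as pbar:
            for idx in list_segments:
                segment, size = get_segment(idx)   # hypothetical callable
                save_segment(segment, idx)         # hypothetical callable
                len_segments[idx] = size
                pbar.update(1)
        gc.collect()  # drop references to the per-segment images
        return len_segments

    # Example usage with dummy callables:
    sizes = save_segments([0, 1, 2],
                          lambda i: (None, 10 * i),
                          lambda seg, i: None)
    print(sizes)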
...
@@ -569,17 +576,17 @@ class Act(object):
         # Result is the class for each superpixel
         if type(labels) is types.ListType:
             self.tk.append_log("Painting segments... (%0.3f seconds)", (TimeUtils.get_time() - start_time))

             # If ground truth mode, show alternative results
             if self._ground_truth == True:
                 return self._show_ground_truth(list_segments, len_segments, labels, start_time)

             # Create a popup with results of classification.
             popup_info = "%s\n" % str(self.classifier.get_summary_config())
             len_total = sum([len_segments[idx] for idx in len_segments])
             popup_info += "%-16s%-16s%0.2f%%\n" % ("Total", str(len_total), (len_total*100.0)/len_total)

             # Paint the image.
             self._mask_image = np.zeros(self._const_image.shape[:-1], dtype="uint8")
             height, width, channels = self._image.shape
@@ -591,7 +598,7 @@ class Act(object):
...
@@ -591,7 +598,7 @@ class Act(object):
for
idx
in
idx_segment
:
for
idx
in
idx_segment
:
self
.
_mask_image
[
self
.
segmenter
.
_segments
==
idx
]
=
c
self
.
_mask_image
[
self
.
segmenter
.
_segments
==
idx
]
=
c
self
.
class_color
[
self
.
segmenter
.
_segments
==
idx
]
=
X11Colors
.
get_color
(
cl
[
"color"
].
value
)
self
.
class_color
[
self
.
segmenter
.
_segments
==
idx
]
=
X11Colors
.
get_color
(
cl
[
"color"
].
value
)
len_classes
=
sum
([
len_segments
[
idx
]
for
idx
in
idx_segment
])
len_classes
=
sum
([
len_segments
[
idx
]
for
idx
in
idx_segment
])
popup_info
+=
"%-16s%-16s%0.2f%%
\n
"
%
(
cl
[
"name"
].
value
,
str
(
len_classes
),
(
len_classes
*
100.0
)
/
len_total
)
popup_info
+=
"%-16s%-16s%0.2f%%
\n
"
%
(
cl
[
"name"
].
value
,
str
(
len_classes
),
(
len_classes
*
100.0
)
/
len_total
)
...
@@ -610,18 +617,18 @@ class Act(object):
             self._image = cv2.addWeighted(self._const_image, 0.7, self.class_color, 0.3, 0)
             self.tk.refresh_image(self._image)

         end_time = TimeUtils.get_time()

         self.tk.append_log("\nClassification finished")
         self.tk.append_log("Time elapsed: %0.3f seconds", (end_time - start_time))

     def run_training(self):
         start_time = TimeUtils.get_time()

         # Training do not need an image opened (consider removing these two lines)
         # if self._const_image is None:
         #     raise IException("Image not found")

         if self.classifier.must_train():
...
@@ -701,7 +708,7 @@ class Act(object):
             The user must install the required dependencies to classifiers.
         """
         if self.classifier is None:
-            raise IException("Classifier not found!")
+            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")

         if self.classifier.must_train():
             self.tk.write_log("Creating training data...")
...
@@ -726,12 +733,12 @@ class Act(object):
             The user must install the required dependencies to classifiers.
         """
         if self.classifier is None:
-            raise IException("Classifier not found!")
+            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")

         if self.tk.ask_ok_cancel("Experimenter All", "This may take several minutes to complete. Are you sure?"):
             if self.classifier.must_train():
                 self.tk.write_log("Creating training data...")
                 fextractor = FeatureExtractor(self.extractors)
                 output_file, run_time = fextractor.extract_all(self.dataset, "training", overwrite=False)
             self.classifier.train(self.dataset, "training")
...
@@ -855,7 +862,7 @@ class Act(object):
     def run_classifier_folder(self, foldername=None):
         if self.classifier is None:
-            raise IException("Classifier not found!")
+            raise IException("Classifier not found! Select from the menu the option Training>Choose Classifier!")

         if foldername is None:
             foldername = self.tk.utils.ask_directory()
...
@@ -951,6 +958,7 @@ class Act(object):
         np.savetxt(f, all_frequency_weighted_IU, fmt='%.5f')
         f.close()

     def run_grafic_confusion_matrix(self):
         '''
         Generate a a graphical confusion matrix where images are classified and according to classification go to the wrong or right folder.
...
@@ -1036,15 +1044,21 @@ class Act(object):
         self.tk.write_log(header_output_middle + 'Initializing...')
         total = str(len(images))
+        # internal function in method for create threads, cannot change for Process(Have a problem with JVM Instances)
+        total = str(len(images))
+        print("Waiting finish classification!")
         for i, image_path in enumerate(images):
             original_name = reduce(lambda a, b: a + b, image_path)
             real_class_path = matrix_path + human + image_path[1]
             predicted = self.classifier.single_classify(original_name, folder, self.extractors, classes)
             message = header_output_middle + str(i+1) + ' of ' + total + ' images classifield.'
             self.tk.write_log(message)
             predicted_class_path = real_class_path + computer + predicted
             predicted_name = predicted_class_path + image_path[2]
             symlink(original_name, predicted_name)
         message = header_output + 'Saved in ' + matrix_path
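The added comment notes that this loop cannot be moved onto multiprocessing.Process because python-weka-wrapper keeps a single JVM per process; threads sharing that JVM are the workaround. A hedged sketch of dispatching per-image jobs with threads (classify is a placeholder here, not the project's classifier API):

    import threading

    def classify(image_path):
        return "predicted-label"  # placeholder for classifier.single_classify(...)

    def job(image_path, results, lock):
        predicted = classify(image_path)
        with lock:  # protect the shared dict across threads
            results[image_path] = predicted

    results, lock = {}, threading.Lock()
    threads = [threading.Thread(target=job, args=(p, results, lock))
               for p in ["img_0.png", "img_1.png"]]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(results)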