# coding=utf-8
# GUI to simplify camera trap image analysis with species recognition models
# https://addaxdatascience.com/ecoassist/
# Created by Peter van Lunteren
# Latest edit by Peter van Lunteren on 8 Nov 2024
# TODO: LAT LON 0 0 - filter out the 0,0 coords for map creation
# TODO: JSON - remove the original json if not running EcoAssist in Timelapse mode. No need to keep that anymore.
# TODO: JSON - remove the part where MD stores its typical threshold values etc in the EcoAssist altered json. It doesn't make sense anymore if the detection categories are changed.
# TODO: VIDEO - create video tutorials of all the steps (simple mode, advanced mode, annotation, postprocessing, etc.)
# TODO: INSTALL WIZARD - https://jrsoftware.org/isinfo.php#features ask ChatGPT "how to create an install wizard around a batch script"
# TODO: SMOOTH - either average or logit
# TODO: EMPTIES - add a checkbox for folder separation where you can skip the empties from being copied
# TODO: LOG SEQUENCE INFO - add sequence information to JSON, CSV, and XLSX
# TODO: SEQ SEP - add feature to separate images into sequence subdirs. Something like "treat sequence as detection" or "Include all images in the sequence" while doing the separation step.
# TODO: VIDEO PROCESSING - if you process a video with a species model, it will ID each animal on each frame. Chances are high that you'll end up with false positives. We'll want to smooth this. Take an average or something.
# TODO: INSTALL - make install files more robust by adding || { echo } to every line. At the end check for all gits and environments, etc.
# TODO: WEBSITE - add info about zip install on windows install page. While you're at it, also print info about ZIP install when there are internet connection issues
# TODO: INFO - add a messagebox when the deployment is done via advanced mode. Now it just says there were errors. Perhaps just one messagebox with extra text if there are errors or warnings. And some counts.
# TODO: SCRIPT COMPILING - dummy start ecoassist directly after installation so all the scripts are already compiled
# TODO: ENVIRONMENTS - implement the automatic installs of env.yml files for new models
# TODO: INSTALL - why is the shortcut not placed on the desktop on Windows?
# TODO: N_CORES - add UI "--ncores” option - see email Dan "mambaforge vs. miniforge"
# TODO: REPORTS - add postprocessing reports - see email Dan "mambaforge vs. miniforge"
# TODO: MINOR - By the way, in the EcoAssist UI, I think the frame extraction status popup uses the same wording as the detection popup. They both say something about "frame X of Y". I think for the frame extraction, it should be "video X of Y".
# TODO: JSON - keep track of the original confidence scores whenever it changes (from detection to classification, after human verification, etc.)
# TODO: SMALL FIXES - see list from Saul ('RE: tentative agenda / discussion points') - 12 July 01:11.
# TODO: ERROR - get rid of error: How about an ok/cancel dialog that simply asks: ‘Recognition file already exists. Do you want me to over-write it?’
# TODO: UNINSTALL - adjust the install script with a flag "uninstall" so that it removes the EcoAssist_Files and the environments. Then make a batch file that executes this on button click.
# TODO: ANNOTATION - improve annotation experience
# - make one progress window instead of all separate pbars when using large jsons
# - I've converted pyqt5 to pyside6 for apple silicon so we don't need to install it via homebrew
# the unix install clones a pyside6 branch of my human-in-the-loop fork. Test Windows
# on this version too and make it the default
# - implement image progress status into main labelimg window, so you don't have two separate windows
# - apparently you still get images in which a class is found under the annotation threshold,
# it should count only the images that have classes above the set annotation threshold,
# at this point it only checks whether it should draw a bbox or not, but still shows the image
# - Add custom shortcuts. See email Grant ('Possible software feature').
# - Add option to order chronological See email Grant ('A few questions I've come up with').
# - If you press the '?' button in the selection window, it doesn't scroll all the way down anymore. So
# adjust the scroll region, or make an option to close the help text
# - shift focus to first label. See email Grant ('Another small request').
# - get rid of the default label pane in the top right. Or at least make it less prominent.
# - remove the X cross to remove the box label pane. No need to have an option to remove it. It's difficult to get it back on macOS.
# - see if you can add the conf of the bbox in the box label pane too. just for clarification purposes for threshold settings (see email Grant "Showing confidence level")
# - there should be a setting that shows box labels inside the image. turn this on by default.
# - remove the messagebox that warns you that you're not completely done with the human verification before postprocess. just do it.
# - why do I ask if the user is done after verification anyway? why not just take the results as they are and accept it?
# - take the annotation confidence ranges the same as the image confidence ranges if the user specified them. Otherwise use 0.6-1.0.
# - When I zoom in, I always zoom in on the center, and then I can’t manage to move the image.
# - I figured out when the label becomes greyed out. For me, it happens when I draw a bounding box myself, and then when I go to the next image, "edit label" is greyed out. If I then close the annotation (but not the entire app) and continue, it works again.
# import packages like a very pointy half christmas tree
import os
import re
import sys
import cv2
import git
import json
import math
import time
import glob
import random
import signal
import shutil
import pickle
import folium
import argparse
import calendar
import platform
import requests
import tempfile
import datetime
import traceback
import subprocess
import webbrowser
import numpy as np
import PIL.ExifTags
import pandas as pd
import tkinter as tk
import customtkinter
import seaborn as sns
from tqdm import tqdm
from tkinter import *
from pathlib import Path
import plotly.express as px
from subprocess import Popen
from functools import partial
from tkinter.font import Font
from GPSPhoto import gpsphoto
from CTkTable import CTkTable
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import xml.etree.cElementTree as ET
from PIL import ImageTk, Image, ImageFile
from RangeSlider.RangeSlider import RangeSliderH
from tkinter import filedialog, ttk, messagebox as mb
from folium.plugins import HeatMap, Draw, MarkerCluster
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
# set global variables
EcoAssist_files = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
ImageFile.LOAD_TRUNCATED_IMAGES = True
CLS_DIR = os.path.join(EcoAssist_files, "models", "cls")
DET_DIR = os.path.join(EcoAssist_files, "models", "det")
# set versions
with open(os.path.join(EcoAssist_files, 'EcoAssist', 'version.txt'), 'r') as file:
    current_EA_version = file.read().strip()
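# version.txt holds a plain version string, e.g. "5.1" (illustrative value)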
corresponding_model_info_version = "5"
# colors and images
EA_blue_color = '#3B8ED0'
EA_green_color = '#96CF7A'
PIL_gradient = PIL.Image.open(os.path.join(EcoAssist_files, "EcoAssist", "imgs", "gradient.png"))
PIL_logo = PIL.Image.open(os.path.join(EcoAssist_files, "EcoAssist", "imgs", "logo_small_bg.png"))
PIL_advanc_top_banner = PIL.Image.open(os.path.join(EcoAssist_files, "EcoAssist", "imgs", "advanc_top_banner.png"))
PIL_simple_top_banner = PIL.Image.open(os.path.join(EcoAssist_files, "EcoAssist", "imgs", "simple_top_banner.png"))
PIL_checkmark = PIL.Image.open(os.path.join(EcoAssist_files, "EcoAssist", "imgs", "checkmark.png"))
PIL_dir_image = PIL.Image.open(os.path.join(EcoAssist_files, "EcoAssist", "imgs", "image-gallery.png"))
PIL_mdl_image = PIL.Image.open(os.path.join(EcoAssist_files, "EcoAssist", "imgs", "tech.png"))
PIL_spp_image = PIL.Image.open(os.path.join(EcoAssist_files, "EcoAssist", "imgs", "paw.png"))
PIL_run_image = PIL.Image.open(os.path.join(EcoAssist_files, "EcoAssist", "imgs", "shuttle.png"))
launch_count_file = os.path.join(EcoAssist_files, 'launch_count.json')
# insert pythonpath
sys.path.insert(0, os.path.join(EcoAssist_files))
sys.path.insert(0, os.path.join(EcoAssist_files, "ai4eutils"))
sys.path.insert(0, os.path.join(EcoAssist_files, "yolov5"))
sys.path.insert(0, os.path.join(EcoAssist_files, "cameratraps"))
# import modules from forked repositories
from visualise_detection.bounding_box import bounding_box as bb
# log pythonpath
print(sys.path)
# set DPI awareness on Windows
if platform.system() == "Windows":
    import ctypes
    try:
        # attempt to set DPI awareness via the Windows 8.1+ API
        ctypes.windll.shcore.SetProcessDpiAwareness(1)
    except AttributeError:
        # fallback for older versions of Windows
        ctypes.windll.user32.SetProcessDPIAware()
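# note: SetProcessDpiAwareness(1) requests system-DPI awareness (Windows 8.1+);
# SetProcessDPIAware() is the pre-8.1 equivalent used as the fallback above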
# load previous settings
def load_global_vars():
    var_file = os.path.join(EcoAssist_files, "EcoAssist", "global_vars.json")
    with open(var_file, 'r') as file:
        variables = json.load(file)
    return variables
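# global_vars.json stores the settings from the previous session, e.g. (illustrative):
#   {"lang_idx": 0, "var_thresh": 0.2, "var_separate_files": false, ...}
# the keys mirror the write_global_vars() calls made throughout this file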
global_vars = load_global_vars()
# language settings
languages_available = ['English', 'Español']
lang_idx = global_vars["lang_idx"]
step_txt = ['Step', 'Paso']
browse_txt = ['Browse', 'Examinar']
cancel_txt = ["Cancel", "Cancelar"]
change_folder_txt = ['Change folder', 'Cambiar carpeta']
view_results_txt = ['View results', 'Ver resultados']
custom_model_txt = ['Custom model', "Otro modelo"]
again_txt = ['Again?', '¿Otra vez?']
eg_txt = ['E.g.', 'Ejem.']
show_txt = ["Show", "Mostrar"]
new_project_txt = ["<new project>", "<nuevo proyecto>"]
warning_txt = ["Warning", "Advertencia"]
information_txt = ["Information", "Información"]
error_txt = ["Error", "Error"]
select_txt = ["Select", "Seleccionar"]
invalid_value_txt = ["Invalid value", "Valor no válido"]
none_txt = ["None", "Ninguno"]
of_txt = ["of", "de"]
suffixes_for_sim_none = [" - just show me where the animals are",
" - muéstrame dónde están los animales"]
#############################################
############# BACKEND FUNCTIONS #############
#############################################
# post-process files
def postprocess(src_dir, dst_dir, thresh, sep, file_placement, sep_conf, vis, crp, exp, plt, exp_format, data_type):
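    # note: the boolean parameter 'plt' (produce plots?) shadows matplotlib.pyplot,
    # which is imported as 'plt' at the top of this file; matplotlib itself is not
    # used inside this function, so the shadowing is harmless here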
    # log
    print(f"EXECUTED: {sys._getframe().f_code.co_name}({locals()})\n")
    # update progress window
    progress_window.update_values(process = f"{data_type}_pst", status = "load")
    # plt needs csv files so make sure to produce them, even if the user didn't specify
    # if the user didn't specify to export to csv, make sure to remove them later on
    remove_csv = False
    if plt and not exp:
        # except if the csvs are already created, of course
        if not (os.path.isfile(os.path.join(dst_dir, "results_detections.csv")) and
                os.path.isfile(os.path.join(dst_dir, "results_files.csv"))):
            exp = True
            exp_format = dpd_options_exp_format[lang_idx][1] # CSV
            remove_csv = True
    # get correct json file
    if data_type == "img":
        recognition_file = os.path.join(src_dir, "image_recognition_file.json")
    else:
        recognition_file = os.path.join(src_dir, "video_recognition_file.json")
    # check if user is not in the middle of an annotation session
    if data_type == "img" and get_hitl_var_in_json(recognition_file) == "in-progress":
        if not mb.askyesno("Verification session in progress", f"Your verification session is not yet done. You can finish the session "
                           f"by clicking 'Continue' at '{lbl_hitl_main_txt[lang_idx]}', or just continue to post-process "
                           "with the results as they are now.\n\nDo you want to continue to post-process?"):
            return
    # init vars
    global cancel_var
    start_time = time.time()
    nloop = 1
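    # nloop counts processed files; the ETA computed below is a linear extrapolation:
    #   time_left = elapsed * n_images / nloop - elapsed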
    # warn user
    if data_type == "vid":
        if vis or crp or plt:
            check_json_presence_and_warn_user(["visualize, crop, or plot", "visualizar, recortar o trazar"][lang_idx],
                                              ["visualizing, cropping, or plotting", "visualizando, recortando o trazando"][lang_idx],
                                              ["visualization, cropping, and plotting", "visualización, recorte y trazado"][lang_idx])
            vis, crp, plt = [False] * 3
    # fetch label map
    label_map = fetch_label_map_from_json(recognition_file)
    inverted_label_map = {v: k for k, v in label_map.items()}
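    # illustrative example of these mappings (actual categories come from the recognition file):
    #   label_map          = {'1': 'animal', '2': 'person', '3': 'vehicle'}
    #   inverted_label_map = {'animal': '1', 'person': '2', 'vehicle': '3'}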
    # create list with colours for visualisation
    if vis:
        colors = ["fuchsia", "blue", "orange", "yellow", "green", "red", "aqua", "navy", "teal", "olive", "lime", "maroon", "purple"]
        colors = colors * 30
    # make sure json has relative paths
    json_paths_converted = False
    if check_json_paths(recognition_file) != "relative":
        make_json_relative(recognition_file)
        json_paths_converted = True
    # set cancel bool
    cancel_var = False
    # open json file
    with open(recognition_file) as image_recognition_file_content:
        data = json.load(image_recognition_file_content)
    n_images = len(data['images'])
    # initialise the csv files
    # csv files are always created, no matter what the user specified as export format
    # these csv files are then converted to the desired format and deleted, if required
    if exp:
        # for files
        csv_for_files = os.path.join(dst_dir, "results_files.csv")
        if not os.path.isfile(csv_for_files):
            df = pd.DataFrame(list(), columns=["absolute_path", "relative_path", "data_type", "n_detections", "max_confidence", "human_verified",
                                               'DateTimeOriginal', 'DateTime', 'DateTimeDigitized', 'Latitude', 'Longitude', 'GPSLink', 'Altitude', 'Make',
                                               'Model', 'Flash', 'ExifOffset', 'ResolutionUnit', 'YCbCrPositioning', 'XResolution', 'YResolution',
                                               'ExifVersion', 'ComponentsConfiguration', 'FlashPixVersion', 'ColorSpace', 'ExifImageWidth',
                                               'ISOSpeedRatings', 'ExifImageHeight', 'ExposureMode', 'WhiteBalance', 'SceneCaptureType',
                                               'ExposureTime', 'Software', 'Sharpness', 'Saturation', 'ReferenceBlackWhite'])
            df.to_csv(csv_for_files, encoding='utf-8', index=False)
        # for detections
        csv_for_detections = os.path.join(dst_dir, "results_detections.csv")
        if not os.path.isfile(csv_for_detections):
            df = pd.DataFrame(list(), columns=["absolute_path", "relative_path", "data_type", "label", "confidence", "human_verified", "bbox_left",
                                               "bbox_top", "bbox_right", "bbox_bottom", "file_height", "file_width", 'DateTimeOriginal', 'DateTime',
                                               'DateTimeDigitized', 'Latitude', 'Longitude', 'GPSLink', 'Altitude', 'Make', 'Model', 'Flash', 'ExifOffset',
                                               'ResolutionUnit', 'YCbCrPositioning', 'XResolution', 'YResolution', 'ExifVersion', 'ComponentsConfiguration',
                                               'FlashPixVersion', 'ColorSpace', 'ExifImageWidth', 'ISOSpeedRatings', 'ExifImageHeight', 'ExposureMode',
                                               'WhiteBalance', 'SceneCaptureType', 'ExposureTime', 'Software', 'Sharpness', 'Saturation', 'ReferenceBlackWhite'])
            df.to_csv(csv_for_detections, encoding='utf-8', index=False)
    # set global vars
    global postprocessing_error_log
    postprocessing_error_log = os.path.join(dst_dir, "postprocessing_error_log.txt")
    # count the number of rows to make sure it doesn't exceed the limit for an excel sheet
    if exp and exp_format == dpd_options_exp_format[lang_idx][0]: # if exp_format is the first option in the dropdown menu -> XLSX
        n_rows_files = 1
        n_rows_detections = 1
        for image in data['images']:
            n_rows_files += 1
            if 'detections' in image:
                for detection in image['detections']:
                    if detection["conf"] >= thresh:
                        n_rows_detections += 1
        if n_rows_detections > 1048576 or n_rows_files > 1048576:
            mb.showerror(["Too many rows", "Demasiadas filas"][lang_idx],
                         ["The XLSX file you are trying to create is too large!\n\nThe maximum number of rows in an XLSX file is "
                          f"1048576, while you are trying to create a sheet with {max(n_rows_files, n_rows_detections)} rows.\n\nIf"
                          " you require the results in XLSX format, please run the process on smaller chunks so that it doesn't "
                          f"exceed Microsoft's row limit. Or choose CSV as {lbl_exp_format_txt[lang_idx]} in advanced mode.",
                          "¡El archivo XLSX que está intentando crear es demasiado grande!\n\nEl número máximo de filas en un archivo"
                          f" XLSX es 1048576, mientras que usted está intentando crear una hoja con {max(n_rows_files, n_rows_detections)}"
                          " filas.\n\nSi necesita los resultados en formato XLSX, ejecute el proceso en trozos más pequeños para que no "
                          f"supere el límite de filas de Microsoft. O elija CSV como {lbl_exp_format_txt[lang_idx]} en modo avanzado."][lang_idx])
            return
    # loop through images
    for image in data['images']:
        # cancel process if required
        if cancel_var:
            break
        # check for failure
        if "failure" in image:
            # write warnings to log file
            with open(postprocessing_error_log, 'a+') as f:
                f.write(f"File '{image['file']}' was skipped by post processing features because '{image['failure']}'\n")
            # calculate stats
            elapsed_time_sep = str(datetime.timedelta(seconds=round(time.time() - start_time)))
            time_left_sep = str(datetime.timedelta(seconds=round(((time.time() - start_time) * n_images / nloop) - (time.time() - start_time))))
            progress_window.update_values(process = f"{data_type}_pst",
                                          status = "running",
                                          cur_it = nloop,
                                          tot_it = n_images,
                                          time_ela = elapsed_time_sep,
                                          time_rem = time_left_sep,
                                          cancel_func = cancel)
            nloop += 1
            root.update()
            # skip this iteration
            continue
        # get image info
        file = image['file']
        detections_list = image['detections']
        n_detections = len(detections_list)
        # check if it has been manually verified
        manually_checked = False
        if 'manually_checked' in image:
            if image['manually_checked']:
                manually_checked = True
        # init vars
        max_detection_conf = 0.0
        unique_labels = []
        bbox_info = []
        # open files
        if vis or crp or exp:
            if data_type == "img":
                im_to_vis = cv2.imread(os.path.normpath(os.path.join(src_dir, file)))
                # check if that image was able to be loaded
                if im_to_vis is None:
                    with open(postprocessing_error_log, 'a+') as f:
                        f.write(f"File '{image['file']}' was skipped by post processing features. This might be due to the file being moved or deleted after analysis, or because of a special character in the file path.\n")
                    elapsed_time_sep = str(datetime.timedelta(seconds=round(time.time() - start_time)))
                    time_left_sep = str(datetime.timedelta(seconds=round(((time.time() - start_time) * n_images / nloop) - (time.time() - start_time))))
                    progress_window.update_values(process = f"{data_type}_pst",
                                                  status = "running",
                                                  cur_it = nloop,
                                                  tot_it = n_images,
                                                  time_ela = elapsed_time_sep,
                                                  time_rem = time_left_sep,
                                                  cancel_func = cancel)
                    nloop += 1
                    root.update()
                    continue
                im_to_crop_path = os.path.join(src_dir, file)
                # load old image and extract EXIF
                origImage = Image.open(os.path.join(src_dir, file))
                try:
                    exif = origImage.info['exif']
                except:
                    exif = None
                origImage.close()
            else:
                vid = cv2.VideoCapture(os.path.join(src_dir, file))
        # read image dates etc
        if exp:
            # try to read metadata
            try:
                img_for_exif = PIL.Image.open(os.path.join(src_dir, file))
                metadata = {
                    PIL.ExifTags.TAGS[k]: v
                    for k, v in img_for_exif._getexif().items()
                    if k in PIL.ExifTags.TAGS
                }
                img_for_exif.close()
            except:
                metadata = {'GPSInfo': None,
                            'ResolutionUnit': None,
                            'ExifOffset': None,
                            'Make': None,
                            'Model': None,
                            'DateTime': None,
                            'YCbCrPositioning': None,
                            'XResolution': None,
                            'YResolution': None,
                            'ExifVersion': None,
                            'ComponentsConfiguration': None,
                            'ShutterSpeedValue': None,
                            'DateTimeOriginal': None,
                            'DateTimeDigitized': None,
                            'FlashPixVersion': None,
                            'UserComment': None,
                            'ColorSpace': None,
                            'ExifImageWidth': None,
                            'ExifImageHeight': None}
            # try to add GPS data
            try:
                gpsinfo = gpsphoto.getGPSData(os.path.join(src_dir, file))
                if 'Latitude' in gpsinfo and 'Longitude' in gpsinfo:
                    gpsinfo['GPSLink'] = f"https://maps.google.com/?q={gpsinfo['Latitude']},{gpsinfo['Longitude']}"
            except:
                gpsinfo = {'Latitude': None,
                           'Longitude': None,
                           'GPSLink': None}
            # combine metadata and gps data
            exif_data = {**metadata, **gpsinfo}
            # check if datetime values can be found
            exif_params = []
            for param in ['DateTimeOriginal', 'DateTime', 'DateTimeDigitized', 'Latitude', 'Longitude', 'GPSLink', 'Altitude', 'Make', 'Model',
                          'Flash', 'ExifOffset', 'ResolutionUnit', 'YCbCrPositioning', 'XResolution', 'YResolution', 'ExifVersion',
                          'ComponentsConfiguration', 'FlashPixVersion', 'ColorSpace', 'ExifImageWidth', 'ISOSpeedRatings',
                          'ExifImageHeight', 'ExposureMode', 'WhiteBalance', 'SceneCaptureType', 'ExposureTime', 'Software',
                          'Sharpness', 'Saturation', 'ReferenceBlackWhite']:
                try:
                    if param.startswith('DateTime'):
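                        # EXIF stores datetimes as 'YYYY:MM:DD HH:MM:SS'; convert to
                        # 'DD/MM/YY HH:MM:SS', e.g. '2024:11:08 14:30:00' -> '08/11/24 14:30:00'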
                        datetime_raw = str(exif_data[param])
                        param_value = datetime.datetime.strptime(datetime_raw, '%Y:%m:%d %H:%M:%S').strftime('%d/%m/%y %H:%M:%S')
                    else:
                        param_value = str(exif_data[param])
                except:
                    param_value = "NA"
                exif_params.append(param_value)
        # loop through detections
        if 'detections' in image:
            for detection in image['detections']:
                # get confidence
                conf = detection["conf"]
                # write max conf
                if manually_checked:
                    max_detection_conf = "NA"
                elif conf > max_detection_conf:
                    max_detection_conf = conf
                # if above user specified thresh
                if conf >= thresh:
                    # change conf to string for verified images
                    if manually_checked:
                        conf = "NA"
                    # get detection info
                    category = detection["category"]
                    label = label_map[category]
                    if sep:
                        unique_labels.append(label)
                        unique_labels = sorted(list(set(unique_labels)))
                    # get bbox info
                    if vis or crp or exp:
                        if data_type == "img":
                            height, width = im_to_vis.shape[:2]
                        else:
                            height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
                            width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
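                        # MegaDetector bboxes are normalised [x_min, y_min, width, height] fractions;
                        # e.g. bbox [0.1, 0.2, 0.5, 0.4] on a 1000x800 px image maps to
                        # left=100, top=160, right=600, bottom=480 (illustrative values)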
                        w_box = detection['bbox'][2]
                        h_box = detection['bbox'][3]
                        xo = detection['bbox'][0] + (w_box/2)
                        yo = detection['bbox'][1] + (h_box/2)
                        left = int(round(detection['bbox'][0] * width))
                        top = int(round(detection['bbox'][1] * height))
                        right = int(round(w_box * width)) + left
                        bottom = int(round(h_box * height)) + top
                        # store in list
                        bbox_info.append([label, conf, manually_checked, left, top, right, bottom, height, width, xo, yo, w_box, h_box])
        # separate files
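        # images containing multiple species end up in a combined folder whose name joins
        # the sorted labels, e.g. 'coyote_deer' (illustrative folder name)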
        if sep:
            if n_detections == 0:
                file = move_files(file, "empty", file_placement, max_detection_conf, sep_conf, dst_dir, src_dir, manually_checked)
            else:
                if len(unique_labels) > 1:
                    labels_str = "_".join(unique_labels)
                    file = move_files(file, labels_str, file_placement, max_detection_conf, sep_conf, dst_dir, src_dir, manually_checked)
                elif len(unique_labels) == 0:
                    file = move_files(file, "empty", file_placement, max_detection_conf, sep_conf, dst_dir, src_dir, manually_checked)
                else:
                    file = move_files(file, label, file_placement, max_detection_conf, sep_conf, dst_dir, src_dir, manually_checked)
        # collect info to append to csv files
        if exp:
            # file info
            row = pd.DataFrame([[src_dir, file, data_type, len(bbox_info), max_detection_conf, manually_checked, *exif_params]])
            row.to_csv(csv_for_files, encoding='utf-8', mode='a', index=False, header=False)
            # detections info
            rows = []
            for bbox in bbox_info:
                row = [src_dir, file, data_type, *bbox[:9], *exif_params]
                rows.append(row)
            rows = pd.DataFrame(rows)
            rows.to_csv(csv_for_detections, encoding='utf-8', mode='a', index=False, header=False)
        # visualize images
        if vis and len(bbox_info) > 0:
            for bbox in bbox_info:
                if manually_checked:
                    vis_label = f"{bbox[0]} (verified)"
                else:
                    conf_label = round(bbox[1], 2) if round(bbox[1], 2) != 1.0 else 0.99
                    vis_label = f"{bbox[0]} {conf_label}"
                color = colors[int(inverted_label_map[bbox[0]])]
                bb.add(im_to_vis, *bbox[3:7], vis_label, color)
            im = os.path.join(dst_dir, file)
            Path(os.path.dirname(im)).mkdir(parents=True, exist_ok=True)
            cv2.imwrite(im, im_to_vis)
            # load new image and save exif
            if exif is not None:
                image_new = Image.open(im)
                image_new.save(im, exif=exif)
                image_new.close()
        # crop images
        if crp and len(bbox_info) > 0:
            counter = 1
            for bbox in bbox_info:
                # if files have been moved
                if sep:
                    im_to_crp = Image.open(os.path.join(dst_dir, file))
                else:
                    im_to_crp = Image.open(im_to_crop_path)
                crp_im = im_to_crp.crop(bbox[3:7])
                im_to_crp.close()
                filename, file_extension = os.path.splitext(file)
                im_path = os.path.join(dst_dir, filename + '_crop' + str(counter) + '_' + bbox[0] + file_extension)
                Path(os.path.dirname(im_path)).mkdir(parents=True, exist_ok=True)
                crp_im.save(im_path)
                counter += 1
                # load new image and save exif
                if exif is not None:
                    image_new = Image.open(im_path)
                    image_new.save(im_path, exif=exif)
                    image_new.close()
        # calculate stats
        elapsed_time_sep = str(datetime.timedelta(seconds=round(time.time() - start_time)))
        time_left_sep = str(datetime.timedelta(seconds=round(((time.time() - start_time) * n_images / nloop) - (time.time() - start_time))))
        progress_window.update_values(process = f"{data_type}_pst",
                                      status = "running",
                                      cur_it = nloop,
                                      tot_it = n_images,
                                      time_ela = elapsed_time_sep,
                                      time_rem = time_left_sep,
                                      cancel_func = cancel)
        nloop += 1
        root.update()
    # create summary csv
    if exp:
        csv_for_summary = os.path.join(dst_dir, "results_summary.csv")
        if os.path.exists(csv_for_summary):
            os.remove(csv_for_summary)
        det_info = pd.DataFrame(pd.read_csv(csv_for_detections, dtype=dtypes, low_memory=False))
        summary = pd.DataFrame(det_info.groupby(['label', 'data_type']).size().sort_values(ascending=False).reset_index(name='n_detections'))
        summary.to_csv(csv_for_summary, encoding='utf-8', mode='w', index=False, header=True)
    # convert csv to xlsx if required
    if exp and exp_format == dpd_options_exp_format[lang_idx][0]: # if exp_format is the first option in the dropdown menu -> XLSX
        xlsx_path = os.path.join(dst_dir, "results.xlsx")
        # check if the excel file already exists, e.g. when processing both img and vid
        dfs = []
        for result_type in ['detections', 'files', 'summary']:
            csv_path = os.path.join(dst_dir, f"results_{result_type}.csv")
            if os.path.isfile(xlsx_path):
                # if so, add new rows to existing ones
                df_xlsx = pd.read_excel(xlsx_path, sheet_name=result_type)
                df_csv = pd.read_csv(csv_path, dtype=dtypes, low_memory=False)
                df = pd.concat([df_xlsx, df_csv], ignore_index=True)
            else:
                df = pd.read_csv(csv_path, dtype=dtypes, low_memory=False)
            dfs.append(df)
            # plt needs the csv's, so don't remove just yet
            if not plt:
                if os.path.isfile(csv_path):
                    os.remove(csv_path)
        # overwrite rows to xlsx file
        with pd.ExcelWriter(xlsx_path, engine='openpyxl') as writer:
            for idx, result_type in enumerate(['detections', 'files', 'summary']):
                df = dfs[idx]
                if result_type in ['detections', 'files']:
                    df['DateTimeOriginal'] = pd.to_datetime(df['DateTimeOriginal'], format='%d/%m/%y %H:%M:%S')
                    df['DateTime'] = pd.to_datetime(df['DateTime'], format='%d/%m/%y %H:%M:%S')
                    df['DateTimeDigitized'] = pd.to_datetime(df['DateTimeDigitized'], format='%d/%m/%y %H:%M:%S')
                df.to_excel(writer, sheet_name=result_type, index=None, header=True)
    # convert csv to coco format if required
    if exp and exp_format == dpd_options_exp_format[lang_idx][2]: # COCO
        # init vars
        coco_path = os.path.join(dst_dir, "results_coco.json")
        detections_df = pd.read_csv(os.path.join(dst_dir, "results_detections.csv"), dtype=dtypes, low_memory=False)
        files_df = pd.read_csv(os.path.join(dst_dir, "results_files.csv"), dtype=dtypes, low_memory=False)
        # convert csv to coco format
        csv_to_coco(
            detections_df=detections_df,
            files_df=files_df,
            output_path=coco_path
        )
        # only plt needs the csv's, so if the user didn't specify plt, remove csvs
        if not plt:
            for result_type in ['detections', 'files', 'summary']:
                csv_path = os.path.join(dst_dir, f"results_{result_type}.csv")
                if os.path.isfile(csv_path):
                    os.remove(csv_path)
    # change json paths back, if converted earlier
    if json_paths_converted:
        make_json_absolute(recognition_file)
    # let the user know it's done
    progress_window.update_values(process = f"{data_type}_pst", status = "done")
    root.update()
    # create graphs
    if plt:
        produce_plots(dst_dir)
    # if the user wants XLSX or COCO as output, or if the user didn't specify exp
    # altogether but the files were created for plt -> remove CSV files
    if (exp and exp_format == dpd_options_exp_format[lang_idx][0]) or \
       (exp and exp_format == dpd_options_exp_format[lang_idx][2]) or \
       remove_csv:
        for result_type in ['detections', 'files', 'summary']:
            csv_path = os.path.join(dst_dir, f"results_{result_type}.csv")
            if os.path.isfile(csv_path):
                os.remove(csv_path)
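# typical invocation of postprocess() (illustrative values; in practice the arguments
# come from the UI via start_postprocess() below):
#   postprocess(src_dir="/data/deployment1", dst_dir="/data/results", thresh=0.2,
#               sep=True, file_placement=2, sep_conf=False, vis=True, crp=False,
#               exp=True, plt=True, exp_format="XLSX", data_type="img")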
# convert csv to coco format
def csv_to_coco(detections_df, files_df, output_path):
    # log
    print(f"EXECUTED: {sys._getframe().f_code.co_name}\n")
    # init coco structure
    coco = {
        "images": [],
        "annotations": [],
        "categories": [],
        "licenses": [{
            "id": 1,
            "name": "Unknown",
            "url": "NA"
        }],
        "info": {
            "description": f"Object detection results exported from EcoAssist (v{str(current_EA_version)}).",
            "url": "https://addaxdatascience.com/ecoassist/",
            "date_created": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        }
    }
    # prepare categories and category mapping
    category_mapping = {}
    current_category_id = 1
    # assign categories from detections
    for label in detections_df['label'].unique():
        if label not in category_mapping:
            category_mapping[label] = current_category_id
            coco['categories'].append({
                "id": current_category_id,
                "name": label
            })
            current_category_id += 1
    # process each image and its detections
    annotation_id = 1
    for _, file_info in files_df.iterrows():
        # create image entry
        image_id = len(coco['images']) + 1
        image_entry = {
            "id": image_id,
            "width": int(file_info['ExifImageWidth']),
            "height": int(file_info['ExifImageHeight']),
            "file_name": file_info['relative_path'],
            "license": 1,
            "date_captured": datetime.datetime.strptime(file_info['DateTimeOriginal'],
                                                        "%d/%m/%y %H:%M:%S").strftime("%Y-%m-%d %H:%M:%S")
        }
        coco['images'].append(image_entry)
        # add annotations for this image
        image_detections = detections_df[detections_df['relative_path'] == file_info['relative_path']]
        for _, detection in image_detections.iterrows():
            bbox_left = int(detection['bbox_left'])
            bbox_top = int(detection['bbox_top'])
            bbox_right = int(detection['bbox_right'])
            bbox_bottom = int(detection['bbox_bottom'])
            bbox_width = bbox_right - bbox_left
            bbox_height = bbox_bottom - bbox_top
            annotation_entry = {
                "id": annotation_id,
                "image_id": image_id,
                "category_id": category_mapping[detection['label']],
                "bbox": [bbox_left, bbox_top, bbox_width, bbox_height],
                "area": float(bbox_width * bbox_height),
                "iscrowd": 0
            }
            coco['annotations'].append(annotation_entry)
            annotation_id += 1
    # save when done
    with open(output_path, 'w') as output_file:
        json.dump(coco, output_file, indent=4)
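# the resulting file follows the standard COCO layout, e.g. (illustrative snippet):
#   {"images":      [{"id": 1, "file_name": "site1/IMG0001.JPG", "width": 1000, "height": 800, ...}],
#    "annotations": [{"id": 1, "image_id": 1, "category_id": 2, "bbox": [100, 160, 500, 320], ...}],
#    "categories":  [{"id": 2, "name": "deer"}]}
# note that COCO bboxes are [x_min, y_min, width, height] in absolute pixels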
# set data types for csv import so that the machine doesn't run out of memory with large files (>0.5M rows)
dtypes = {
    'absolute_path': 'str',
    'relative_path': 'str',
    'data_type': 'str',
    'label': 'str',
    'confidence': 'float64',
    'human_verified': 'bool',
    'bbox_left': 'str',
    'bbox_top': 'str',
    'bbox_right': 'str',
    'bbox_bottom': 'str',
    'file_height': 'str',
    'file_width': 'str',
    'DateTimeOriginal': 'str',
    'DateTime': 'str',
    'DateTimeDigitized': 'str',
    'Latitude': 'str',
    'Longitude': 'str',
    'GPSLink': 'str',
    'Altitude': 'str',
    'Make': 'str',
    'Model': 'str',
    'Flash': 'str',
    'ExifOffset': 'str',
    'ResolutionUnit': 'str',
    'YCbCrPositioning': 'str',
    'XResolution': 'str',
    'YResolution': 'str',
    'ExifVersion': 'str',
    'ComponentsConfiguration': 'str',
    'FlashPixVersion': 'str',
    'ColorSpace': 'str',
    'ExifImageWidth': 'str',
    'ISOSpeedRatings': 'str',
    'ExifImageHeight': 'str',
    'ExposureMode': 'str',
    'WhiteBalance': 'str',
    'SceneCaptureType': 'str',
    'ExposureTime': 'str',
    'Software': 'str',
    'Sharpness': 'str',
    'Saturation': 'str',
    'ReferenceBlackWhite': 'str',
    'n_detections': 'int64',
    'max_confidence': 'float64',
}
# open progress window and initiate the post-process progress window
def start_postprocess():
    # log
    print(f"EXECUTED: {sys._getframe().f_code.co_name}({locals()})\n")
    # save settings for next time
    write_global_vars({
        "lang_idx": lang_idx,
        "var_separate_files": var_separate_files.get(),
        "var_file_placement": var_file_placement.get(),
        "var_sep_conf": var_sep_conf.get(),
        "var_vis_files": var_vis_files.get(),
        "var_crp_files": var_crp_files.get(),
        "var_exp": var_exp.get(),
        "var_exp_format_idx": dpd_options_exp_format[lang_idx].index(var_exp_format.get()),
        "var_plt": var_plt.get(),
        "var_thresh": var_thresh.get()
    })
    # fetch user input
    src_dir = var_choose_folder.get()
    dst_dir = var_output_dir.get()
    thresh = var_thresh.get()
    sep = var_separate_files.get()
    file_placement = var_file_placement.get()
    sep_conf = var_sep_conf.get()
    vis = var_vis_files.get()
    crp = var_crp_files.get()
    exp = var_exp.get()
    plt = var_plt.get()
    exp_format = var_exp_format.get()
    # init cancel variable
    global cancel_var
    cancel_var = False
    # check which json files are present
    img_json = False
    if os.path.isfile(os.path.join(src_dir, "image_recognition_file.json")):
        img_json = True
    vid_json = False
    if os.path.isfile(os.path.join(src_dir, "video_recognition_file.json")):
        vid_json = True
    if not img_json and not vid_json:
        mb.showerror(error_txt[lang_idx], ["No model output file present. Make sure you run step 2 before post-processing the files.",
                                           "No hay archivo de salida del modelo. Asegúrese de ejecutar el paso 2 antes de postprocesar"
                                           " los archivos."][lang_idx])
        return
    # check if destination dir is valid
    if dst_dir in ["", "/", "\\", ".", "~", ":"] or not os.path.isdir(dst_dir):
        mb.showerror(["Destination folder not set", "Carpeta de destino no establecida."][lang_idx],
                     ["Destination folder not set.\n\n You have not specified where the post-processing results should be placed or the set "
                      "folder does not exist. This is required.",
                      "Carpeta de destino no establecida. No ha especificado dónde deben colocarse los resultados del postprocesamiento o la "
                      "carpeta establecida no existe. Esta opción es obligatoria."][lang_idx])
        return
    # warn user if the original files will be overwritten with visualized files
    if os.path.normpath(dst_dir) == os.path.normpath(src_dir) and vis and not sep:
        if not mb.askyesno(["Original images will be overwritten", "Las imágenes originales se sobrescribirán."][lang_idx],
                           [f"WARNING! The visualized images will be placed in the folder with the original data: '{src_dir}'. By doing this, you will overwrite the original images"
                            " with the visualized ones. Visualizing is permanent and cannot be undone. Are you sure you want to continue?",
                            f"ATENCIÓN. Las imágenes visualizadas se colocarán en la carpeta con los datos originales: '{src_dir}'. Al hacer esto, se sobrescribirán las imágenes "
                            "originales con las visualizadas. La visualización es permanente y no se puede deshacer. ¿Está seguro de que desea continuar?"][lang_idx]):
            return
    # warn user if images will be moved and visualized
    if sep and file_placement == 1 and vis:
        if not mb.askyesno(["Original images will be overwritten", "Las imágenes originales se sobrescribirán."][lang_idx],
                           [f"WARNING! You specified to visualize the original images. Visualizing is permanent and cannot be undone. If you don't want to visualize the original "
                            f"images, please select 'Copy' as '{lbl_file_placement_txt}'. Are you sure you want to continue with the current settings?",
                            "ATENCIÓN. Ha especificado visualizar las imágenes originales. La visualización es permanente y no puede deshacerse. Si no desea visualizar las "
                            f"imágenes originales, seleccione 'Copiar' como '{lbl_file_placement_txt}'. ¿Está seguro de que desea continuar con la configuración actual?"][lang_idx]):
            return
    # initialise progress window with processes
    processes = []
    if img_json:
        processes.append("img_pst")
    if plt:
        processes.append("plt")
    if vid_json:
        processes.append("vid_pst")
    global progress_window
    progress_window = ProgressWindow(processes = processes)
    progress_window.open()
    try:
        # postprocess images
        if img_json:
            postprocess(src_dir, dst_dir, thresh, sep, file_placement, sep_conf, vis, crp, exp, plt, exp_format, data_type = "img")
        # postprocess videos
        if vid_json and not cancel_var:
            postprocess(src_dir, dst_dir, thresh, sep, file_placement, sep_conf, vis, crp, exp, plt, exp_format, data_type = "vid")
        # complete
        complete_frame(fth_step)
        # check if there are postprocessing errors written
        if os.path.isfile(postprocessing_error_log):
            mb.showwarning(warning_txt[lang_idx], [f"One or more files failed to be analysed by the model (e.g., corrupt files) and will be skipped by "
                                                   f"post-processing features. See\n\n'{postprocessing_error_log}'\n\nfor more info.",
                                                   f"Uno o más archivos no han podido ser analizados por el modelo (por ejemplo, ficheros corruptos) y serán "
                                                   f"omitidos por las funciones de post-procesamiento. Para más información, véase\n\n'{postprocessing_error_log}'"][lang_idx])
        # close progress window
        progress_window.close()
        # check window transparency
        reset_window_transparency()
    except Exception as error:
        # log error
        print("ERROR:\n" + str(error) + "\n\nDETAILS:\n" + str(traceback.format_exc()) + "\n\n")
        # show error
        mb.showerror(title=error_txt[lang_idx],
                     message=["An error has occurred", "Ha ocurrido un error"][lang_idx] + " (EcoAssist v" + current_EA_version + "): '" + str(error) + "'.",
                     detail=traceback.format_exc())
        # close window
        progress_window.close()
# function to produce graphs and maps
def produce_plots(results_dir):
    # update internal progressbar via tqdm stats
    def update_pbar_plt():
        pbar.update(1)
        tqdm_stats = pbar.format_dict
        progress_window.update_values(process = "plt",
                                      status = "running",
                                      cur_it = tqdm_stats['n'],
                                      tot_it = tqdm_stats['total'],
                                      time_ela = str(datetime.timedelta(seconds=round(tqdm_stats['elapsed']))),
                                      time_rem = str(datetime.timedelta(seconds=round((tqdm_stats['total'] - tqdm_stats['n']) / tqdm_stats['n'] * tqdm_stats['elapsed'] if tqdm_stats['n'] else 0))),
                                      cancel_func = cancel)
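    # note: pbar.format_dict exposes tqdm's internal stats as a plain dict,
    # e.g. {'n': 3, 'total': 10, 'elapsed': 1.2, ...} (illustrative values)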
    # create all time plots
    def create_time_plots(data, save_path_base, temporal_units, pbar, counts_df):
        # maximum number of ticks per x axis
        max_n_ticks = 50
        # define specific functions per plot type
        def plot_obs_over_time_total_static(time_unit):
            plt.figure(figsize=(10, 6))
            combined_data = grouped_data.sum(axis=0).resample(time_format_mapping[time_unit]['freq']).sum()
            plt.bar(combined_data.index.strftime(time_format_mapping[time_unit]['time_format']), combined_data, width=0.9)
            plt.suptitle("")
            plt.title(f'Total observations (grouped per {time_unit}, n = {counts_df["count"].sum()})')
            plt.ylabel('Count')
            plt.xlabel(time_unit)
            plt.xticks(rotation=90)
            x_vals = np.arange(len(combined_data))
            tick_step = max(len(combined_data) // max_n_ticks, 1)
            selected_ticks = x_vals[::tick_step]
            while_iteration = 0
            while len(selected_ticks) >= max_n_ticks:
                tick_step += 1
                while_iteration += 1
                selected_ticks = x_vals[::tick_step]
                if while_iteration > 100:
                    break
            selected_labels = combined_data.index.strftime(time_format_mapping[time_unit]['time_format'])[::tick_step]
            plt.xticks(selected_ticks, selected_labels)
            plt.tight_layout()
            save_path = os.path.join(save_path_base, "graphs", "bar-charts", time_format_mapping[time_unit]['dir'], "combined-single-layer.png")
            Path(os.path.dirname(save_path)).mkdir(parents=True, exist_ok=True)
            plt.savefig(save_path)
            update_pbar_plt()
        def plot_obs_over_time_total_interactive(time_unit):
            combined_data = grouped_data.sum(axis=0).resample(time_format_mapping[time_unit]['freq']).sum()
            hover_text = [f'Period: {date}<br>Count: {count}<extra></extra>'
                          for date, count in zip(combined_data.index.strftime(time_format_mapping[time_unit]['time_format']),
                                                 combined_data)]
            fig = go.Figure(data=[go.Bar(x=combined_data.index.strftime(time_format_mapping[time_unit]['time_format']),
                                         y=combined_data,
                                         hovertext=hover_text,
                                         hoverinfo='text')])
            fig.update_traces(hovertemplate='%{hovertext}')
            fig.update_layout(title=f'Total observations (grouped per {time_unit})',
                              xaxis_title='Period',
                              yaxis_title='Count',
                              xaxis_tickangle=90)
            save_path = os.path.join(save_path_base, "graphs", "bar-charts", time_format_mapping[time_unit]['dir'], "combined-single-layer.html")
            Path(os.path.dirname(save_path)).mkdir(parents=True, exist_ok=True)
            fig.write_html(save_path)
            update_pbar_plt()
        def plot_obs_over_time_combined_static(time_unit):
            plt.figure(figsize=(10, 6))
            for label in grouped_data.index:
                grouped_data_indexed = grouped_data.loc[label].resample(time_format_mapping[time_unit]['freq']).sum()
                plt.plot(grouped_data_indexed.index.strftime(time_format_mapping[time_unit]['time_format']), grouped_data_indexed, label=label)
            plt.suptitle("")
            plt.title(f'Observations over time (grouped per {time_unit}, n = {counts_df["count"].sum()})')
            plt.ylabel('Count')
            plt.xticks(rotation=90)