-
-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathutils.py
58 lines (43 loc) · 1.67 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import cv2
import numpy as np
import torch
def tensor_to_cv2(image: torch.Tensor) -> np.ndarray:
    """Convert a batched image tensor to a single OpenCV BGR uint8 array.

    Drops the leading batch dimension (assumes batch size 1 and channels-last
    layout — TODO confirm against callers), then delegates to ``to_np`` for
    the 0-255 scaling and RGB->BGR conversion, so this helper and ``to_np``
    cannot drift apart.
    """
    return to_np(image.squeeze(0))
def to_np(tsr: torch.Tensor, color=cv2.COLOR_RGB2BGR) -> np.ndarray:
    """Convert a single image tensor to a uint8 OpenCV array.

    NOTE(review): assumes ``tsr`` holds values in [0, 1] with channels last —
    confirm against callers. ``color`` is the cv2 color-conversion code
    applied as the final step (RGB->BGR by default).
    """
    scaled = tsr.cpu().numpy() * 255
    return cv2.cvtColor(scaled.astype(np.uint8), color)
def batched_tensor_to_cv2_list(
    tensor_imgs: torch.Tensor, color=cv2.COLOR_RGB2BGR
) -> list[np.ndarray]:
    """Convert every image in a batched tensor to an OpenCV array.

    Iterates the batch along dim 0 and applies ``to_np`` with the given
    cv2 color-conversion code to each image.
    """
    frames: list[np.ndarray] = []
    for img in tensor_imgs:
        frames.append(to_np(img, color))
    return frames
def cv2_img_to_tensor(np_array: np.ndarray) -> torch.Tensor:
    """Convert one OpenCV image to a tensor with a leading batch dimension.

    Thin wrapper: ``cv2_to_a_tensor`` does the BGR->RGB/[0,1] conversion,
    then a batch axis of size 1 is prepended.
    """
    return cv2_to_a_tensor(np_array).unsqueeze(0)
def cv2_to_a_tensor(np_arr: np.ndarray) -> torch.Tensor:
    """Convert an OpenCV BGR image to a float32 RGB tensor scaled by 1/255.

    NOTE(review): the final /255 assumes the input holds 0-255 pixel values;
    a float image already in [0, 1] would be scaled down twice — confirm
    what callers pass in.
    """
    # cv2.cvtColor does not accept float64 input, so narrow to float32 first.
    if np_arr.dtype == np.float64:
        np_arr = np_arr.astype(np.float32)
    rgb = cv2.cvtColor(np_arr, cv2.COLOR_BGR2RGB)
    return torch.from_numpy(rgb.astype(np.float32) / 255.0)
def resize_cv2_list(np_lst: list[np.ndarray], sample_fr: np.ndarray):
    """Resize every frame in ``np_lst`` to the height/width of ``sample_fr``."""
    # cv2.resize wants (width, height), i.e. the array shape reversed.
    target_size = (sample_fr.shape[1], sample_fr.shape[0])
    resized = []
    for frame in np_lst:
        resized.append(cv2.resize(frame, target_size))
    return resized
def out_video(predictions: list[np.ndarray]):
    """Stack a list of OpenCV frames into one tensor along a new dim 0.

    Each frame goes through ``cv2_to_a_tensor`` (BGR->RGB, float32, /255)
    before stacking, yielding a video-shaped batch tensor.
    """
    frames = [cv2_to_a_tensor(frame) for frame in predictions]
    return torch.stack(frames, dim=0)
def process_msk_lst(msks: list[np.ndarray]):
    """Return the mask frames as a new list, frames unchanged.

    Currently a pass-through: binary thresholding (cv2.threshold at 1 -> 255)
    was present here but deliberately disabled in the original, so each frame
    is forwarded as-is in a fresh list.
    """
    return list(msks)