forked from GuyTevet/motion-diffusion-model
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathrender.py
86 lines (77 loc) · 3.9 KB
/
render.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
import argparse
import logging
# import torch
# from data_loaders.p2m.tools import inverse
logger = logging.getLogger(__name__)
import numpy as np
import os
# from body_models.smplh import SMPLH
def parse_args():
    """Parse command-line options for the rendering script.

    Returns:
        argparse.Namespace with attributes ``dataset_name``, ``batch_size``,
        ``min_motion_len``, ``mode`` and ``npy``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset-name', default='t2m', help='dataset name')
    # type=int so a value given on the command line parses to an int,
    # matching the integer default (previously it stayed a string).
    parser.add_argument('--batch-size', default=32, type=int, help='batch_size')
    parser.add_argument('--min-motion-len', default=24, type=int, help='the minimum of motion length')
    parser.add_argument('--mode', default="video", help="mode of rendering")
    # Help text fixed: it previously repeated "mode of rendering"
    # (copy-paste error) although this option is the input .npy path.
    parser.add_argument('--npy',
                        default="/mnt/disk_1/jinpeng/motion-diffusion-model/save/p2m_humanml_trans_enc_512_126_temos/samples_p2m_humanml_trans_enc_512_126_temos_000050000_seed10_the_person_walked_forward_and_is_picking_up_his_toolbox/results.npy",
                        type=str, help="path to the results .npy file to render")
    args = parser.parse_args()
    return args
def render_cli(data, output, mode, downsample):
    """Render one motion sequence with the Blender backend.

    Args:
        data: vertex data for a single sequence — presumably shaped
            (num_frames, num_vertices, 3) per the caller's note; TODO confirm.
        output: folder where the rendered frames are written.
        mode: rendering mode passed through to the renderer (e.g. 'video',
            'sequence').
        downsample: forwarded to the renderer.  NOTE(review): this argument
            was previously ignored — ``downsample=True`` was hard-coded in
            the ``render`` call — which this fix corrects.
    """
    print("Beginning Rendering")
    # Imported lazily so the module can be loaded without Blender present.
    from visualize.render.blender import render
    frames_folder = render(data, frames_folder=output,
                           denoising=True,
                           oldrender=True,
                           canonicalize=True,
                           exact_frame=0.5,
                           num=8, mode=mode,
                           faces_path='/mnt/disk_1/jinpeng/motion-diffusion-model/body_models/smplh.faces',
                           downsample=downsample,  # bug fix: honor the parameter
                           always_on_floor=False,
                           init=True,  # each call is a fresh render session
                           gt=False)
if __name__ == '__main__':
    # Render every '*vertices*' .npy file found one level below the
    # hard-coded run folder, skipping the top-level .json metadata files.
    # Per the original author's note, each array holds vertex data of
    # shape (64, 6890, 3); only the first item (index 0) is rendered.
    mode = 'video'
    path = '/mnt/disk_1/jinpeng/motion-diffusion-model/1006_gptprompt_3/2023-10-06-18-33-53'

    # Collect <path>/<sample_dir>/<...vertices...>.npy input paths.
    data_path = []
    for entry in os.listdir(path):
        if '.json' in entry:
            continue
        sample_dir = os.path.join(path, entry)
        for name_1 in os.listdir(sample_dir):
            if 'vertices' in name_1:
                data_path.append(os.path.join(sample_dir, name_1))

    for npy_path in data_path:
        vertices = np.load(npy_path)[0]
        # Output folder = the npy path with its extension stripped.
        # The previous str.replace() on the whole path could corrupt it if
        # the basename happened to occur as a substring earlier in the path.
        output = os.path.splitext(npy_path)[0]
        render_cli(data=vertices, output=output, downsample=False, mode=mode)