recap.py
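
# Builds a "RECAP 2023" slideshow video from the .webp images in a folder: a fast
# flash-through with the title overlaid, a held title card, then every image shown
# for a Bezier-eased duration.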
import os

import cv2
import numpy as np
from moviepy.editor import ImageSequenceClip, VideoFileClip, concatenate_videoclips
from PIL import Image, ImageDraw, ImageFont


def bezier_curve(t, p0, p1, p2, p3):
    # Cubic Bezier curve evaluated at t in [0, 1] with scalar control points p0..p3.
    return (
        (1 - t) ** 3 * p0
        + 3 * (1 - t) ** 2 * t * p1
        + 3 * (1 - t) * t**2 * p2
        + t**3 * p3
    )


def add_text_to_frame(
    frame,
    text,
    font_size,
    frame_width,
    frame_height,
    font_path=os.path.join(os.getcwd(), "static", "fonts", "Inter-SemiBold.ttf"),
    position=(0, 0),
    text_color=(0, 0, 255),
):
    # Convert OpenCV frame to Pillow Image
    img_pil = Image.fromarray(cv2.cvtColor(np.uint8(frame), cv2.COLOR_BGR2RGB))
    # Create a draw object
    draw = ImageDraw.Draw(img_pil)
    # Specify font and size
    font = ImageFont.truetype(font_path, font_size)
    # Get text size
    text_size = draw.textbbox((0, 0), text, font=font)
    # Calculate the center of the frame with respect to text size
    center_x = int((frame_width - text_size[2]) / 2) + position[0]
    center_y = int((frame_height - text_size[3]) / 2) + position[1]
    # Add text to the image
    draw.text((center_x, center_y), text, font=font, fill=text_color)
    # Convert Pillow Image back to OpenCV frame
    frame_with_text = cv2.cvtColor(np.array(img_pil), cv2.COLOR_RGB2BGR)
    return frame_with_text
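
# Example usage of add_text_to_frame (illustrative values, assuming a 1080x1920
# frame read with OpenCV):
#   frame = add_text_to_frame(frame, "RECAP 2023", 200, 1080, 1920, text_color=(255, 255, 255))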


def generate_video(image_folder, output_path, frames_per_second, t_duration):
    images = sorted(
        [
            os.path.join(image_folder, img)
            for img in os.listdir(image_folder)
            if img.endswith(".webp")
        ]
    )
    # Generate the duration for each image based on bezier curve
    durations = []
    for i in range(len(images)):
        t = i / len(images)
        durations.append(bezier_curve(t, 2.0, 0.4, 0.3, 0.9))
    total_duration = np.sum(durations)
    normalized_durations = [
        duration * (t_duration / total_duration) for duration in durations
    ]
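    # With the control points above (2.0, 0.4, 0.3, 0.9), the easing starts at 2.0
    # at t = 0 and ends at 0.9 at t = 1, so earlier images get larger weights; the
    # weights are then rescaled so the durations sum to t_duration seconds.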
print("Generating part1")
part1 = ImageSequenceClip(images, durations=[2 / len(images)] * len(images))
part1.write_videofile("part1.mp4", fps=frames_per_second)
cap = cv2.VideoCapture("part1.mp4")
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
out = cv2.VideoWriter(
"output.mp4", cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height)
)
    # Overlay the title on every frame of part1
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            frame = add_text_to_frame(
                frame,
                "RECAP 2023",
                200,
                width,
                height,
                text_color=(255, 255, 255),
            )
            out.write(frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
        else:
            break
    # Hold a solid black frame with the title text for 75 frames
    blank = np.zeros((height, width, 3), np.uint8)
    blank = add_text_to_frame(
        blank,
        "RECAP 2023",
        200,
        width,
        height,
        text_color=(255, 255, 255),
    )
    for _ in range(75):
        out.write(blank)
    cap.release()
    out.release()
print("Generating part2")
part2 = ImageSequenceClip(images, durations=normalized_durations)
# combine part1 and part2
clips = [VideoFileClip("output.mp4"), part2]
final_clip = concatenate_videoclips(clips, method="chain")
final_clip.write_videofile(
output_path, fps=frames_per_second, threads=6, codec="libx264"
)
# delete part1
os.remove("part1.mp4")
# delete output.mp4
os.remove("output.mp4")


def buildRecap():
    image_folder = os.path.join(os.getcwd(), "combined")
    output_path = os.path.join(os.getcwd(), "static", "slideshow_test.mp4")
    frames_per_second = 30
    total_duration = 30
    generate_video(image_folder, output_path, frames_per_second, total_duration)
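

# Optional convenience entry point (assumed, for running this file directly):
if __name__ == "__main__":
    buildRecap()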