core: frame pacing
Use frame timing and render time statistics to pace frames.

Right now the criteria are simple:

* Don't render multiple frames in one vblank cycle. Otherwise the
  rendered frame will be delayed by multiple cycles, which isn't ideal.
* Start rendering as late as possible while still hitting vblank.

The refresh rate is estimated from a rolling average of frame timings. Render
time is predicted from the rolling maximum of the past 128 frames. The
window size still needs to be investigated.

Professionals might laugh at how rudimentary this is, but hopefully it is
better than what we had before, which was absolutely nothing at all.

Signed-off-by: Yuxuan Shui <yshuiv7@gmail.com>
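
For illustration, here is a minimal, self-contained sketch of the scheduling rule described above, assuming the rolling statistics are already available as plain numbers. The helper name render_delay_us, its parameters, and the example values are hypothetical and not part of picom; the actual logic lives in next_frame_offset() in src/picom.c below.

#include <stdint.h>
#include <stdio.h>

/* How long to wait (in microseconds) before starting to render, so that a
 * frame taking at most max_render_us finishes just before the next vblank.
 * Returns 0 if we should render immediately. All names here are illustrative. */
static uint64_t render_delay_us(uint64_t last_vblank_us, uint64_t avg_frame_us,
                                uint64_t max_render_us, uint64_t now_us) {
	const uint64_t slack_us = 1000; /* safety margin, same 1 ms as the patch */
	uint64_t next_vblank_us = last_vblank_us + avg_frame_us;
	if (max_render_us >= avg_frame_us) {
		/* Rendering takes longer than a whole frame; no point delaying. */
		return 0;
	}
	uint64_t deadline_us = next_vblank_us - max_render_us;
	if (now_us + slack_us >= deadline_us) {
		return 0; /* already late, start right away */
	}
	return deadline_us - now_us - slack_us;
}

int main(void) {
	/* Example: ~60 Hz display (16667 us per frame), renders take up to 4 ms,
	 * and 2 ms have passed since the last vblank. */
	uint64_t delay = render_delay_us(1000000, 16667, 4000, 1002000);
	printf("start rendering in %llu us\n", (unsigned long long)delay);
	return 0;
}

The 1 ms slack mirrors the margin used in the patch; tuning it trades a lower chance of missing vblank against slightly higher latency.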
yshui committed Dec 13, 2022
1 parent a014191 commit 3935718
Showing 2 changed files with 137 additions and 25 deletions.
7 changes: 4 additions & 3 deletions src/common.h
@@ -148,9 +148,8 @@ typedef struct session {
ev_timer unredir_timer;
/// Timer for fading
ev_timer fade_timer;
/// Use an ev_idle callback for drawing
/// So we only start drawing when events are processed
ev_idle draw_idle;
/// Use an ev_timer callback for drawing
ev_timer draw_timer;
/// Called every time we have timeouts or new data on socket,
/// so we can be sure that if xcb reads from the X socket at any time during event
/// handling, we will not leave any event unhandled in the queue
@@ -239,6 +238,8 @@ typedef struct session {
uint64_t last_msc;
/// When did we render our last frame.
uint64_t last_render;
/// Whether we can perform frame pacing.
bool frame_pacing;

struct rolling_avg *frame_time;

155 changes: 133 additions & 22 deletions src/picom.c
@@ -186,10 +186,60 @@ static inline struct managed_win *find_win_all(session_t *ps, const xcb_window_t
return w;
}

/// How many seconds into the future should we start rendering the next frame.
double next_frame_offset(session_t *ps) {
int render_time = rolling_max_get_max(ps->render_stats);
if (render_time < 0) {
// We don't have enough data yet, just render immediately.
return 0;
}
int frame_time = (int)rolling_avg_get_avg(ps->frame_time);
auto next_msc = ps->last_msc + (uint64_t)frame_time;
auto deadline = next_msc - (uint64_t)render_time;

const uint64_t slack = 1000;
struct timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
auto now_us = (uint64_t)now.tv_sec * 1000000 + (uint64_t)now.tv_nsec / 1000;
if (now_us + slack >= deadline) {
// We are already late, render immediately.
log_trace("Already late, rendering immediately, last_msc: %" PRIu64
", render_time: %d, frame_time: %d, now_us: %" PRIu64,
ps->last_msc, render_time, frame_time, now_us);
return 0;
}
log_trace("Delay: %lf s, last_msc: %" PRIu64 ", render_time: %d, frame_time: %d, "
"now_us: %" PRIu64 ", next_msc: %" PRIu64,
(double)(deadline - now_us - slack) / 1000000.0, ps->last_msc,
render_time, frame_time, now_us, next_msc);
return (double)(deadline - now_us - slack) / 1000000.0;
}

void queue_redraw(session_t *ps) {
// Whether we have already rendered for the current frame.
// If frame pacing is not enabled, pretend this is false.
bool already_rendered = (ps->last_render > ps->last_msc) && ps->frame_pacing;
if (already_rendered) {
struct timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
uint64_t now_us = (uint64_t)now.tv_sec * 1000000 + (uint64_t)now.tv_nsec / 1000;
log_trace("Already rendered for this frame, not queuing redraw. Rendered "
"%d us ago, last msc %d us ago",
(int)(now_us - ps->last_render), (int)(now_us - ps->last_msc));
}
// If --benchmark is used, redraw is always queued
if (!ps->redraw_needed && !ps->o.benchmark) {
ev_idle_start(ps->loop, &ps->draw_idle);
if (!ps->redraw_needed && !ps->o.benchmark && !already_rendered) {
if (ps->frame_pacing) {
// Our loop can be blocked by frame present, which causes ev_now to
// drift away from the real time. We need to correct it.
ev_timer_set(&ps->draw_timer,
next_frame_offset(ps) + ev_time() - ev_now(ps->loop), 0);
} else {
// Not doing frame pacing, just redraw immediately
ev_timer_set(&ps->draw_timer, 0, 0);
}
assert(!ev_is_active(&ps->draw_timer));
ev_timer_start(ps->loop, &ps->draw_timer);
}
ps->redraw_needed = true;
}
@@ -1301,6 +1351,12 @@ static bool redirect_start(session_t *ps) {
// Must call XSync() here
x_sync(ps->c);

ps->frame_pacing = true;
if (ps->o.legacy_backends || ps->backend_data->ops->swap_buffers == NULL) {
// Disable frame pacing if we are using a legacy backend or the backend
// does front buffer rendering.
ps->frame_pacing = false;
}
auto err = xcb_request_check(ps->c, select_input);
if (err) {
ps->present_event_id = XCB_NONE;
@@ -1318,6 +1374,10 @@ static bool redirect_start(session_t *ps) {
if (ps->present_event_id != XCB_NONE) {
ps->present_event = xcb_register_for_special_xge(
ps->c, &xcb_present_id, ps->present_event_id, NULL);
} else {
log_error("Failed to initialize the Present extension, frame pacing "
"disabled.");
ps->frame_pacing = false;
}

ps->redirected = true;
@@ -1386,15 +1446,53 @@ static void handle_present_events(session_t *ps) {
if (cne->ust <= ps->last_msc) {
continue;
}
if (!XCB_AWAIT_VOID(xcb_present_notify_msc, ps->c,
session_get_target_window(ps), 0,
cne->msc + 1, 0, 0)) {
log_error("PresentNotifyMSC failed, frame pacing "
"disabled.");
ps->frame_pacing = false;
}
struct timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
uint64_t now_usec =
(uint64_t)(now.tv_sec * 1000000 + now.tv_nsec / 1000);
uint64_t drift;
if (cne->ust > now_usec) {
drift = cne->ust - now_usec;
} else {
drift = now_usec - cne->ust;
}
if (ps->last_msc != 0) {
int frame_time = (int)(cne->ust - ps->last_msc);
rolling_avg_push(ps->frame_time, frame_time);
log_trace("Frame time: %d us, rolling average: %lf us,"
"msc: %" PRIu64,
"msc: %" PRIu64 ", offset: %" PRIu64,
frame_time, rolling_avg_get_avg(ps->frame_time),
cne->ust);
cne->ust, drift);
}
ps->last_msc = cne->ust;
if (drift > 1000000 && ps->frame_pacing) {
log_error("Temporal anomaly detected, frame pacing "
"disabled. (Are we running inside a time "
"namespace?), %" PRIu64 " %" PRIu64,
now_usec, ps->last_msc);
ps->frame_pacing = false;
// We could have deferred a frame in queue_redraw()
// because of frame pacing. Unconditionally queue a frame
// for simplicity.
queue_redraw(ps);
}
if (ps->frame_pacing && ps->redraw_needed &&
!ev_is_active(&ps->draw_timer)) {
log_trace("Frame pacing: queueing redraw");
// We deferred a frame in queue_redraw() because of
// frame pacing. Schedule it now.
ev_timer_set(
&ps->draw_timer,
next_frame_offset(ps) + ev_time() - ev_now(ps->loop), 0);
ev_timer_start(ps->loop, &ps->draw_timer);
}
}
}
}
@@ -1521,7 +1619,9 @@ static void handle_pending_updates(EV_P_ struct session *ps) {

static void draw_callback_impl(EV_P_ session_t *ps, int revents attr_unused) {
struct timespec start;
clock_gettime(CLOCK_MONOTONIC, &start);
if (ps->frame_pacing) {
clock_gettime(CLOCK_MONOTONIC, &start);
}
handle_pending_updates(EV_A_ ps);

if (ps->first_frame) {
@@ -1607,14 +1707,21 @@ static void draw_callback_impl(EV_P_ session_t *ps, int revents attr_unused) {
exit(0);
}

struct timespec end;
clock_gettime(CLOCK_MONOTONIC, &end);
int render_time_ms = (int)((end.tv_sec - start.tv_sec) * 1000000 +
(end.tv_nsec - start.tv_nsec) / 1000);
rolling_max_push(ps->render_stats, render_time_ms);
if (ps->frame_pacing) {
struct timespec end;
clock_gettime(CLOCK_MONOTONIC, &end);
int render_time_ms = (int)((end.tv_sec - start.tv_sec) * 1000000 +
(end.tv_nsec - start.tv_nsec) / 1000);
rolling_max_push(ps->render_stats, render_time_ms);

log_trace("Render time: %d us, rolling max: %d us", render_time_ms,
rolling_max_get_max(ps->render_stats));
ps->last_render =
(uint64_t)(end.tv_sec * 1000000 + end.tv_nsec / 1000);

log_trace("Render time: %d us, rolling max: %d us %d, now: "
"%" PRIu64,
render_time_ms, rolling_max_get_max(ps->render_stats),
ps->frame_pacing, ps->last_render);
}

if (ps->backend_data->ops->swap_buffers) {
ps->backend_data->ops->swap_buffers(ps->backend_data, &reg_damage);
@@ -1632,18 +1739,21 @@ static void draw_callback_impl(EV_P_ session_t *ps, int revents attr_unused) {
// TODO(yshui) Investigate how big the X critical section needs to be. There are
// suggestions that rendering should be in the critical section as well.

// Queue redraw if animation is running. This should be picked up by the
// next present event.
ps->redraw_needed = animation;
}

static void draw_callback(EV_P_ ev_idle *w, int revents) {
session_t *ps = session_ptr(w, draw_idle);
static void draw_callback(EV_P_ ev_timer *w, int revents) {
session_t *ps = session_ptr(w, draw_timer);

draw_callback_impl(EV_A_ ps, revents);
ev_timer_stop(EV_A_ w);

// Don't do painting non-stop unless we are in benchmark mode, or if
// draw_callback_impl thinks we should continue painting.
if (!ps->o.benchmark && !ps->redraw_needed) {
ev_idle_stop(EV_A_ & ps->draw_idle);
// Immediately start next frame if we are in benchmark mode.
if (ps->o.benchmark) {
ev_timer_set(w, 0, 0);
ev_timer_start(EV_A_ w);
}
}

@@ -2249,7 +2359,7 @@ static session_t *session_init(int argc, char **argv, Display *dpy,
ev_io_init(&ps->xiow, x_event_callback, ConnectionNumber(ps->dpy), EV_READ);
ev_io_start(ps->loop, &ps->xiow);
ev_init(&ps->unredir_timer, tmout_unredir_callback);
ev_idle_init(&ps->draw_idle, draw_callback);
ev_init(&ps->draw_timer, draw_callback);

ev_init(&ps->fade_timer, fade_timer_callback);

@@ -2541,7 +2651,7 @@ static void session_destroy(session_t *ps) {
// Stop libev event handlers
ev_timer_stop(ps->loop, &ps->unredir_timer);
ev_timer_stop(ps->loop, &ps->fade_timer);
ev_idle_stop(ps->loop, &ps->draw_idle);
ev_timer_stop(ps->loop, &ps->draw_timer);
ev_prepare_stop(ps->loop, &ps->event_check);
ev_signal_stop(ps->loop, &ps->usr1_signal);
ev_signal_stop(ps->loop, &ps->int_signal);
@@ -2553,9 +2663,10 @@
* @param ps current session
*/
static void session_run(session_t *ps) {
// In benchmark mode, we want draw_idle handler to always be active
// In benchmark mode, we want draw_timer handler to always be active
if (ps->o.benchmark) {
ev_idle_start(ps->loop, &ps->draw_idle);
ev_timer_set(&ps->draw_timer, 0, 0);
ev_timer_start(ps->loop, &ps->draw_timer);
} else {
// Let's draw our first frame!
queue_redraw(ps);
