Revert "unix: make loops and watchers usable after fork()"
This reverts commit fd7ce57 because it is unnecessary, and CI testing suggests it may call `abort()`.
vtjnash committed Nov 14, 2018
1 parent 404ad8f commit afe1a90
Showing 18 changed files with 3 additions and 1,011 deletions.
1 change: 0 additions & 1 deletion CMakeLists.txt
@@ -49,7 +49,6 @@ set(uv_test_sources
test/test-env-vars.c
test/test-error.c
test/test-fail-always.c
test/test-fork.c
test/test-fs-copyfile.c
test/test-fs-event.c
test/test-fs-poll.c
1 change: 0 additions & 1 deletion Makefile.am
@@ -175,7 +175,6 @@ test_run_tests_SOURCES = test/blackhole-server.c \
test/test-fs-event.c \
test/test-fs-poll.c \
test/test-fs.c \
test/test-fork.c \
test/test-getters-setters.c \
test/test-get-currentexe.c \
test/test-get-loadavg.c \
54 changes: 0 additions & 54 deletions docs/src/loop.rst
@@ -172,60 +172,6 @@ API
    Walk the list of handles: `walk_cb` will be executed with the given `arg`.

.. c:function:: int uv_loop_fork(uv_loop_t* loop)

    .. versionadded:: 1.12.0

    Reinitialize any kernel state necessary in the child process after
    a :man:`fork(2)` system call.

    Previously started watchers will continue to be started in the
    child process.

    It is necessary to explicitly call this function on every event
    loop created in the parent process that you plan to continue to
    use in the child, including the default loop (even if you don't
    continue to use it in the parent). This function must be called
    before calling :c:func:`uv_run` or any other API function using
    the loop in the child. Failure to do so will result in undefined
    behaviour, possibly including duplicate events delivered to both
    parent and child or aborting the child process.

    When possible, it is preferred to create a new loop in the child
    process instead of reusing a loop created in the parent. New loops
    created in the child process after the fork should not use this
    function.

    This function is not implemented on Windows, where it returns ``UV_ENOSYS``.

    .. caution::

       This function is experimental. It may contain bugs, and is subject to
       change or removal. API and ABI stability is not guaranteed.

    .. note::

       On Mac OS X, if directory FS event handles were in use in the
       parent process *for any event loop*, the child process will no
       longer be able to use the most efficient FSEvent
       implementation. Instead, uses of directory FS event handles in
       the child will fall back to the same implementation used for
       files and on other kqueue-based systems.

    .. caution::

       On AIX and SunOS, FS event handles that were already started in
       the parent process at the time of forking will *not* deliver
       events in the child process; they must be closed and restarted.
       On all other platforms, they will continue to work normally
       without any further intervention.

    .. caution::

       Any previous value returned from :c:func:`uv_backend_fd` is now
       invalid. That function must be called again to determine the
       correct backend file descriptor.

.. c:function:: void* uv_loop_get_data(const uv_loop_t* loop)

    Returns `loop->data`.
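For reference, the call sequence the removed documentation prescribes is small. A minimal sketch, not part of this diff; the handle-free loop and the error handling are illustrative only:

```c
/* Sketch of the documented pattern: fork(), then uv_loop_fork() in the
 * child before any other use of the loop. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
#include <uv.h>

int main(void) {
  uv_loop_t loop;
  if (uv_loop_init(&loop) != 0)
    abort();

  /* ... register handles on the loop here ... */

  pid_t pid = fork();
  if (pid == -1)
    abort();

  if (pid == 0) {
    /* Child: reinitialize kernel state before uv_run() or any other
     * call that touches the loop. */
    int err = uv_loop_fork(&loop);
    if (err != 0) {
      fprintf(stderr, "uv_loop_fork: %s\n", uv_strerror(err));
      _exit(1);
    }
  }

  /* Parent and child each run their own copy of the loop from here on. */
  uv_run(&loop, UV_RUN_DEFAULT);
  uv_loop_close(&loop);
  return 0;
}
```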
1 change: 0 additions & 1 deletion include/uv.h
@@ -274,7 +274,6 @@ UV_EXTERN void uv_loop_delete(uv_loop_t*);
UV_EXTERN size_t uv_loop_size(void);
UV_EXTERN int uv_loop_alive(const uv_loop_t* loop);
UV_EXTERN int uv_loop_configure(uv_loop_t* loop, uv_loop_option option, ...);
UV_EXTERN int uv_loop_fork(uv_loop_t* loop);

UV_EXTERN int uv_run(uv_loop_t*, uv_run_mode mode);
UV_EXTERN void uv_stop(uv_loop_t*);
23 changes: 1 addition & 22 deletions src/threadpool.c
@@ -185,7 +185,7 @@ UV_DESTRUCTOR(static void cleanup(void)) {
#endif


static void init_threads(void) {
static void init_once(void) {
unsigned int i;
const char* val;
uv_sem_t sem;
@@ -232,27 +232,6 @@ static void init_threads(void) {
}


#ifndef _WIN32
static void reset_once(void) {
uv_once_t child_once = UV_ONCE_INIT;
memcpy(&once, &child_once, sizeof(child_once));
}
#endif


static void init_once(void) {
#ifndef _WIN32
/* Re-initialize the threadpool after fork.
* Note that this discards the global mutex and condition as well
* as the work queue.
*/
if (pthread_atfork(NULL, NULL, &reset_once))
abort();
#endif
init_threads();
}


void uv__work_submit(uv_loop_t* loop,
struct uv__work* w,
enum uv__work_kind kind,
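The hunk deleted just above was the fork-awareness in init_once(): register a pthread_atfork() child handler that wipes the once-guard so the threadpool is rebuilt lazily in the child. A standalone sketch of that pattern using libuv's public uv_once API; ensure_threadpool() is a hypothetical wrapper standing in for the real submission path:

```c
/* Sketch of the reset-the-once-guard-after-fork pattern the removed
 * code used. The worker setup is stubbed out. */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <uv.h>

static uv_once_t once = UV_ONCE_INIT;

static void init_threads(void) {
  /* One-time setup; in libuv this spawns the threadpool workers. */
}

static void reset_once(void) {
  /* Runs in the child after fork(): overwrite the guard with a fresh
   * UV_ONCE_INIT so the next uv_once() call re-runs init_once(). */
  uv_once_t child_once = UV_ONCE_INIT;
  memcpy(&once, &child_once, sizeof(child_once));
}

static void init_once(void) {
  /* Register the child handler, then do the real initialization. */
  if (pthread_atfork(NULL, NULL, &reset_once))
    abort();
  init_threads();
}

void ensure_threadpool(void) {
  /* Every submission path funnels through this guard. */
  uv_once(&once, init_once);
}
```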
7 changes: 0 additions & 7 deletions src/unix/aix.c
@@ -104,13 +104,6 @@ void uv__platform_loop_delete(uv_loop_t* loop) {
}


int uv__io_fork(uv_loop_t* loop) {
uv__platform_loop_delete(loop);

return uv__platform_loop_init(loop);
}


int uv__io_check_fd(uv_loop_t* loop, int fd) {
struct poll_ctl pc;

10 changes: 0 additions & 10 deletions src/unix/async.c
@@ -200,16 +200,6 @@ static int uv__async_start(uv_loop_t* loop) {
}


int uv__async_fork(uv_loop_t* loop) {
if (loop->async_io_watcher.fd == -1) /* never started */
return 0;

uv__async_stop(loop);

return uv__async_start(loop);
}


void uv__async_stop(uv_loop_t* loop) {
if (loop->async_io_watcher.fd == -1)
return;
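For context, uv__async_fork() (removed above) simply stops and restarts the async watcher so the child's wakeup descriptor is no longer shared with the parent, keeping one process's uv_async_send() from waking the other. A sketch of the same idea with a plain self-pipe; recreate_wakeup_pipe_in_child() is a hypothetical helper, and libuv itself prefers an eventfd on Linux:

```c
/* Sketch: give the forked child a wakeup pipe of its own instead of the
 * one it inherited from the parent. */
#include <fcntl.h>
#include <unistd.h>

/* On success returns 0 and stores a fresh nonblocking pipe in fds[2]. */
int recreate_wakeup_pipe_in_child(int fds[2]) {
  /* Drop the descriptors inherited from the parent... */
  close(fds[0]);
  close(fds[1]);

  /* ...and replace them with a pair only this process reads and writes. */
  if (pipe(fds))
    return -1;
  if (fcntl(fds[0], F_SETFL, O_NONBLOCK) || fcntl(fds[1], F_SETFL, O_NONBLOCK))
    return -1;
  return 0;
}
```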
8 changes: 0 additions & 8 deletions src/unix/internal.h
@@ -193,13 +193,10 @@ void uv__io_feed(uv_loop_t* loop, uv__io_t* w);
int uv__io_active(const uv__io_t* w, unsigned int events);
int uv__io_check_fd(uv_loop_t* loop, int fd);
void uv__io_poll(uv_loop_t* loop, int timeout); /* in milliseconds or -1 */
int uv__io_fork(uv_loop_t* loop);
int uv__fd_exists(uv_loop_t* loop, int fd);

/* async */
void uv__async_stop(uv_loop_t* loop);
int uv__async_fork(uv_loop_t* loop);


/* loop */
void uv__run_idle(uv_loop_t* loop);
@@ -231,7 +228,6 @@ int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
void uv__signal_close(uv_signal_t* handle);
void uv__signal_global_once_init(void);
void uv__signal_loop_cleanup(uv_loop_t* loop);
int uv__signal_loop_fork(uv_loop_t* loop);

/* platform specific */
uint64_t uv__hrtime(uv_clocktype_t type);
@@ -288,10 +284,6 @@ UV_UNUSED(static char* uv__basename_r(const char* path)) {
return s + 1;
}

#if defined(__linux__)
int uv__inotify_fork(uv_loop_t* loop, void* old_watchers);
#endif

typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*);

int uv__getsockpeername(const uv_handle_t* handle,
37 changes: 1 addition & 36 deletions src/unix/kqueue.c
@@ -59,38 +59,6 @@ int uv__kqueue_init(uv_loop_t* loop) {
}


#if defined(__APPLE__)
static int uv__has_forked_with_cfrunloop;
#endif

int uv__io_fork(uv_loop_t* loop) {
int err;
loop->backend_fd = -1;
err = uv__kqueue_init(loop);
if (err)
return err;

#if defined(__APPLE__)
if (loop->cf_state != NULL) {
/* We cannot start another CFRunloop and/or thread in the child
process; CF aborts if you try or if you try to touch the thread
at all to kill it. So the best we can do is ignore it from now
on. This means we can't watch directories in the same way
anymore (like other BSDs). It also means we cannot properly
clean up the allocated resources; calling
uv__fsevents_loop_delete from uv_loop_close will crash the
process. So we sidestep the issue by pretending like we never
started it in the first place.
*/
uv__has_forked_with_cfrunloop = 1;
uv__free(loop->cf_state);
loop->cf_state = NULL;
}
#endif
return err;
}


int uv__io_check_fd(uv_loop_t* loop, int fd) {
struct kevent ev;
int rc;
@@ -471,9 +439,6 @@ int uv_fs_event_start(uv_fs_event_t* handle,
handle->cb = cb;

#if defined(__APPLE__)
if (uv__has_forked_with_cfrunloop)
goto fallback;

/* Nullify field to perform checks later */
handle->cf_cb = NULL;
handle->realpath = NULL;
@@ -508,7 +473,7 @@ int uv_fs_event_stop(uv_fs_event_t* handle) {
uv__handle_stop(handle);

#if defined(__APPLE__)
if (uv__has_forked_with_cfrunloop || uv__fsevents_close(handle))
if (uv__fsevents_close(handle))
#endif /* defined(__APPLE__) */
{
uv__io_close(handle->loop, &handle->event_watcher);
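Background on why kqueue.c needed its own uv__io_fork(): a kqueue descriptor is not inherited across fork(), so a child that wants to keep polling has to open a new queue and re-register its filters. A minimal sketch under that assumption; reinit_kqueue_in_child() and watched_fd are hypothetical, not libuv API:

```c
/* Sketch (BSD/macOS): re-create the kqueue in the child and re-arm one
 * read watcher -- the per-loop work the removed uv__io_fork() did. */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>

/* Returns the new kqueue descriptor, or -1 on error. */
int reinit_kqueue_in_child(int watched_fd) {
  int kq = kqueue();            /* fresh queue owned by the child */
  if (kq == -1)
    return -1;

  struct kevent ev;
  EV_SET(&ev, watched_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
  if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1) {  /* re-arm the watcher */
    close(kq);
    return -1;
  }
  return kq;
}
```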
18 changes: 0 additions & 18 deletions src/unix/linux-core.c
@@ -108,24 +108,6 @@ int uv__platform_loop_init(uv_loop_t* loop) {
}


int uv__io_fork(uv_loop_t* loop) {
int err;
void* old_watchers;

old_watchers = loop->inotify_watchers;

uv__close(loop->backend_fd);
loop->backend_fd = -1;
uv__platform_loop_delete(loop);

err = uv__platform_loop_init(loop);
if (err)
return err;

return uv__inotify_fork(loop, old_watchers);
}


void uv__platform_loop_delete(uv_loop_t* loop) {
if (loop->inotify_fd == -1) return;
uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN);
67 changes: 0 additions & 67 deletions src/unix/linux-inotify.c
@@ -61,8 +61,6 @@ static void uv__inotify_read(uv_loop_t* loop,
uv__io_t* w,
unsigned int revents);

static void maybe_free_watcher_list(struct watcher_list* w,
uv_loop_t* loop);

static int new_inotify_fd(void) {
int err;
@@ -110,71 +108,6 @@ static int init_inotify(uv_loop_t* loop) {
}


int uv__inotify_fork(uv_loop_t* loop, void* old_watchers) {
/* Open the inotify_fd, and re-arm all the inotify watchers. */
int err;
struct watcher_list* tmp_watcher_list_iter;
struct watcher_list* watcher_list;
struct watcher_list tmp_watcher_list;
QUEUE queue;
QUEUE* q;
uv_fs_event_t* handle;
char* tmp_path;

if (old_watchers != NULL) {
/* We must restore the old watcher list to be able to close items
* out of it.
*/
loop->inotify_watchers = old_watchers;

QUEUE_INIT(&tmp_watcher_list.watchers);
/* Note that the queue we use is shared with the start and stop()
* functions, making QUEUE_FOREACH unsafe to use. So we use the
* QUEUE_MOVE trick to safely iterate. Also don't free the watcher
* list until we're done iterating. c.f. uv__inotify_read.
*/
RB_FOREACH_SAFE(watcher_list, watcher_root,
CAST(&old_watchers), tmp_watcher_list_iter) {
watcher_list->iterating = 1;
QUEUE_MOVE(&watcher_list->watchers, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
/* It's critical to keep a copy of path here, because it
* will be set to NULL by stop() and then deallocated by
* maybe_free_watcher_list
*/
tmp_path = uv__strdup(handle->path);
assert(tmp_path != NULL);
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&watcher_list->watchers, q);
uv_fs_event_stop(handle);

QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers);
handle->path = tmp_path;
}
watcher_list->iterating = 0;
maybe_free_watcher_list(watcher_list, loop);
}

QUEUE_MOVE(&tmp_watcher_list.watchers, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
QUEUE_REMOVE(q);
handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
tmp_path = handle->path;
handle->path = NULL;
err = uv_fs_event_start(handle, handle->cb, tmp_path, 0);
uv__free(tmp_path);
if (err)
return err;
}
}

return 0;
}


static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
struct watcher_list w;
w.wd = wd;
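The large hunk removed here, uv__inotify_fork(), re-armed every fs-event watcher on a fresh inotify descriptor, since after fork() the child would otherwise share the parent's descriptor and the two processes would interfere with each other's event stream. A minimal sketch of re-creating a single watch in the child; rearm_inotify_in_child() and its path argument are hypothetical, not libuv API:

```c
/* Sketch (Linux): open a child-owned inotify instance and re-add the
 * watch the parent had -- the essence of the removed re-arming loop. */
#include <sys/inotify.h>
#include <stdio.h>
#include <unistd.h>

/* Returns the new inotify descriptor, or -1 on error. */
int rearm_inotify_in_child(const char* path) {
  int fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);  /* child-owned fd */
  if (fd == -1) {
    perror("inotify_init1");
    return -1;
  }
  /* Re-register the same path the parent was watching. */
  int wd = inotify_add_watch(fd, path, IN_CREATE | IN_DELETE | IN_MODIFY);
  if (wd == -1) {
    perror("inotify_add_watch");
    close(fd);
    return -1;
  }
  return fd;
}
```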
33 changes: 0 additions & 33 deletions src/unix/loop.c
@@ -110,39 +110,6 @@ int uv_loop_init(uv_loop_t* loop) {
}


int uv_loop_fork(uv_loop_t* loop) {
int err;
unsigned int i;
uv__io_t* w;

err = uv__io_fork(loop);
if (err)
return err;

err = uv__async_fork(loop);
if (err)
return err;

err = uv__signal_loop_fork(loop);
if (err)
return err;

/* Rearm all the watchers that aren't re-queued by the above. */
for (i = 0; i < loop->nwatchers; i++) {
w = loop->watchers[i];
if (w == NULL)
continue;

if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue)) {
w->events = 0; /* Force re-registration in uv__io_poll. */
QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}
}

return 0;
}


void uv__loop_close(uv_loop_t* loop) {
uv__signal_loop_cleanup(loop);
uv__platform_loop_delete(loop);