forked from cory/tildefriends
libuv 1.46.0.
git-svn-id: https://www.unprompted.com/svn/projects/tildefriends/trunk@4336 ed5197a5-7fde-0310-b194-c3ffbd925b24
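Most of this upgrade is one mechanical rename: libuv 1.46.0 drops the old `QUEUE` macro family (`QUEUE_INIT`, `QUEUE_HEAD`, `QUEUE_DATA`, `QUEUE_MOVE`, ...) in favor of a `struct uv__queue` with `uv__queue_*` helper functions, and nearly every hunk below is that substitution. The underlying data structure is an intrusive circular doubly-linked list. A minimal illustrative sketch of the pattern follows; the `q_*` names are hypothetical stand-ins, not the vendored implementation:

```c
#include <stddef.h>
#include <stdio.h>

/* Intrusive circular doubly-linked list: the link node is embedded in
 * the owning struct, so list membership needs no per-element allocation. */
struct q_node {
  struct q_node* next;
  struct q_node* prev;
};

static void q_init(struct q_node* q) { q->next = q; q->prev = q; }
static int q_empty(const struct q_node* q) { return q == q->next; }

static void q_insert_tail(struct q_node* h, struct q_node* q) {
  q->next = h;
  q->prev = h->prev;
  q->prev->next = q;
  h->prev = q;
}

static void q_remove(struct q_node* q) {
  q->prev->next = q->next;
  q->next->prev = q->prev;
}

/* Recover the owning struct from the embedded link (container_of style). */
#define q_data(ptr, type, field) \
  ((type*) ((char*) (ptr) - offsetof(type, field)))

struct handle {
  int id;
  struct q_node queue;  /* embedded link, like uv_async_t's `queue` */
};

int main(void) {
  struct q_node head;
  struct handle a = {1, {NULL, NULL}};
  struct handle b = {2, {NULL, NULL}};

  q_init(&head);
  q_insert_tail(&head, &a.queue);
  q_insert_tail(&head, &b.queue);

  while (!q_empty(&head)) {
    struct q_node* q = head.next;  /* queue head */
    struct handle* h = q_data(q, struct handle, queue);
    q_remove(q);
    printf("handle %d\n", h->id);  /* prints 1 then 2 */
  }
  return 0;
}
```

Because the link is embedded in the owner, the `container_of`-style cast recovers the owner from the link, which is exactly how `uv__queue_data(q, uv__io_t, watcher_queue)` is used throughout the diff.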
deps/libuv/src/unix/aix.c (vendored): 14 changes

@@ -136,7 +136,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   struct pollfd pqry;
   struct pollfd* pe;
   struct poll_ctl pc;
-  QUEUE* q;
+  struct uv__queue* q;
   uv__io_t* w;
   uint64_t base;
   uint64_t diff;
@@ -151,18 +151,18 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   int reset_timeout;
 
   if (loop->nfds == 0) {
-    assert(QUEUE_EMPTY(&loop->watcher_queue));
+    assert(uv__queue_empty(&loop->watcher_queue));
     return;
   }
 
   lfields = uv__get_internal_fields(loop);
 
-  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
-    q = QUEUE_HEAD(&loop->watcher_queue);
-    QUEUE_REMOVE(q);
-    QUEUE_INIT(q);
+  while (!uv__queue_empty(&loop->watcher_queue)) {
+    q = uv__queue_head(&loop->watcher_queue);
+    uv__queue_remove(q);
+    uv__queue_init(q);
 
-    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+    w = uv__queue_data(q, uv__io_t, watcher_queue);
     assert(w->pevents != 0);
     assert(w->fd >= 0);
     assert(w->fd < (int) loop->nwatchers);
deps/libuv/src/unix/async.c (vendored): 52 changes

@@ -55,7 +55,7 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
   handle->pending = 0;
   handle->u.fd = 0; /* This will be used as a busy flag. */
 
-  QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue);
+  uv__queue_insert_tail(&loop->async_handles, &handle->queue);
   uv__handle_start(handle);
 
   return 0;
@@ -124,7 +124,7 @@ static void uv__async_spin(uv_async_t* handle) {
 
 void uv__async_close(uv_async_t* handle) {
   uv__async_spin(handle);
-  QUEUE_REMOVE(&handle->queue);
+  uv__queue_remove(&handle->queue);
   uv__handle_stop(handle);
 }
 
@@ -132,8 +132,8 @@ void uv__async_close(uv_async_t* handle) {
 static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
   char buf[1024];
   ssize_t r;
-  QUEUE queue;
-  QUEUE* q;
+  struct uv__queue queue;
+  struct uv__queue* q;
   uv_async_t* h;
   _Atomic int *pending;
 
@@ -157,13 +157,13 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
     abort();
   }
 
-  QUEUE_MOVE(&loop->async_handles, &queue);
-  while (!QUEUE_EMPTY(&queue)) {
-    q = QUEUE_HEAD(&queue);
-    h = QUEUE_DATA(q, uv_async_t, queue);
+  uv__queue_move(&loop->async_handles, &queue);
+  while (!uv__queue_empty(&queue)) {
+    q = uv__queue_head(&queue);
+    h = uv__queue_data(q, uv_async_t, queue);
 
-    QUEUE_REMOVE(q);
-    QUEUE_INSERT_TAIL(&loop->async_handles, q);
+    uv__queue_remove(q);
+    uv__queue_insert_tail(&loop->async_handles, q);
 
     /* Atomically fetch and clear pending flag */
     pending = (_Atomic int*) &h->pending;
@@ -241,8 +241,8 @@ static int uv__async_start(uv_loop_t* loop) {
 
 
 void uv__async_stop(uv_loop_t* loop) {
-  QUEUE queue;
-  QUEUE* q;
+  struct uv__queue queue;
+  struct uv__queue* q;
   uv_async_t* h;
 
   if (loop->async_io_watcher.fd == -1)
@@ -251,13 +251,13 @@ void uv__async_stop(uv_loop_t* loop) {
   /* Make sure no other thread is accessing the async handle fd after the loop
    * cleanup.
    */
-  QUEUE_MOVE(&loop->async_handles, &queue);
-  while (!QUEUE_EMPTY(&queue)) {
-    q = QUEUE_HEAD(&queue);
-    h = QUEUE_DATA(q, uv_async_t, queue);
+  uv__queue_move(&loop->async_handles, &queue);
+  while (!uv__queue_empty(&queue)) {
+    q = uv__queue_head(&queue);
+    h = uv__queue_data(q, uv_async_t, queue);
 
-    QUEUE_REMOVE(q);
-    QUEUE_INSERT_TAIL(&loop->async_handles, q);
+    uv__queue_remove(q);
+    uv__queue_insert_tail(&loop->async_handles, q);
 
     uv__async_spin(h);
   }
@@ -275,20 +275,20 @@ void uv__async_stop(uv_loop_t* loop) {
 
 
 int uv__async_fork(uv_loop_t* loop) {
-  QUEUE queue;
-  QUEUE* q;
+  struct uv__queue queue;
+  struct uv__queue* q;
   uv_async_t* h;
 
   if (loop->async_io_watcher.fd == -1) /* never started */
     return 0;
 
-  QUEUE_MOVE(&loop->async_handles, &queue);
-  while (!QUEUE_EMPTY(&queue)) {
-    q = QUEUE_HEAD(&queue);
-    h = QUEUE_DATA(q, uv_async_t, queue);
+  uv__queue_move(&loop->async_handles, &queue);
+  while (!uv__queue_empty(&queue)) {
+    q = uv__queue_head(&queue);
+    h = uv__queue_data(q, uv_async_t, queue);
 
-    QUEUE_REMOVE(q);
-    QUEUE_INSERT_TAIL(&loop->async_handles, q);
+    uv__queue_remove(q);
+    uv__queue_insert_tail(&loop->async_handles, q);
 
     /* The state of any thread that set pending is now likely corrupt in this
      * child because the user called fork, so just clear these flags and move
deps/libuv/src/unix/core.c (vendored): 67 changes

@@ -344,7 +344,7 @@ static void uv__finish_close(uv_handle_t* handle) {
   }
 
   uv__handle_unref(handle);
-  QUEUE_REMOVE(&handle->handle_queue);
+  uv__queue_remove(&handle->handle_queue);
 
   if (handle->close_cb) {
     handle->close_cb(handle);
@@ -380,7 +380,7 @@ int uv_backend_fd(const uv_loop_t* loop) {
 static int uv__loop_alive(const uv_loop_t* loop) {
   return uv__has_active_handles(loop) ||
          uv__has_active_reqs(loop) ||
-         !QUEUE_EMPTY(&loop->pending_queue) ||
+         !uv__queue_empty(&loop->pending_queue) ||
          loop->closing_handles != NULL;
 }
 
@@ -389,8 +389,8 @@ static int uv__backend_timeout(const uv_loop_t* loop) {
   if (loop->stop_flag == 0 &&
       /* uv__loop_alive(loop) && */
       (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
-      QUEUE_EMPTY(&loop->pending_queue) &&
-      QUEUE_EMPTY(&loop->idle_handles) &&
+      uv__queue_empty(&loop->pending_queue) &&
+      uv__queue_empty(&loop->idle_handles) &&
      (loop->flags & UV_LOOP_REAP_CHILDREN) == 0 &&
      loop->closing_handles == NULL)
    return uv__next_timeout(loop);
@@ -399,7 +399,7 @@ static int uv__backend_timeout(const uv_loop_t* loop) {
 
 
 int uv_backend_timeout(const uv_loop_t* loop) {
-  if (QUEUE_EMPTY(&loop->watcher_queue))
+  if (uv__queue_empty(&loop->watcher_queue))
     return uv__backend_timeout(loop);
   /* Need to call uv_run to update the backend fd state. */
   return 0;
@@ -424,15 +424,15 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
    * while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed
    * once, which should be done after polling in order to maintain proper
    * execution order of the conceptual event loop. */
-  if (mode == UV_RUN_DEFAULT) {
-    if (r)
-      uv__update_time(loop);
+  if (mode == UV_RUN_DEFAULT && r != 0 && loop->stop_flag == 0) {
+    uv__update_time(loop);
     uv__run_timers(loop);
   }
 
   while (r != 0 && loop->stop_flag == 0) {
     can_sleep =
-        QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->idle_handles);
+        uv__queue_empty(&loop->pending_queue) &&
+        uv__queue_empty(&loop->idle_handles);
 
     uv__run_pending(loop);
     uv__run_idle(loop);
@@ -448,7 +448,7 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
 
     /* Process immediate callbacks (e.g. write_cb) a small fixed number of
      * times to avoid loop starvation.*/
-    for (r = 0; r < 8 && !QUEUE_EMPTY(&loop->pending_queue); r++)
+    for (r = 0; r < 8 && !uv__queue_empty(&loop->pending_queue); r++)
       uv__run_pending(loop);
 
     /* Run one final update on the provider_idle_time in case uv__io_poll
@@ -827,17 +827,17 @@ int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
 
 
 static void uv__run_pending(uv_loop_t* loop) {
-  QUEUE* q;
-  QUEUE pq;
+  struct uv__queue* q;
+  struct uv__queue pq;
   uv__io_t* w;
 
-  QUEUE_MOVE(&loop->pending_queue, &pq);
+  uv__queue_move(&loop->pending_queue, &pq);
 
-  while (!QUEUE_EMPTY(&pq)) {
-    q = QUEUE_HEAD(&pq);
-    QUEUE_REMOVE(q);
-    QUEUE_INIT(q);
-    w = QUEUE_DATA(q, uv__io_t, pending_queue);
+  while (!uv__queue_empty(&pq)) {
+    q = uv__queue_head(&pq);
+    uv__queue_remove(q);
+    uv__queue_init(q);
+    w = uv__queue_data(q, uv__io_t, pending_queue);
     w->cb(loop, w, POLLOUT);
   }
 }
@@ -892,8 +892,8 @@ static void maybe_resize(uv_loop_t* loop, unsigned int len) {
 void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
   assert(cb != NULL);
   assert(fd >= -1);
-  QUEUE_INIT(&w->pending_queue);
-  QUEUE_INIT(&w->watcher_queue);
+  uv__queue_init(&w->pending_queue);
+  uv__queue_init(&w->watcher_queue);
   w->cb = cb;
   w->fd = fd;
   w->events = 0;
@@ -919,8 +919,8 @@ void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
     return;
 #endif
 
-  if (QUEUE_EMPTY(&w->watcher_queue))
-    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+  if (uv__queue_empty(&w->watcher_queue))
+    uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
 
   if (loop->watchers[w->fd] == NULL) {
     loop->watchers[w->fd] = w;
@@ -945,8 +945,8 @@ void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
   w->pevents &= ~events;
 
   if (w->pevents == 0) {
-    QUEUE_REMOVE(&w->watcher_queue);
-    QUEUE_INIT(&w->watcher_queue);
+    uv__queue_remove(&w->watcher_queue);
+    uv__queue_init(&w->watcher_queue);
     w->events = 0;
 
     if (w == loop->watchers[w->fd]) {
@@ -955,14 +955,14 @@ void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
       loop->nfds--;
     }
   }
-  else if (QUEUE_EMPTY(&w->watcher_queue))
-    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+  else if (uv__queue_empty(&w->watcher_queue))
+    uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
 }
 
 
 void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
   uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
-  QUEUE_REMOVE(&w->pending_queue);
+  uv__queue_remove(&w->pending_queue);
 
   /* Remove stale events for this file descriptor */
   if (w->fd != -1)
@@ -971,8 +971,8 @@ void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
 
 
 void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
-  if (QUEUE_EMPTY(&w->pending_queue))
-    QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
+  if (uv__queue_empty(&w->pending_queue))
+    uv__queue_insert_tail(&loop->pending_queue, &w->pending_queue);
 }
 
 
@@ -1020,8 +1020,8 @@ int uv_getrusage(uv_rusage_t* rusage) {
   /* Most platforms report ru_maxrss in kilobytes; macOS and Solaris are
    * the outliers because of course they are.
   */
-#if defined(__APPLE__) && !TARGET_OS_IPHONE
-  rusage->ru_maxrss /= 1024; /* macOS reports bytes. */
+#if defined(__APPLE__)
+  rusage->ru_maxrss /= 1024; /* macOS and iOS report bytes. */
 #elif defined(__sun)
   rusage->ru_maxrss /= getpagesize() / 1024; /* Solaris reports pages. */
 #endif
@@ -1271,6 +1271,10 @@ static int uv__getpwuid_r(uv_passwd_t *pwd, uid_t uid) {
 
 
 int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) {
+#if defined(__ANDROID__) && __ANDROID_API__ < 24
+  /* This function getgrgid_r() was added in Android N (level 24) */
+  return UV_ENOSYS;
+#else
   struct group gp;
   struct group* result;
   char* buf;
@@ -1347,6 +1351,7 @@ int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) {
   uv__free(buf);
 
   return 0;
+#endif
 }
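The `uv_getrusage` hunk above widens the byte-reporting case from macOS-excluding-iOS to all Apple platforms. Here is a hedged sketch of the same normalization outside libuv, mirroring the hunk's platform rules verbatim; the `maxrss_kb` helper name is hypothetical:

```c
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

/* Normalize peak RSS following the same platform rules as the hunk
 * above; an illustrative helper, not the vendored function. */
static long maxrss_kb(void) {
  struct rusage ru;
  if (getrusage(RUSAGE_SELF, &ru) != 0)
    return -1;
#if defined(__APPLE__)
  return ru.ru_maxrss / 1024;                    /* macOS and iOS: bytes */
#elif defined(__sun)
  return ru.ru_maxrss / (getpagesize() / 1024);  /* Solaris: pages */
#else
  return ru.ru_maxrss;                           /* elsewhere: kilobytes */
#endif
}

int main(void) {
  printf("peak RSS: %ld kB\n", maxrss_kb());
  return 0;
}
```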
deps/libuv/src/unix/fs.c (vendored): 64 changes

@@ -55,9 +55,13 @@
 # define HAVE_PREADV 0
 #endif
 
+/* preadv() and pwritev() were added in Android N (level 24) */
+#if defined(__linux__) && !(defined(__ANDROID__) && __ANDROID_API__ < 24)
+# define TRY_PREADV 1
+#endif
+
 #if defined(__linux__)
 # include <sys/sendfile.h>
-# include <sys/utsname.h>
 #endif
 
 #if defined(__sun)
@@ -457,7 +461,7 @@ static ssize_t uv__fs_preadv(uv_file fd,
 
 
 static ssize_t uv__fs_read(uv_fs_t* req) {
-#if defined(__linux__)
+#if TRY_PREADV
   static _Atomic int no_preadv;
 #endif
   unsigned int iovmax;
@@ -481,13 +485,13 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
 #if HAVE_PREADV
   result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
 #else
-# if defined(__linux__)
+# if TRY_PREADV
   if (atomic_load_explicit(&no_preadv, memory_order_relaxed)) retry:
 # endif
   {
     result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
   }
-# if defined(__linux__)
+# if TRY_PREADV
   else {
     result = preadv(req->file,
                     (struct iovec*) req->bufs,
@@ -899,31 +903,6 @@ out:
 
 
 #ifdef __linux__
-static unsigned uv__kernel_version(void) {
-  static _Atomic unsigned cached_version;
-  struct utsname u;
-  unsigned version;
-  unsigned major;
-  unsigned minor;
-  unsigned patch;
-
-  version = atomic_load_explicit(&cached_version, memory_order_relaxed);
-  if (version != 0)
-    return version;
-
-  if (-1 == uname(&u))
-    return 0;
-
-  if (3 != sscanf(u.release, "%u.%u.%u", &major, &minor, &patch))
-    return 0;
-
-  version = major * 65536 + minor * 256 + patch;
-  atomic_store_explicit(&cached_version, version, memory_order_relaxed);
-
-  return version;
-}
-
-
 /* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
  * in copy_file_range() when it shouldn't. There is no workaround except to
  * fall back to a regular copy.
@@ -1182,8 +1161,8 @@ static ssize_t uv__fs_lutime(uv_fs_t* req) {
 
 
 static ssize_t uv__fs_write(uv_fs_t* req) {
-#if defined(__linux__)
-  static int no_pwritev;
+#if TRY_PREADV
+  static _Atomic int no_pwritev;
 #endif
   ssize_t r;
 
@@ -1211,20 +1190,20 @@ static ssize_t uv__fs_write(uv_fs_t* req) {
 #if HAVE_PREADV
   r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
 #else
-# if defined(__linux__)
-  if (no_pwritev) retry:
+# if TRY_PREADV
+  if (atomic_load_explicit(&no_pwritev, memory_order_relaxed)) retry:
 # endif
   {
     r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
   }
-# if defined(__linux__)
+# if TRY_PREADV
   else {
     r = pwritev(req->file,
                 (struct iovec*) req->bufs,
                 req->nbufs,
                 req->off);
     if (r == -1 && errno == ENOSYS) {
-      no_pwritev = 1;
+      atomic_store_explicit(&no_pwritev, 1, memory_order_relaxed);
       goto retry;
     }
   }
@@ -1926,6 +1905,9 @@ int uv_fs_link(uv_loop_t* loop,
                uv_fs_cb cb) {
   INIT(LINK);
   PATH2;
+  if (cb != NULL)
+    if (uv__iou_fs_link(loop, req))
+      return 0;
   POST;
 }
 
@@ -1938,6 +1920,9 @@ int uv_fs_mkdir(uv_loop_t* loop,
   INIT(MKDIR);
   PATH;
   req->mode = mode;
+  if (cb != NULL)
+    if (uv__iou_fs_mkdir(loop, req))
+      return 0;
   POST;
 }
 
@@ -2089,6 +2074,9 @@ int uv_fs_rename(uv_loop_t* loop,
                  uv_fs_cb cb) {
   INIT(RENAME);
   PATH2;
+  if (cb != NULL)
+    if (uv__iou_fs_rename(loop, req))
+      return 0;
   POST;
 }
 
@@ -2135,6 +2123,9 @@ int uv_fs_symlink(uv_loop_t* loop,
   INIT(SYMLINK);
   PATH2;
   req->flags = flags;
+  if (cb != NULL)
+    if (uv__iou_fs_symlink(loop, req))
+      return 0;
   POST;
 }
 
@@ -2142,6 +2133,9 @@ int uv_fs_symlink(uv_loop_t* loop,
 int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
   INIT(UNLINK);
   PATH;
+  if (cb != NULL)
+    if (uv__iou_fs_unlink(loop, req))
+      return 0;
   POST;
 }
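The fs.c changes gate the vectored I/O fast path behind the new `TRY_PREADV` macro and make the `no_pwritev` flag a relaxed atomic. The shape of the fallback is: try the syscall, and on `ENOSYS` remember that verdict so later calls skip straight to the emulation. A standalone sketch of that probe-once pattern (hypothetical `read_at`, Linux-flavored; not the vendored code):

```c
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdatomic.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

/* Try the vectored syscall once; if the kernel says ENOSYS, remember
 * that in a relaxed atomic so every later call goes straight to the
 * single-buffer fallback. */
static ssize_t read_at(int fd, struct iovec* bufs, int nbufs, off_t off) {
  static _Atomic int no_preadv;
  ssize_t r;

  if (atomic_load_explicit(&no_preadv, memory_order_relaxed))
    goto fallback;

  r = preadv(fd, bufs, nbufs, off);
  if (r != -1 || errno != ENOSYS)
    return r;

  atomic_store_explicit(&no_preadv, 1, memory_order_relaxed);

fallback:
  /* Emulate with a plain pread of the first buffer only. */
  return pread(fd, bufs[0].iov_base, bufs[0].iov_len, off);
}

int main(void) {
  char a[4], b[4];
  struct iovec iov[2] = {{a, sizeof(a)}, {b, sizeof(b)}};
  int fd = open("/dev/zero", O_RDONLY);
  printf("read %zd bytes\n", read_at(fd, iov, 2, 0));
  close(fd);
  return 0;
}
```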
deps/libuv/src/unix/fsevents.c (vendored): 84 changes

@@ -80,13 +80,13 @@ enum uv__cf_loop_signal_type_e {
 typedef enum uv__cf_loop_signal_type_e uv__cf_loop_signal_type_t;
 
 struct uv__cf_loop_signal_s {
-  QUEUE member;
+  struct uv__queue member;
   uv_fs_event_t* handle;
   uv__cf_loop_signal_type_t type;
 };
 
 struct uv__fsevents_event_s {
-  QUEUE member;
+  struct uv__queue member;
   int events;
   char path[1];
 };
@@ -98,7 +98,7 @@ struct uv__cf_loop_state_s {
   FSEventStreamRef fsevent_stream;
   uv_sem_t fsevent_sem;
   uv_mutex_t fsevent_mutex;
-  void* fsevent_handles[2];
+  struct uv__queue fsevent_handles;
   unsigned int fsevent_handle_count;
 };
 
@@ -150,22 +150,22 @@ static void (*pFSEventStreamStop)(FSEventStreamRef);
 
 #define UV__FSEVENTS_PROCESS(handle, block) \
   do { \
-    QUEUE events; \
-    QUEUE* q; \
+    struct uv__queue events; \
+    struct uv__queue* q; \
     uv__fsevents_event_t* event; \
     int err; \
     uv_mutex_lock(&(handle)->cf_mutex); \
     /* Split-off all events and empty original queue */ \
-    QUEUE_MOVE(&(handle)->cf_events, &events); \
+    uv__queue_move(&(handle)->cf_events, &events); \
    /* Get error (if any) and zero original one */ \
    err = (handle)->cf_error; \
    (handle)->cf_error = 0; \
    uv_mutex_unlock(&(handle)->cf_mutex); \
    /* Loop through events, deallocating each after processing */ \
-    while (!QUEUE_EMPTY(&events)) { \
-      q = QUEUE_HEAD(&events); \
-      event = QUEUE_DATA(q, uv__fsevents_event_t, member); \
-      QUEUE_REMOVE(q); \
+    while (!uv__queue_empty(&events)) { \
+      q = uv__queue_head(&events); \
+      event = uv__queue_data(q, uv__fsevents_event_t, member); \
+      uv__queue_remove(q); \
      /* NOTE: Checking uv__is_active() is required here, because handle \
       * callback may close handle and invoking it after it will lead to \
       * incorrect behaviour */ \
@@ -193,14 +193,14 @@ static void uv__fsevents_cb(uv_async_t* cb) {
 
 /* Runs in CF thread, pushed event into handle's event list */
 static void uv__fsevents_push_event(uv_fs_event_t* handle,
-                                    QUEUE* events,
+                                    struct uv__queue* events,
                                     int err) {
   assert(events != NULL || err != 0);
   uv_mutex_lock(&handle->cf_mutex);
 
   /* Concatenate two queues */
   if (events != NULL)
-    QUEUE_ADD(&handle->cf_events, events);
+    uv__queue_add(&handle->cf_events, events);
 
   /* Propagate error */
   if (err != 0)
@@ -224,12 +224,12 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef,
   char* path;
   char* pos;
   uv_fs_event_t* handle;
-  QUEUE* q;
+  struct uv__queue* q;
   uv_loop_t* loop;
   uv__cf_loop_state_t* state;
   uv__fsevents_event_t* event;
   FSEventStreamEventFlags flags;
-  QUEUE head;
+  struct uv__queue head;
 
   loop = info;
   state = loop->cf_state;
@@ -238,9 +238,9 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef,
 
   /* For each handle */
   uv_mutex_lock(&state->fsevent_mutex);
-  QUEUE_FOREACH(q, &state->fsevent_handles) {
-    handle = QUEUE_DATA(q, uv_fs_event_t, cf_member);
-    QUEUE_INIT(&head);
+  uv__queue_foreach(q, &state->fsevent_handles) {
+    handle = uv__queue_data(q, uv_fs_event_t, cf_member);
+    uv__queue_init(&head);
 
     /* Process and filter out events */
     for (i = 0; i < numEvents; i++) {
@@ -318,10 +318,10 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef,
         event->events = UV_CHANGE;
       }
 
-      QUEUE_INSERT_TAIL(&head, &event->member);
+      uv__queue_insert_tail(&head, &event->member);
     }
 
-    if (!QUEUE_EMPTY(&head))
+    if (!uv__queue_empty(&head))
       uv__fsevents_push_event(handle, &head, 0);
   }
   uv_mutex_unlock(&state->fsevent_mutex);
@@ -403,7 +403,7 @@ static void uv__fsevents_destroy_stream(uv__cf_loop_state_t* state) {
 static void uv__fsevents_reschedule(uv__cf_loop_state_t* state,
                                     uv_loop_t* loop,
                                     uv__cf_loop_signal_type_t type) {
-  QUEUE* q;
+  struct uv__queue* q;
   uv_fs_event_t* curr;
   CFArrayRef cf_paths;
   CFStringRef* paths;
@@ -446,9 +446,9 @@ static void uv__fsevents_reschedule(uv__cf_loop_state_t* state,
 
     q = &state->fsevent_handles;
     for (; i < path_count; i++) {
-      q = QUEUE_NEXT(q);
+      q = uv__queue_next(q);
       assert(q != &state->fsevent_handles);
-      curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
+      curr = uv__queue_data(q, uv_fs_event_t, cf_member);
 
       assert(curr->realpath != NULL);
       paths[i] =
@@ -486,8 +486,8 @@ final:
 
     /* Broadcast error to all handles */
     uv_mutex_lock(&state->fsevent_mutex);
-    QUEUE_FOREACH(q, &state->fsevent_handles) {
-      curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
+    uv__queue_foreach(q, &state->fsevent_handles) {
+      curr = uv__queue_data(q, uv_fs_event_t, cf_member);
       uv__fsevents_push_event(curr, NULL, err);
     }
     uv_mutex_unlock(&state->fsevent_mutex);
@@ -606,7 +606,7 @@ static int uv__fsevents_loop_init(uv_loop_t* loop) {
   if (err)
     goto fail_sem_init;
 
-  QUEUE_INIT(&loop->cf_signals);
+  uv__queue_init(&loop->cf_signals);
 
   err = uv_sem_init(&state->fsevent_sem, 0);
   if (err)
@@ -616,7 +616,7 @@ static int uv__fsevents_loop_init(uv_loop_t* loop) {
   if (err)
     goto fail_fsevent_mutex_init;
 
-  QUEUE_INIT(&state->fsevent_handles);
+  uv__queue_init(&state->fsevent_handles);
   state->fsevent_need_reschedule = 0;
   state->fsevent_handle_count = 0;
 
@@ -675,7 +675,7 @@ fail_mutex_init:
 void uv__fsevents_loop_delete(uv_loop_t* loop) {
   uv__cf_loop_signal_t* s;
   uv__cf_loop_state_t* state;
-  QUEUE* q;
+  struct uv__queue* q;
 
   if (loop->cf_state == NULL)
     return;
@@ -688,10 +688,10 @@ void uv__fsevents_loop_delete(uv_loop_t* loop) {
   uv_mutex_destroy(&loop->cf_mutex);
 
   /* Free any remaining data */
-  while (!QUEUE_EMPTY(&loop->cf_signals)) {
-    q = QUEUE_HEAD(&loop->cf_signals);
-    s = QUEUE_DATA(q, uv__cf_loop_signal_t, member);
-    QUEUE_REMOVE(q);
+  while (!uv__queue_empty(&loop->cf_signals)) {
+    q = uv__queue_head(&loop->cf_signals);
+    s = uv__queue_data(q, uv__cf_loop_signal_t, member);
+    uv__queue_remove(q);
     uv__free(s);
   }
 
@@ -735,22 +735,22 @@ static void* uv__cf_loop_runner(void* arg) {
 static void uv__cf_loop_cb(void* arg) {
   uv_loop_t* loop;
   uv__cf_loop_state_t* state;
-  QUEUE* item;
-  QUEUE split_head;
+  struct uv__queue* item;
+  struct uv__queue split_head;
   uv__cf_loop_signal_t* s;
 
   loop = arg;
   state = loop->cf_state;
 
   uv_mutex_lock(&loop->cf_mutex);
-  QUEUE_MOVE(&loop->cf_signals, &split_head);
+  uv__queue_move(&loop->cf_signals, &split_head);
   uv_mutex_unlock(&loop->cf_mutex);
 
-  while (!QUEUE_EMPTY(&split_head)) {
-    item = QUEUE_HEAD(&split_head);
-    QUEUE_REMOVE(item);
+  while (!uv__queue_empty(&split_head)) {
+    item = uv__queue_head(&split_head);
+    uv__queue_remove(item);
 
-    s = QUEUE_DATA(item, uv__cf_loop_signal_t, member);
+    s = uv__queue_data(item, uv__cf_loop_signal_t, member);
 
     /* This was a termination signal */
     if (s->handle == NULL)
@@ -778,7 +778,7 @@ int uv__cf_loop_signal(uv_loop_t* loop,
   item->type = type;
 
   uv_mutex_lock(&loop->cf_mutex);
-  QUEUE_INSERT_TAIL(&loop->cf_signals, &item->member);
+  uv__queue_insert_tail(&loop->cf_signals, &item->member);
 
   state = loop->cf_state;
   assert(state != NULL);
@@ -807,7 +807,7 @@ int uv__fsevents_init(uv_fs_event_t* handle) {
   handle->realpath_len = strlen(handle->realpath);
 
   /* Initialize event queue */
-  QUEUE_INIT(&handle->cf_events);
+  uv__queue_init(&handle->cf_events);
   handle->cf_error = 0;
 
   /*
@@ -832,7 +832,7 @@ int uv__fsevents_init(uv_fs_event_t* handle) {
   /* Insert handle into the list */
   state = handle->loop->cf_state;
   uv_mutex_lock(&state->fsevent_mutex);
-  QUEUE_INSERT_TAIL(&state->fsevent_handles, &handle->cf_member);
+  uv__queue_insert_tail(&state->fsevent_handles, &handle->cf_member);
   state->fsevent_handle_count++;
   state->fsevent_need_reschedule = 1;
   uv_mutex_unlock(&state->fsevent_mutex);
@@ -872,7 +872,7 @@ int uv__fsevents_close(uv_fs_event_t* handle) {
   /* Remove handle from the list */
   state = handle->loop->cf_state;
   uv_mutex_lock(&state->fsevent_mutex);
-  QUEUE_REMOVE(&handle->cf_member);
+  uv__queue_remove(&handle->cf_member);
   state->fsevent_handle_count--;
   state->fsevent_need_reschedule = 1;
   uv_mutex_unlock(&state->fsevent_mutex);
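`UV__FSEVENTS_PROCESS` and `uv__cf_loop_cb` above both use the same concurrency idiom: hold the mutex only for an O(1) splice of the shared queue onto a private head, then iterate without the lock so callbacks can safely close handles or enqueue more work. A self-contained sketch of that idiom under those assumptions (illustrative `q_*` helpers again, not libuv's):

```c
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct q_node { struct q_node* next; struct q_node* prev; };

static void q_init(struct q_node* q) { q->next = q; q->prev = q; }
static int q_empty(const struct q_node* q) { return q == q->next; }

static void q_insert_tail(struct q_node* h, struct q_node* q) {
  q->next = h; q->prev = h->prev; h->prev->next = q; h->prev = q;
}

/* Move every element of h onto n, leaving h empty. */
static void q_move(struct q_node* h, struct q_node* n) {
  if (q_empty(h)) { q_init(n); return; }
  n->next = h->next; n->prev = h->prev;
  n->next->prev = n; n->prev->next = n;
  q_init(h);
}

struct event { int id; struct q_node member; };
#define q_data(ptr, type, field) \
  ((type*) ((char*) (ptr) - offsetof(type, field)))

static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
static struct q_node shared;

/* Splice-then-drain: the lock covers only the splice, so the per-event
 * work below runs without the mutex held. */
static void drain(void) {
  struct q_node local;
  struct q_node* q;

  pthread_mutex_lock(&mu);
  q_move(&shared, &local);
  pthread_mutex_unlock(&mu);

  while (!q_empty(&local)) {
    q = local.next;
    q->prev->next = q->next;  /* unlink the head node */
    q->next->prev = q->prev;
    printf("event %d\n", q_data(q, struct event, member)->id);
  }
}

int main(void) {
  struct event a = {1, {NULL, NULL}};
  struct event b = {2, {NULL, NULL}};
  q_init(&shared);
  q_insert_tail(&shared, &a.member);
  q_insert_tail(&shared, &b.member);
  drain();  /* prints event 1, event 2 */
  return 0;
}
```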
deps/libuv/src/unix/internal.h (vendored): 11 changes

@@ -335,20 +335,30 @@ int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req);
 int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop,
                                   uv_fs_t* req,
                                   uint32_t fsync_flags);
+int uv__iou_fs_link(uv_loop_t* loop, uv_fs_t* req);
+int uv__iou_fs_mkdir(uv_loop_t* loop, uv_fs_t* req);
 int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req);
 int uv__iou_fs_read_or_write(uv_loop_t* loop,
                              uv_fs_t* req,
                              int is_read);
+int uv__iou_fs_rename(uv_loop_t* loop, uv_fs_t* req);
 int uv__iou_fs_statx(uv_loop_t* loop,
                      uv_fs_t* req,
                      int is_fstat,
                      int is_lstat);
+int uv__iou_fs_symlink(uv_loop_t* loop, uv_fs_t* req);
+int uv__iou_fs_unlink(uv_loop_t* loop, uv_fs_t* req);
 #else
 #define uv__iou_fs_close(loop, req) 0
 #define uv__iou_fs_fsync_or_fdatasync(loop, req, fsync_flags) 0
+#define uv__iou_fs_link(loop, req) 0
+#define uv__iou_fs_mkdir(loop, req) 0
 #define uv__iou_fs_open(loop, req) 0
 #define uv__iou_fs_read_or_write(loop, req, is_read) 0
+#define uv__iou_fs_rename(loop, req) 0
 #define uv__iou_fs_statx(loop, req, is_fstat, is_lstat) 0
+#define uv__iou_fs_symlink(loop, req) 0
+#define uv__iou_fs_unlink(loop, req) 0
 #endif
 
 #if defined(__APPLE__)
@@ -429,6 +439,7 @@ int uv__statx(int dirfd,
               struct uv__statx* statxbuf);
 void uv__statx_to_stat(const struct uv__statx* statxbuf, uv_stat_t* buf);
 ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags);
+unsigned uv__kernel_version(void);
 #endif
 
 typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*);
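The header wires the new io_uring entry points so that each returns 1 when the request was queued on the ring and 0 to make the caller fall through to the threadpool, while the `#else` branch stubs them to a constant 0 so non-io_uring builds compile the fast path away entirely. A hypothetical illustration of that dispatch shape (`ring_submit`, `threadpool_post`, and `HAVE_RING` are made-up names, not libuv's):

```c
#include <stdio.h>

struct req { int id; };

static int threadpool_post(struct req* r) {
  printf("req %d -> threadpool\n", r->id);  /* portable fallback */
  return 0;
}

#if defined(HAVE_RING)
static int ring_submit(struct req* r) {
  printf("req %d -> ring\n", r->id);
  return 1;                 /* 1: queued on the ring, completion comes later */
}
#else
#define ring_submit(r) 0    /* stub: the `if` below folds away at compile time */
#endif

static int do_op(struct req* r) {
  if (ring_submit(r))       /* fast path, when available */
    return 0;
  return threadpool_post(r);
}

int main(void) {
  struct req r = {1};
  return do_op(&r);
}
```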
deps/libuv/src/unix/kqueue.c (vendored): 18 changes

@@ -133,7 +133,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   struct timespec spec;
   unsigned int nevents;
   unsigned int revents;
-  QUEUE* q;
+  struct uv__queue* q;
   uv__io_t* w;
   uv_process_t* process;
   sigset_t* pset;
@@ -152,19 +152,19 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   int reset_timeout;
 
   if (loop->nfds == 0) {
-    assert(QUEUE_EMPTY(&loop->watcher_queue));
+    assert(uv__queue_empty(&loop->watcher_queue));
     return;
   }
 
   lfields = uv__get_internal_fields(loop);
   nevents = 0;
 
-  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
-    q = QUEUE_HEAD(&loop->watcher_queue);
-    QUEUE_REMOVE(q);
-    QUEUE_INIT(q);
+  while (!uv__queue_empty(&loop->watcher_queue)) {
+    q = uv__queue_head(&loop->watcher_queue);
+    uv__queue_remove(q);
+    uv__queue_init(q);
 
-    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+    w = uv__queue_data(q, uv__io_t, watcher_queue);
     assert(w->pevents != 0);
     assert(w->fd >= 0);
     assert(w->fd < (int) loop->nwatchers);
@@ -307,8 +307,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
 
     /* Handle kevent NOTE_EXIT results */
     if (ev->filter == EVFILT_PROC) {
-      QUEUE_FOREACH(q, &loop->process_handles) {
-        process = QUEUE_DATA(q, uv_process_t, queue);
+      uv__queue_foreach(q, &loop->process_handles) {
+        process = uv__queue_data(q, uv_process_t, queue);
         if (process->pid == fd) {
           process->flags |= UV_HANDLE_REAP;
           loop->flags |= UV_LOOP_REAP_CHILDREN;
deps/libuv/src/unix/linux.c (vendored): 259 changes

@@ -48,6 +48,7 @@
 #include <sys/sysinfo.h>
 #include <sys/sysmacros.h>
 #include <sys/types.h>
+#include <sys/utsname.h>
 #include <time.h>
 #include <unistd.h>
 
@@ -150,6 +151,11 @@ enum {
   UV__IORING_OP_CLOSE = 19,
   UV__IORING_OP_STATX = 21,
   UV__IORING_OP_EPOLL_CTL = 29,
+  UV__IORING_OP_RENAMEAT = 35,
+  UV__IORING_OP_UNLINKAT = 36,
+  UV__IORING_OP_MKDIRAT = 37,
+  UV__IORING_OP_SYMLINKAT = 38,
+  UV__IORING_OP_LINKAT = 39,
 };
 
 enum {
@@ -162,6 +168,10 @@ enum {
   UV__IORING_SQ_CQ_OVERFLOW = 2u,
 };
 
+enum {
+  UV__MKDIRAT_SYMLINKAT_LINKAT = 1u,
+};
+
 struct uv__io_cqring_offsets {
   uint32_t head;
   uint32_t tail;
@@ -257,7 +267,7 @@ STATIC_ASSERT(EPOLL_CTL_MOD < 4);
 
 struct watcher_list {
   RB_ENTRY(watcher_list) entry;
-  QUEUE watchers;
+  struct uv__queue watchers;
   int iterating;
   char* path;
   int wd;
@@ -300,6 +310,31 @@ static struct watcher_root* uv__inotify_watchers(uv_loop_t* loop) {
 }
 
 
+unsigned uv__kernel_version(void) {
+  static _Atomic unsigned cached_version;
+  struct utsname u;
+  unsigned version;
+  unsigned major;
+  unsigned minor;
+  unsigned patch;
+
+  version = atomic_load_explicit(&cached_version, memory_order_relaxed);
+  if (version != 0)
+    return version;
+
+  if (-1 == uname(&u))
+    return 0;
+
+  if (3 != sscanf(u.release, "%u.%u.%u", &major, &minor, &patch))
+    return 0;
+
+  version = major * 65536 + minor * 256 + patch;
+  atomic_store_explicit(&cached_version, version, memory_order_relaxed);
+
+  return version;
+}
+
+
 ssize_t
 uv__fs_copy_file_range(int fd_in,
                        off_t* off_in,
@@ -385,6 +420,9 @@ int uv__io_uring_register(int fd, unsigned opcode, void* arg, unsigned nargs) {
 
 
 static int uv__use_io_uring(void) {
+#if defined(__ANDROID_API__)
+  return 0; /* Possibly available but blocked by seccomp. */
+#else
   /* Ternary: unknown=0, yes=1, no=-1 */
   static _Atomic int use_io_uring;
   char* val;
@@ -399,6 +437,7 @@ static int uv__use_io_uring(void) {
   }
 
   return use > 0;
+#endif
 }
 
 
@@ -503,6 +542,10 @@ static void uv__iou_init(int epollfd,
   iou->sqelen = sqelen;
   iou->ringfd = ringfd;
   iou->in_flight = 0;
+  iou->flags = 0;
+
+  if (uv__kernel_version() >= /* 5.15.0 */ 0x050F00)
+    iou->flags |= UV__MKDIRAT_SYMLINKAT_LINKAT;
 
   for (i = 0; i <= iou->sqmask; i++)
     iou->sqarray[i] = i; /* Slot -> sqe identity mapping. */
@@ -684,7 +727,7 @@ static struct uv__io_uring_sqe* uv__iou_get_sqe(struct uv__iou* iou,
   req->work_req.loop = loop;
   req->work_req.work = NULL;
   req->work_req.done = NULL;
-  QUEUE_INIT(&req->work_req.wq);
+  uv__queue_init(&req->work_req.wq);
 
   uv__req_register(loop, req);
   iou->in_flight++;
@@ -714,6 +757,17 @@ int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req) {
   struct uv__io_uring_sqe* sqe;
   struct uv__iou* iou;
 
+  /* Work around a poorly understood bug in older kernels where closing a file
+   * descriptor pointing to /foo/bar results in ETXTBSY errors when trying to
+   * execve("/foo/bar") later on. The bug seems to have been fixed somewhere
+   * between 5.15.85 and 5.15.90. I couldn't pinpoint the responsible commit
+   * but good candidates are the several data race fixes. Interestingly, it
+   * seems to manifest only when running under Docker so the possibility of
+   * a Docker bug can't be completely ruled out either. Yay, computers.
+   */
+  if (uv__kernel_version() < /* 5.15.90 */ 0x050F5A)
+    return 0;
+
   iou = &uv__get_internal_fields(loop)->iou;
 
   sqe = uv__iou_get_sqe(iou, loop, req);
@@ -754,6 +808,55 @@ int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop,
 }
 
 
+int uv__iou_fs_link(uv_loop_t* loop, uv_fs_t* req) {
+  struct uv__io_uring_sqe* sqe;
+  struct uv__iou* iou;
+
+  iou = &uv__get_internal_fields(loop)->iou;
+
+  if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT))
+    return 0;
+
+  sqe = uv__iou_get_sqe(iou, loop, req);
+  if (sqe == NULL)
+    return 0;
+
+  sqe->addr = (uintptr_t) req->path;
+  sqe->fd = AT_FDCWD;
+  sqe->addr2 = (uintptr_t) req->new_path;
+  sqe->len = AT_FDCWD;
+  sqe->opcode = UV__IORING_OP_LINKAT;
+
+  uv__iou_submit(iou);
+
+  return 1;
+}
+
+
+int uv__iou_fs_mkdir(uv_loop_t* loop, uv_fs_t* req) {
+  struct uv__io_uring_sqe* sqe;
+  struct uv__iou* iou;
+
+  iou = &uv__get_internal_fields(loop)->iou;
+
+  if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT))
+    return 0;
+
+  sqe = uv__iou_get_sqe(iou, loop, req);
+  if (sqe == NULL)
+    return 0;
+
+  sqe->addr = (uintptr_t) req->path;
+  sqe->fd = AT_FDCWD;
+  sqe->len = req->mode;
+  sqe->opcode = UV__IORING_OP_MKDIRAT;
+
+  uv__iou_submit(iou);
+
+  return 1;
+}
+
+
 int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req) {
   struct uv__io_uring_sqe* sqe;
   struct uv__iou* iou;
@@ -776,16 +879,86 @@ int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req) {
 }
 
 
+int uv__iou_fs_rename(uv_loop_t* loop, uv_fs_t* req) {
+  struct uv__io_uring_sqe* sqe;
+  struct uv__iou* iou;
+
+  iou = &uv__get_internal_fields(loop)->iou;
+
+  sqe = uv__iou_get_sqe(iou, loop, req);
+  if (sqe == NULL)
+    return 0;
+
+  sqe->addr = (uintptr_t) req->path;
+  sqe->fd = AT_FDCWD;
+  sqe->addr2 = (uintptr_t) req->new_path;
+  sqe->len = AT_FDCWD;
+  sqe->opcode = UV__IORING_OP_RENAMEAT;
+
+  uv__iou_submit(iou);
+
+  return 1;
+}
+
+
+int uv__iou_fs_symlink(uv_loop_t* loop, uv_fs_t* req) {
+  struct uv__io_uring_sqe* sqe;
+  struct uv__iou* iou;
+
+  iou = &uv__get_internal_fields(loop)->iou;
+
+  if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT))
+    return 0;
+
+  sqe = uv__iou_get_sqe(iou, loop, req);
+  if (sqe == NULL)
+    return 0;
+
+  sqe->addr = (uintptr_t) req->path;
+  sqe->fd = AT_FDCWD;
+  sqe->addr2 = (uintptr_t) req->new_path;
+  sqe->opcode = UV__IORING_OP_SYMLINKAT;
+
+  uv__iou_submit(iou);
+
+  return 1;
+}
+
+
+int uv__iou_fs_unlink(uv_loop_t* loop, uv_fs_t* req) {
+  struct uv__io_uring_sqe* sqe;
+  struct uv__iou* iou;
+
+  iou = &uv__get_internal_fields(loop)->iou;
+
+  sqe = uv__iou_get_sqe(iou, loop, req);
+  if (sqe == NULL)
+    return 0;
+
+  sqe->addr = (uintptr_t) req->path;
+  sqe->fd = AT_FDCWD;
+  sqe->opcode = UV__IORING_OP_UNLINKAT;
+
+  uv__iou_submit(iou);
+
+  return 1;
+}
+
+
 int uv__iou_fs_read_or_write(uv_loop_t* loop,
                              uv_fs_t* req,
                              int is_read) {
   struct uv__io_uring_sqe* sqe;
   struct uv__iou* iou;
 
-  /* For the moment, if iovcnt is greater than IOV_MAX, fallback to the
-   * threadpool. In the future we might take advantage of IOSQE_IO_LINK. */
-  if (req->nbufs > IOV_MAX)
-    return 0;
+  /* If iovcnt is greater than IOV_MAX, cap it to IOV_MAX on reads and fallback
+   * to the threadpool on writes */
+  if (req->nbufs > IOV_MAX) {
+    if (is_read)
+      req->nbufs = IOV_MAX;
+    else
+      return 0;
+  }
 
   iou = &uv__get_internal_fields(loop)->iou;
 
@@ -1092,7 +1265,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   struct uv__iou* ctl;
   struct uv__iou* iou;
   int real_timeout;
-  QUEUE* q;
+  struct uv__queue* q;
   uv__io_t* w;
   sigset_t* sigmask;
   sigset_t sigset;
@@ -1138,11 +1311,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
 
   memset(&e, 0, sizeof(e));
 
-  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
-    q = QUEUE_HEAD(&loop->watcher_queue);
-    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
-    QUEUE_REMOVE(q);
-    QUEUE_INIT(q);
+  while (!uv__queue_empty(&loop->watcher_queue)) {
+    q = uv__queue_head(&loop->watcher_queue);
+    w = uv__queue_data(q, uv__io_t, watcher_queue);
+    uv__queue_remove(q);
+    uv__queue_init(q);
 
     op = EPOLL_CTL_MOD;
     if (w->events == 0)
@@ -1479,6 +1652,8 @@ int uv_cpu_info(uv_cpu_info_t** ci, int* count) {
   static const char model_marker[] = "CPU part\t: ";
 #elif defined(__mips__)
   static const char model_marker[] = "cpu model\t\t: ";
+#elif defined(__loongarch__)
+  static const char model_marker[] = "cpu family\t\t: ";
 #else
   static const char model_marker[] = "model name\t: ";
 #endif
@@ -2097,8 +2272,8 @@ static int uv__inotify_fork(uv_loop_t* loop, struct watcher_list* root) {
   struct watcher_list* tmp_watcher_list_iter;
   struct watcher_list* watcher_list;
   struct watcher_list tmp_watcher_list;
-  QUEUE queue;
-  QUEUE* q;
+  struct uv__queue queue;
+  struct uv__queue* q;
   uv_fs_event_t* handle;
   char* tmp_path;
 
@@ -2110,41 +2285,41 @@ static int uv__inotify_fork(uv_loop_t* loop, struct watcher_list* root) {
   */
   loop->inotify_watchers = root;
 
-  QUEUE_INIT(&tmp_watcher_list.watchers);
+  uv__queue_init(&tmp_watcher_list.watchers);
   /* Note that the queue we use is shared with the start and stop()
-   * functions, making QUEUE_FOREACH unsafe to use. So we use the
-   * QUEUE_MOVE trick to safely iterate. Also don't free the watcher
+   * functions, making uv__queue_foreach unsafe to use. So we use the
+   * uv__queue_move trick to safely iterate. Also don't free the watcher
    * list until we're done iterating. c.f. uv__inotify_read.
   */
   RB_FOREACH_SAFE(watcher_list, watcher_root,
                   uv__inotify_watchers(loop), tmp_watcher_list_iter) {
     watcher_list->iterating = 1;
-    QUEUE_MOVE(&watcher_list->watchers, &queue);
-    while (!QUEUE_EMPTY(&queue)) {
-      q = QUEUE_HEAD(&queue);
-      handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
+    uv__queue_move(&watcher_list->watchers, &queue);
+    while (!uv__queue_empty(&queue)) {
+      q = uv__queue_head(&queue);
+      handle = uv__queue_data(q, uv_fs_event_t, watchers);
       /* It's critical to keep a copy of path here, because it
        * will be set to NULL by stop() and then deallocated by
        * maybe_free_watcher_list
       */
       tmp_path = uv__strdup(handle->path);
       assert(tmp_path != NULL);
-      QUEUE_REMOVE(q);
-      QUEUE_INSERT_TAIL(&watcher_list->watchers, q);
+      uv__queue_remove(q);
+      uv__queue_insert_tail(&watcher_list->watchers, q);
       uv_fs_event_stop(handle);
 
-      QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers);
+      uv__queue_insert_tail(&tmp_watcher_list.watchers, &handle->watchers);
       handle->path = tmp_path;
     }
     watcher_list->iterating = 0;
     maybe_free_watcher_list(watcher_list, loop);
   }
 
-  QUEUE_MOVE(&tmp_watcher_list.watchers, &queue);
-  while (!QUEUE_EMPTY(&queue)) {
-    q = QUEUE_HEAD(&queue);
-    QUEUE_REMOVE(q);
-    handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
+  uv__queue_move(&tmp_watcher_list.watchers, &queue);
+  while (!uv__queue_empty(&queue)) {
+    q = uv__queue_head(&queue);
+    uv__queue_remove(q);
+    handle = uv__queue_data(q, uv_fs_event_t, watchers);
     tmp_path = handle->path;
     handle->path = NULL;
     err = uv_fs_event_start(handle, handle->cb, tmp_path, 0);
@@ -2166,7 +2341,7 @@ static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
 
 static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
   /* if the watcher_list->watchers is being iterated over, we can't free it. */
-  if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) {
+  if ((!w->iterating) && uv__queue_empty(&w->watchers)) {
     /* No watchers left for this path. Clean up. */
     RB_REMOVE(watcher_root, uv__inotify_watchers(loop), w);
     inotify_rm_watch(loop->inotify_fd, w->wd);
@@ -2181,8 +2356,8 @@ static void uv__inotify_read(uv_loop_t* loop,
   const struct inotify_event* e;
   struct watcher_list* w;
   uv_fs_event_t* h;
-  QUEUE queue;
-  QUEUE* q;
+  struct uv__queue queue;
+  struct uv__queue* q;
   const char* path;
   ssize_t size;
   const char *p;
@@ -2225,7 +2400,7 @@ static void uv__inotify_read(uv_loop_t* loop,
     * What can go wrong?
    * A callback could call uv_fs_event_stop()
    * and the queue can change under our feet.
-    * So, we use QUEUE_MOVE() trick to safely iterate over the queue.
+    * So, we use uv__queue_move() trick to safely iterate over the queue.
    * And we don't free the watcher_list until we're done iterating.
    *
    * First,
@@ -2233,13 +2408,13 @@ static void uv__inotify_read(uv_loop_t* loop,
    * not to free watcher_list.
    */
    w->iterating = 1;
-    QUEUE_MOVE(&w->watchers, &queue);
-    while (!QUEUE_EMPTY(&queue)) {
-      q = QUEUE_HEAD(&queue);
-      h = QUEUE_DATA(q, uv_fs_event_t, watchers);
+    uv__queue_move(&w->watchers, &queue);
+    while (!uv__queue_empty(&queue)) {
+      q = uv__queue_head(&queue);
+      h = uv__queue_data(q, uv_fs_event_t, watchers);
 
-      QUEUE_REMOVE(q);
-      QUEUE_INSERT_TAIL(&w->watchers, q);
+      uv__queue_remove(q);
+      uv__queue_insert_tail(&w->watchers, q);
 
       h->cb(h, path, events, 0);
     }
@@ -2301,13 +2476,13 @@ int uv_fs_event_start(uv_fs_event_t* handle,
 
   w->wd = wd;
   w->path = memcpy(w + 1, path, len);
-  QUEUE_INIT(&w->watchers);
+  uv__queue_init(&w->watchers);
   w->iterating = 0;
   RB_INSERT(watcher_root, uv__inotify_watchers(loop), w);
 
 no_insert:
   uv__handle_start(handle);
-  QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
+  uv__queue_insert_tail(&w->watchers, &handle->watchers);
   handle->path = w->path;
   handle->cb = cb;
   handle->wd = wd;
@@ -2328,7 +2503,7 @@ int uv_fs_event_stop(uv_fs_event_t* handle) {
   handle->wd = -1;
   handle->path = NULL;
   uv__handle_stop(handle);
-  QUEUE_REMOVE(&handle->watchers);
+  uv__queue_remove(&handle->watchers);
 
   maybe_free_watcher_list(w, handle->loop);
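`uv__kernel_version()` (moved here from fs.c) packs major/minor/patch into one byte each, so the gates above read as plain integer comparisons against hex literals: `0x050F00` is 5.15.0 (minimum for the `MKDIRAT`/`SYMLINKAT`/`LINKAT` SQEs) and `0x050F5A` is 5.15.90 (the io_uring close workaround). A quick standalone check of the encoding:

```c
#include <assert.h>

/* The encoding used by uv__kernel_version(): one byte per component. */
static unsigned encode(unsigned major, unsigned minor, unsigned patch) {
  return major * 65536 + minor * 256 + patch;
}

int main(void) {
  assert(encode(5, 15, 0)  == 0x050F00);   /* MKDIRAT/SYMLINKAT/LINKAT gate */
  assert(encode(5, 15, 90) == 0x050F5A);   /* io_uring close workaround gate */
  assert(encode(5, 15, 90) > encode(5, 15, 0));  /* compares as an integer */
  return 0;
}
```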
deps/libuv/src/unix/loop-watcher.c (vendored): 20 changes

@@ -32,7 +32,7 @@
   int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) { \
     if (uv__is_active(handle)) return 0; \
     if (cb == NULL) return UV_EINVAL; \
-    QUEUE_INSERT_HEAD(&handle->loop->name##_handles, &handle->queue); \
+    uv__queue_insert_head(&handle->loop->name##_handles, &handle->queue); \
     handle->name##_cb = cb; \
     uv__handle_start(handle); \
     return 0; \
@@ -40,21 +40,21 @@
   \
   int uv_##name##_stop(uv_##name##_t* handle) { \
     if (!uv__is_active(handle)) return 0; \
-    QUEUE_REMOVE(&handle->queue); \
+    uv__queue_remove(&handle->queue); \
     uv__handle_stop(handle); \
     return 0; \
   } \
   \
   void uv__run_##name(uv_loop_t* loop) { \
     uv_##name##_t* h; \
-    QUEUE queue; \
-    QUEUE* q; \
-    QUEUE_MOVE(&loop->name##_handles, &queue); \
-    while (!QUEUE_EMPTY(&queue)) { \
-      q = QUEUE_HEAD(&queue); \
-      h = QUEUE_DATA(q, uv_##name##_t, queue); \
-      QUEUE_REMOVE(q); \
-      QUEUE_INSERT_TAIL(&loop->name##_handles, q); \
+    struct uv__queue queue; \
+    struct uv__queue* q; \
+    uv__queue_move(&loop->name##_handles, &queue); \
+    while (!uv__queue_empty(&queue)) { \
+      q = uv__queue_head(&queue); \
+      h = uv__queue_data(q, uv_##name##_t, queue); \
+      uv__queue_remove(q); \
+      uv__queue_insert_tail(&loop->name##_handles, q); \
      h->name##_cb(h); \
    } \
  } \
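This template stamps out `uv_prepare_*`, `uv_check_*`, and `uv_idle_*` in one go; the run function moves the handle list aside so handles started from inside a callback wait until the next iteration, and re-appends each handle before invoking it. A small usage sketch against the public API the macro generates (standard libuv calls, to be linked against libuv):

```c
#include <stdio.h>
#include <uv.h>

/* The template above backs uv_idle_start/uv_idle_stop (and the
 * prepare/check twins); this just exercises them. */
static void on_idle(uv_idle_t* h) {
  static int n;
  if (++n == 3)
    uv_idle_stop(h);  /* uv__queue_remove(&h->queue) under the hood */
  printf("idle tick %d\n", n);
}

int main(void) {
  uv_idle_t idle;
  uv_idle_init(uv_default_loop(), &idle);
  uv_idle_start(&idle, on_idle);  /* uv__queue_insert_head under the hood */
  uv_run(uv_default_loop(), UV_RUN_DEFAULT);  /* exits after tick 3 */
  return 0;
}
```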
deps/libuv/src/unix/loop.c (vendored): 28 changes

@@ -50,20 +50,20 @@ int uv_loop_init(uv_loop_t* loop) {
          sizeof(lfields->loop_metrics.metrics));
 
   heap_init((struct heap*) &loop->timer_heap);
-  QUEUE_INIT(&loop->wq);
-  QUEUE_INIT(&loop->idle_handles);
-  QUEUE_INIT(&loop->async_handles);
-  QUEUE_INIT(&loop->check_handles);
-  QUEUE_INIT(&loop->prepare_handles);
-  QUEUE_INIT(&loop->handle_queue);
+  uv__queue_init(&loop->wq);
+  uv__queue_init(&loop->idle_handles);
+  uv__queue_init(&loop->async_handles);
+  uv__queue_init(&loop->check_handles);
+  uv__queue_init(&loop->prepare_handles);
+  uv__queue_init(&loop->handle_queue);
 
   loop->active_handles = 0;
   loop->active_reqs.count = 0;
   loop->nfds = 0;
   loop->watchers = NULL;
   loop->nwatchers = 0;
-  QUEUE_INIT(&loop->pending_queue);
-  QUEUE_INIT(&loop->watcher_queue);
+  uv__queue_init(&loop->pending_queue);
+  uv__queue_init(&loop->watcher_queue);
 
   loop->closing_handles = NULL;
   uv__update_time(loop);
@@ -85,7 +85,7 @@ int uv_loop_init(uv_loop_t* loop) {
   err = uv__process_init(loop);
   if (err)
     goto fail_signal_init;
-  QUEUE_INIT(&loop->process_handles);
+  uv__queue_init(&loop->process_handles);
 
   err = uv_rwlock_init(&loop->cloexec_lock);
   if (err)
@@ -152,9 +152,9 @@ int uv_loop_fork(uv_loop_t* loop) {
     if (w == NULL)
       continue;
 
-    if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue)) {
+    if (w->pevents != 0 && uv__queue_empty(&w->watcher_queue)) {
       w->events = 0; /* Force re-registration in uv__io_poll. */
-      QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+      uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
     }
   }
 
@@ -180,7 +180,7 @@ void uv__loop_close(uv_loop_t* loop) {
   }
 
   uv_mutex_lock(&loop->wq_mutex);
-  assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!");
+  assert(uv__queue_empty(&loop->wq) && "thread pool work queue not empty!");
   assert(!uv__has_active_reqs(loop));
   uv_mutex_unlock(&loop->wq_mutex);
   uv_mutex_destroy(&loop->wq_mutex);
@@ -192,8 +192,8 @@ void uv__loop_close(uv_loop_t* loop) {
   uv_rwlock_destroy(&loop->cloexec_lock);
 
 #if 0
-  assert(QUEUE_EMPTY(&loop->pending_queue));
-  assert(QUEUE_EMPTY(&loop->watcher_queue));
+  assert(uv__queue_empty(&loop->pending_queue));
+  assert(uv__queue_empty(&loop->watcher_queue));
   assert(loop->nfds == 0);
 #endif
deps/libuv/src/unix/os390-syscalls.c (vendored): 24 changes

@@ -27,7 +27,7 @@
 #include <termios.h>
 #include <sys/msg.h>
 
-static QUEUE global_epoll_queue;
+static struct uv__queue global_epoll_queue;
 static uv_mutex_t global_epoll_lock;
 static uv_once_t once = UV_ONCE_INIT;
 
@@ -178,18 +178,18 @@ static void after_fork(void) {
 
 
 static void child_fork(void) {
-  QUEUE* q;
+  struct uv__queue* q;
   uv_once_t child_once = UV_ONCE_INIT;
 
   /* reset once */
   memcpy(&once, &child_once, sizeof(child_once));
 
   /* reset epoll list */
-  while (!QUEUE_EMPTY(&global_epoll_queue)) {
+  while (!uv__queue_empty(&global_epoll_queue)) {
     uv__os390_epoll* lst;
-    q = QUEUE_HEAD(&global_epoll_queue);
-    QUEUE_REMOVE(q);
-    lst = QUEUE_DATA(q, uv__os390_epoll, member);
+    q = uv__queue_head(&global_epoll_queue);
+    uv__queue_remove(q);
+    lst = uv__queue_data(q, uv__os390_epoll, member);
     uv__free(lst->items);
     lst->items = NULL;
     lst->size = 0;
@@ -201,7 +201,7 @@ static void child_fork(void) {
 
 
 static void epoll_init(void) {
-  QUEUE_INIT(&global_epoll_queue);
+  uv__queue_init(&global_epoll_queue);
   if (uv_mutex_init(&global_epoll_lock))
     abort();
 
@@ -225,7 +225,7 @@ uv__os390_epoll* epoll_create1(int flags) {
     lst->items[lst->size - 1].revents = 0;
     uv_once(&once, epoll_init);
     uv_mutex_lock(&global_epoll_lock);
-    QUEUE_INSERT_TAIL(&global_epoll_queue, &lst->member);
+    uv__queue_insert_tail(&global_epoll_queue, &lst->member);
     uv_mutex_unlock(&global_epoll_lock);
   }
 
@@ -352,14 +352,14 @@ int epoll_wait(uv__os390_epoll* lst, struct epoll_event* events,
 
 
 int epoll_file_close(int fd) {
-  QUEUE* q;
+  struct uv__queue* q;
 
   uv_once(&once, epoll_init);
   uv_mutex_lock(&global_epoll_lock);
-  QUEUE_FOREACH(q, &global_epoll_queue) {
+  uv__queue_foreach(q, &global_epoll_queue) {
     uv__os390_epoll* lst;
 
-    lst = QUEUE_DATA(q, uv__os390_epoll, member);
+    lst = uv__queue_data(q, uv__os390_epoll, member);
     if (fd < lst->size && lst->items != NULL && lst->items[fd].fd != -1)
       lst->items[fd].fd = -1;
   }
@@ -371,7 +371,7 @@ int epoll_file_close(int fd) {
 void epoll_queue_close(uv__os390_epoll* lst) {
   /* Remove epoll instance from global queue */
   uv_mutex_lock(&global_epoll_lock);
-  QUEUE_REMOVE(&lst->member);
+  uv__queue_remove(&lst->member);
   uv_mutex_unlock(&global_epoll_lock);
 
   /* Free resources */
deps/libuv/src/unix/os390-syscalls.h (vendored): 2 changes

@@ -45,7 +45,7 @@ struct epoll_event {
 };
 
 typedef struct {
-  QUEUE member;
+  struct uv__queue member;
   struct pollfd* items;
   unsigned long size;
   int msg_queue;
deps/libuv/src/unix/os390.c (vendored): 14 changes

@@ -815,7 +815,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   uv__os390_epoll* ep;
   int have_signals;
   int real_timeout;
-  QUEUE* q;
+  struct uv__queue* q;
   uv__io_t* w;
   uint64_t base;
   int count;
@@ -827,19 +827,19 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   int reset_timeout;
 
   if (loop->nfds == 0) {
-    assert(QUEUE_EMPTY(&loop->watcher_queue));
+    assert(uv__queue_empty(&loop->watcher_queue));
     return;
   }
 
   lfields = uv__get_internal_fields(loop);
 
-  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+  while (!uv__queue_empty(&loop->watcher_queue)) {
     uv_stream_t* stream;
 
-    q = QUEUE_HEAD(&loop->watcher_queue);
-    QUEUE_REMOVE(q);
-    QUEUE_INIT(q);
-    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+    q = uv__queue_head(&loop->watcher_queue);
+    uv__queue_remove(q);
+    uv__queue_init(q);
+    w = uv__queue_data(q, uv__io_t, watcher_queue);
 
     assert(w->pevents != 0);
     assert(w->fd >= 0);
98
deps/libuv/src/unix/pipe.c
vendored
98
deps/libuv/src/unix/pipe.c
vendored
@ -41,26 +41,60 @@ int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
|
||||
|
||||
|
||||
int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
|
||||
return uv_pipe_bind2(handle, name, strlen(name), 0);
|
||||
}
|
||||
|
||||
|
||||
int uv_pipe_bind2(uv_pipe_t* handle,
|
||||
const char* name,
|
||||
size_t namelen,
|
||||
unsigned int flags) {
|
||||
struct sockaddr_un saddr;
|
||||
const char* pipe_fname;
|
||||
char* pipe_fname;
|
||||
int sockfd;
|
||||
int err;
|
||||
|
||||
pipe_fname = NULL;
|
||||
|
||||
if (flags & ~UV_PIPE_NO_TRUNCATE)
|
||||
return UV_EINVAL;
|
||||
|
||||
if (name == NULL)
|
||||
return UV_EINVAL;
|
||||
|
||||
if (namelen == 0)
|
||||
return UV_EINVAL;
|
||||
|
||||
#ifndef __linux__
|
||||
/* Abstract socket namespace only works on Linux. */
|
||||
if (*name == '\0')
|
||||
return UV_EINVAL;
|
||||
#endif
|
||||
|
||||
if (flags & UV_PIPE_NO_TRUNCATE)
|
||||
if (namelen > sizeof(saddr.sun_path))
|
||||
return UV_EINVAL;
|
||||
|
||||
/* Truncate long paths. Documented behavior. */
|
||||
if (namelen > sizeof(saddr.sun_path))
|
||||
namelen = sizeof(saddr.sun_path);
|
||||
|
||||
/* Already bound? */
|
||||
if (uv__stream_fd(handle) >= 0)
|
||||
return UV_EINVAL;
|
||||
if (uv__is_closing(handle)) {
|
||||
return UV_EINVAL;
|
||||
}
|
||||
/* Make a copy of the file name, it outlives this function's scope. */
|
||||
pipe_fname = uv__strdup(name);
|
||||
if (pipe_fname == NULL)
|
||||
return UV_ENOMEM;
|
||||
|
||||
/* We've got a copy, don't touch the original any more. */
|
||||
name = NULL;
|
||||
if (uv__is_closing(handle))
|
||||
return UV_EINVAL;
|
||||
|
||||
/* Make a copy of the file path unless it is an abstract socket.
|
||||
* We unlink the file later but abstract sockets disappear
|
||||
* automatically since they're not real file system entities.
|
||||
*/
|
||||
if (*name != '\0') {
|
||||
pipe_fname = uv__strdup(name);
|
||||
if (pipe_fname == NULL)
|
||||
return UV_ENOMEM;
|
||||
}
|
||||
|
||||
err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
|
||||
if (err < 0)
|
||||
@ -68,7 +102,7 @@ int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
|
||||
sockfd = err;
|
||||
|
||||
memset(&saddr, 0, sizeof saddr);
|
||||
uv__strscpy(saddr.sun_path, pipe_fname, sizeof(saddr.sun_path));
|
||||
memcpy(&saddr.sun_path, name, namelen);
|
||||
saddr.sun_family = AF_UNIX;
|
||||
|
||||
if (bind(sockfd, (struct sockaddr*)&saddr, sizeof saddr)) {
|
||||
@ -83,12 +117,12 @@ int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
|
||||
|
||||
/* Success. */
|
||||
handle->flags |= UV_HANDLE_BOUND;
|
||||
handle->pipe_fname = pipe_fname; /* Is a strdup'ed copy. */
|
||||
handle->pipe_fname = pipe_fname; /* NULL or a strdup'ed copy. */
|
||||
handle->io_watcher.fd = sockfd;
|
||||
return 0;
|
||||
|
||||
err_socket:
|
||||
uv__free((void*)pipe_fname);
|
||||
uv__free(pipe_fname);
|
||||
return err;
|
||||
}

@ -176,11 +210,44 @@ void uv_pipe_connect(uv_connect_t* req,
uv_pipe_t* handle,
const char* name,
uv_connect_cb cb) {
uv_pipe_connect2(req, handle, name, strlen(name), 0, cb);
}


int uv_pipe_connect2(uv_connect_t* req,
uv_pipe_t* handle,
const char* name,
size_t namelen,
unsigned int flags,
uv_connect_cb cb) {
struct sockaddr_un saddr;
int new_sock;
int err;
int r;

if (flags & ~UV_PIPE_NO_TRUNCATE)
return UV_EINVAL;

if (name == NULL)
return UV_EINVAL;

if (namelen == 0)
return UV_EINVAL;

#ifndef __linux__
/* Abstract socket namespace only works on Linux. */
if (*name == '\0')
return UV_EINVAL;
#endif

if (flags & UV_PIPE_NO_TRUNCATE)
if (namelen > sizeof(saddr.sun_path))
return UV_EINVAL;

/* Truncate long paths. Documented behavior. */
if (namelen > sizeof(saddr.sun_path))
namelen = sizeof(saddr.sun_path);

new_sock = (uv__stream_fd(handle) == -1);

if (new_sock) {
@ -191,7 +258,7 @@ void uv_pipe_connect(uv_connect_t* req,
}

memset(&saddr, 0, sizeof saddr);
uv__strscpy(saddr.sun_path, name, sizeof(saddr.sun_path));
memcpy(&saddr.sun_path, name, namelen);
saddr.sun_family = AF_UNIX;

do {
@ -230,12 +297,13 @@ out:
uv__req_init(handle->loop, req, UV_CONNECT);
req->handle = (uv_stream_t*)handle;
req->cb = cb;
QUEUE_INIT(&req->queue);
uv__queue_init(&req->queue);

/* Force callback to run on next tick in case of error. */
if (err)
uv__io_feed(handle->loop, &handle->io_watcher);

return 0;
}
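uv_pipe_connect2() mirrors the bind change: with an explicit namelen, a Linux client can address an abstract socket whose name begins with a NUL byte, which strlen() would report as empty. A sketch under that assumption ("\0demo" is an arbitrary example name; Linux only):

#include <uv.h>

static void on_connect(uv_connect_t* req, int status) {
  /* status is 0 on success or a negative UV_* error code. */
}

int connect_abstract(uv_loop_t* loop, uv_pipe_t* client, uv_connect_t* req) {
  static const char name[] = "\0demo";  /* 5 name bytes incl. leading NUL */
  int r = uv_pipe_init(loop, client, 0);
  if (r != 0)
    return r;
  return uv_pipe_connect2(req, client, name, sizeof(name) - 1, 0, on_connect);
}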
14
deps/libuv/src/unix/posix-poll.c
vendored
@ -137,7 +137,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
sigset_t set;
uint64_t time_base;
uint64_t time_diff;
QUEUE* q;
struct uv__queue* q;
uv__io_t* w;
size_t i;
unsigned int nevents;
@ -149,19 +149,19 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int reset_timeout;

if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
assert(uv__queue_empty(&loop->watcher_queue));
return;
}

lfields = uv__get_internal_fields(loop);

/* Take queued watchers and add their fds to our poll fds array. */
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
while (!uv__queue_empty(&loop->watcher_queue)) {
q = uv__queue_head(&loop->watcher_queue);
uv__queue_remove(q);
uv__queue_init(q);

w = QUEUE_DATA(q, uv__io_t, watcher_queue);
w = uv__queue_data(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);
50
deps/libuv/src/unix/process.c
vendored
@ -108,17 +108,17 @@ void uv__wait_children(uv_loop_t* loop) {
int status;
int options;
pid_t pid;
QUEUE pending;
QUEUE* q;
QUEUE* h;
struct uv__queue pending;
struct uv__queue* q;
struct uv__queue* h;

QUEUE_INIT(&pending);
uv__queue_init(&pending);

h = &loop->process_handles;
q = QUEUE_HEAD(h);
q = uv__queue_head(h);
while (q != h) {
process = QUEUE_DATA(q, uv_process_t, queue);
q = QUEUE_NEXT(q);
process = uv__queue_data(q, uv_process_t, queue);
q = uv__queue_next(q);

#ifndef UV_USE_SIGCHLD
if ((process->flags & UV_HANDLE_REAP) == 0)
@ -149,18 +149,18 @@ void uv__wait_children(uv_loop_t* loop) {

assert(pid == process->pid);
process->status = status;
QUEUE_REMOVE(&process->queue);
QUEUE_INSERT_TAIL(&pending, &process->queue);
uv__queue_remove(&process->queue);
uv__queue_insert_tail(&pending, &process->queue);
}

h = &pending;
q = QUEUE_HEAD(h);
q = uv__queue_head(h);
while (q != h) {
process = QUEUE_DATA(q, uv_process_t, queue);
q = QUEUE_NEXT(q);
process = uv__queue_data(q, uv_process_t, queue);
q = uv__queue_next(q);

QUEUE_REMOVE(&process->queue);
QUEUE_INIT(&process->queue);
uv__queue_remove(&process->queue);
uv__queue_init(&process->queue);
uv__handle_stop(process);

if (process->exit_cb == NULL)
@ -176,13 +176,18 @@ void uv__wait_children(uv_loop_t* loop) {

process->exit_cb(process, exit_status, term_signal);
}
assert(QUEUE_EMPTY(&pending));
assert(uv__queue_empty(&pending));
}

/*
* Used for initializing stdio streams like options.stdin_stream. Returns
* zero on success. See also the cleanup section in uv_spawn().
*/
#if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH))
/* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be
* avoided. Since this isn't called on those targets, the function
* doesn't even need to be defined for them.
*/
static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) {
int mask;
int fd;
@ -269,11 +274,6 @@ static void uv__write_errno(int error_fd) {
}


#if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH))
/* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be
* avoided. Since this isn't called on those targets, the function
* doesn't even need to be defined for them.
*/
static void uv__process_child_init(const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2],
@ -405,7 +405,6 @@ static void uv__process_child_init(const uv_process_options_t* options,

uv__write_errno(error_fd);
}
#endif


#if defined(__APPLE__)
@ -952,6 +951,7 @@ static int uv__spawn_and_init_child(

return err;
}
#endif /* ISN'T TARGET_OS_TV || TARGET_OS_WATCH */

int uv_spawn(uv_loop_t* loop,
uv_process_t* process,
@ -978,7 +978,7 @@ int uv_spawn(uv_loop_t* loop,
UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS)));

uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS);
QUEUE_INIT(&process->queue);
uv__queue_init(&process->queue);
process->status = 0;

stdio_count = options->stdio_count;
@ -1041,7 +1041,7 @@ int uv_spawn(uv_loop_t* loop,

process->pid = pid;
process->exit_cb = options->exit_cb;
QUEUE_INSERT_TAIL(&loop->process_handles, &process->queue);
uv__queue_insert_tail(&loop->process_handles, &process->queue);
uv__handle_start(process);
}

@ -1103,10 +1103,10 @@ int uv_kill(int pid, int signum) {


void uv__process_close(uv_process_t* handle) {
QUEUE_REMOVE(&handle->queue);
uv__queue_remove(&handle->queue);
uv__handle_stop(handle);
#ifdef UV_USE_SIGCHLD
if (QUEUE_EMPTY(&handle->loop->process_handles))
if (uv__queue_empty(&handle->loop->process_handles))
uv_signal_stop(&handle->loop->child_watcher);
#endif
}
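uv__wait_children() reaps in two passes: exited children are first unlinked from loop->process_handles onto the local pending queue, and only afterwards are exit callbacks run from that private list, so a callback that closes or respawns processes cannot corrupt the queue being walked. From user code this machinery is reached through uv_spawn(); a minimal hedged sketch of that entry point (spawning "sleep" purely as an example):

#include <stdio.h>
#include <uv.h>

static void on_exit_cb(uv_process_t* proc, int64_t exit_status, int term_signal) {
  /* Called from uv__wait_children() once the child has been reaped. */
  fprintf(stderr, "child exited: status %lld, signal %d\n",
          (long long) exit_status, term_signal);
  uv_close((uv_handle_t*) proc, NULL);
}

int spawn_child(uv_loop_t* loop, uv_process_t* proc) {
  char* args[] = { "sleep", "1", NULL };
  uv_process_options_t options = {0};
  options.file = "sleep";
  options.args = args;
  options.exit_cb = on_exit_cb;
  return uv_spawn(loop, proc, &options);
}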
8
deps/libuv/src/unix/signal.c
vendored
@ -291,16 +291,16 @@ int uv__signal_loop_fork(uv_loop_t* loop) {


void uv__signal_loop_cleanup(uv_loop_t* loop) {
QUEUE* q;
struct uv__queue* q;

/* Stop all the signal watchers that are still attached to this loop. This
* ensures that the (shared) signal tree doesn't contain any invalid
* entries, and that signal handlers are removed when appropriate.
* It's safe to use QUEUE_FOREACH here because the handles and the handle
* It's safe to use uv__queue_foreach here because the handles and the handle
* queue are not modified by uv__signal_stop().
*/
QUEUE_FOREACH(q, &loop->handle_queue) {
uv_handle_t* handle = QUEUE_DATA(q, uv_handle_t, handle_queue);
uv__queue_foreach(q, &loop->handle_queue) {
uv_handle_t* handle = uv__queue_data(q, uv_handle_t, handle_queue);

if (handle->type == UV_SIGNAL)
uv__signal_stop((uv_signal_t*) handle);
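The comment rewritten here carries the load-bearing invariant: iterating an intrusive queue with a foreach macro is only safe while the loop body never unlinks the node under the cursor, because the macro simply chases next pointers. A typical shape for such a macro (an illustrative sketch; the real uv__queue_foreach lives in libuv's queue header):

#define queue_foreach(q, h) \
  for ((q) = (h)->next; (q) != (h); (q) = (q)->next)

uv__signal_stop() mutates the shared signal tree but leaves loop->handle_queue untouched, which is exactly why this cleanup may use the foreach form instead of the remove-head-then-reinit pattern seen in the other files.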
56
deps/libuv/src/unix/stream.c
vendored
@ -94,8 +94,8 @@ void uv__stream_init(uv_loop_t* loop,
stream->accepted_fd = -1;
stream->queued_fds = NULL;
stream->delayed_error = 0;
QUEUE_INIT(&stream->write_queue);
QUEUE_INIT(&stream->write_completed_queue);
uv__queue_init(&stream->write_queue);
uv__queue_init(&stream->write_completed_queue);
stream->write_queue_size = 0;

if (loop->emfile_fd == -1) {
@ -439,15 +439,15 @@ int uv__stream_open(uv_stream_t* stream, int fd, int flags) {

void uv__stream_flush_write_queue(uv_stream_t* stream, int error) {
uv_write_t* req;
QUEUE* q;
while (!QUEUE_EMPTY(&stream->write_queue)) {
q = QUEUE_HEAD(&stream->write_queue);
QUEUE_REMOVE(q);
struct uv__queue* q;
while (!uv__queue_empty(&stream->write_queue)) {
q = uv__queue_head(&stream->write_queue);
uv__queue_remove(q);

req = QUEUE_DATA(q, uv_write_t, queue);
req = uv__queue_data(q, uv_write_t, queue);
req->error = error;

QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
uv__queue_insert_tail(&stream->write_completed_queue, &req->queue);
}
}

@ -627,7 +627,7 @@ static void uv__drain(uv_stream_t* stream) {
uv_shutdown_t* req;
int err;

assert(QUEUE_EMPTY(&stream->write_queue));
assert(uv__queue_empty(&stream->write_queue));
if (!(stream->flags & UV_HANDLE_CLOSING)) {
uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
uv__stream_osx_interrupt_select(stream);
@ -714,7 +714,7 @@ static void uv__write_req_finish(uv_write_t* req) {
uv_stream_t* stream = req->handle;

/* Pop the req off tcp->write_queue. */
QUEUE_REMOVE(&req->queue);
uv__queue_remove(&req->queue);

/* Only free when there was no error. On error, we touch up write_queue_size
* right before making the callback. The reason we don't do that right away
@ -731,7 +731,7 @@ static void uv__write_req_finish(uv_write_t* req) {
/* Add it to the write_completed_queue where it will have its
* callback called in the near future.
*/
QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
uv__queue_insert_tail(&stream->write_completed_queue, &req->queue);
uv__io_feed(stream->loop, &stream->io_watcher);
}

@ -837,7 +837,7 @@ static int uv__try_write(uv_stream_t* stream,
}

static void uv__write(uv_stream_t* stream) {
QUEUE* q;
struct uv__queue* q;
uv_write_t* req;
ssize_t n;
int count;
@ -851,11 +851,11 @@ static void uv__write(uv_stream_t* stream) {
count = 32;

for (;;) {
if (QUEUE_EMPTY(&stream->write_queue))
if (uv__queue_empty(&stream->write_queue))
return;

q = QUEUE_HEAD(&stream->write_queue);
req = QUEUE_DATA(q, uv_write_t, queue);
q = uv__queue_head(&stream->write_queue);
req = uv__queue_data(q, uv_write_t, queue);
assert(req->handle == stream);

n = uv__try_write(stream,
@ -899,19 +899,19 @@ error:

static void uv__write_callbacks(uv_stream_t* stream) {
uv_write_t* req;
QUEUE* q;
QUEUE pq;
struct uv__queue* q;
struct uv__queue pq;

if (QUEUE_EMPTY(&stream->write_completed_queue))
if (uv__queue_empty(&stream->write_completed_queue))
return;

QUEUE_MOVE(&stream->write_completed_queue, &pq);
uv__queue_move(&stream->write_completed_queue, &pq);

while (!QUEUE_EMPTY(&pq)) {
while (!uv__queue_empty(&pq)) {
/* Pop a req off write_completed_queue. */
q = QUEUE_HEAD(&pq);
req = QUEUE_DATA(q, uv_write_t, queue);
QUEUE_REMOVE(q);
q = uv__queue_head(&pq);
req = uv__queue_data(q, uv_write_t, queue);
uv__queue_remove(q);
uv__req_unregister(stream->loop, req);

if (req->bufs != NULL) {
@ -1174,7 +1174,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
stream->shutdown_req = req;
stream->flags &= ~UV_HANDLE_WRITABLE;

if (QUEUE_EMPTY(&stream->write_queue))
if (uv__queue_empty(&stream->write_queue))
uv__io_feed(stream->loop, &stream->io_watcher);

return 0;
@ -1227,7 +1227,7 @@ static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
uv__write_callbacks(stream);

/* Write queue drained. */
if (QUEUE_EMPTY(&stream->write_queue))
if (uv__queue_empty(&stream->write_queue))
uv__drain(stream);
}
}
@ -1270,7 +1270,7 @@ static void uv__stream_connect(uv_stream_t* stream) {
stream->connect_req = NULL;
uv__req_unregister(stream->loop, req);

if (error < 0 || QUEUE_EMPTY(&stream->write_queue)) {
if (error < 0 || uv__queue_empty(&stream->write_queue)) {
uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
}

@ -1352,7 +1352,7 @@ int uv_write2(uv_write_t* req,
req->handle = stream;
req->error = 0;
req->send_handle = send_handle;
QUEUE_INIT(&req->queue);
uv__queue_init(&req->queue);

req->bufs = req->bufsml;
if (nbufs > ARRAY_SIZE(req->bufsml))
@ -1367,7 +1367,7 @@ int uv_write2(uv_write_t* req,
stream->write_queue_size += uv__count_bufs(bufs, nbufs);

/* Append the request to write_queue. */
QUEUE_INSERT_TAIL(&stream->write_queue, &req->queue);
uv__queue_insert_tail(&stream->write_queue, &req->queue);

/* If the queue was empty when this function began, we should attempt to
* do the write immediately. Otherwise start the write_watcher and wait
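The stream hunks show the same two-queue discipline as process.c, plus one more trick: uv__write_callbacks() splices the entire write_completed_queue into the local pq before invoking any callback, so callbacks that enqueue fresh writes or tear the stream down never race the traversal. Splicing one circular intrusive list onto another head is O(1); a sketch in the style of uv__queue_move, reusing the queue helpers sketched earlier (illustrative, not libuv's exact code):

/* Move every node from h onto n, leaving h empty. */
static void queue_move(struct queue* h, struct queue* n) {
  if (queue_empty(h)) {
    queue_init(n);
  } else {
    n->next = h->next;
    n->prev = h->prev;
    n->next->prev = n;
    n->prev->next = n;
    queue_init(h);
  }
}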
18
deps/libuv/src/unix/sunos.c
vendored
@ -148,7 +148,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
struct port_event events[1024];
struct port_event* pe;
struct timespec spec;
QUEUE* q;
struct uv__queue* q;
uv__io_t* w;
sigset_t* pset;
sigset_t set;
@ -166,16 +166,16 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int reset_timeout;

if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
assert(uv__queue_empty(&loop->watcher_queue));
return;
}

while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
while (!uv__queue_empty(&loop->watcher_queue)) {
q = uv__queue_head(&loop->watcher_queue);
uv__queue_remove(q);
uv__queue_init(q);

w = QUEUE_DATA(q, uv__io_t, watcher_queue);
w = uv__queue_data(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);

if (port_associate(loop->backend_fd,
@ -316,8 +316,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
continue; /* Disabled by callback. */

/* Events Ports operates in oneshot mode, rearm timer on next run. */
if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue))
QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
if (w->pevents != 0 && uv__queue_empty(&w->watcher_queue))
uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
}

uv__metrics_inc_events(loop, nevents);
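Event ports on SunOS are oneshot: delivering an event severs the fd's association, so the watcher pushes itself back onto watcher_queue and uv__io_poll() re-runs port_associate() on the next iteration. The underlying rearm call, as a standalone hedged sketch (Solaris/illumos only; port_fd and user_data are caller-managed):

#include <port.h>
#include <poll.h>

/* Re-establish interest in fd after an event has fired, mirroring why
 * the loop above re-queues the watcher for the next poll pass. */
static int rearm(int port_fd, int fd, void* user_data) {
  return port_associate(port_fd, PORT_SOURCE_FD, fd, POLLIN, user_data);
}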
4
deps/libuv/src/unix/tcp.c
vendored
@ -124,7 +124,7 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) {
if (domain != AF_UNSPEC) {
err = new_socket(tcp, domain, 0);
if (err) {
QUEUE_REMOVE(&tcp->handle_queue);
uv__queue_remove(&tcp->handle_queue);
if (tcp->io_watcher.fd != -1)
uv__close(tcp->io_watcher.fd);
tcp->io_watcher.fd = -1;
@ -252,7 +252,7 @@ out:
uv__req_init(handle->loop, req, UV_CONNECT);
req->cb = cb;
req->handle = (uv_stream_t*) handle;
QUEUE_INIT(&req->queue);
uv__queue_init(&req->queue);
handle->connect_req = req;

uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
2
deps/libuv/src/unix/tty.c
vendored
@ -222,7 +222,7 @@ skip:
int rc = r;
if (newfd != -1)
uv__close(newfd);
QUEUE_REMOVE(&tty->handle_queue);
uv__queue_remove(&tty->handle_queue);
do
r = fcntl(fd, F_SETFL, saved_flags);
while (r == -1 && errno == EINTR);
76
deps/libuv/src/unix/udp.c
vendored
@ -62,18 +62,18 @@ void uv__udp_close(uv_udp_t* handle) {

void uv__udp_finish_close(uv_udp_t* handle) {
uv_udp_send_t* req;
QUEUE* q;
struct uv__queue* q;

assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
assert(handle->io_watcher.fd == -1);

while (!QUEUE_EMPTY(&handle->write_queue)) {
q = QUEUE_HEAD(&handle->write_queue);
QUEUE_REMOVE(q);
while (!uv__queue_empty(&handle->write_queue)) {
q = uv__queue_head(&handle->write_queue);
uv__queue_remove(q);

req = QUEUE_DATA(q, uv_udp_send_t, queue);
req = uv__queue_data(q, uv_udp_send_t, queue);
req->status = UV_ECANCELED;
QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
}

uv__udp_run_completed(handle);
@ -90,16 +90,16 @@ void uv__udp_finish_close(uv_udp_t* handle) {

static void uv__udp_run_completed(uv_udp_t* handle) {
uv_udp_send_t* req;
QUEUE* q;
struct uv__queue* q;

assert(!(handle->flags & UV_HANDLE_UDP_PROCESSING));
handle->flags |= UV_HANDLE_UDP_PROCESSING;

while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
q = QUEUE_HEAD(&handle->write_completed_queue);
QUEUE_REMOVE(q);
while (!uv__queue_empty(&handle->write_completed_queue)) {
q = uv__queue_head(&handle->write_completed_queue);
uv__queue_remove(q);

req = QUEUE_DATA(q, uv_udp_send_t, queue);
req = uv__queue_data(q, uv_udp_send_t, queue);
uv__req_unregister(handle->loop, req);

handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
@ -121,7 +121,7 @@ static void uv__udp_run_completed(uv_udp_t* handle) {
req->send_cb(req, req->status);
}

if (QUEUE_EMPTY(&handle->write_queue)) {
if (uv__queue_empty(&handle->write_queue)) {
/* Pending queue and completion queue empty, stop watcher. */
uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT);
if (!uv__io_active(&handle->io_watcher, POLLIN))
@ -280,20 +280,20 @@ static void uv__udp_sendmsg(uv_udp_t* handle) {
uv_udp_send_t* req;
struct mmsghdr h[20];
struct mmsghdr* p;
QUEUE* q;
struct uv__queue* q;
ssize_t npkts;
size_t pkts;
size_t i;

if (QUEUE_EMPTY(&handle->write_queue))
if (uv__queue_empty(&handle->write_queue))
return;

write_queue_drain:
for (pkts = 0, q = QUEUE_HEAD(&handle->write_queue);
for (pkts = 0, q = uv__queue_head(&handle->write_queue);
pkts < ARRAY_SIZE(h) && q != &handle->write_queue;
++pkts, q = QUEUE_HEAD(q)) {
++pkts, q = uv__queue_head(q)) {
assert(q != NULL);
req = QUEUE_DATA(q, uv_udp_send_t, queue);
req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);

p = &h[pkts];
@ -325,16 +325,16 @@ write_queue_drain:
if (npkts < 1) {
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
return;
for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
for (i = 0, q = uv__queue_head(&handle->write_queue);
i < pkts && q != &handle->write_queue;
++i, q = QUEUE_HEAD(&handle->write_queue)) {
++i, q = uv__queue_head(&handle->write_queue)) {
assert(q != NULL);
req = QUEUE_DATA(q, uv_udp_send_t, queue);
req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);

req->status = UV__ERR(errno);
QUEUE_REMOVE(&req->queue);
QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
uv__queue_remove(&req->queue);
uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
}
uv__io_feed(handle->loop, &handle->io_watcher);
return;
@ -343,11 +343,11 @@ write_queue_drain:
/* Safety: npkts known to be >0 below. Hence cast from ssize_t
* to size_t safe.
*/
for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
for (i = 0, q = uv__queue_head(&handle->write_queue);
i < (size_t)npkts && q != &handle->write_queue;
++i, q = QUEUE_HEAD(&handle->write_queue)) {
++i, q = uv__queue_head(&handle->write_queue)) {
assert(q != NULL);
req = QUEUE_DATA(q, uv_udp_send_t, queue);
req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);

req->status = req->bufs[0].len;
@ -357,25 +357,25 @@ write_queue_drain:
* why we don't handle partial writes. Just pop the request
* off the write queue and onto the completed queue, done.
*/
QUEUE_REMOVE(&req->queue);
QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
uv__queue_remove(&req->queue);
uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
}

/* couldn't batch everything, continue sending (jump to avoid stack growth) */
if (!QUEUE_EMPTY(&handle->write_queue))
if (!uv__queue_empty(&handle->write_queue))
goto write_queue_drain;
uv__io_feed(handle->loop, &handle->io_watcher);
#else /* __linux__ || ____FreeBSD__ */
uv_udp_send_t* req;
struct msghdr h;
QUEUE* q;
struct uv__queue* q;
ssize_t size;

while (!QUEUE_EMPTY(&handle->write_queue)) {
q = QUEUE_HEAD(&handle->write_queue);
while (!uv__queue_empty(&handle->write_queue)) {
q = uv__queue_head(&handle->write_queue);
assert(q != NULL);

req = QUEUE_DATA(q, uv_udp_send_t, queue);
req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);

memset(&h, 0, sizeof h);
@ -414,8 +414,8 @@ write_queue_drain:
* why we don't handle partial writes. Just pop the request
* off the write queue and onto the completed queue, done.
*/
QUEUE_REMOVE(&req->queue);
QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
uv__queue_remove(&req->queue);
uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
uv__io_feed(handle->loop, &handle->io_watcher);
}
#endif /* __linux__ || ____FreeBSD__ */
@ -729,7 +729,7 @@ int uv__udp_send(uv_udp_send_t* req,
memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
handle->send_queue_count++;
QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
uv__queue_insert_tail(&handle->write_queue, &req->queue);
uv__handle_start(handle);

if (empty_queue && !(handle->flags & UV_HANDLE_UDP_PROCESSING)) {
@ -739,7 +739,7 @@ int uv__udp_send(uv_udp_send_t* req,
* away. In such cases the `io_watcher` has to be queued for asynchronous
* write.
*/
if (!QUEUE_EMPTY(&handle->write_queue))
if (!uv__queue_empty(&handle->write_queue))
uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
} else {
uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
@ -1007,8 +1007,8 @@ int uv__udp_init_ex(uv_loop_t* loop,
handle->send_queue_size = 0;
handle->send_queue_count = 0;
uv__io_init(&handle->io_watcher, uv__udp_io, fd);
QUEUE_INIT(&handle->write_queue);
QUEUE_INIT(&handle->write_completed_queue);
uv__queue_init(&handle->write_queue);
uv__queue_init(&handle->write_completed_queue);

return 0;
}
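On Linux and FreeBSD, uv__udp_sendmsg() drains the write queue in batches of up to 20 packets per sendmmsg(2) call, then jumps back to write_queue_drain while requests remain, paying one syscall per batch instead of one per datagram. The core of that batching, as a standalone hedged sketch (Linux; send_batch and its one-iovec-per-packet layout are illustrative simplifications):

#define _GNU_SOURCE
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Send up to 20 iovecs as separate datagrams in one syscall, echoing
 * libuv's struct mmsghdr h[20] cap. Returns packets sent or -1. */
static int send_batch(int fd, struct iovec* iov, unsigned int count) {
  struct mmsghdr msgs[20];
  unsigned int i;
  if (count > 20)
    count = 20;
  memset(msgs, 0, sizeof(msgs));
  for (i = 0; i < count; i++) {
    msgs[i].msg_hdr.msg_iov = &iov[i];
    msgs[i].msg_hdr.msg_iovlen = 1;
  }
  return sendmmsg(fd, msgs, count, 0);
}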