libuv 1.45.0, #include cleanup, probably something else.
git-svn-id: https://www.unprompted.com/svn/projects/tildefriends/trunk@4308 ed5197a5-7fde-0310-b194-c3ffbd925b24
Changed paths (summary): Makefile, core/, and the vendored deps/libuv/ tree,
including .github/, docs/, include/, src/ (inet.c, thread-common.c,
threadpool.c, uv-common.c/.h, unix/*, win/*), test/, and the build files
(CMakeLists.txt, Makefile.am, configure.ac, autogen.sh, cmake-toolchains/).
deps/libuv/src/inet.c (vendored): 9 lines changed

@@ -17,12 +17,7 @@
 #include <stdio.h>
 #include <string.h>
 
-#if defined(_MSC_VER) && _MSC_VER < 1600
-# include "uv/stdint-msvc2008.h"
-#else
-# include <stdint.h>
-#endif
+#include <stdint.h>
 
 #include "uv.h"
 #include "uv-common.h"
@@ -135,7 +130,7 @@ static int inet_ntop6(const unsigned char *src, char *dst, size_t size) {
       tp += strlen(tp);
       break;
     }
-    tp += sprintf(tp, "%x", words[i]);
+    tp += snprintf(tp, sizeof tmp - (tp - tmp), "%x", words[i]);
   }
   /* Was it a trailing run of 0x00's? */
   if (best.base != -1 && (best.base + best.len) == ARRAY_SIZE(words))
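The second hunk swaps an unbounded sprintf() for a snprintf() capped at the
space remaining in the function's fixed-size tmp buffer. A standalone sketch
of that bounded-append pattern, with hypothetical values (not libuv code):

#include <stdio.h>

int main(void) {
  /* Worst-case IPv6 string, the same way inet_ntop6 sizes its tmp buffer. */
  char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"];
  char* tp = tmp;
  unsigned words[] = { 0x2001, 0xdb8, 0x1 };
  size_t i;

  /* Each write is capped at the bytes remaining after tp. Note snprintf
   * returns the would-be length, so the buffer must still be sized for the
   * worst case overall, as it is here. */
  for (i = 0; i < sizeof(words) / sizeof(words[0]); i++)
    tp += snprintf(tp, sizeof tmp - (tp - tmp), "%x:", words[i]);
  printf("%s\n", tmp);
  return 0;
}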
deps/libuv/src/thread-common.c (vendored, new file): 175 lines

@@ -0,0 +1,175 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "uv-common.h"
+
+#include <stdlib.h>
+#ifndef _WIN32
+#include <pthread.h>
+#endif
+
+#if defined(PTHREAD_BARRIER_SERIAL_THREAD)
+STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
+#endif
+
+/* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */
+#if defined(_AIX) || \
+    defined(__OpenBSD__) || \
+    !defined(PTHREAD_BARRIER_SERIAL_THREAD)
+int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
+  int rc;
+#ifdef _WIN32
+  uv_barrier_t* b;
+  b = barrier;
+
+  if (barrier == NULL || count == 0)
+    return UV_EINVAL;
+#else
+  struct _uv_barrier* b;
+
+  if (barrier == NULL || count == 0)
+    return UV_EINVAL;
+
+  b = uv__malloc(sizeof(*b));
+  if (b == NULL)
+    return UV_ENOMEM;
+#endif
+
+  b->in = 0;
+  b->out = 0;
+  b->threshold = count;
+
+  rc = uv_mutex_init(&b->mutex);
+  if (rc != 0)
+    goto error2;
+
+  /* TODO(vjnash): remove these uv_cond_t casts in v2. */
+  rc = uv_cond_init((uv_cond_t*) &b->cond);
+  if (rc != 0)
+    goto error;
+
+#ifndef _WIN32
+  barrier->b = b;
+#endif
+  return 0;
+
+error:
+  uv_mutex_destroy(&b->mutex);
+error2:
+#ifndef _WIN32
+  uv__free(b);
+#endif
+  return rc;
+}
+
+
+int uv_barrier_wait(uv_barrier_t* barrier) {
+  int last;
+#ifdef _WIN32
+  uv_barrier_t* b;
+  b = barrier;
+#else
+  struct _uv_barrier* b;
+
+  if (barrier == NULL || barrier->b == NULL)
+    return UV_EINVAL;
+
+  b = barrier->b;
+#endif
+
+  uv_mutex_lock(&b->mutex);
+
+  while (b->out != 0)
+    uv_cond_wait((uv_cond_t*) &b->cond, &b->mutex);
+
+  if (++b->in == b->threshold) {
+    b->in = 0;
+    b->out = b->threshold;
+    uv_cond_broadcast((uv_cond_t*) &b->cond);
+  } else {
+    do
+      uv_cond_wait((uv_cond_t*) &b->cond, &b->mutex);
+    while (b->in != 0);
+  }
+
+  last = (--b->out == 0);
+  if (last)
+    uv_cond_broadcast((uv_cond_t*) &b->cond);
+
+  uv_mutex_unlock(&b->mutex);
+  return last;
+}
+
+
+void uv_barrier_destroy(uv_barrier_t* barrier) {
+#ifdef _WIN32
+  uv_barrier_t* b;
+  b = barrier;
+#else
+  struct _uv_barrier* b;
+  b = barrier->b;
+#endif
+
+  uv_mutex_lock(&b->mutex);
+
+  assert(b->in == 0);
+  while (b->out != 0)
+    uv_cond_wait((uv_cond_t*) &b->cond, &b->mutex);
+
+  if (b->in != 0)
+    abort();
+
+  uv_mutex_unlock(&b->mutex);
+  uv_mutex_destroy(&b->mutex);
+  uv_cond_destroy((uv_cond_t*) &b->cond);
+
+#ifndef _WIN32
+  uv__free(barrier->b);
+  barrier->b = NULL;
+#endif
+}
+
+#else
+
+int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
+  return UV__ERR(pthread_barrier_init(barrier, NULL, count));
+}
+
+
+int uv_barrier_wait(uv_barrier_t* barrier) {
+  int rc;
+
+  rc = pthread_barrier_wait(barrier);
+  if (rc != 0)
+    if (rc != PTHREAD_BARRIER_SERIAL_THREAD)
+      abort();
+
+  return rc == PTHREAD_BARRIER_SERIAL_THREAD;
+}
+
+
+void uv_barrier_destroy(uv_barrier_t* barrier) {
+  if (pthread_barrier_destroy(barrier))
+    abort();
+}
+
+#endif
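The portable barrier above counts threads in and out under a mutex, and
uv_barrier_wait() returns nonzero in exactly one "serializer" waiter, which is
the safe place to destroy the barrier. A minimal usage sketch of this public
API (thread count and flow are made up for illustration):

#include <uv.h>

static uv_barrier_t blocker;

static void worker(void* arg) {
  (void) arg;
  /* ... per-thread setup would happen here ... */
  if (uv_barrier_wait(&blocker) > 0)
    uv_barrier_destroy(&blocker);  /* exactly one thread cleans up */
}

int main(void) {
  uv_thread_t threads[4];
  int i;

  if (uv_barrier_init(&blocker, 4))
    return 1;
  for (i = 0; i < 4; i++)
    uv_thread_create(&threads[i], worker, NULL);
  for (i = 0; i < 4; i++)
    uv_thread_join(&threads[i]);
  return 0;
}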
deps/libuv/src/threadpool.c (vendored): 27 lines changed

@@ -191,6 +191,7 @@ void uv__threadpool_cleanup(void) {
 
 
 static void init_threads(void) {
+  uv_thread_options_t config;
   unsigned int i;
   const char* val;
   uv_sem_t sem;
@@ -226,8 +227,11 @@ static void init_threads(void) {
   if (uv_sem_init(&sem, 0))
    abort();
 
+  config.flags = UV_THREAD_HAS_STACK_SIZE;
+  config.stack_size = 8u << 20;  /* 8 MB */
+
   for (i = 0; i < nthreads; i++)
-    if (uv_thread_create(threads + i, worker, &sem))
+    if (uv_thread_create_ex(threads + i, &config, worker, &sem))
      abort();
 
   for (i = 0; i < nthreads; i++)
@@ -271,9 +275,13 @@ void uv__work_submit(uv_loop_t* loop,
 }
 
 
+/* TODO(bnoordhuis) teach libuv how to cancel file operations
+ * that go through io_uring instead of the thread pool.
+ */
 static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
   int cancelled;
 
+  uv_once(&once, init_once);  /* Ensure |mutex| is initialized. */
   uv_mutex_lock(&mutex);
   uv_mutex_lock(&w->loop->wq_mutex);
 
@@ -303,12 +311,15 @@ void uv__work_done(uv_async_t* handle) {
   QUEUE* q;
   QUEUE wq;
   int err;
+  int nevents;
 
   loop = container_of(handle, uv_loop_t, wq_async);
   uv_mutex_lock(&loop->wq_mutex);
   QUEUE_MOVE(&loop->wq, &wq);
   uv_mutex_unlock(&loop->wq_mutex);
 
+  nevents = 0;
+
   while (!QUEUE_EMPTY(&wq)) {
     q = QUEUE_HEAD(&wq);
     QUEUE_REMOVE(q);
@@ -316,6 +327,20 @@ void uv__work_done(uv_async_t* handle) {
     w = container_of(q, struct uv__work, wq);
     err = (w->work == uv__cancelled) ? UV_ECANCELED : 0;
     w->done(w, err);
+    nevents++;
   }
+
+  /* This check accomplishes 2 things:
+   * 1. Even if the queue was empty, the call to uv__work_done() should count
+   *    as an event. Which will have been added by the event loop when
+   *    calling this callback.
+   * 2. Prevents accidental wrap around in case nevents == 0 events == 0.
+   */
+  if (nevents > 1) {
+    /* Subtract 1 to counter the call to uv__work_done(). */
+    uv__metrics_inc_events(loop, nevents - 1);
+    if (uv__get_internal_fields(loop)->current_timeout == 0)
+      uv__metrics_inc_events_waiting(loop, nevents - 1);
+  }
 }
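The pool now sizes worker stacks explicitly (8 MB) instead of taking the
platform default. A short sketch of the same mechanism through the public
uv_thread_create_ex() API; the entry function and values are illustrative:

#include <uv.h>

static void entry(void* arg) { (void) arg; /* thread body */ }

int main(void) {
  uv_thread_t tid;
  uv_thread_options_t options;

  options.flags = UV_THREAD_HAS_STACK_SIZE;
  options.stack_size = 8u << 20;  /* 8 MB; rounded up to platform minimums */
  if (uv_thread_create_ex(&tid, &options, entry, NULL))
    return 1;
  uv_thread_join(&tid);
  return 0;
}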
deps/libuv/src/unix/aix.c (vendored): 20 lines changed

@@ -131,6 +131,7 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) {
 
 
 void uv__io_poll(uv_loop_t* loop, int timeout) {
+  uv__loop_internal_fields_t* lfields;
   struct pollfd events[1024];
   struct pollfd pqry;
   struct pollfd* pe;
@@ -154,6 +155,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
     return;
   }
 
+  lfields = uv__get_internal_fields(loop);
+
   while (!QUEUE_EMPTY(&loop->watcher_queue)) {
     q = QUEUE_HEAD(&loop->watcher_queue);
     QUEUE_REMOVE(q);
@@ -217,7 +220,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   base = loop->time;
   count = 48; /* Benchmarks suggest this gives the best throughput. */
 
-  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+  if (lfields->flags & UV_METRICS_IDLE_TIME) {
     reset_timeout = 1;
     user_timeout = timeout;
     timeout = 0;
@@ -232,6 +235,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
     if (timeout != 0)
       uv__metrics_set_provider_entry_time(loop);
 
+    /* Store the current timeout in a location that's globally accessible so
+     * other locations like uv__work_done() can determine whether the queue
+     * of events in the callback were waiting when poll was called.
+     */
+    lfields->current_timeout = timeout;
+
     nfds = pollset_poll(loop->backend_fd,
                         events,
                         ARRAY_SIZE(events),
@@ -321,9 +330,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
       nevents++;
     }
 
+    uv__metrics_inc_events(loop, nevents);
     if (reset_timeout != 0) {
       timeout = user_timeout;
       reset_timeout = 0;
+      uv__metrics_inc_events_waiting(loop, nevents);
     }
 
     if (have_signals != 0) {
@@ -389,6 +400,11 @@ uint64_t uv_get_constrained_memory(void) {
 }
 
 
+uint64_t uv_get_available_memory(void) {
+  return uv_get_free_memory();
+}
+
+
 void uv_loadavg(double avg[3]) {
   perfstat_cpu_total_t ps_total;
   int result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
@@ -425,7 +441,7 @@ static char* uv__rawname(const char* cp, char (*dst)[FILENAME_MAX+1]) {
 static int uv__path_is_a_directory(char* filename) {
   struct stat statbuf;
 
-  if (stat(filename, &statbuf) < 0)
+  if (uv__stat(filename, &statbuf) < 0)
     return -1; /* failed: not a directory, assume it is a file */
 
   if (statbuf.st_type == VDIR)
deps/libuv/src/unix/async.c (vendored): 146 lines changed

@@ -24,9 +24,9 @@
 
 #include "uv.h"
 #include "internal.h"
-#include "atomic-ops.h"
 
 #include <errno.h>
+#include <stdatomic.h>
 #include <stdio.h>  /* snprintf() */
 #include <assert.h>
 #include <stdlib.h>
@@ -40,6 +40,7 @@
 
 static void uv__async_send(uv_loop_t* loop);
 static int uv__async_start(uv_loop_t* loop);
+static void uv__cpu_relax(void);
 
 
 int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
@@ -52,6 +53,7 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
   uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
   handle->async_cb = async_cb;
   handle->pending = 0;
+  handle->u.fd = 0; /* This will be used as a busy flag. */
 
   QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue);
   uv__handle_start(handle);
@@ -61,46 +63,54 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
 
 
 int uv_async_send(uv_async_t* handle) {
+  _Atomic int* pending;
+  _Atomic int* busy;
+
+  pending = (_Atomic int*) &handle->pending;
+  busy = (_Atomic int*) &handle->u.fd;
+
   /* Do a cheap read first. */
-  if (ACCESS_ONCE(int, handle->pending) != 0)
+  if (atomic_load_explicit(pending, memory_order_relaxed) != 0)
     return 0;
 
-  /* Tell the other thread we're busy with the handle. */
-  if (cmpxchgi(&handle->pending, 0, 1) != 0)
-    return 0;
+  /* Set the loop to busy. */
+  atomic_fetch_add(busy, 1);
 
   /* Wake up the other thread's event loop. */
-  uv__async_send(handle->loop);
+  if (atomic_exchange(pending, 1) == 0)
+    uv__async_send(handle->loop);
 
-  /* Tell the other thread we're done. */
-  if (cmpxchgi(&handle->pending, 1, 2) != 1)
-    abort();
+  /* Set the loop to not-busy. */
+  atomic_fetch_add(busy, -1);
 
   return 0;
 }
 
 
-/* Only call this from the event loop thread. */
-static int uv__async_spin(uv_async_t* handle) {
+/* Wait for the busy flag to clear before closing.
+ * Only call this from the event loop thread. */
+static void uv__async_spin(uv_async_t* handle) {
+  _Atomic int* pending;
+  _Atomic int* busy;
   int i;
-  int rc;
+
+  pending = (_Atomic int*) &handle->pending;
+  busy = (_Atomic int*) &handle->u.fd;
+
+  /* Set the pending flag first, so no new events will be added by other
+   * threads after this function returns. */
+  atomic_store(pending, 1);
 
   for (;;) {
-    /* 997 is not completely chosen at random. It's a prime number, acyclical
-     * by nature, and should therefore hopefully dampen sympathetic resonance.
+    /* 997 is not completely chosen at random. It's a prime number, acyclic by
+     * nature, and should therefore hopefully dampen sympathetic resonance.
      */
     for (i = 0; i < 997; i++) {
-      /* rc=0 -- handle is not pending.
-       * rc=1 -- handle is pending, other thread is still working with it.
-       * rc=2 -- handle is pending, other thread is done.
-       */
-      rc = cmpxchgi(&handle->pending, 2, 0);
-
-      if (rc != 1)
-        return rc;
+      if (atomic_load(busy) == 0)
+        return;
 
       /* Other thread is busy with this handle, spin until it's done. */
-      cpu_relax();
+      uv__cpu_relax();
     }
 
     /* Yield the CPU. We may have preempted the other thread while it's
@@ -125,6 +135,7 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
   QUEUE queue;
   QUEUE* q;
   uv_async_t* h;
+  _Atomic int *pending;
 
   assert(w == &loop->async_io_watcher);
 
@@ -154,8 +165,10 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
     QUEUE_REMOVE(q);
     QUEUE_INSERT_TAIL(&loop->async_handles, q);
 
-    if (0 == uv__async_spin(h))
-      continue;  /* Not pending. */
+    /* Atomically fetch and clear pending flag */
+    pending = (_Atomic int*) &h->pending;
+    if (atomic_exchange(pending, 0) == 0)
+      continue;
 
     if (h->async_cb == NULL)
       continue;
@@ -227,20 +240,28 @@ static int uv__async_start(uv_loop_t* loop) {
 }
 
 
-int uv__async_fork(uv_loop_t* loop) {
-  if (loop->async_io_watcher.fd == -1) /* never started */
-    return 0;
-
-  uv__async_stop(loop);
-
-  return uv__async_start(loop);
-}
-
-
 void uv__async_stop(uv_loop_t* loop) {
+  QUEUE queue;
+  QUEUE* q;
+  uv_async_t* h;
+
   if (loop->async_io_watcher.fd == -1)
     return;
 
+  /* Make sure no other thread is accessing the async handle fd after the loop
+   * cleanup.
+   */
+  QUEUE_MOVE(&loop->async_handles, &queue);
+  while (!QUEUE_EMPTY(&queue)) {
+    q = QUEUE_HEAD(&queue);
+    h = QUEUE_DATA(q, uv_async_t, queue);
+
+    QUEUE_REMOVE(q);
+    QUEUE_INSERT_TAIL(&loop->async_handles, q);
+
+    uv__async_spin(h);
+  }
+
   if (loop->async_wfd != -1) {
     if (loop->async_wfd != loop->async_io_watcher.fd)
       uv__close(loop->async_wfd);
@@ -251,3 +272,58 @@ void uv__async_stop(uv_loop_t* loop) {
     uv__close(loop->async_io_watcher.fd);
   loop->async_io_watcher.fd = -1;
 }
+
+
+int uv__async_fork(uv_loop_t* loop) {
+  QUEUE queue;
+  QUEUE* q;
+  uv_async_t* h;
+
+  if (loop->async_io_watcher.fd == -1) /* never started */
+    return 0;
+
+  QUEUE_MOVE(&loop->async_handles, &queue);
+  while (!QUEUE_EMPTY(&queue)) {
+    q = QUEUE_HEAD(&queue);
+    h = QUEUE_DATA(q, uv_async_t, queue);
+
+    QUEUE_REMOVE(q);
+    QUEUE_INSERT_TAIL(&loop->async_handles, q);
+
+    /* The state of any thread that set pending is now likely corrupt in this
+     * child because the user called fork, so just clear these flags and move
+     * on. Calling most libc functions after `fork` is declared to be undefined
+     * behavior anyways, unless async-signal-safe, for multithreaded programs
+     * like libuv, and nothing interesting in pthreads is async-signal-safe.
+     */
+    h->pending = 0;
+    /* This is the busy flag, and we just abruptly lost all other threads. */
+    h->u.fd = 0;
+  }
+
+  /* Recreate these, since they still exist, but belong to the wrong pid now. */
+  if (loop->async_wfd != -1) {
+    if (loop->async_wfd != loop->async_io_watcher.fd)
+      uv__close(loop->async_wfd);
+    loop->async_wfd = -1;
+  }
+
+  uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
+  uv__close(loop->async_io_watcher.fd);
+  loop->async_io_watcher.fd = -1;
+
+  return uv__async_start(loop);
+}
+
+
+static void uv__cpu_relax(void) {
+#if defined(__i386__) || defined(__x86_64__)
+  __asm__ __volatile__ ("rep; nop" ::: "memory");  /* a.k.a. PAUSE */
+#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
+  __asm__ __volatile__ ("yield" ::: "memory");
+#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
+  __asm volatile ("" : : : "memory");
+#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
+  __asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
+#endif
+}
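The rewrite replaces the old three-state pending protocol with a C11 pending
flag plus a busy refcount: atomic_exchange() lets exactly one of many
concurrent uv_async_send() calls wake the loop, and uv__async_spin() waits on
the busy count before close. A minimal usage sketch of the public side of
this (flow and names are illustrative); note that many sends may coalesce
into a single callback:

#include <uv.h>

static void on_wakeup(uv_async_t* handle) {
  /* Runs on the loop thread; one invocation may cover several sends. */
  uv_close((uv_handle_t*) handle, NULL);
}

static void producer(void* arg) {
  /* uv_async_send() is the one libuv call that is safe off-loop. */
  uv_async_send((uv_async_t*) arg);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_async_t async;
  uv_thread_t thread;

  uv_async_init(loop, &async, on_wakeup);
  uv_thread_create(&thread, producer, &async);
  uv_run(loop, UV_RUN_DEFAULT);  /* exits once the handle is closed */
  uv_thread_join(&thread);
  return 0;
}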
deps/libuv/src/unix/atomic-ops.h (vendored, deleted): 64 lines

@@ -1,64 +0,0 @@
-/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef UV_ATOMIC_OPS_H_
-#define UV_ATOMIC_OPS_H_
-
-#include "internal.h"  /* UV_UNUSED */
-
-#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
-#include <atomic.h>
-#endif
-
-UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval));
-UV_UNUSED(static void cpu_relax(void));
-
-/* Prefer hand-rolled assembly over the gcc builtins because the latter also
- * issue full memory barriers.
- */
-UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) {
-#if defined(__i386__) || defined(__x86_64__)
-  int out;
-  __asm__ __volatile__ ("lock; cmpxchg %2, %1;"
-                        : "=a" (out), "+m" (*(volatile int*) ptr)
-                        : "r" (newval), "0" (oldval)
-                        : "memory");
-  return out;
-#elif defined(__MVS__)
-  /* Use hand-rolled assembly because codegen from builtin __plo_CSST results in
-   * a runtime bug.
-   */
-  __asm(" cs %0,%2,%1 \n " : "+r"(oldval), "+m"(*ptr) : "r"(newval) :);
-  return oldval;
-#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
-  return atomic_cas_uint((uint_t *)ptr, (uint_t)oldval, (uint_t)newval);
-#else
-  return __sync_val_compare_and_swap(ptr, oldval, newval);
-#endif
-}
-
-UV_UNUSED(static void cpu_relax(void)) {
-#if defined(__i386__) || defined(__x86_64__)
-  __asm__ __volatile__ ("rep; nop" ::: "memory");  /* a.k.a. PAUSE */
-#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
-  __asm__ __volatile__ ("yield" ::: "memory");
-#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
-  __asm volatile ("" : : : "memory");
-#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
-  __asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
-#endif
-}
-
-#endif  /* UV_ATOMIC_OPS_H_ */
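This header is now redundant: C11 <stdatomic.h> covers both helpers
(uv__cpu_relax() replaces cpu_relax() in async.c). For reference, a sketch of
how the removed cmpxchgi() contract maps onto the standard API; this wrapper
is not part of the commit, just an illustration:

#include <stdatomic.h>

/* Returns the value observed before the attempted swap, matching the old
 * cmpxchgi() contract. */
static int cmpxchgi_c11(_Atomic int* ptr, int oldval, int newval) {
  int expected = oldval;
  /* On failure, 'expected' is overwritten with the current value. */
  atomic_compare_exchange_strong(ptr, &expected, newval);
  return expected;
}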
deps/libuv/src/unix/core.c (vendored): 194 lines changed

@@ -41,12 +41,13 @@
 #include <sys/uio.h> /* writev */
 #include <sys/resource.h> /* getrusage */
 #include <pwd.h>
+#include <grp.h>
 #include <sys/utsname.h>
 #include <sys/time.h>
+#include <time.h> /* clock_gettime */
 
 #ifdef __sun
 # include <sys/filio.h>
 # include <sys/types.h>
 # include <sys/wait.h>
 #endif
 
@@ -66,13 +67,14 @@ extern char** environ;
 
 #if defined(__DragonFly__)      || \
     defined(__FreeBSD__)        || \
     defined(__FreeBSD_kernel__) || \
     defined(__NetBSD__)         || \
     defined(__OpenBSD__)
 # include <sys/sysctl.h>
 # include <sys/filio.h>
 # include <sys/wait.h>
 # include <sys/param.h>
 # if defined(__FreeBSD__)
+#  include <sys/cpuset.h>
 #  define uv__accept4 accept4
 # endif
 # if defined(__NetBSD__)
@@ -107,6 +109,35 @@ STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
 STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
 
 
+/* https://github.com/libuv/libuv/issues/1674 */
+int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts) {
+  struct timespec t;
+  int r;
+
+  if (ts == NULL)
+    return UV_EFAULT;
+
+  switch (clock_id) {
+    default:
+      return UV_EINVAL;
+    case UV_CLOCK_MONOTONIC:
+      r = clock_gettime(CLOCK_MONOTONIC, &t);
+      break;
+    case UV_CLOCK_REALTIME:
+      r = clock_gettime(CLOCK_REALTIME, &t);
+      break;
+  }
+
+  if (r)
+    return UV__ERR(errno);
+
+  ts->tv_sec = t.tv_sec;
+  ts->tv_nsec = t.tv_nsec;
+
+  return 0;
+}
+
+
 uint64_t uv_hrtime(void) {
   return uv__hrtime(UV_CLOCK_PRECISE);
 }
@@ -232,10 +263,10 @@ int uv__getiovmax(void) {
 #if defined(IOV_MAX)
   return IOV_MAX;
 #elif defined(_SC_IOV_MAX)
-  static int iovmax_cached = -1;
+  static _Atomic int iovmax_cached = -1;
   int iovmax;
 
-  iovmax = uv__load_relaxed(&iovmax_cached);
+  iovmax = atomic_load_explicit(&iovmax_cached, memory_order_relaxed);
   if (iovmax != -1)
     return iovmax;
 
@@ -247,7 +278,7 @@ int uv__getiovmax(void) {
   if (iovmax == -1)
     iovmax = 1;
 
-  uv__store_relaxed(&iovmax_cached, iovmax);
+  atomic_store_explicit(&iovmax_cached, iovmax, memory_order_relaxed);
 
   return iovmax;
 #else
@@ -360,6 +391,7 @@ static int uv__backend_timeout(const uv_loop_t* loop) {
       (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
       QUEUE_EMPTY(&loop->pending_queue) &&
       QUEUE_EMPTY(&loop->idle_handles) &&
      (loop->flags & UV_LOOP_REAP_CHILDREN) == 0 &&
      loop->closing_handles == NULL)
    return uv__next_timeout(loop);
  return 0;
@@ -388,10 +420,17 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
   if (!r)
     uv__update_time(loop);
 
-  while (r != 0 && loop->stop_flag == 0) {
-    uv__update_time(loop);
+  /* Maintain backwards compatibility by processing timers before entering the
+   * while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed
+   * once, which should be done after polling in order to maintain proper
+   * execution order of the conceptual event loop. */
+  if (mode == UV_RUN_DEFAULT) {
+    if (r)
+      uv__update_time(loop);
+    uv__run_timers(loop);
+  }
+
+  while (r != 0 && loop->stop_flag == 0) {
     can_sleep =
         QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->idle_handles);
 
@@ -403,6 +442,8 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
     if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
       timeout = uv__backend_timeout(loop);
 
+    uv__metrics_inc_loop_count(loop);
+
     uv__io_poll(loop, timeout);
 
     /* Process immediate callbacks (e.g. write_cb) a small fixed number of
@@ -420,18 +461,8 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
     uv__run_check(loop);
     uv__run_closing_handles(loop);
 
-    if (mode == UV_RUN_ONCE) {
-      /* UV_RUN_ONCE implies forward progress: at least one callback must have
-       * been invoked when it returns. uv__io_poll() can return without doing
-       * I/O (meaning: no callbacks) when its timeout expires - which means we
-       * have pending timers that satisfy the forward progress constraint.
-       *
-       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
-       * the check.
-       */
-      uv__update_time(loop);
-      uv__run_timers(loop);
-    }
+    uv__update_time(loop);
+    uv__run_timers(loop);
 
     r = uv__loop_alive(loop);
     if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
@@ -867,11 +898,6 @@ void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
   w->fd = fd;
   w->events = 0;
   w->pevents = 0;
-
-#if defined(UV_HAVE_KQUEUE)
-  w->rcount = 0;
-  w->wcount = 0;
-#endif /* defined(UV_HAVE_KQUEUE) */
 }
 
 
@@ -991,6 +1017,15 @@ int uv_getrusage(uv_rusage_t* rusage) {
   rusage->ru_nivcsw = usage.ru_nivcsw;
 #endif
 
+  /* Most platforms report ru_maxrss in kilobytes; macOS and Solaris are
+   * the outliers because of course they are.
+   */
+#if defined(__APPLE__) && !TARGET_OS_IPHONE
+  rusage->ru_maxrss /= 1024;                  /* macOS reports bytes. */
+#elif defined(__sun)
+  rusage->ru_maxrss /= getpagesize() / 1024;  /* Solaris reports pages. */
+#endif
+
   return 0;
 }
 
@@ -1090,8 +1125,8 @@ int uv_os_homedir(char* buffer, size_t* size) {
   if (r != UV_ENOENT)
     return r;
 
-  /* HOME is not set, so call uv__getpwuid_r() */
-  r = uv__getpwuid_r(&pwd);
+  /* HOME is not set, so call uv_os_get_passwd() */
+  r = uv_os_get_passwd(&pwd);
 
   if (r != 0) {
     return r;
@@ -1164,11 +1199,10 @@ return_buffer:
 }
 
 
-int uv__getpwuid_r(uv_passwd_t* pwd) {
+static int uv__getpwuid_r(uv_passwd_t *pwd, uid_t uid) {
   struct passwd pw;
   struct passwd* result;
   char* buf;
-  uid_t uid;
   size_t bufsize;
   size_t name_size;
   size_t homedir_size;
@@ -1178,8 +1212,6 @@ int uv__getpwuid_r(uv_passwd_t* pwd) {
   if (pwd == NULL)
     return UV_EINVAL;
 
-  uid = geteuid();
-
   /* Calling sysconf(_SC_GETPW_R_SIZE_MAX) would get the suggested size, but it
    * is frequently 1024 or 4096, so we can just use that directly. The pwent
    * will not usually be large. */
@@ -1238,24 +1270,93 @@
 }
 
 
-void uv_os_free_passwd(uv_passwd_t* pwd) {
-  if (pwd == NULL)
-    return;
-
-  /*
-    The memory for name, shell, and homedir are allocated in a single
-    uv__malloc() call. The base of the pointer is stored in pwd->username, so
-    that is the field that needs to be freed.
-  */
-  uv__free(pwd->username);
-  pwd->username = NULL;
-  pwd->shell = NULL;
-  pwd->homedir = NULL;
+int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) {
+  struct group gp;
+  struct group* result;
+  char* buf;
+  char* gr_mem;
+  size_t bufsize;
+  size_t name_size;
+  long members;
+  size_t mem_size;
+  int r;
+
+  if (grp == NULL)
+    return UV_EINVAL;
+
+  /* Calling sysconf(_SC_GETGR_R_SIZE_MAX) would get the suggested size, but it
+   * is frequently 1024 or 4096, so we can just use that directly. The pwent
+   * will not usually be large. */
+  for (bufsize = 2000;; bufsize *= 2) {
+    buf = uv__malloc(bufsize);
+
+    if (buf == NULL)
+      return UV_ENOMEM;
+
+    do
+      r = getgrgid_r(gid, &gp, buf, bufsize, &result);
+    while (r == EINTR);
+
+    if (r != 0 || result == NULL)
+      uv__free(buf);
+
+    if (r != ERANGE)
+      break;
+  }
+
+  if (r != 0)
+    return UV__ERR(r);
+
+  if (result == NULL)
+    return UV_ENOENT;
+
+  /* Allocate memory for the groupname and members. */
+  name_size = strlen(gp.gr_name) + 1;
+  members = 0;
+  mem_size = sizeof(char*);
+  for (r = 0; gp.gr_mem[r] != NULL; r++) {
+    mem_size += strlen(gp.gr_mem[r]) + 1 + sizeof(char*);
+    members++;
+  }
+
+  gr_mem = uv__malloc(name_size + mem_size);
+  if (gr_mem == NULL) {
+    uv__free(buf);
+    return UV_ENOMEM;
+  }
+
+  /* Copy the members */
+  grp->members = (char**) gr_mem;
+  grp->members[members] = NULL;
+  gr_mem = (char*) &grp->members[members + 1];
+  for (r = 0; r < members; r++) {
+    grp->members[r] = gr_mem;
+    strcpy(gr_mem, gp.gr_mem[r]);
+    gr_mem += strlen(gr_mem) + 1;
+  }
+  assert(gr_mem == (char*)grp->members + mem_size);
+
+  /* Copy the groupname */
+  grp->groupname = gr_mem;
+  memcpy(grp->groupname, gp.gr_name, name_size);
+  gr_mem += name_size;
+
+  /* Copy the gid */
+  grp->gid = gp.gr_gid;
+
+  uv__free(buf);
+
+  return 0;
 }
 
 
 int uv_os_get_passwd(uv_passwd_t* pwd) {
-  return uv__getpwuid_r(pwd);
+  return uv__getpwuid_r(pwd, geteuid());
+}
+
+
+int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid) {
+  return uv__getpwuid_r(pwd, uid);
 }
 
 
@@ -1416,6 +1517,13 @@ uv_pid_t uv_os_getppid(void) {
   return getppid();
 }
 
+int uv_cpumask_size(void) {
+#if UV__CPU_AFFINITY_SUPPORTED
+  return CPU_SETSIZE;
+#else
+  return UV_ENOTSUP;
+#endif
+}
 
 int uv_os_getpriority(uv_pid_t pid, int* priority) {
   int r;
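A sketch exercising two of the APIs added above, uv_clock_gettime() and
uv_os_get_passwd2(); error handling is abbreviated and the output format is
arbitrary:

#include <stdio.h>
#include <unistd.h>  /* geteuid */
#include <uv.h>

int main(void) {
  uv_timespec64_t ts;
  uv_passwd_t pwd;

  if (uv_clock_gettime(UV_CLOCK_MONOTONIC, &ts) == 0)
    printf("monotonic: %lld.%09d s\n", (long long) ts.tv_sec, (int) ts.tv_nsec);

  /* Same as uv_os_get_passwd(), but takes an arbitrary uid. */
  if (uv_os_get_passwd2(&pwd, geteuid()) == 0) {
    printf("user: %s home: %s\n", pwd.username, pwd.homedir);
    uv_os_free_passwd(&pwd);
  }
  return 0;
}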
deps/libuv/src/unix/cygwin.c (vendored): 4 lines changed

@@ -51,3 +51,7 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
 uint64_t uv_get_constrained_memory(void) {
   return 0;  /* Memory constraints are unknown. */
 }
+
+uint64_t uv_get_available_memory(void) {
+  return uv_get_free_memory();
+}
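uv_get_available_memory() is the new cross-platform accessor added throughout
this commit; on platforms without a distinct "available" statistic it falls
back to uv_get_free_memory(), as here. A trivial usage sketch:

#include <stdio.h>
#include <uv.h>

int main(void) {
  printf("available: %llu bytes\n",
         (unsigned long long) uv_get_available_memory());
  return 0;
}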
deps/libuv/src/unix/darwin-stub.h (vendored): 16 lines changed

@@ -27,7 +27,6 @@
 struct CFArrayCallBacks;
 struct CFRunLoopSourceContext;
 struct FSEventStreamContext;
-struct CFRange;
 
 typedef double CFAbsoluteTime;
 typedef double CFTimeInterval;
@@ -43,23 +42,13 @@ typedef unsigned CFStringEncoding;
 typedef void* CFAllocatorRef;
 typedef void* CFArrayRef;
 typedef void* CFBundleRef;
-typedef void* CFDataRef;
 typedef void* CFDictionaryRef;
 typedef void* CFMutableDictionaryRef;
-typedef struct CFRange CFRange;
 typedef void* CFRunLoopRef;
 typedef void* CFRunLoopSourceRef;
 typedef void* CFStringRef;
 typedef void* CFTypeRef;
 typedef void* FSEventStreamRef;
 
-typedef uint32_t IOOptionBits;
-typedef unsigned int io_iterator_t;
-typedef unsigned int io_object_t;
-typedef unsigned int io_service_t;
-typedef unsigned int io_registry_entry_t;
-
-
 typedef void (*FSEventStreamCallback)(const FSEventStreamRef,
                                       void*,
                                       size_t,
@@ -80,11 +69,6 @@ struct FSEventStreamContext {
   void* pad[3];
 };
 
-struct CFRange {
-  CFIndex location;
-  CFIndex length;
-};
-
 static const CFStringEncoding kCFStringEncodingUTF8 = 0x8000100;
 static const OSStatus noErr = 0;
deps/libuv/src/unix/darwin.c (vendored): 166 lines changed

@@ -33,13 +33,10 @@
 #include <sys/sysctl.h>
 #include <unistd.h>  /* sysconf */
 
-#include "darwin-stub.h"
-
 static uv_once_t once = UV_ONCE_INIT;
 static uint64_t (*time_func)(void);
 static mach_timebase_info_data_t timebase;
 
-typedef unsigned char UInt8;
 
 int uv__platform_loop_init(uv_loop_t* loop) {
   loop->cf_state = NULL;
@@ -110,7 +107,7 @@ uint64_t uv_get_free_memory(void) {
 
   if (host_statistics(mach_host_self(), HOST_VM_INFO,
                       (host_info_t)&info, &count) != KERN_SUCCESS) {
-    return UV_EINVAL;  /* FIXME(bnoordhuis) Translate error. */
+    return 0;
   }
 
   return (uint64_t) info.free_count * sysconf(_SC_PAGESIZE);
@@ -123,7 +120,7 @@ uint64_t uv_get_total_memory(void) {
   size_t size = sizeof(info);
 
   if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
-    return UV__ERR(errno);
+    return 0;
 
   return (uint64_t) info;
 }
@@ -134,6 +131,11 @@ uint64_t uv_get_constrained_memory(void) {
 }
 
 
+uint64_t uv_get_available_memory(void) {
+  return uv_get_free_memory();
+}
+
+
 void uv_loadavg(double avg[3]) {
   struct loadavg info;
   size_t size = sizeof(info);
@@ -183,159 +185,17 @@ int uv_uptime(double* uptime) {
   return 0;
 }
 
-static int uv__get_cpu_speed(uint64_t* speed) {
-  /* IOKit */
-  void (*pIOObjectRelease)(io_object_t);
-  kern_return_t (*pIOMasterPort)(mach_port_t, mach_port_t*);
-  CFMutableDictionaryRef (*pIOServiceMatching)(const char*);
-  kern_return_t (*pIOServiceGetMatchingServices)(mach_port_t,
-                                                 CFMutableDictionaryRef,
-                                                 io_iterator_t*);
-  io_service_t (*pIOIteratorNext)(io_iterator_t);
-  CFTypeRef (*pIORegistryEntryCreateCFProperty)(io_registry_entry_t,
-                                                CFStringRef,
-                                                CFAllocatorRef,
-                                                IOOptionBits);
-
-  /* CoreFoundation */
-  CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef,
-                                            const char*,
-                                            CFStringEncoding);
-  CFStringEncoding (*pCFStringGetSystemEncoding)(void);
-  UInt8 *(*pCFDataGetBytePtr)(CFDataRef);
-  CFIndex (*pCFDataGetLength)(CFDataRef);
-  void (*pCFDataGetBytes)(CFDataRef, CFRange, UInt8*);
-  void (*pCFRelease)(CFTypeRef);
-
-  void* core_foundation_handle;
-  void* iokit_handle;
-  int err;
-
-  kern_return_t kr;
-  mach_port_t mach_port;
-  io_iterator_t it;
-  io_object_t service;
-
-  mach_port = 0;
-
-  err = UV_ENOENT;
-  core_foundation_handle = dlopen("/System/Library/Frameworks/"
-                                  "CoreFoundation.framework/"
-                                  "CoreFoundation",
-                                  RTLD_LAZY | RTLD_LOCAL);
-  iokit_handle = dlopen("/System/Library/Frameworks/IOKit.framework/"
-                        "IOKit",
-                        RTLD_LAZY | RTLD_LOCAL);
-
-  if (core_foundation_handle == NULL || iokit_handle == NULL)
-    goto out;
-
-#define V(handle, symbol)                                                     \
-  do {                                                                        \
-    *(void **)(&p ## symbol) = dlsym((handle), #symbol);                      \
-    if (p ## symbol == NULL)                                                  \
-      goto out;                                                               \
-  }                                                                           \
-  while (0)
-  V(iokit_handle, IOMasterPort);
-  V(iokit_handle, IOServiceMatching);
-  V(iokit_handle, IOServiceGetMatchingServices);
-  V(iokit_handle, IOIteratorNext);
-  V(iokit_handle, IOObjectRelease);
-  V(iokit_handle, IORegistryEntryCreateCFProperty);
-  V(core_foundation_handle, CFStringCreateWithCString);
-  V(core_foundation_handle, CFStringGetSystemEncoding);
-  V(core_foundation_handle, CFDataGetBytePtr);
-  V(core_foundation_handle, CFDataGetLength);
-  V(core_foundation_handle, CFDataGetBytes);
-  V(core_foundation_handle, CFRelease);
-#undef V
-
-#define S(s) pCFStringCreateWithCString(NULL, (s), kCFStringEncodingUTF8)
-
-  kr = pIOMasterPort(MACH_PORT_NULL, &mach_port);
-  assert(kr == KERN_SUCCESS);
-  CFMutableDictionaryRef classes_to_match
-      = pIOServiceMatching("IOPlatformDevice");
-  kr = pIOServiceGetMatchingServices(mach_port, classes_to_match, &it);
-  assert(kr == KERN_SUCCESS);
-  service = pIOIteratorNext(it);
-
-  CFStringRef device_type_str = S("device_type");
-  CFStringRef clock_frequency_str = S("clock-frequency");
-
-  while (service != 0) {
-    CFDataRef data;
-    data = pIORegistryEntryCreateCFProperty(service,
-                                            device_type_str,
-                                            NULL,
-                                            0);
-    if (data) {
-      const UInt8* raw = pCFDataGetBytePtr(data);
-      if (strncmp((char*)raw, "cpu", 3) == 0 ||
-          strncmp((char*)raw, "processor", 9) == 0) {
-        CFDataRef freq_ref;
-        freq_ref = pIORegistryEntryCreateCFProperty(service,
-                                                    clock_frequency_str,
-                                                    NULL,
-                                                    0);
-        if (freq_ref) {
-          const UInt8* freq_ref_ptr = pCFDataGetBytePtr(freq_ref);
-          CFIndex len = pCFDataGetLength(freq_ref);
-          if (len == 8)
-            memcpy(speed, freq_ref_ptr, 8);
-          else if (len == 4) {
-            uint32_t v;
-            memcpy(&v, freq_ref_ptr, 4);
-            *speed = v;
-          } else {
-            *speed = 0;
-          }
-
-          pCFRelease(freq_ref);
-          pCFRelease(data);
-          break;
-        }
-      }
-      pCFRelease(data);
-    }
-
-    service = pIOIteratorNext(it);
-  }
-
-  pIOObjectRelease(it);
-
-  err = 0;
-
-  if (device_type_str != NULL)
-    pCFRelease(device_type_str);
-  if (clock_frequency_str != NULL)
-    pCFRelease(clock_frequency_str);
-
-out:
-  if (core_foundation_handle != NULL)
-    dlclose(core_foundation_handle);
-
-  if (iokit_handle != NULL)
-    dlclose(iokit_handle);
-
-  mach_port_deallocate(mach_task_self(), mach_port);
-
-  return err;
-}
-
 int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
   unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
                multiplier = ((uint64_t)1000L / ticks);
   char model[512];
-  uint64_t cpuspeed;
   size_t size;
   unsigned int i;
   natural_t numcpus;
   mach_msg_type_number_t msg_type;
   processor_cpu_load_info_data_t *info;
   uv_cpu_info_t* cpu_info;
+  uint64_t cpuspeed;
   int err;
 
   size = sizeof(model);
   if (sysctlbyname("machdep.cpu.brand_string", &model, &size, NULL, 0) &&
@@ -343,9 +203,13 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
     return UV__ERR(errno);
   }
 
-  err = uv__get_cpu_speed(&cpuspeed);
-  if (err < 0)
-    return err;
+  cpuspeed = 0;
+  size = sizeof(cpuspeed);
+  sysctlbyname("hw.cpufrequency", &cpuspeed, &size, NULL, 0);
+  if (cpuspeed == 0)
+    /* If sysctl hw.cputype == CPU_TYPE_ARM64, the correct value is unavailable
+     * from Apple, but we can hard-code it here to a plausible value. */
+    cpuspeed = 2400000000;
 
   if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &numcpus,
                           (processor_info_array_t*)&info,
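The IOKit/dlopen machinery is replaced by a single sysctlbyname() lookup plus
a hard-coded fallback for Apple Silicon, where the hw.cpufrequency key is
absent. A standalone sketch of that query (illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <sys/sysctl.h>

int main(void) {
  uint64_t freq = 0;
  size_t size = sizeof(freq);

  /* On Apple Silicon this lookup fails and freq stays 0, which is why the
   * code above then substitutes a plausible constant. */
  sysctlbyname("hw.cpufrequency", &freq, &size, NULL, 0);
  printf("hw.cpufrequency: %llu\n", (unsigned long long) freq);
  return 0;
}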
422
deps/libuv/src/unix/epoll.c
vendored
422
deps/libuv/src/unix/epoll.c
vendored
@ -1,422 +0,0 @@
|
||||
/* Copyright libuv contributors. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to
|
||||
* deal in the Software without restriction, including without limitation the
|
||||
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
* sell copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "uv.h"
|
||||
#include "internal.h"
|
||||
#include <errno.h>
|
||||
#include <sys/epoll.h>
|
||||
|
||||
int uv__epoll_init(uv_loop_t* loop) {
|
||||
int fd;
|
||||
fd = epoll_create1(O_CLOEXEC);
|
||||
|
||||
/* epoll_create1() can fail either because it's not implemented (old kernel)
|
||||
* or because it doesn't understand the O_CLOEXEC flag.
|
||||
*/
|
||||
if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
|
||||
fd = epoll_create(256);
|
||||
|
||||
if (fd != -1)
|
||||
uv__cloexec(fd, 1);
|
||||
}
|
||||
|
||||
loop->backend_fd = fd;
|
||||
if (fd == -1)
|
||||
return UV__ERR(errno);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
|
||||
struct epoll_event* events;
|
||||
struct epoll_event dummy;
|
||||
uintptr_t i;
|
||||
uintptr_t nfds;
|
||||
|
||||
assert(loop->watchers != NULL);
|
||||
assert(fd >= 0);
|
||||
|
||||
events = (struct epoll_event*) loop->watchers[loop->nwatchers];
|
||||
nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
|
||||
if (events != NULL)
|
||||
/* Invalidate events with same file descriptor */
|
||||
for (i = 0; i < nfds; i++)
|
||||
if (events[i].data.fd == fd)
|
||||
events[i].data.fd = -1;
|
||||
|
||||
/* Remove the file descriptor from the epoll.
|
||||
* This avoids a problem where the same file description remains open
|
||||
* in another process, causing repeated junk epoll events.
|
||||
*
|
||||
* We pass in a dummy epoll_event, to work around a bug in old kernels.
|
||||
*/
|
||||
if (loop->backend_fd >= 0) {
|
||||
/* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
|
||||
* has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
|
||||
*/
|
||||
memset(&dummy, 0, sizeof(dummy));
|
||||
epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int uv__io_check_fd(uv_loop_t* loop, int fd) {
|
||||
struct epoll_event e;
|
||||
int rc;
|
||||
|
||||
memset(&e, 0, sizeof(e));
|
||||
e.events = POLLIN;
|
||||
e.data.fd = -1;
|
||||
|
||||
rc = 0;
|
||||
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
|
||||
if (errno != EEXIST)
|
||||
rc = UV__ERR(errno);
|
||||
|
||||
if (rc == 0)
|
||||
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
|
||||
abort();
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
||||
void uv__io_poll(uv_loop_t* loop, int timeout) {
|
||||
/* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
|
||||
* effectively infinite on 32 bits architectures. To avoid blocking
|
||||
* indefinitely, we cap the timeout and poll again if necessary.
|
||||
*
|
||||
* Note that "30 minutes" is a simplification because it depends on
|
||||
* the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200,
|
||||
* that being the largest value I have seen in the wild (and only once.)
|
||||
*/
|
||||
static const int max_safe_timeout = 1789569;
|
||||
static int no_epoll_pwait_cached;
|
||||
static int no_epoll_wait_cached;
|
||||
int no_epoll_pwait;
|
||||
int no_epoll_wait;
|
||||
struct epoll_event events[1024];
|
||||
struct epoll_event* pe;
|
||||
struct epoll_event e;
|
||||
int real_timeout;
|
||||
QUEUE* q;
|
||||
uv__io_t* w;
|
||||
sigset_t sigset;
|
||||
uint64_t sigmask;
|
||||
uint64_t base;
|
||||
int have_signals;
|
||||
int nevents;
|
||||
int count;
|
||||
int nfds;
|
||||
int fd;
|
||||
int op;
|
||||
int i;
|
||||
int user_timeout;
|
||||
int reset_timeout;
|
||||
|
||||
if (loop->nfds == 0) {
|
||||
assert(QUEUE_EMPTY(&loop->watcher_queue));
|
||||
return;
|
||||
}
|
||||
|
||||
memset(&e, 0, sizeof(e));
|
||||
|
||||
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
|
||||
q = QUEUE_HEAD(&loop->watcher_queue);
|
||||
QUEUE_REMOVE(q);
|
||||
QUEUE_INIT(q);
|
||||
|
||||
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
|
||||
assert(w->pevents != 0);
|
||||
assert(w->fd >= 0);
|
||||
assert(w->fd < (int) loop->nwatchers);
|
||||
|
||||
e.events = w->pevents;
|
||||
e.data.fd = w->fd;
|
||||
|
||||
if (w->events == 0)
|
||||
op = EPOLL_CTL_ADD;
|
||||
else
|
||||
op = EPOLL_CTL_MOD;
|
||||
|
||||
/* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
|
||||
* events, skip the syscall and squelch the events after epoll_wait().
|
||||
*/
|
||||
if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
|
||||
if (errno != EEXIST)
|
||||
abort();
|
||||
|
||||
assert(op == EPOLL_CTL_ADD);
|
||||
|
||||
/* We've reactivated a file descriptor that's been watched before. */
|
||||
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
|
||||
abort();
|
||||
}
|
||||
|
||||
w->events = w->pevents;
|
||||
}
|
||||
|
||||
sigmask = 0;
|
||||
if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
|
||||
sigemptyset(&sigset);
|
||||
sigaddset(&sigset, SIGPROF);
|
||||
sigmask |= 1 << (SIGPROF - 1);
|
||||
}
|
||||
|
||||
assert(timeout >= -1);
|
||||
base = loop->time;
|
||||
count = 48; /* Benchmarks suggest this gives the best throughput. */
|
||||
real_timeout = timeout;
|
||||
|
||||
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
|
||||
reset_timeout = 1;
|
||||
user_timeout = timeout;
|
||||
timeout = 0;
|
||||
} else {
|
||||
reset_timeout = 0;
|
||||
user_timeout = 0;
|
||||
}
|
||||
|
||||
/* You could argue there is a dependency between these two but
|
||||
* ultimately we don't care about their ordering with respect
|
||||
* to one another. Worst case, we make a few system calls that
|
||||
* could have been avoided because another thread already knows
|
||||
* they fail with ENOSYS. Hardly the end of the world.
|
||||
*/
|
||||
no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached);
|
||||
no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);
|
||||
|
||||
for (;;) {
|
||||
/* Only need to set the provider_entry_time if timeout != 0. The function
|
||||
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
|
||||
*/
|
||||
if (timeout != 0)
|
||||
uv__metrics_set_provider_entry_time(loop);
|
||||
|
||||
/* See the comment for max_safe_timeout for an explanation of why
|
||||
* this is necessary. Executive summary: kernel bug workaround.
|
||||
*/
|
||||
if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
|
||||
timeout = max_safe_timeout;
|
||||
|
||||
if (sigmask != 0 && no_epoll_pwait != 0)
|
||||
if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
|
||||
abort();
|
||||
|
||||
if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
|
||||
nfds = epoll_pwait(loop->backend_fd,
|
||||
events,
|
||||
ARRAY_SIZE(events),
|
||||
timeout,
|
||||
&sigset);
|
||||
      if (nfds == -1 && errno == ENOSYS) {
        uv__store_relaxed(&no_epoll_pwait_cached, 1);
        no_epoll_pwait = 1;
      }
    } else {
      nfds = epoll_wait(loop->backend_fd,
                        events,
                        ARRAY_SIZE(events),
                        timeout);
      if (nfds == -1 && errno == ENOSYS) {
        uv__store_relaxed(&no_epoll_wait_cached, 1);
        no_epoll_wait = 1;
      }
    }

    if (sigmask != 0 && no_epoll_pwait != 0)
      if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
        abort();

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      assert(timeout != -1);

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* We may have been inside the system call for longer than |timeout|
       * milliseconds so we need to update the timestamp to avoid drift.
       */
      goto update_timeout;
    }

    if (nfds == -1) {
      if (errno == ENOSYS) {
        /* epoll_wait() or epoll_pwait() failed, try the other system call. */
        assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
        continue;
      }

      if (errno != EINTR)
        abort();

      if (reset_timeout != 0) {
        timeout = user_timeout;
        reset_timeout = 0;
      }

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    have_signals = 0;
    nevents = 0;

    {
      /* Squelch a -Waddress-of-packed-member warning with gcc >= 9. */
      union {
        struct epoll_event* events;
        uv__io_t* watchers;
      } x;

      x.events = events;
      assert(loop->watchers != NULL);
      loop->watchers[loop->nwatchers] = x.watchers;
      loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    }

    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->data.fd;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         *
         * Ignore all errors because we may be racing with another thread
         * when the file descriptor is closed.
         */
        epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
        continue;
      }

      /* Give users only events they're interested in. Prevents spurious
       * callbacks when previous callback invocation in this loop has stopped
       * the current watcher. Also, filters out events that users has not
       * requested us to watch.
       */
      pe->events &= w->pevents | POLLERR | POLLHUP;

      /* Work around an epoll quirk where it sometimes reports just the
       * EPOLLERR or EPOLLHUP event. In order to force the event loop to
       * move forward, we merge in the read/write events that the watcher
       * is interested in; uv__read() and uv__write() will then deal with
       * the error or hangup in the usual fashion.
       *
       * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
       * reads the available data, calls uv_read_stop(), then sometime later
       * calls uv_read_start() again. By then, libuv has forgotten about the
       * hangup and the kernel won't report EPOLLIN again because there's
       * nothing left to read. If anything, libuv is to blame here. The
       * current hack is just a quick bandaid; to properly fix it, libuv
       * needs to remember the error/hangup event. We should get that for
       * free when we switch over to edge-triggered I/O.
       */
      if (pe->events == POLLERR || pe->events == POLLHUP)
        pe->events |=
          w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);

      if (pe->events != 0) {
        /* Run signal watchers last. This also affects child process watchers
         * because those are implemented in terms of signal watchers.
         */
        if (w == &loop->signal_io_watcher) {
          have_signals = 1;
        } else {
          uv__metrics_update_idle_time(loop);
          w->cb(loop, w, pe->events);
        }

        nevents++;
      }
    }

    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
    }

    if (have_signals != 0) {
      uv__metrics_update_idle_time(loop);
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
    }

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return; /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    real_timeout -= (loop->time - base);
    if (real_timeout <= 0)
      return;

    timeout = real_timeout;
  }
}
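The poll loop above leans on a pattern that recurs throughout this commit: probe a syscall once, then cache an ENOSYS failure in a static flag so later calls skip the probe entirely. A minimal standalone sketch of that pattern, written with the C11 atomics this release migrates to (fancy_syscall and the flag name are illustrative, not libuv identifiers):

#include <errno.h>
#include <stdatomic.h>

/* Hypothetical syscall probe; stands in for epoll_pwait() and friends. */
static long fancy_syscall(void) { errno = ENOSYS; return -1; }

static _Atomic int no_fancy_syscall; /* sticky "kernel lacks it" flag */

static long try_fancy_syscall(void) {
  if (atomic_load_explicit(&no_fancy_syscall, memory_order_relaxed)) {
    errno = ENOSYS;
    return -1; /* skip the doomed syscall on every later call */
  }

  long rc = fancy_syscall();
  if (rc == -1 && errno == ENOSYS)
    atomic_store_explicit(&no_fancy_syscall, 1, memory_order_relaxed);

  return rc;
}

memory_order_relaxed suffices because the flag is a monotonic hint; the worst case of a race is one extra doomed syscall per thread.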
33 deps/libuv/src/unix/freebsd.c vendored

@@ -91,7 +91,7 @@ uint64_t uv_get_free_memory(void) {
   size_t size = sizeof(freecount);
 
   if (sysctlbyname("vm.stats.vm.v_free_count", &freecount, &size, NULL, 0))
-    return UV__ERR(errno);
+    return 0;
 
   return (uint64_t) freecount * sysconf(_SC_PAGESIZE);
 
@@ -105,7 +105,7 @@ uint64_t uv_get_total_memory(void) {
   size_t size = sizeof(info);
 
   if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
-    return UV__ERR(errno);
+    return 0;
 
   return (uint64_t) info;
 }
@@ -116,6 +116,11 @@ uint64_t uv_get_constrained_memory(void) {
 }
 
 
+uint64_t uv_get_available_memory(void) {
+  return uv_get_free_memory();
+}
+
+
 void uv_loadavg(double avg[3]) {
   struct loadavg info;
   size_t size = sizeof(info);
@@ -264,30 +269,6 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
 }
 
 
-int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
-#if __FreeBSD__ >= 11 && !defined(__DragonFly__)
-  return sendmmsg(fd,
-                  (struct mmsghdr*) mmsg,
-                  vlen,
-                  0 /* flags */);
-#else
-  return errno = ENOSYS, -1;
-#endif
-}
-
-
-int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
-#if __FreeBSD__ >= 11 && !defined(__DragonFly__)
-  return recvmmsg(fd,
-                  (struct mmsghdr*) mmsg,
-                  vlen,
-                  0 /* flags */,
-                  NULL /* timeout */);
-#else
-  return errno = ENOSYS, -1;
-#endif
-}
-
 ssize_t
 uv__fs_copy_file_range(int fd_in,
                        off_t* off_in,
164 deps/libuv/src/unix/fs.c vendored

@@ -48,7 +48,6 @@
 
 #if defined(__DragonFly__) || \
     defined(__FreeBSD__) || \
-    defined(__FreeBSD_kernel__) || \
    defined(__OpenBSD__) || \
     defined(__NetBSD__)
 # define HAVE_PREADV 1
@@ -57,10 +56,11 @@
 #endif
 
 #if defined(__linux__)
-# include "sys/utsname.h"
+# include <sys/sendfile.h>
+# include <sys/utsname.h>
 #endif
 
-#if defined(__linux__) || defined(__sun)
+#if defined(__sun)
 # include <sys/sendfile.h>
 # include <sys/sysmacros.h>
 #endif
@@ -79,7 +79,6 @@
 #if defined(__APPLE__) || \
     defined(__DragonFly__) || \
     defined(__FreeBSD__) || \
-    defined(__FreeBSD_kernel__) || \
     defined(__OpenBSD__) || \
     defined(__NetBSD__)
 # include <sys/param.h>
@@ -256,7 +255,6 @@ static ssize_t uv__fs_futime(uv_fs_t* req) {
 #elif defined(__APPLE__) \
     || defined(__DragonFly__) \
     || defined(__FreeBSD__) \
-    || defined(__FreeBSD_kernel__) \
     || defined(__NetBSD__) \
     || defined(__OpenBSD__) \
     || defined(__sun)
@@ -311,7 +309,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
   static uv_once_t once = UV_ONCE_INIT;
   int r;
 #ifdef O_CLOEXEC
-  static int no_cloexec_support;
+  static _Atomic int no_cloexec_support;
 #endif
   static const char pattern[] = "XXXXXX";
   static const size_t pattern_size = sizeof(pattern) - 1;
@@ -336,7 +334,8 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
   uv_once(&once, uv__mkostemp_initonce);
 
 #ifdef O_CLOEXEC
-  if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) {
+  if (atomic_load_explicit(&no_cloexec_support, memory_order_relaxed) == 0 &&
+      uv__mkostemp != NULL) {
     r = uv__mkostemp(path, O_CLOEXEC);
 
     if (r >= 0)
@@ -349,7 +348,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
 
     /* We set the static variable so that next calls don't even
        try to use mkostemp. */
-    uv__store_relaxed(&no_cloexec_support, 1);
+    atomic_store_explicit(&no_cloexec_support, 1, memory_order_relaxed);
   }
 #endif  /* O_CLOEXEC */
 
@@ -459,7 +458,7 @@ static ssize_t uv__fs_preadv(uv_file fd,
 
 static ssize_t uv__fs_read(uv_fs_t* req) {
 #if defined(__linux__)
-  static int no_preadv;
+  static _Atomic int no_preadv;
 #endif
   unsigned int iovmax;
   ssize_t result;
@@ -483,19 +482,19 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
   result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
 #else
 # if defined(__linux__)
-  if (uv__load_relaxed(&no_preadv)) retry:
+  if (atomic_load_explicit(&no_preadv, memory_order_relaxed)) retry:
 # endif
   {
     result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
   }
 # if defined(__linux__)
   else {
-    result = uv__preadv(req->file,
-                        (struct iovec*)req->bufs,
-                        req->nbufs,
-                        req->off);
+    result = preadv(req->file,
+                    (struct iovec*) req->bufs,
+                    req->nbufs,
+                    req->off);
     if (result == -1 && errno == ENOSYS) {
-      uv__store_relaxed(&no_preadv, 1);
+      atomic_store_explicit(&no_preadv, 1, memory_order_relaxed);
       goto retry;
     }
   }
@@ -516,7 +515,7 @@ done:
   if (result == -1 && errno == EOPNOTSUPP) {
     struct stat buf;
     ssize_t rc;
-    rc = fstat(req->file, &buf);
+    rc = uv__fstat(req->file, &buf);
     if (rc == 0 && S_ISDIR(buf.st_mode)) {
       errno = EISDIR;
     }
@@ -527,19 +526,12 @@ done:
 }
 
 
-#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)
-#define UV_CONST_DIRENT uv__dirent_t
-#else
-#define UV_CONST_DIRENT const uv__dirent_t
-#endif
-
-
-static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) {
+static int uv__fs_scandir_filter(const uv__dirent_t* dent) {
   return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
 }
 
 
-static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) {
+static int uv__fs_scandir_sort(const uv__dirent_t** a, const uv__dirent_t** b) {
   return strcmp((*a)->d_name, (*b)->d_name);
 }
 
@@ -715,7 +707,7 @@ static ssize_t uv__fs_readlink(uv_fs_t* req) {
     /* We may not have a real PATH_MAX.  Read size of link.  */
     struct stat st;
     int ret;
-    ret = lstat(req->path, &st);
+    ret = uv__lstat(req->path, &st);
     if (ret != 0)
       return -1;
     if (!S_ISLNK(st.st_mode)) {
@@ -908,14 +900,14 @@ out:
 
 #ifdef __linux__
 static unsigned uv__kernel_version(void) {
-  static unsigned cached_version;
+  static _Atomic unsigned cached_version;
   struct utsname u;
   unsigned version;
   unsigned major;
   unsigned minor;
   unsigned patch;
 
-  version = uv__load_relaxed(&cached_version);
+  version = atomic_load_explicit(&cached_version, memory_order_relaxed);
   if (version != 0)
     return version;
 
@@ -926,7 +918,7 @@ static unsigned uv__kernel_version(void) {
     return 0;
 
   version = major * 65536 + minor * 256 + patch;
-  uv__store_relaxed(&cached_version, version);
+  atomic_store_explicit(&cached_version, version, memory_order_relaxed);
 
   return version;
 }
@@ -968,10 +960,10 @@ static int uv__is_cifs_or_smb(int fd) {
 
 static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
                                           int out_fd, size_t len) {
-  static int no_copy_file_range_support;
+  static _Atomic int no_copy_file_range_support;
   ssize_t r;
 
-  if (uv__load_relaxed(&no_copy_file_range_support)) {
+  if (atomic_load_explicit(&no_copy_file_range_support, memory_order_relaxed)) {
    errno = ENOSYS;
     return -1;
   }
@@ -990,7 +982,7 @@ static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
     errno = ENOSYS;  /* Use fallback. */
     break;
   case ENOSYS:
-    uv__store_relaxed(&no_copy_file_range_support, 1);
+    atomic_store_explicit(&no_copy_file_range_support, 1, memory_order_relaxed);
     break;
   case EPERM:
     /* It's been reported that CIFS spuriously fails.
@@ -1061,10 +1053,7 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
 
     return -1;
   }
-#elif defined(__APPLE__) || \
-      defined(__DragonFly__) || \
-      defined(__FreeBSD__) || \
-      defined(__FreeBSD_kernel__)
+#elif defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__)
   {
     off_t len;
     ssize_t r;
@@ -1088,15 +1077,6 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
 #endif
     len = 0;
     r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
-#elif defined(__FreeBSD_kernel__)
-    len = 0;
-    r = bsd_sendfile(in_fd,
-                     out_fd,
-                     req->off,
-                     req->bufsml[0].len,
-                     NULL,
-                     &len,
-                     0);
 #else
     /* The darwin sendfile takes len as an input for the length to send,
      * so make sure to initialize it with the caller's value. */
@@ -1148,7 +1128,6 @@ static ssize_t uv__fs_utime(uv_fs_t* req) {
 #elif defined(__APPLE__) \
     || defined(__DragonFly__) \
     || defined(__FreeBSD__) \
-    || defined(__FreeBSD_kernel__) \
     || defined(__NetBSD__) \
     || defined(__OpenBSD__)
   struct timeval tv[2];
@@ -1190,7 +1169,6 @@ static ssize_t uv__fs_lutime(uv_fs_t* req) {
 #elif defined(__APPLE__) || \
       defined(__DragonFly__) || \
       defined(__FreeBSD__) || \
-      defined(__FreeBSD_kernel__) || \
       defined(__NetBSD__)
   struct timeval tv[2];
   tv[0] = uv__fs_to_timeval(req->atime);
@@ -1241,10 +1219,10 @@ static ssize_t uv__fs_write(uv_fs_t* req) {
   }
 # if defined(__linux__)
   else {
-    r = uv__pwritev(req->file,
-                    (struct iovec*) req->bufs,
-                    req->nbufs,
-                    req->off);
+    r = pwritev(req->file,
+                (struct iovec*) req->bufs,
+                req->nbufs,
+                req->off);
     if (r == -1 && errno == ENOSYS) {
       no_pwritev = 1;
       goto retry;
@@ -1288,7 +1266,7 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
     return srcfd;
 
   /* Get the source file's mode. */
-  if (fstat(srcfd, &src_statsbuf)) {
+  if (uv__fstat(srcfd, &src_statsbuf)) {
     err = UV__ERR(errno);
     goto out;
   }
@@ -1316,7 +1294,7 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
      destination are not the same file. If they are the same, bail out early. */
   if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
     /* Get the destination file's mode. */
-    if (fstat(dstfd, &dst_statsbuf)) {
+    if (uv__fstat(dstfd, &dst_statsbuf)) {
       err = UV__ERR(errno);
       goto out;
     }
@@ -1330,7 +1308,19 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
     /* Truncate the file in case the destination already existed. */
     if (ftruncate(dstfd, 0) != 0) {
       err = UV__ERR(errno);
-      goto out;
+
+      /* ftruncate() on ceph-fuse fails with EACCES when the file is created
+       * with read only permissions. Since ftruncate() on a newly created
+       * file is a meaningless operation anyway, detect that condition
+       * and squelch the error.
+       */
+      if (err != UV_EACCES)
+        goto out;
+
+      if (dst_statsbuf.st_size > 0)
+        goto out;
+
+      err = 0;
     }
   }
 
@@ -1514,14 +1504,14 @@ static int uv__fs_statx(int fd,
                         uv_stat_t* buf) {
   STATIC_ASSERT(UV_ENOSYS != -1);
 #ifdef __linux__
-  static int no_statx;
+  static _Atomic int no_statx;
   struct uv__statx statxbuf;
   int dirfd;
   int flags;
   int mode;
   int rc;
 
-  if (uv__load_relaxed(&no_statx))
+  if (atomic_load_explicit(&no_statx, memory_order_relaxed))
     return UV_ENOSYS;
 
   dirfd = AT_FDCWD;
@@ -1555,30 +1545,11 @@ static int uv__fs_statx(int fd,
      * implemented, rc might return 1 with 0 set as the error code in which
     * case we return ENOSYS.
      */
-    uv__store_relaxed(&no_statx, 1);
+    atomic_store_explicit(&no_statx, 1, memory_order_relaxed);
     return UV_ENOSYS;
   }
 
-  buf->st_dev = makedev(statxbuf.stx_dev_major, statxbuf.stx_dev_minor);
-  buf->st_mode = statxbuf.stx_mode;
-  buf->st_nlink = statxbuf.stx_nlink;
-  buf->st_uid = statxbuf.stx_uid;
-  buf->st_gid = statxbuf.stx_gid;
-  buf->st_rdev = makedev(statxbuf.stx_rdev_major, statxbuf.stx_rdev_minor);
-  buf->st_ino = statxbuf.stx_ino;
-  buf->st_size = statxbuf.stx_size;
-  buf->st_blksize = statxbuf.stx_blksize;
-  buf->st_blocks = statxbuf.stx_blocks;
-  buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec;
-  buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec;
-  buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec;
-  buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec;
-  buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec;
-  buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec;
-  buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec;
-  buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec;
-  buf->st_flags = 0;
-  buf->st_gen = 0;
+  uv__statx_to_stat(&statxbuf, buf);
 
   return 0;
 #else
@@ -1595,7 +1566,7 @@ static int uv__fs_stat(const char *path, uv_stat_t *buf) {
   if (ret != UV_ENOSYS)
     return ret;
 
-  ret = stat(path, &pbuf);
+  ret = uv__stat(path, &pbuf);
   if (ret == 0)
     uv__to_stat(&pbuf, buf);
 
@@ -1611,7 +1582,7 @@ static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
   if (ret != UV_ENOSYS)
     return ret;
 
-  ret = lstat(path, &pbuf);
+  ret = uv__lstat(path, &pbuf);
   if (ret == 0)
     uv__to_stat(&pbuf, buf);
 
@@ -1627,7 +1598,7 @@ static int uv__fs_fstat(int fd, uv_stat_t *buf) {
   if (ret != UV_ENOSYS)
     return ret;
 
-  ret = fstat(fd, &pbuf);
+  ret = uv__fstat(fd, &pbuf);
   if (ret == 0)
     uv__to_stat(&pbuf, buf);
 
@@ -1822,6 +1793,9 @@ int uv_fs_chown(uv_loop_t* loop,
 int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
   INIT(CLOSE);
   req->file = file;
+  if (cb != NULL)
+    if (uv__iou_fs_close(loop, req))
+      return 0;
   POST;
 }
 
@@ -1869,6 +1843,9 @@ int uv_fs_lchown(uv_loop_t* loop,
 int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
   INIT(FDATASYNC);
   req->file = file;
+  if (cb != NULL)
+    if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* IORING_FSYNC_DATASYNC */ 1))
+      return 0;
   POST;
 }
 
@@ -1876,6 +1853,9 @@ int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
 int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
   INIT(FSTAT);
   req->file = file;
+  if (cb != NULL)
+    if (uv__iou_fs_statx(loop, req, /* is_fstat */ 1, /* is_lstat */ 0))
+      return 0;
   POST;
 }
 
@@ -1883,6 +1863,9 @@ int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
 int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
   INIT(FSYNC);
   req->file = file;
+  if (cb != NULL)
+    if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* no flags */ 0))
+      return 0;
   POST;
 }
 
@@ -1929,6 +1912,9 @@ int uv_fs_lutime(uv_loop_t* loop,
 int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
   INIT(LSTAT);
   PATH;
+  if (cb != NULL)
+    if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 1))
+      return 0;
   POST;
 }
 
@@ -1990,6 +1976,9 @@ int uv_fs_open(uv_loop_t* loop,
   PATH;
   req->flags = flags;
   req->mode = mode;
+  if (cb != NULL)
+    if (uv__iou_fs_open(loop, req))
+      return 0;
   POST;
 }
 
@@ -2018,6 +2007,11 @@ int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
   memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
 
   req->off = off;
+
+  if (cb != NULL)
+    if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 1))
+      return 0;
+
   POST;
 }
 
@@ -2125,6 +2119,9 @@ int uv_fs_sendfile(uv_loop_t* loop,
 int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
   INIT(STAT);
   PATH;
+  if (cb != NULL)
+    if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 0))
+      return 0;
   POST;
 }
 
@@ -2188,6 +2185,11 @@ int uv_fs_write(uv_loop_t* loop,
   memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
 
   req->off = off;
+
+  if (cb != NULL)
+    if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 0))
+      return 0;
+
   POST;
 }
 
@@ -2196,7 +2198,7 @@ void uv_fs_req_cleanup(uv_fs_t* req) {
   if (req == NULL)
     return;
 
-  /* Only necessary for asychronous requests, i.e., requests with a callback.
+  /* Only necessary for asynchronous requests, i.e., requests with a callback.
    * Synchronous ones don't copy their arguments and have req->path and
    * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
    * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.
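Every uv_fs_* entry point touched above gains the same preamble: only asynchronous requests (cb != NULL) try io_uring, and a nonzero return from the uv__iou_fs_*() helper means the request was queued on the ring; otherwise control falls through to the existing thread-pool POST path. A sketch of that control flow under stated assumptions (submit_on_ring and submit_to_threadpool are illustrative stand-ins, not libuv functions):

#include <stddef.h>

/* Hypothetical stand-ins for illustration only. */
static int submit_on_ring(void) { return 0; }       /* 0: ring unavailable */
static int submit_to_threadpool(void) { return 0; } /* always accepted */

/* Shape of the dispatch added above: io_uring is tried first for async
 * requests; synchronous calls and kernels without ring support take the
 * old thread-pool path unchanged. */
static int fs_dispatch(int has_cb) {
  if (has_cb)
    if (submit_on_ring())
      return 0;                   /* request queued on io_uring */
  return submit_to_threadpool();  /* sync call or no ring support */
}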
31 deps/libuv/src/unix/fsevents.c vendored

@@ -132,7 +132,6 @@ static void (*pCFRunLoopWakeUp)(CFRunLoopRef);
 static CFStringRef (*pCFStringCreateWithFileSystemRepresentation)(
     CFAllocatorRef,
     const char*);
-static CFStringEncoding (*pCFStringGetSystemEncoding)(void);
 static CFStringRef (*pkCFRunLoopDefaultMode);
 static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef,
                                                 FSEventStreamCallback,
@@ -141,7 +140,6 @@ static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef,
     FSEventStreamEventId,
     CFTimeInterval,
     FSEventStreamCreateFlags);
-static void (*pFSEventStreamFlushSync)(FSEventStreamRef);
 static void (*pFSEventStreamInvalidate)(FSEventStreamRef);
 static void (*pFSEventStreamRelease)(FSEventStreamRef);
 static void (*pFSEventStreamScheduleWithRunLoop)(FSEventStreamRef,
@@ -331,8 +329,9 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef,
 
 
 /* Runs in CF thread */
-static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) {
-  uv__cf_loop_state_t* state;
+static int uv__fsevents_create_stream(uv__cf_loop_state_t* state,
+                                      uv_loop_t* loop,
+                                      CFArrayRef paths) {
   FSEventStreamContext ctx;
   FSEventStreamRef ref;
   CFAbsoluteTime latency;
@@ -373,10 +372,7 @@ static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) {
                           flags);
   assert(ref != NULL);
 
-  state = loop->cf_state;
-  pFSEventStreamScheduleWithRunLoop(ref,
-                                    state->loop,
-                                    *pkCFRunLoopDefaultMode);
+  pFSEventStreamScheduleWithRunLoop(ref, state->loop, *pkCFRunLoopDefaultMode);
   if (!pFSEventStreamStart(ref)) {
     pFSEventStreamInvalidate(ref);
     pFSEventStreamRelease(ref);
@@ -389,11 +385,7 @@ static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) {
 
 
 /* Runs in CF thread */
-static void uv__fsevents_destroy_stream(uv_loop_t* loop) {
-  uv__cf_loop_state_t* state;
-
-  state = loop->cf_state;
-
+static void uv__fsevents_destroy_stream(uv__cf_loop_state_t* state) {
   if (state->fsevent_stream == NULL)
     return;
 
@@ -408,9 +400,9 @@ static void uv__fsevents_destroy_stream(uv_loop_t* loop) {
 
 
 /* Runs in CF thread, when there're new fsevent handles to add to stream */
-static void uv__fsevents_reschedule(uv_fs_event_t* handle,
+static void uv__fsevents_reschedule(uv__cf_loop_state_t* state,
+                                    uv_loop_t* loop,
                                     uv__cf_loop_signal_type_t type) {
-  uv__cf_loop_state_t* state;
   QUEUE* q;
   uv_fs_event_t* curr;
   CFArrayRef cf_paths;
@@ -419,7 +411,6 @@ static void uv__fsevents_reschedule(uv_fs_event_t* handle,
   int err;
   unsigned int path_count;
 
-  state = handle->loop->cf_state;
   paths = NULL;
   cf_paths = NULL;
   err = 0;
@@ -438,7 +429,7 @@ static void uv__fsevents_reschedule(uv_fs_event_t* handle,
   uv_mutex_unlock(&state->fsevent_mutex);
 
   /* Destroy previous FSEventStream */
-  uv__fsevents_destroy_stream(handle->loop);
+  uv__fsevents_destroy_stream(state);
 
   /* Any failure below will be a memory failure */
   err = UV_ENOMEM;
@@ -478,7 +469,7 @@ static void uv__fsevents_reschedule(uv_fs_event_t* handle,
       err = UV_ENOMEM;
       goto final;
     }
-    err = uv__fsevents_create_stream(handle->loop, cf_paths);
+    err = uv__fsevents_create_stream(state, loop, cf_paths);
   }
 
 final:
@@ -563,10 +554,8 @@ static int uv__fsevents_global_init(void) {
   V(core_foundation_handle, CFRunLoopStop);
   V(core_foundation_handle, CFRunLoopWakeUp);
   V(core_foundation_handle, CFStringCreateWithFileSystemRepresentation);
-  V(core_foundation_handle, CFStringGetSystemEncoding);
   V(core_foundation_handle, kCFRunLoopDefaultMode);
   V(core_services_handle, FSEventStreamCreate);
-  V(core_services_handle, FSEventStreamFlushSync);
   V(core_services_handle, FSEventStreamInvalidate);
   V(core_services_handle, FSEventStreamRelease);
   V(core_services_handle, FSEventStreamScheduleWithRunLoop);
@@ -767,7 +756,7 @@ static void uv__cf_loop_cb(void* arg) {
     if (s->handle == NULL)
       pCFRunLoopStop(state->loop);
     else
-      uv__fsevents_reschedule(s->handle, s->type);
+      uv__fsevents_reschedule(state, loop, s->type);
 
     uv__free(s);
   }
5 deps/libuv/src/unix/haiku.c vendored

@@ -84,6 +84,11 @@ uint64_t uv_get_constrained_memory(void) {
 }
 
 
+uint64_t uv_get_available_memory(void) {
+  return uv_get_free_memory();
+}
+
+
 int uv_resident_set_memory(size_t* rss) {
   area_info area;
   ssize_t cookie;
5 deps/libuv/src/unix/hurd.c vendored

@@ -165,3 +165,8 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
 uint64_t uv_get_constrained_memory(void) {
   return 0;  /* Memory constraints are unknown. */
 }
+
+
+uint64_t uv_get_available_memory(void) {
+  return uv_get_free_memory();
+}
5 deps/libuv/src/unix/ibmi.c vendored

@@ -249,6 +249,11 @@ uint64_t uv_get_constrained_memory(void) {
 }
 
 
+uint64_t uv_get_available_memory(void) {
+  return uv_get_free_memory();
+}
+
+
 void uv_loadavg(double avg[3]) {
   SSTS0200 rcvr;
 
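freebsd.c, haiku.c, hurd.c, and ibmi.c all gain the same stub above. For context, the new public API added in 1.45.0 can be exercised as below; on these platforms it currently just forwards to uv_get_free_memory():

#include <stdio.h>
#include <uv.h>

int main(void) {
  /* uv_get_available_memory() is new in libuv 1.45.0. */
  printf("available: %llu bytes\n",
         (unsigned long long) uv_get_available_memory());
  return 0;
}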
156 deps/libuv/src/unix/internal.h vendored

@@ -26,21 +26,34 @@
 
 #include <assert.h>
 #include <limits.h> /* _POSIX_PATH_MAX, PATH_MAX */
+#include <stdint.h>
 #include <stdlib.h> /* abort */
 #include <string.h> /* strrchr */
 #include <fcntl.h>  /* O_CLOEXEC and O_NONBLOCK, if supported. */
 #include <stdio.h>
 #include <errno.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
 #include <sys/types.h>
 
+#define uv__msan_unpoison(p, n) \
+  do { \
+    (void) (p); \
+    (void) (n); \
+  } while (0)
+
+#if defined(__has_feature)
+# if __has_feature(memory_sanitizer)
+#  include <sanitizer/msan_interface.h>
+#  undef uv__msan_unpoison
+#  define uv__msan_unpoison __msan_unpoison
+# endif
+#endif
+
 #if defined(__STRICT_ANSI__)
 # define inline __inline
 #endif
 
-#if defined(__linux__)
-# include "linux-syscalls.h"
-#endif /* __linux__ */
-
 #if defined(__MVS__)
 # include "os390-syscalls.h"
 #endif /* __MVS__ */
@@ -79,13 +92,11 @@
 # define UV__PATH_MAX 8192
 #endif
 
-#if defined(__ANDROID__)
-int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset);
-# ifdef pthread_sigmask
-# undef pthread_sigmask
-# endif
-# define pthread_sigmask(how, set, oldset) uv__pthread_sigmask(how, set, oldset)
-#endif
+union uv__sockaddr {
+  struct sockaddr_in6 in6;
+  struct sockaddr_in in;
+  struct sockaddr addr;
+};
 
 #define ACCESS_ONCE(type, var) \
   (*(volatile type*) &(var))
@@ -166,12 +177,42 @@ struct uv__stream_queued_fds_s {
   int fds[1];
 };
 
+#ifdef __linux__
+struct uv__statx_timestamp {
+  int64_t tv_sec;
+  uint32_t tv_nsec;
+  int32_t unused0;
+};
+
+struct uv__statx {
+  uint32_t stx_mask;
+  uint32_t stx_blksize;
+  uint64_t stx_attributes;
+  uint32_t stx_nlink;
+  uint32_t stx_uid;
+  uint32_t stx_gid;
+  uint16_t stx_mode;
+  uint16_t unused0;
+  uint64_t stx_ino;
+  uint64_t stx_size;
+  uint64_t stx_blocks;
+  uint64_t stx_attributes_mask;
+  struct uv__statx_timestamp stx_atime;
+  struct uv__statx_timestamp stx_btime;
+  struct uv__statx_timestamp stx_ctime;
+  struct uv__statx_timestamp stx_mtime;
+  uint32_t stx_rdev_major;
+  uint32_t stx_rdev_minor;
+  uint32_t stx_dev_major;
+  uint32_t stx_dev_minor;
+  uint64_t unused1[14];
+};
+#endif /* __linux__ */
+
 #if defined(_AIX) || \
     defined(__APPLE__) || \
     defined(__DragonFly__) || \
     defined(__FreeBSD__) || \
-    defined(__FreeBSD_kernel__) || \
     defined(__linux__) || \
     defined(__OpenBSD__) || \
     defined(__NetBSD__)
@@ -258,10 +299,10 @@ int uv__signal_loop_fork(uv_loop_t* loop);
 /* platform specific */
 uint64_t uv__hrtime(uv_clocktype_t type);
 int uv__kqueue_init(uv_loop_t* loop);
-int uv__epoll_init(uv_loop_t* loop);
 int uv__platform_loop_init(uv_loop_t* loop);
 void uv__platform_loop_delete(uv_loop_t* loop);
 void uv__platform_invalidate_fd(uv_loop_t* loop, int fd);
+int uv__process_init(uv_loop_t* loop);
 
 /* various */
 void uv__async_close(uv_async_t* handle);
@@ -278,7 +319,6 @@ size_t uv__thread_stack_size(void);
 void uv__udp_close(uv_udp_t* handle);
 void uv__udp_finish_close(uv_udp_t* handle);
 FILE* uv__open_file(const char* path);
-int uv__getpwuid_r(uv_passwd_t* pwd);
 int uv__search_path(const char* prog, char* buf, size_t* buflen);
 void uv__wait_children(uv_loop_t* loop);
 
@@ -289,6 +329,28 @@ int uv__random_getentropy(void* buf, size_t buflen);
 int uv__random_readpath(const char* path, void* buf, size_t buflen);
 int uv__random_sysctl(void* buf, size_t buflen);
 
+/* io_uring */
+#ifdef __linux__
+int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req);
+int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop,
+                                  uv_fs_t* req,
+                                  uint32_t fsync_flags);
+int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req);
+int uv__iou_fs_read_or_write(uv_loop_t* loop,
+                             uv_fs_t* req,
+                             int is_read);
+int uv__iou_fs_statx(uv_loop_t* loop,
+                     uv_fs_t* req,
+                     int is_fstat,
+                     int is_lstat);
+#else
+#define uv__iou_fs_close(loop, req) 0
+#define uv__iou_fs_fsync_or_fdatasync(loop, req, fsync_flags) 0
+#define uv__iou_fs_open(loop, req) 0
+#define uv__iou_fs_read_or_write(loop, req, is_read) 0
+#define uv__iou_fs_statx(loop, req, is_fstat, is_lstat) 0
+#endif
+
 #if defined(__APPLE__)
 int uv___stream_fd(const uv_stream_t* handle);
 #define uv__stream_fd(handle) (uv___stream_fd((const uv_stream_t*) (handle)))
@@ -322,8 +384,51 @@ UV_UNUSED(static char* uv__basename_r(const char* path)) {
   return s + 1;
 }
 
+UV_UNUSED(static int uv__fstat(int fd, struct stat* s)) {
+  int rc;
+
+  rc = fstat(fd, s);
+  if (rc >= 0)
+    uv__msan_unpoison(s, sizeof(*s));
+
+  return rc;
+}
+
+UV_UNUSED(static int uv__lstat(const char* path, struct stat* s)) {
+  int rc;
+
+  rc = lstat(path, s);
+  if (rc >= 0)
+    uv__msan_unpoison(s, sizeof(*s));
+
+  return rc;
+}
+
+UV_UNUSED(static int uv__stat(const char* path, struct stat* s)) {
+  int rc;
+
+  rc = stat(path, s);
+  if (rc >= 0)
+    uv__msan_unpoison(s, sizeof(*s));
+
+  return rc;
+}
+
 #if defined(__linux__)
 int uv__inotify_fork(uv_loop_t* loop, void* old_watchers);
 ssize_t
 uv__fs_copy_file_range(int fd_in,
                        off_t* off_in,
                        int fd_out,
                        off_t* off_out,
                        size_t len,
                        unsigned int flags);
+int uv__statx(int dirfd,
+              const char* path,
+              int flags,
+              unsigned int mask,
+              struct uv__statx* statxbuf);
+void uv__statx_to_stat(const struct uv__statx* statxbuf, uv_stat_t* buf);
+ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags);
 #endif
 
 typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*);
@@ -333,22 +438,6 @@ int uv__getsockpeername(const uv_handle_t* handle,
                         struct sockaddr* name,
                         int* namelen);
 
-#if defined(__linux__) || \
-    defined(__FreeBSD__) || \
-    defined(__FreeBSD_kernel__) || \
-    defined(__DragonFly__)
-#define HAVE_MMSG 1
-struct uv__mmsghdr {
-  struct msghdr msg_hdr;
-  unsigned int msg_len;
-};
-
-int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
-int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
-#else
-#define HAVE_MMSG 0
-#endif
-
 #if defined(__sun)
 #if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L
 size_t strnlen(const char* s, size_t maxlen);
@@ -365,5 +454,10 @@ uv__fs_copy_file_range(int fd_in,
                        unsigned int flags);
 #endif
 
+#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 1301000)
+#define UV__CPU_AFFINITY_SUPPORTED 1
+#else
+#define UV__CPU_AFFINITY_SUPPORTED 0
+#endif
+
 #endif /* UV_UNIX_INTERNAL_H_ */
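The uv__fstat/uv__lstat/uv__stat wrappers added above exist for MemorySanitizer: MSan cannot see the kernel writing into the stat buffer, so the result has to be unpoisoned by hand before its fields are read. A self-contained sketch of the same idea (UNPOISON and checked_stat are illustrative names, not libuv's):

#include <sys/stat.h>

/* UNPOISON is a no-op unless MemorySanitizer is active. */
#if defined(__has_feature)
# if __has_feature(memory_sanitizer)
#  include <sanitizer/msan_interface.h>
#  define UNPOISON(p, n) __msan_unpoison(p, n)
# endif
#endif
#ifndef UNPOISON
# define UNPOISON(p, n) do { (void) (p); (void) (n); } while (0)
#endif

static int checked_stat(const char* path, struct stat* s) {
  int rc = stat(path, s);
  if (rc >= 0)
    UNPOISON(s, sizeof(*s)); /* tell MSan the kernel initialized *s */
  return rc;
}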
133 deps/libuv/src/unix/kqueue.c vendored

@@ -60,7 +60,7 @@ int uv__kqueue_init(uv_loop_t* loop) {
 
 
 #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
-static int uv__has_forked_with_cfrunloop;
+static _Atomic int uv__has_forked_with_cfrunloop;
 #endif
 
 int uv__io_fork(uv_loop_t* loop) {
@@ -82,7 +82,9 @@ int uv__io_fork(uv_loop_t* loop) {
        process. So we sidestep the issue by pretending like we never
       started it in the first place.
     */
-    uv__store_relaxed(&uv__has_forked_with_cfrunloop, 1);
+    atomic_store_explicit(&uv__has_forked_with_cfrunloop,
+                          1,
+                          memory_order_relaxed);
     uv__free(loop->cf_state);
     loop->cf_state = NULL;
   }
@@ -109,7 +111,23 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) {
 }
 
 
+static void uv__kqueue_delete(int kqfd, const struct kevent *ev) {
+  struct kevent change;
+
+  EV_SET(&change, ev->ident, ev->filter, EV_DELETE, 0, 0, 0);
+
+  if (0 == kevent(kqfd, &change, 1, NULL, 0, NULL))
+    return;
+
+  if (errno == EBADF || errno == ENOENT)
+    return;
+
+  abort();
+}
+
+
 void uv__io_poll(uv_loop_t* loop, int timeout) {
+  uv__loop_internal_fields_t* lfields;
   struct kevent events[1024];
   struct kevent* ev;
   struct timespec spec;
@@ -138,6 +156,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
     return;
   }
 
+  lfields = uv__get_internal_fields(loop);
   nevents = 0;
 
   while (!QUEUE_EMPTY(&loop->watcher_queue)) {
@@ -205,7 +224,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
   base = loop->time;
   count = 48; /* Benchmarks suggest this gives the best throughput. */
 
-  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+  if (lfields->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
     user_timeout = timeout;
     timeout = 0;
@@ -228,6 +247,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
     if (pset != NULL)
       pthread_sigmask(SIG_BLOCK, pset, NULL);
 
+    /* Store the current timeout in a location that's globally accessible so
+     * other locations like uv__work_done() can determine whether the queue
+     * of events in the callback were waiting when poll was called.
+     */
+    lfields->current_timeout = timeout;
+
     nfds = kevent(loop->backend_fd,
                   events,
                   nevents,
@@ -235,6 +260,9 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
                   ARRAY_SIZE(events),
                   timeout == -1 ? NULL : &spec);
 
+    if (nfds == -1)
+      assert(errno == EINTR);
+
     if (pset != NULL)
       pthread_sigmask(SIG_UNBLOCK, pset, NULL);
 
@@ -242,36 +270,26 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
      * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
      * operating system didn't reschedule our process while in the syscall.
      */
-    SAVE_ERRNO(uv__update_time(loop));
+    uv__update_time(loop);
 
-    if (nfds == 0) {
-      if (reset_timeout != 0) {
-        timeout = user_timeout;
-        reset_timeout = 0;
-        if (timeout == -1)
-          continue;
-        if (timeout > 0)
-          goto update_timeout;
+    if (nfds == 0 || nfds == -1) {
+      /* If kqueue is empty or interrupted, we might still have children ready
+       * to reap immediately. */
+      if (loop->flags & UV_LOOP_REAP_CHILDREN) {
+        loop->flags &= ~UV_LOOP_REAP_CHILDREN;
+        uv__wait_children(loop);
+        assert((reset_timeout == 0 ? timeout : user_timeout) == 0);
+        return; /* Equivalent to fall-through behavior. */
       }
 
-      assert(timeout != -1);
-      return;
-    }
-
-    if (nfds == -1) {
-      if (errno != EINTR)
-        abort();
-
       if (reset_timeout != 0) {
         timeout = user_timeout;
         reset_timeout = 0;
-      }
-
-      if (timeout == 0)
+      } else if (nfds == 0) {
+        /* Reached the user timeout value. */
+        assert(timeout != -1);
         return;
-
-      if (timeout == -1)
-        continue;
+      }
 
       /* Interrupted by a signal. Update timeout and poll again. */
       goto update_timeout;
-    }
@@ -307,15 +325,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
     w = loop->watchers[fd];
 
     if (w == NULL) {
-      /* File descriptor that we've stopped watching, disarm it.
-       * TODO: batch up. */
-      struct kevent events[1];
-
-      EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
-      if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
-        if (errno != EBADF && errno != ENOENT)
-          abort();
-
+      /* File descriptor that we've stopped watching, disarm it. */
+      uv__kqueue_delete(loop->backend_fd, ev);
       continue;
     }
 
@@ -331,47 +342,27 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
     revents = 0;
 
     if (ev->filter == EVFILT_READ) {
-      if (w->pevents & POLLIN) {
+      if (w->pevents & POLLIN)
         revents |= POLLIN;
-        w->rcount = ev->data;
-      } else {
-        /* TODO batch up */
-        struct kevent events[1];
-        EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
-        if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
-          if (errno != ENOENT)
-            abort();
-      }
+      else
+        uv__kqueue_delete(loop->backend_fd, ev);
 
       if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
         revents |= UV__POLLRDHUP;
     }
 
     if (ev->filter == EV_OOBAND) {
-      if (w->pevents & UV__POLLPRI) {
+      if (w->pevents & UV__POLLPRI)
         revents |= UV__POLLPRI;
-        w->rcount = ev->data;
-      } else {
-        /* TODO batch up */
-        struct kevent events[1];
-        EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
-        if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
-          if (errno != ENOENT)
-            abort();
-      }
+      else
+        uv__kqueue_delete(loop->backend_fd, ev);
     }
 
     if (ev->filter == EVFILT_WRITE) {
-      if (w->pevents & POLLOUT) {
+      if (w->pevents & POLLOUT)
        revents |= POLLOUT;
-        w->wcount = ev->data;
-      } else {
-        /* TODO batch up */
-        struct kevent events[1];
-        EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
-        if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
-          if (errno != ENOENT)
-            abort();
-      }
+      else
+        uv__kqueue_delete(loop->backend_fd, ev);
     }
 
     if (ev->flags & EV_ERROR)
@@ -398,9 +389,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
       uv__wait_children(loop);
     }
 
+    uv__metrics_inc_events(loop, nevents);
     if (reset_timeout != 0) {
       timeout = user_timeout;
       reset_timeout = 0;
+      uv__metrics_inc_events_waiting(loop, nevents);
     }
 
     if (have_signals != 0) {
@@ -423,13 +416,13 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
       return;
     }
 
-update_timeout:
     if (timeout == 0)
      return;
 
     if (timeout == -1)
       continue;
 
+update_timeout:
    assert(timeout > 0);
 
     diff = loop->time - base;
@@ -541,13 +534,14 @@ int uv_fs_event_start(uv_fs_event_t* handle,
   handle->realpath_len = 0;
   handle->cf_flags = flags;
 
-  if (fstat(fd, &statbuf))
+  if (uv__fstat(fd, &statbuf))
     goto fallback;
   /* FSEvents works only with directories */
   if (!(statbuf.st_mode & S_IFDIR))
     goto fallback;
 
-  if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) {
+  if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop,
+                                memory_order_relaxed)) {
     int r;
     /* The fallback fd is no longer needed */
     uv__close_nocheckstdio(fd);
@@ -582,7 +576,8 @@ int uv_fs_event_stop(uv_fs_event_t* handle) {
   uv__handle_stop(handle);
 
 #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
-  if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop))
+  if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop,
+                                memory_order_relaxed))
     if (handle->cf_cb != NULL)
       r = uv__fsevents_close(handle);
 #endif
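The kqueue changes above fold three copy-pasted EV_DELETE blocks into one uv__kqueue_delete() helper that tolerates EBADF and ENOENT, since the descriptor may race with a close() on another thread. A standalone sketch of the same shape (kq_delete is an illustrative name):

#include <sys/types.h>
#include <sys/event.h>
#include <errno.h>
#include <stdlib.h>

/* Drop one event registration, tolerating descriptors that raced with
 * close(); any other kevent() failure is a programming error. */
static void kq_delete(int kqfd, const struct kevent* ev) {
  struct kevent change;

  EV_SET(&change, ev->ident, ev->filter, EV_DELETE, 0, 0, 0);
  if (kevent(kqfd, &change, 1, NULL, 0, NULL) == 0)
    return;

  if (errno == EBADF || errno == ENOENT)
    return; /* fd already closed or never registered: benign */

  abort();
}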
834
deps/libuv/src/unix/linux-core.c
vendored
834
deps/libuv/src/unix/linux-core.c
vendored
@ -1,834 +0,0 @@
|
||||
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to
|
||||
* deal in the Software without restriction, including without limitation the
|
||||
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
||||
* sell copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their
|
||||
* EPOLL* counterparts. We use the POLL* variants in this file because that
|
||||
* is what libuv uses elsewhere.
|
||||
*/
|
||||
|
||||
#include "uv.h"
|
||||
#include "internal.h"
|
||||
|
||||
#include <inttypes.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <assert.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include <net/if.h>
|
||||
#include <sys/epoll.h>
|
||||
#include <sys/param.h>
|
||||
#include <sys/prctl.h>
|
||||
#include <sys/sysinfo.h>
|
||||
#include <unistd.h>
|
||||
#include <fcntl.h>
|
||||
#include <time.h>
|
||||
|
||||
#define HAVE_IFADDRS_H 1
|
||||
|
||||
# if defined(__ANDROID_API__) && __ANDROID_API__ < 24
|
||||
# undef HAVE_IFADDRS_H
|
||||
#endif
|
||||
|
||||
#ifdef __UCLIBC__
|
||||
# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32
|
||||
# undef HAVE_IFADDRS_H
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_IFADDRS_H
|
||||
# include <ifaddrs.h>
|
||||
# include <sys/socket.h>
|
||||
# include <net/ethernet.h>
|
||||
# include <netpacket/packet.h>
|
||||
#endif /* HAVE_IFADDRS_H */
|
||||
|
||||
/* Available from 2.6.32 onwards. */
|
||||
#ifndef CLOCK_MONOTONIC_COARSE
|
||||
# define CLOCK_MONOTONIC_COARSE 6
|
||||
#endif
|
||||
|
||||
/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't
|
||||
* include that file because it conflicts with <time.h>. We'll just have to
|
||||
* define it ourselves.
|
||||
*/
|
||||
#ifndef CLOCK_BOOTTIME
|
||||
# define CLOCK_BOOTTIME 7
|
||||
#endif
|
||||
|
||||
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci);
|
||||
static int read_times(FILE* statfile_fp,
|
||||
unsigned int numcpus,
|
||||
uv_cpu_info_t* ci);
|
||||
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
|
||||
static uint64_t read_cpufreq(unsigned int cpunum);
|
||||
|
||||
int uv__platform_loop_init(uv_loop_t* loop) {
|
||||
|
||||
loop->inotify_fd = -1;
|
||||
loop->inotify_watchers = NULL;
|
||||
|
||||
return uv__epoll_init(loop);
|
||||
}
|
||||
|
||||
|
||||
int uv__io_fork(uv_loop_t* loop) {
|
||||
int err;
|
||||
void* old_watchers;
|
||||
|
||||
old_watchers = loop->inotify_watchers;
|
||||
|
||||
uv__close(loop->backend_fd);
|
||||
loop->backend_fd = -1;
|
||||
uv__platform_loop_delete(loop);
|
||||
|
||||
err = uv__platform_loop_init(loop);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return uv__inotify_fork(loop, old_watchers);
|
||||
}
|
||||
|
||||
|
||||
void uv__platform_loop_delete(uv_loop_t* loop) {
|
||||
if (loop->inotify_fd == -1) return;
|
||||
uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN);
|
||||
uv__close(loop->inotify_fd);
|
||||
loop->inotify_fd = -1;
|
||||
}
|
||||
|
||||
|
||||
|
||||
uint64_t uv__hrtime(uv_clocktype_t type) {
|
||||
static clock_t fast_clock_id = -1;
|
||||
struct timespec t;
|
||||
clock_t clock_id;
|
||||
|
||||
/* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has
|
||||
* millisecond granularity or better. CLOCK_MONOTONIC_COARSE is
|
||||
* serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may
|
||||
* decide to make a costly system call.
|
||||
*/
|
||||
/* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE
|
||||
* when it has microsecond granularity or better (unlikely).
|
||||
*/
|
||||
clock_id = CLOCK_MONOTONIC;
|
||||
if (type != UV_CLOCK_FAST)
|
||||
goto done;
|
||||
|
||||
clock_id = uv__load_relaxed(&fast_clock_id);
|
||||
if (clock_id != -1)
|
||||
goto done;
|
||||
|
||||
clock_id = CLOCK_MONOTONIC;
|
||||
if (0 == clock_getres(CLOCK_MONOTONIC_COARSE, &t))
|
||||
if (t.tv_nsec <= 1 * 1000 * 1000)
|
||||
clock_id = CLOCK_MONOTONIC_COARSE;
|
||||
|
||||
uv__store_relaxed(&fast_clock_id, clock_id);
|
||||
|
||||
done:
|
||||
|
||||
if (clock_gettime(clock_id, &t))
|
||||
return 0; /* Not really possible. */
|
||||
|
||||
return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
|
||||
}
|
||||
|
||||
|
||||
int uv_resident_set_memory(size_t* rss) {
|
||||
char buf[1024];
|
||||
const char* s;
|
||||
ssize_t n;
|
||||
long val;
|
||||
int fd;
|
||||
int i;
|
||||
|
||||
do
|
||||
fd = open("/proc/self/stat", O_RDONLY);
|
||||
while (fd == -1 && errno == EINTR);
|
||||
|
||||
if (fd == -1)
|
||||
return UV__ERR(errno);
|
||||
|
||||
do
|
||||
n = read(fd, buf, sizeof(buf) - 1);
|
||||
while (n == -1 && errno == EINTR);
|
||||
|
||||
uv__close(fd);
|
||||
if (n == -1)
|
||||
return UV__ERR(errno);
|
||||
buf[n] = '\0';
|
||||
|
||||
s = strchr(buf, ' ');
|
||||
if (s == NULL)
|
||||
goto err;
|
||||
|
||||
s += 1;
|
||||
if (*s != '(')
|
||||
goto err;
|
||||
|
||||
s = strchr(s, ')');
|
||||
if (s == NULL)
|
||||
goto err;
|
||||
|
||||
for (i = 1; i <= 22; i++) {
|
||||
s = strchr(s + 1, ' ');
|
||||
if (s == NULL)
|
||||
goto err;
|
||||
}
|
||||
|
||||
errno = 0;
|
||||
val = strtol(s, NULL, 10);
|
||||
if (errno != 0)
|
||||
goto err;
|
||||
if (val < 0)
|
||||
goto err;
|
||||
|
||||
*rss = val * getpagesize();
|
||||
return 0;
|
||||
|
||||
err:
|
||||
return UV_EINVAL;
|
||||
}
|
||||
|
||||
int uv_uptime(double* uptime) {
|
||||
static volatile int no_clock_boottime;
|
||||
char buf[128];
|
||||
struct timespec now;
|
||||
int r;
|
||||
|
||||
/* Try /proc/uptime first, then fallback to clock_gettime(). */
|
||||
|
||||
if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
|
||||
if (1 == sscanf(buf, "%lf", uptime))
|
||||
return 0;
|
||||
|
||||
/* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
|
||||
* (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
|
||||
* is suspended.
|
||||
*/
|
||||
if (no_clock_boottime) {
|
||||
retry_clock_gettime: r = clock_gettime(CLOCK_MONOTONIC, &now);
|
||||
}
|
||||
else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
|
||||
no_clock_boottime = 1;
|
||||
goto retry_clock_gettime;
|
||||
}
|
||||
|
||||
if (r)
|
||||
return UV__ERR(errno);
|
||||
|
||||
*uptime = now.tv_sec;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int uv__cpu_num(FILE* statfile_fp, unsigned int* numcpus) {
|
||||
unsigned int num;
|
||||
char buf[1024];
|
||||
|
||||
if (!fgets(buf, sizeof(buf), statfile_fp))
|
||||
return UV_EIO;
|
||||
|
||||
num = 0;
|
||||
while (fgets(buf, sizeof(buf), statfile_fp)) {
|
||||
if (strncmp(buf, "cpu", 3))
|
||||
break;
|
||||
num++;
|
||||
}
|
||||
|
||||
if (num == 0)
|
||||
return UV_EIO;
|
||||
|
||||
*numcpus = num;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
|
||||
unsigned int numcpus;
|
||||
uv_cpu_info_t* ci;
|
||||
int err;
|
||||
FILE* statfile_fp;
|
||||
|
||||
*cpu_infos = NULL;
|
||||
*count = 0;
|
||||
|
||||
statfile_fp = uv__open_file("/proc/stat");
|
||||
if (statfile_fp == NULL)
|
||||
return UV__ERR(errno);
|
||||
|
||||
err = uv__cpu_num(statfile_fp, &numcpus);
|
||||
if (err < 0)
|
||||
goto out;
|
||||
|
||||
err = UV_ENOMEM;
|
||||
ci = uv__calloc(numcpus, sizeof(*ci));
|
||||
if (ci == NULL)
|
||||
goto out;
|
||||
|
||||
err = read_models(numcpus, ci);
|
||||
if (err == 0)
|
||||
err = read_times(statfile_fp, numcpus, ci);
|
||||
|
||||
if (err) {
|
||||
uv_free_cpu_info(ci, numcpus);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* read_models() on x86 also reads the CPU speed from /proc/cpuinfo.
|
||||
* We don't check for errors here. Worst case, the field is left zero.
|
||||
*/
|
||||
if (ci[0].speed == 0)
|
||||
read_speeds(numcpus, ci);
|
||||
|
||||
*cpu_infos = ci;
|
||||
*count = numcpus;
|
||||
err = 0;
|
||||
|
||||
out:
|
||||
|
||||
if (fclose(statfile_fp))
|
||||
if (errno != EINTR && errno != EINPROGRESS)
|
||||
abort();
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
||||
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
|
||||
unsigned int num;
|
||||
|
||||
for (num = 0; num < numcpus; num++)
|
||||
ci[num].speed = read_cpufreq(num) / 1000;
|
||||
}
|
||||
|
||||
|
||||
/* Also reads the CPU frequency on ppc and x86. The other architectures only
|
||||
* have a BogoMIPS field, which may not be very accurate.
|
||||
*
|
||||
* Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
|
||||
*/
|
||||
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
|
||||
#if defined(__PPC__)
|
||||
static const char model_marker[] = "cpu\t\t: ";
|
||||
static const char speed_marker[] = "clock\t\t: ";
|
||||
#else
|
||||
static const char model_marker[] = "model name\t: ";
|
||||
static const char speed_marker[] = "cpu MHz\t\t: ";
|
||||
#endif
|
||||
const char* inferred_model;
|
||||
unsigned int model_idx;
|
||||
unsigned int speed_idx;
|
||||
unsigned int part_idx;
|
||||
char buf[1024];
|
||||
char* model;
|
||||
FILE* fp;
|
||||
int model_id;
|
||||
|
||||
/* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
|
||||
(void) &model_marker;
|
||||
(void) &speed_marker;
|
||||
(void) &speed_idx;
|
||||
(void) &part_idx;
|
||||
(void) &model;
|
||||
(void) &buf;
|
||||
(void) &fp;
|
||||
(void) &model_id;
|
||||
|
||||
model_idx = 0;
|
||||
speed_idx = 0;
|
||||
part_idx = 0;
|
||||
|
||||
#if defined(__arm__) || \
|
||||
defined(__i386__) || \
|
||||
defined(__mips__) || \
|
||||
defined(__aarch64__) || \
|
||||
defined(__PPC__) || \
|
||||
defined(__x86_64__)
|
||||
fp = uv__open_file("/proc/cpuinfo");
|
||||
if (fp == NULL)
|
||||
return UV__ERR(errno);
|
||||
|
||||
while (fgets(buf, sizeof(buf), fp)) {
|
||||
if (model_idx < numcpus) {
|
||||
if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
|
||||
model = buf + sizeof(model_marker) - 1;
|
||||
model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */
|
||||
if (model == NULL) {
|
||||
fclose(fp);
|
||||
return UV_ENOMEM;
|
||||
}
|
||||
ci[model_idx++].model = model;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
#if defined(__arm__) || defined(__mips__) || defined(__aarch64__)
|
||||
if (model_idx < numcpus) {
|
||||
#if defined(__arm__)
|
||||
  /* Fallback for pre-3.8 kernels. */
  static const char model_marker[] = "Processor\t: ";
#elif defined(__aarch64__)
  static const char part_marker[] = "CPU part\t: ";

  /* Adapted from: https://github.com/karelzak/util-linux */
  struct vendor_part {
    const int id;
    const char* name;
  };

  static const struct vendor_part arm_chips[] = {
    { 0x811, "ARM810" },
    { 0x920, "ARM920" },
    { 0x922, "ARM922" },
    { 0x926, "ARM926" },
    { 0x940, "ARM940" },
    { 0x946, "ARM946" },
    { 0x966, "ARM966" },
    { 0xa20, "ARM1020" },
    { 0xa22, "ARM1022" },
    { 0xa26, "ARM1026" },
    { 0xb02, "ARM11 MPCore" },
    { 0xb36, "ARM1136" },
    { 0xb56, "ARM1156" },
    { 0xb76, "ARM1176" },
    { 0xc05, "Cortex-A5" },
    { 0xc07, "Cortex-A7" },
    { 0xc08, "Cortex-A8" },
    { 0xc09, "Cortex-A9" },
    { 0xc0d, "Cortex-A17" },  /* Originally A12 */
    { 0xc0f, "Cortex-A15" },
    { 0xc0e, "Cortex-A17" },
    { 0xc14, "Cortex-R4" },
    { 0xc15, "Cortex-R5" },
    { 0xc17, "Cortex-R7" },
    { 0xc18, "Cortex-R8" },
    { 0xc20, "Cortex-M0" },
    { 0xc21, "Cortex-M1" },
    { 0xc23, "Cortex-M3" },
    { 0xc24, "Cortex-M4" },
    { 0xc27, "Cortex-M7" },
    { 0xc60, "Cortex-M0+" },
    { 0xd01, "Cortex-A32" },
    { 0xd03, "Cortex-A53" },
    { 0xd04, "Cortex-A35" },
    { 0xd05, "Cortex-A55" },
    { 0xd06, "Cortex-A65" },
    { 0xd07, "Cortex-A57" },
    { 0xd08, "Cortex-A72" },
    { 0xd09, "Cortex-A73" },
    { 0xd0a, "Cortex-A75" },
    { 0xd0b, "Cortex-A76" },
    { 0xd0c, "Neoverse-N1" },
    { 0xd0d, "Cortex-A77" },
    { 0xd0e, "Cortex-A76AE" },
    { 0xd13, "Cortex-R52" },
    { 0xd20, "Cortex-M23" },
    { 0xd21, "Cortex-M33" },
    { 0xd41, "Cortex-A78" },
    { 0xd42, "Cortex-A78AE" },
    { 0xd4a, "Neoverse-E1" },
    { 0xd4b, "Cortex-A78C" },
  };

  if (strncmp(buf, part_marker, sizeof(part_marker) - 1) == 0) {
    model = buf + sizeof(part_marker) - 1;

    errno = 0;
    model_id = strtol(model, NULL, 16);
    if ((errno != 0) || model_id < 0) {
      fclose(fp);
      return UV_EINVAL;
    }

    for (part_idx = 0; part_idx < ARRAY_SIZE(arm_chips); part_idx++) {
      if (model_id == arm_chips[part_idx].id) {
        model = uv__strdup(arm_chips[part_idx].name);
        if (model == NULL) {
          fclose(fp);
          return UV_ENOMEM;
        }
        ci[model_idx++].model = model;
        break;
      }
    }
  }
#else  /* defined(__mips__) */
  static const char model_marker[] = "cpu model\t\t: ";
#endif

  if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
    model = buf + sizeof(model_marker) - 1;
    model = uv__strndup(model, strlen(model) - 1);  /* Strip newline. */
    if (model == NULL) {
      fclose(fp);
      return UV_ENOMEM;
    }
    ci[model_idx++].model = model;
    continue;
  }
  }
#else  /* !__arm__ && !__mips__ && !__aarch64__ */
    if (speed_idx < numcpus) {
      if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
        ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
        continue;
      }
    }
#endif  /* __arm__ || __mips__ || __aarch64__ */
  }

  fclose(fp);
#endif  /* __arm__ || __i386__ || __mips__ || __PPC__ || __x86_64__ || __aarch__ */

  /* Now we want to make sure that all the models contain *something* because
   * it's not safe to leave them as null. Copy the last entry unless there
   * isn't one, in that case we simply put "unknown" into everything.
   */
  inferred_model = "unknown";
  if (model_idx > 0)
    inferred_model = ci[model_idx - 1].model;

  while (model_idx < numcpus) {
    model = uv__strndup(inferred_model, strlen(inferred_model));
    if (model == NULL)
      return UV_ENOMEM;
    ci[model_idx++].model = model;
  }

  return 0;
}

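The model and speed fields filled in above surface through libuv's public uv_cpu_info() API. A minimal caller sketch (standard libuv API, error handling trimmed; not part of this commit):

#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_cpu_info_t* cpus;
  int count;
  int i;

  if (uv_cpu_info(&cpus, &count))  /* Returns 0 on success. */
    return 1;
  for (i = 0; i < count; i++)
    printf("cpu %d: %s @ %d MHz\n", i, cpus[i].model, cpus[i].speed);
  uv_free_cpu_info(cpus, count);  /* Frees the strdup'd model strings. */
  return 0;
}
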
static int read_times(FILE* statfile_fp,
                      unsigned int numcpus,
                      uv_cpu_info_t* ci) {
  struct uv_cpu_times_s ts;
  unsigned int ticks;
  unsigned int multiplier;
  uint64_t user;
  uint64_t nice;
  uint64_t sys;
  uint64_t idle;
  uint64_t dummy;
  uint64_t irq;
  uint64_t num;
  uint64_t len;
  char buf[1024];

  ticks = (unsigned int)sysconf(_SC_CLK_TCK);
  assert(ticks != (unsigned int) -1);
  assert(ticks != 0);
  multiplier = ((uint64_t)1000L / ticks);

  rewind(statfile_fp);

  if (!fgets(buf, sizeof(buf), statfile_fp))
    abort();

  num = 0;

  while (fgets(buf, sizeof(buf), statfile_fp)) {
    if (num >= numcpus)
      break;

    if (strncmp(buf, "cpu", 3))
      break;

    /* skip "cpu<num> " marker */
    {
      unsigned int n;
      int r = sscanf(buf, "cpu%u ", &n);
      assert(r == 1);
      (void) r;  /* silence build warning */
      for (len = sizeof("cpu0"); n /= 10; len++);
    }

    /* Line contains user, nice, system, idle, iowait, irq, softirq, steal,
     * guest, guest_nice but we're only interested in the first four + irq.
     *
     * Don't use %*s to skip fields or %ll to read straight into the uint64_t
     * fields, they're not allowed in C89 mode.
     */
    if (6 != sscanf(buf + len,
                    "%" PRIu64 " %" PRIu64 " %" PRIu64
                    "%" PRIu64 " %" PRIu64 " %" PRIu64,
                    &user,
                    &nice,
                    &sys,
                    &idle,
                    &dummy,
                    &irq))
      abort();

    ts.user = user * multiplier;
    ts.nice = nice * multiplier;
    ts.sys = sys * multiplier;
    ts.idle = idle * multiplier;
    ts.irq = irq * multiplier;
    ci[num++].cpu_times = ts;
  }
  assert(num == numcpus);

  return 0;
}

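A standalone illustration of the same /proc/stat parse (hypothetical input line and field subset; PRIu64/SCNu64 are fine here because this sketch does not target C89):

#include <inttypes.h>
#include <stdio.h>

int main(void) {
  /* First token is "cpuN"; the next four fields are user, nice, system,
   * idle, in clock ticks (see sysconf(_SC_CLK_TCK)). */
  const char* line = "cpu0 4705 150 1120 16250856 ...";
  uint64_t user, nice, sys, idle;

  if (sscanf(line, "cpu%*u %" SCNu64 " %" SCNu64 " %" SCNu64 " %" SCNu64,
             &user, &nice, &sys, &idle) == 4)
    printf("user=%" PRIu64 " idle=%" PRIu64 "\n", user, idle);
  return 0;
}
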
static uint64_t read_cpufreq(unsigned int cpunum) {
  uint64_t val;
  char buf[1024];
  FILE* fp;

  snprintf(buf,
           sizeof(buf),
           "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq",
           cpunum);

  fp = uv__open_file(buf);
  if (fp == NULL)
    return 0;

  if (fscanf(fp, "%" PRIu64, &val) != 1)
    val = 0;

  fclose(fp);

  return val;
}

#ifdef HAVE_IFADDRS_H
static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
  if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
    return 1;
  if (ent->ifa_addr == NULL)
    return 1;
  /*
   * On Linux getifaddrs returns information related to the raw underlying
   * devices. We're not interested in this information yet.
   */
  if (ent->ifa_addr->sa_family == PF_PACKET)
    return exclude_type;
  return !exclude_type;
}
#endif

int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
#ifndef HAVE_IFADDRS_H
  *count = 0;
  *addresses = NULL;
  return UV_ENOSYS;
#else
  struct ifaddrs *addrs, *ent;
  uv_interface_address_t* address;
  int i;
  struct sockaddr_ll *sll;

  *count = 0;
  *addresses = NULL;

  if (getifaddrs(&addrs))
    return UV__ERR(errno);

  /* Count the number of interfaces */
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
      continue;

    (*count)++;
  }

  if (*count == 0) {
    freeifaddrs(addrs);
    return 0;
  }

  /* Make sure the memory is initiallized to zero using calloc() */
  *addresses = uv__calloc(*count, sizeof(**addresses));
  if (!(*addresses)) {
    freeifaddrs(addrs);
    return UV_ENOMEM;
  }

  address = *addresses;

  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
      continue;

    address->name = uv__strdup(ent->ifa_name);

    if (ent->ifa_addr->sa_family == AF_INET6) {
      address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
    } else {
      address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
    }

    if (ent->ifa_netmask->sa_family == AF_INET6) {
      address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
    } else {
      address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
    }

    address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);

    address++;
  }

  /* Fill in physical addresses for each interface */
  for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
    if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
      continue;

    address = *addresses;

    for (i = 0; i < (*count); i++) {
      size_t namelen = strlen(ent->ifa_name);
      /* Alias interface share the same physical address */
      if (strncmp(address->name, ent->ifa_name, namelen) == 0 &&
          (address->name[namelen] == 0 || address->name[namelen] == ':')) {
        sll = (struct sockaddr_ll*)ent->ifa_addr;
        memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr));
      }
      address++;
    }
  }

  freeifaddrs(addrs);

  return 0;
#endif
}


void uv_free_interface_addresses(uv_interface_address_t* addresses,
                                 int count) {
  int i;

  for (i = 0; i < count; i++) {
    uv__free(addresses[i].name);
  }

  uv__free(addresses);
}

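A minimal consumer of this API (standard libuv calls; address formatting via uv_ip4_name/uv_ip6_name elided for brevity):

#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_interface_address_t* addrs;
  int count;
  int i;

  if (uv_interface_addresses(&addrs, &count))
    return 1;
  for (i = 0; i < count; i++)
    printf("%s%s\n", addrs[i].name,
           addrs[i].is_internal ? " (internal)" : "");
  uv_free_interface_addresses(addrs, count);
  return 0;
}
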
void uv__set_process_title(const char* title) {
#if defined(PR_SET_NAME)
  prctl(PR_SET_NAME, title);  /* Only copies first 16 characters. */
#endif
}


static uint64_t uv__read_proc_meminfo(const char* what) {
  uint64_t rc;
  char* p;
  char buf[4096];  /* Large enough to hold all of /proc/meminfo. */

  if (uv__slurp("/proc/meminfo", buf, sizeof(buf)))
    return 0;

  p = strstr(buf, what);

  if (p == NULL)
    return 0;

  p += strlen(what);

  rc = 0;
  sscanf(p, "%" PRIu64 " kB", &rc);

  return rc * 1024;
}


uint64_t uv_get_free_memory(void) {
  struct sysinfo info;
  uint64_t rc;

  rc = uv__read_proc_meminfo("MemAvailable:");

  if (rc != 0)
    return rc;

  if (0 == sysinfo(&info))
    return (uint64_t) info.freeram * info.mem_unit;

  return 0;
}


uint64_t uv_get_total_memory(void) {
  struct sysinfo info;
  uint64_t rc;

  rc = uv__read_proc_meminfo("MemTotal:");

  if (rc != 0)
    return rc;

  if (0 == sysinfo(&info))
    return (uint64_t) info.totalram * info.mem_unit;

  return 0;
}


static uint64_t uv__read_cgroups_uint64(const char* cgroup, const char* param) {
  char filename[256];
  char buf[32];  /* Large enough to hold an encoded uint64_t. */
  uint64_t rc;

  rc = 0;
  snprintf(filename, sizeof(filename), "/sys/fs/cgroup/%s/%s", cgroup, param);
  if (0 == uv__slurp(filename, buf, sizeof(buf)))
    sscanf(buf, "%" PRIu64, &rc);

  return rc;
}


uint64_t uv_get_constrained_memory(void) {
  /*
   * This might return 0 if there was a problem getting the memory limit from
   * cgroups. This is OK because a return value of 0 signifies that the memory
   * limit is unknown.
   */
  return uv__read_cgroups_uint64("memory", "memory.limit_in_bytes");
}

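The 0-means-unknown convention above pushes the fallback onto the caller; a typical caller-side pattern (a sketch, not code from this commit):

#include <uv.h>

/* Effective memory budget: the cgroup limit when one is known and smaller
 * than physical RAM, otherwise physical RAM. */
static uint64_t effective_memory(void) {
  uint64_t constrained = uv_get_constrained_memory();
  uint64_t total = uv_get_total_memory();

  if (constrained != 0 && constrained < total)
    return constrained;
  return total;
}
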
void uv_loadavg(double avg[3]) {
  struct sysinfo info;
  char buf[128];  /* Large enough to hold all of /proc/loadavg. */

  if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf)))
    if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]))
      return;

  if (sysinfo(&info) < 0)
    return;

  avg[0] = (double) info.loads[0] / 65536.0;
  avg[1] = (double) info.loads[1] / 65536.0;
  avg[2] = (double) info.loads[2] / 65536.0;
}
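The 65536 divisor is the sysinfo fixed-point scale (on Linux, 1 << SI_LOAD_SHIFT with SI_LOAD_SHIFT == 16). A direct sysinfo sketch of the same conversion:

#include <stdio.h>
#include <sys/sysinfo.h>

int main(void) {
  struct sysinfo info;

  if (sysinfo(&info) == 0)
    /* loads[] is fixed point with 16 fractional bits (1 << SI_LOAD_SHIFT). */
    printf("1-minute load: %.2f\n", info.loads[0] / 65536.0);
  return 0;
}
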
deps/libuv/src/unix/linux-inotify.c (vendored, 327 lines)
@@ -1,327 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "uv/tree.h"
#include "internal.h"

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#include <sys/inotify.h>
#include <sys/types.h>
#include <unistd.h>

struct watcher_list {
  RB_ENTRY(watcher_list) entry;
  QUEUE watchers;
  int iterating;
  char* path;
  int wd;
};

struct watcher_root {
  struct watcher_list* rbh_root;
};
#define CAST(p) ((struct watcher_root*)(p))


static int compare_watchers(const struct watcher_list* a,
                            const struct watcher_list* b) {
  if (a->wd < b->wd) return -1;
  if (a->wd > b->wd) return 1;
  return 0;
}


RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers)


static void uv__inotify_read(uv_loop_t* loop,
                             uv__io_t* w,
                             unsigned int revents);

static void maybe_free_watcher_list(struct watcher_list* w,
                                    uv_loop_t* loop);

static int init_inotify(uv_loop_t* loop) {
  int fd;

  if (loop->inotify_fd != -1)
    return 0;

  fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
  if (fd < 0)
    return UV__ERR(errno);

  loop->inotify_fd = fd;
  uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd);
  uv__io_start(loop, &loop->inotify_read_watcher, POLLIN);

  return 0;
}


int uv__inotify_fork(uv_loop_t* loop, void* old_watchers) {
  /* Open the inotify_fd, and re-arm all the inotify watchers. */
  int err;
  struct watcher_list* tmp_watcher_list_iter;
  struct watcher_list* watcher_list;
  struct watcher_list tmp_watcher_list;
  QUEUE queue;
  QUEUE* q;
  uv_fs_event_t* handle;
  char* tmp_path;

  if (old_watchers != NULL) {
    /* We must restore the old watcher list to be able to close items
     * out of it.
     */
    loop->inotify_watchers = old_watchers;

    QUEUE_INIT(&tmp_watcher_list.watchers);
    /* Note that the queue we use is shared with the start and stop()
     * functions, making QUEUE_FOREACH unsafe to use. So we use the
     * QUEUE_MOVE trick to safely iterate. Also don't free the watcher
     * list until we're done iterating. c.f. uv__inotify_read.
     */
    RB_FOREACH_SAFE(watcher_list, watcher_root,
                    CAST(&old_watchers), tmp_watcher_list_iter) {
      watcher_list->iterating = 1;
      QUEUE_MOVE(&watcher_list->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
        /* It's critical to keep a copy of path here, because it
         * will be set to NULL by stop() and then deallocated by
         * maybe_free_watcher_list
         */
        tmp_path = uv__strdup(handle->path);
        assert(tmp_path != NULL);
        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&watcher_list->watchers, q);
        uv_fs_event_stop(handle);

        QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers);
        handle->path = tmp_path;
      }
      watcher_list->iterating = 0;
      maybe_free_watcher_list(watcher_list, loop);
    }

    QUEUE_MOVE(&tmp_watcher_list.watchers, &queue);
    while (!QUEUE_EMPTY(&queue)) {
      q = QUEUE_HEAD(&queue);
      QUEUE_REMOVE(q);
      handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
      tmp_path = handle->path;
      handle->path = NULL;
      err = uv_fs_event_start(handle, handle->cb, tmp_path, 0);
      uv__free(tmp_path);
      if (err)
        return err;
    }
  }

  return 0;
}


static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
  struct watcher_list w;
  w.wd = wd;
  return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w);
}

static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
  /* if the watcher_list->watchers is being iterated over, we can't free it. */
  if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) {
    /* No watchers left for this path. Clean up. */
    RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w);
    inotify_rm_watch(loop->inotify_fd, w->wd);
    uv__free(w);
  }
}

static void uv__inotify_read(uv_loop_t* loop,
                             uv__io_t* dummy,
                             unsigned int events) {
  const struct inotify_event* e;
  struct watcher_list* w;
  uv_fs_event_t* h;
  QUEUE queue;
  QUEUE* q;
  const char* path;
  ssize_t size;
  const char *p;
  /* needs to be large enough for sizeof(inotify_event) + strlen(path) */
  char buf[4096];

  for (;;) {
    do
      size = read(loop->inotify_fd, buf, sizeof(buf));
    while (size == -1 && errno == EINTR);

    if (size == -1) {
      assert(errno == EAGAIN || errno == EWOULDBLOCK);
      break;
    }

    assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */

    /* Now we have one or more inotify_event structs. */
    for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
      e = (const struct inotify_event*) p;

      events = 0;
      if (e->mask & (IN_ATTRIB|IN_MODIFY))
        events |= UV_CHANGE;
      if (e->mask & ~(IN_ATTRIB|IN_MODIFY))
        events |= UV_RENAME;

      w = find_watcher(loop, e->wd);
      if (w == NULL)
        continue; /* Stale event, no watchers left. */

      /* inotify does not return the filename when monitoring a single file
       * for modifications. Repurpose the filename for API compatibility.
       * I'm not convinced this is a good thing, maybe it should go.
       */
      path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);

      /* We're about to iterate over the queue and call user's callbacks.
       * What can go wrong?
       * A callback could call uv_fs_event_stop()
       * and the queue can change under our feet.
       * So, we use QUEUE_MOVE() trick to safely iterate over the queue.
       * And we don't free the watcher_list until we're done iterating.
       *
       * First,
       * tell uv_fs_event_stop() (that could be called from a user's callback)
       * not to free watcher_list.
       */
      w->iterating = 1;
      QUEUE_MOVE(&w->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        h = QUEUE_DATA(q, uv_fs_event_t, watchers);

        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&w->watchers, q);

        h->cb(h, path, events, 0);
      }
      /* done iterating, time to (maybe) free empty watcher_list */
      w->iterating = 0;
      maybe_free_watcher_list(w, loop);
    }
  }
}


int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}


int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  struct watcher_list* w;
  size_t len;
  int events;
  int err;
  int wd;

  if (uv__is_active(handle))
    return UV_EINVAL;

  err = init_inotify(handle->loop);
  if (err)
    return err;

  events = IN_ATTRIB
         | IN_CREATE
         | IN_MODIFY
         | IN_DELETE
         | IN_DELETE_SELF
         | IN_MOVE_SELF
         | IN_MOVED_FROM
         | IN_MOVED_TO;

  wd = inotify_add_watch(handle->loop->inotify_fd, path, events);
  if (wd == -1)
    return UV__ERR(errno);

  w = find_watcher(handle->loop, wd);
  if (w)
    goto no_insert;

  len = strlen(path) + 1;
  w = uv__malloc(sizeof(*w) + len);
  if (w == NULL)
    return UV_ENOMEM;

  w->wd = wd;
  w->path = memcpy(w + 1, path, len);
  QUEUE_INIT(&w->watchers);
  w->iterating = 0;
  RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w);

no_insert:
  uv__handle_start(handle);
  QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
  handle->path = w->path;
  handle->cb = cb;
  handle->wd = wd;

  return 0;
}


int uv_fs_event_stop(uv_fs_event_t* handle) {
  struct watcher_list* w;

  if (!uv__is_active(handle))
    return 0;

  w = find_watcher(handle->loop, handle->wd);
  assert(w != NULL);

  handle->wd = -1;
  handle->path = NULL;
  uv__handle_stop(handle);
  QUEUE_REMOVE(&handle->watchers);

  maybe_free_watcher_list(w, handle->loop);

  return 0;
}


void uv__fs_event_close(uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
}
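The public surface of this (now replaced) file is the uv_fs_event_* API; a minimal watcher against the standard API (watching "." is a placeholder path):

#include <stdio.h>
#include <uv.h>

static void on_change(uv_fs_event_t* handle,
                      const char* filename,
                      int events,
                      int status) {
  if (status == 0)
    printf("%s: %s%s\n", filename ? filename : "?",
           (events & UV_CHANGE) ? "change " : "",
           (events & UV_RENAME) ? "rename" : "");
}

int main(void) {
  uv_fs_event_t watcher;

  uv_fs_event_init(uv_default_loop(), &watcher);
  uv_fs_event_start(&watcher, on_change, ".", 0);
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}
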
deps/libuv/src/unix/linux-syscalls.c (vendored, 264 lines)
@@ -1,264 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "linux-syscalls.h"
#include <unistd.h>
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <errno.h>

#if defined(__arm__)
# if defined(__thumb__) || defined(__ARM_EABI__)
#  define UV_SYSCALL_BASE 0
# else
#  define UV_SYSCALL_BASE 0x900000
# endif
#endif /* __arm__ */

#ifndef __NR_recvmmsg
# if defined(__x86_64__)
#  define __NR_recvmmsg 299
# elif defined(__arm__)
#  define __NR_recvmmsg (UV_SYSCALL_BASE + 365)
# endif
#endif /* __NR_recvmsg */

#ifndef __NR_sendmmsg
# if defined(__x86_64__)
#  define __NR_sendmmsg 307
# elif defined(__arm__)
#  define __NR_sendmmsg (UV_SYSCALL_BASE + 374)
# endif
#endif /* __NR_sendmmsg */

#ifndef __NR_utimensat
# if defined(__x86_64__)
#  define __NR_utimensat 280
# elif defined(__i386__)
#  define __NR_utimensat 320
# elif defined(__arm__)
#  define __NR_utimensat (UV_SYSCALL_BASE + 348)
# endif
#endif /* __NR_utimensat */

#ifndef __NR_preadv
# if defined(__x86_64__)
#  define __NR_preadv 295
# elif defined(__i386__)
#  define __NR_preadv 333
# elif defined(__arm__)
#  define __NR_preadv (UV_SYSCALL_BASE + 361)
# endif
#endif /* __NR_preadv */

#ifndef __NR_pwritev
# if defined(__x86_64__)
#  define __NR_pwritev 296
# elif defined(__i386__)
#  define __NR_pwritev 334
# elif defined(__arm__)
#  define __NR_pwritev (UV_SYSCALL_BASE + 362)
# endif
#endif /* __NR_pwritev */

#ifndef __NR_dup3
# if defined(__x86_64__)
#  define __NR_dup3 292
# elif defined(__i386__)
#  define __NR_dup3 330
# elif defined(__arm__)
#  define __NR_dup3 (UV_SYSCALL_BASE + 358)
# endif
#endif /* __NR_pwritev */

#ifndef __NR_copy_file_range
# if defined(__x86_64__)
#  define __NR_copy_file_range 326
# elif defined(__i386__)
#  define __NR_copy_file_range 377
# elif defined(__s390__)
#  define __NR_copy_file_range 375
# elif defined(__arm__)
#  define __NR_copy_file_range (UV_SYSCALL_BASE + 391)
# elif defined(__aarch64__)
#  define __NR_copy_file_range 285
# elif defined(__powerpc__)
#  define __NR_copy_file_range 379
# elif defined(__arc__)
#  define __NR_copy_file_range 285
# endif
#endif /* __NR_copy_file_range */

#ifndef __NR_statx
# if defined(__x86_64__)
#  define __NR_statx 332
# elif defined(__i386__)
#  define __NR_statx 383
# elif defined(__aarch64__)
#  define __NR_statx 397
# elif defined(__arm__)
#  define __NR_statx (UV_SYSCALL_BASE + 397)
# elif defined(__ppc__)
#  define __NR_statx 383
# elif defined(__s390__)
#  define __NR_statx 379
# endif
#endif /* __NR_statx */

#ifndef __NR_getrandom
# if defined(__x86_64__)
#  define __NR_getrandom 318
# elif defined(__i386__)
#  define __NR_getrandom 355
# elif defined(__aarch64__)
#  define __NR_getrandom 384
# elif defined(__arm__)
#  define __NR_getrandom (UV_SYSCALL_BASE + 384)
# elif defined(__ppc__)
#  define __NR_getrandom 359
# elif defined(__s390__)
#  define __NR_getrandom 349
# endif
#endif /* __NR_getrandom */

struct uv__mmsghdr;

int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if defined(__i386__)
  unsigned long args[4];
  int rc;

  args[0] = (unsigned long) fd;
  args[1] = (unsigned long) mmsg;
  args[2] = (unsigned long) vlen;
  args[3] = /* flags */ 0;

  /* socketcall() raises EINVAL when SYS_SENDMMSG is not supported. */
  rc = syscall(/* __NR_socketcall */ 102, 20 /* SYS_SENDMMSG */, args);
  if (rc == -1)
    if (errno == EINVAL)
      errno = ENOSYS;

  return rc;
#elif defined(__NR_sendmmsg)
  return syscall(__NR_sendmmsg, fd, mmsg, vlen, /* flags */ 0);
#else
  return errno = ENOSYS, -1;
#endif
}


int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if defined(__i386__)
  unsigned long args[5];
  int rc;

  args[0] = (unsigned long) fd;
  args[1] = (unsigned long) mmsg;
  args[2] = (unsigned long) vlen;
  args[3] = /* flags */ 0;
  args[4] = /* timeout */ 0;

  /* socketcall() raises EINVAL when SYS_RECVMMSG is not supported. */
  rc = syscall(/* __NR_socketcall */ 102, 19 /* SYS_RECVMMSG */, args);
  if (rc == -1)
    if (errno == EINVAL)
      errno = ENOSYS;

  return rc;
#elif defined(__NR_recvmmsg)
  return syscall(__NR_recvmmsg, fd, mmsg, vlen, /* flags */ 0, /* timeout */ 0);
#else
  return errno = ENOSYS, -1;
#endif
}


ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
#if !defined(__NR_preadv) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
  return errno = ENOSYS, -1;
#else
  return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
#endif
}


ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
#if !defined(__NR_pwritev) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
  return errno = ENOSYS, -1;
#else
  return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
#endif
}


int uv__dup3(int oldfd, int newfd, int flags) {
#if !defined(__NR_dup3) || defined(__ANDROID_API__) && __ANDROID_API__ < 21
  return errno = ENOSYS, -1;
#else
  return syscall(__NR_dup3, oldfd, newfd, flags);
#endif
}


ssize_t
uv__fs_copy_file_range(int fd_in,
                       off_t* off_in,
                       int fd_out,
                       off_t* off_out,
                       size_t len,
                       unsigned int flags)
{
#ifdef __NR_copy_file_range
  return syscall(__NR_copy_file_range,
                 fd_in,
                 off_in,
                 fd_out,
                 off_out,
                 len,
                 flags);
#else
  return errno = ENOSYS, -1;
#endif
}


int uv__statx(int dirfd,
              const char* path,
              int flags,
              unsigned int mask,
              struct uv__statx* statxbuf) {
#if !defined(__NR_statx) || defined(__ANDROID_API__) && __ANDROID_API__ < 30
  return errno = ENOSYS, -1;
#else
  return syscall(__NR_statx, dirfd, path, flags, mask, statxbuf);
#endif
}


ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags) {
#if !defined(__NR_getrandom) || defined(__ANDROID_API__) && __ANDROID_API__ < 28
  return errno = ENOSYS, -1;
#else
  return syscall(__NR_getrandom, buf, buflen, flags);
#endif
}
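All of these wrappers share one convention: when the kernel or libc cannot provide the call, they return -1 with errno set to ENOSYS so callers can fall back. A caller-side sketch of that contract (the fallback is illustrative, not from this commit):

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags);  /* from above */

/* Minimal /dev/urandom fallback for the sketch. */
static ssize_t read_dev_urandom(void* buf, size_t buflen) {
  FILE* fp = fopen("/dev/urandom", "r");
  size_t n;

  if (fp == NULL)
    return -1;
  n = fread(buf, 1, buflen, fp);
  fclose(fp);
  return (ssize_t) n;
}

ssize_t fill_random(void* buf, size_t buflen) {
  ssize_t rc = uv__getrandom(buf, buflen, 0);

  if (rc == -1 && errno == ENOSYS)  /* kernel or arch lacks the syscall */
    return read_dev_urandom(buf, buflen);
  return rc;
}
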
deps/libuv/src/unix/linux-syscalls.h (vendored, 78 lines)
@@ -1,78 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef UV_LINUX_SYSCALL_H_
#define UV_LINUX_SYSCALL_H_

#include <stdint.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/socket.h>

struct uv__statx_timestamp {
  int64_t tv_sec;
  uint32_t tv_nsec;
  int32_t unused0;
};

struct uv__statx {
  uint32_t stx_mask;
  uint32_t stx_blksize;
  uint64_t stx_attributes;
  uint32_t stx_nlink;
  uint32_t stx_uid;
  uint32_t stx_gid;
  uint16_t stx_mode;
  uint16_t unused0;
  uint64_t stx_ino;
  uint64_t stx_size;
  uint64_t stx_blocks;
  uint64_t stx_attributes_mask;
  struct uv__statx_timestamp stx_atime;
  struct uv__statx_timestamp stx_btime;
  struct uv__statx_timestamp stx_ctime;
  struct uv__statx_timestamp stx_mtime;
  uint32_t stx_rdev_major;
  uint32_t stx_rdev_minor;
  uint32_t stx_dev_major;
  uint32_t stx_dev_minor;
  uint64_t unused1[14];
};

ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
int uv__dup3(int oldfd, int newfd, int flags);
ssize_t
uv__fs_copy_file_range(int fd_in,
                       off_t* off_in,
                       int fd_out,
                       off_t* off_out,
                       size_t len,
                       unsigned int flags);
int uv__statx(int dirfd,
              const char* path,
              int flags,
              unsigned int mask,
              struct uv__statx* statxbuf);
ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags);

#endif /* UV_LINUX_SYSCALL_H_ */
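For reference, a hedged sketch of driving the uv__statx wrapper directly; the 0x7ff mask is assumed to be STATX_BASIC_STATS and AT_FDCWD comes from <fcntl.h> (illustrative only, not part of the commit):

#include <fcntl.h>   /* AT_FDCWD */
#include <stdio.h>
#include "linux-syscalls.h"

int main(void) {
  struct uv__statx sx;

  /* 0x7ff == STATX_BASIC_STATS: request all basic fields. */
  if (uv__statx(AT_FDCWD, ".", 0, 0x7ff, &sx) == 0)
    printf("size=%llu\n", (unsigned long long) sx.stx_size);
  return 0;
}
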
deps/libuv/src/unix/linux.c (vendored, new file, 2341 lines)
(file diff suppressed because it is too large)
deps/libuv/src/unix/loop.c (vendored, 8 lines)
@@ -45,6 +45,9 @@ int uv_loop_init(uv_loop_t* loop) {
  err = uv_mutex_init(&lfields->loop_metrics.lock);
  if (err)
    goto fail_metrics_mutex_init;
  memset(&lfields->loop_metrics.metrics,
         0,
         sizeof(lfields->loop_metrics.metrics));

  heap_init((struct heap*) &loop->timer_heap);
  QUEUE_INIT(&loop->wq);
@@ -79,12 +82,9 @@ int uv_loop_init(uv_loop_t* loop) {
    goto fail_platform_init;

  uv__signal_global_once_init();
  err = uv_signal_init(loop, &loop->child_watcher);
  err = uv__process_init(loop);
  if (err)
    goto fail_signal_init;

  uv__handle_unref(&loop->child_watcher);
  loop->child_watcher.flags |= UV_HANDLE_INTERNAL;
  QUEUE_INIT(&loop->process_handles);

  err = uv_rwlock_init(&loop->cloexec_lock);
deps/libuv/src/unix/netbsd.c (vendored, 9 lines)
@@ -103,7 +103,7 @@ uint64_t uv_get_free_memory(void) {
  int which[] = {CTL_VM, VM_UVMEXP};

  if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
    return UV__ERR(errno);
    return 0;

  return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
}
@@ -120,7 +120,7 @@ uint64_t uv_get_total_memory(void) {
  size_t size = sizeof(info);

  if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
    return UV__ERR(errno);
    return 0;

  return (uint64_t) info;
}
@@ -131,6 +131,11 @@ uint64_t uv_get_constrained_memory(void) {
}


uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}


int uv_resident_set_memory(size_t* rss) {
  kvm_t *kd = NULL;
  struct kinfo_proc2 *kinfo = NULL;
deps/libuv/src/unix/openbsd.c (vendored, 9 lines)
@@ -116,7 +116,7 @@ uint64_t uv_get_free_memory(void) {
  int which[] = {CTL_VM, VM_UVMEXP};

  if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
    return UV__ERR(errno);
    return 0;

  return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
}
@@ -128,7 +128,7 @@ uint64_t uv_get_total_memory(void) {
  size_t size = sizeof(info);

  if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
    return UV__ERR(errno);
    return 0;

  return (uint64_t) info;
}
@@ -139,6 +139,11 @@ uint64_t uv_get_constrained_memory(void) {
}


uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}


int uv_resident_set_memory(size_t* rss) {
  struct kinfo_proc kinfo;
  size_t page_size = getpagesize();
deps/libuv/src/unix/os390.c (vendored, 18 lines)
@@ -198,6 +198,11 @@ uint64_t uv_get_constrained_memory(void) {
}


uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}


int uv_resident_set_memory(size_t* rss) {
  char* ascb;
  char* rax;
@@ -803,6 +808,7 @@ static int os390_message_queue_handler(uv__os390_epoll* ep) {

void uv__io_poll(uv_loop_t* loop, int timeout) {
  static const int max_safe_timeout = 1789569;
  uv__loop_internal_fields_t* lfields;
  struct epoll_event events[1024];
  struct epoll_event* pe;
  struct epoll_event e;
@@ -825,6 +831,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
    return;
  }

  lfields = uv__get_internal_fields(loop);

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    uv_stream_t* stream;

@@ -872,7 +880,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
    int nevents = 0;
    have_signals = 0;

    if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
    if (lfields->flags & UV_METRICS_IDLE_TIME) {
      reset_timeout = 1;
      user_timeout = timeout;
      timeout = 0;
@@ -891,6 +899,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
    if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
      timeout = max_safe_timeout;

    /* Store the current timeout in a location that's globally accessible so
     * other locations like uv__work_done() can determine whether the queue
     * of events in the callback were waiting when poll was called.
     */
    lfields->current_timeout = timeout;

    nfds = epoll_wait(loop->ep, events,
                      ARRAY_SIZE(events), timeout);

@@ -998,9 +1012,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
      }
    }

    uv__metrics_inc_events(loop, nevents);
    if (reset_timeout != 0) {
      timeout = user_timeout;
      reset_timeout = 0;
      uv__metrics_inc_events_waiting(loop, nevents);
    }

    if (have_signals != 0) {
deps/libuv/src/unix/pipe.c (vendored, 2 lines)
@@ -357,7 +357,7 @@ int uv_pipe_chmod(uv_pipe_t* handle, int mode) {
  }

  /* stat must be used as fstat has a bug on Darwin */
  if (stat(name_buffer, &pipe_stat) == -1) {
  if (uv__stat(name_buffer, &pipe_stat) == -1) {
    uv__free(name_buffer);
    return -errno;
  }
deps/libuv/src/unix/posix-hrtime.c (vendored, 13 lines)
@@ -23,13 +23,14 @@
#include "internal.h"
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
|
||||
#undef NANOSEC
|
||||
#define NANOSEC ((uint64_t) 1e9)
|
||||
|
||||
uint64_t uv__hrtime(uv_clocktype_t type) {
|
||||
struct timespec ts;
|
||||
clock_gettime(CLOCK_MONOTONIC, &ts);
|
||||
return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
|
||||
struct timespec t;
|
||||
|
||||
if (clock_gettime(CLOCK_MONOTONIC, &t))
|
||||
abort();
|
||||
|
||||
return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
|
||||
}
|
||||
|
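The rewritten uv__hrtime maps directly onto clock_gettime. Measuring an interval with the same arithmetic (plain POSIX, no libuv internals):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void) {
  struct timespec t;

  if (clock_gettime(CLOCK_MONOTONIC, &t))
    return 0;
  return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
}

int main(void) {
  uint64_t start = now_ns();
  /* ... work to be timed ... */
  printf("elapsed: %llu ns\n", (unsigned long long) (now_ns() - start));
  return 0;
}
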
deps/libuv/src/unix/posix-poll.c (vendored, 13 lines)
@@ -132,6 +132,7 @@ static void uv__pollfds_del(uv_loop_t* loop, int fd) {

void uv__io_poll(uv_loop_t* loop, int timeout) {
  uv__loop_internal_fields_t* lfields;
  sigset_t* pset;
  sigset_t set;
  uint64_t time_base;
@@ -152,6 +153,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
    return;
  }

  lfields = uv__get_internal_fields(loop);

  /* Take queued watchers and add their fds to our poll fds array. */
  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
@@ -179,7 +182,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
  assert(timeout >= -1);
  time_base = loop->time;

  if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
  if (lfields->flags & UV_METRICS_IDLE_TIME) {
    reset_timeout = 1;
    user_timeout = timeout;
    timeout = 0;
@@ -198,6 +201,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
  if (timeout != 0)
    uv__metrics_set_provider_entry_time(loop);

  /* Store the current timeout in a location that's globally accessible so
   * other locations like uv__work_done() can determine whether the queue
   * of events in the callback were waiting when poll was called.
   */
  lfields->current_timeout = timeout;

  if (pset != NULL)
    if (pthread_sigmask(SIG_BLOCK, pset, NULL))
      abort();
@@ -292,9 +301,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
    }
  }

  uv__metrics_inc_events(loop, nevents);
  if (reset_timeout != 0) {
    timeout = user_timeout;
    reset_timeout = 0;
    uv__metrics_inc_events_waiting(loop, nevents);
  }

  if (have_signals != 0) {
deps/libuv/src/unix/process.c (vendored, 31 lines)
@@ -55,7 +55,7 @@
extern char **environ;
#endif

#if defined(__linux__) || defined(__GLIBC__)
#if defined(__linux__)
# include <grp.h>
#endif

@@ -79,8 +79,28 @@ static void uv__chld(uv_signal_t* handle, int signum) {
  assert(signum == SIGCHLD);
  uv__wait_children(handle->loop);
}


int uv__process_init(uv_loop_t* loop) {
  int err;

  err = uv_signal_init(loop, &loop->child_watcher);
  if (err)
    return err;
  uv__handle_unref(&loop->child_watcher);
  loop->child_watcher.flags |= UV_HANDLE_INTERNAL;
  return 0;
}


#else
int uv__process_init(uv_loop_t* loop) {
  memset(&loop->child_watcher, 0, sizeof(loop->child_watcher));
  return 0;
}
#endif


void uv__wait_children(uv_loop_t* loop) {
  uv_process_t* process;
  int exit_status;
@@ -105,6 +125,7 @@ void uv__wait_children(uv_loop_t* loop) {
      continue;
    options = 0;
    process->flags &= ~UV_HANDLE_REAP;
    loop->nfds--;
#else
    options = WNOHANG;
#endif
@@ -665,7 +686,7 @@ static int uv__spawn_resolve_and_spawn(const uv_process_options_t* options,
  if (options->file == NULL)
    return ENOENT;

  /* The environment for the child process is that of the parent unless overriden
  /* The environment for the child process is that of the parent unless overridden
   * by options->env */
  char** env = environ;
  if (options->env != NULL)
@@ -1012,6 +1033,10 @@ int uv_spawn(uv_loop_t* loop,
    process->flags |= UV_HANDLE_REAP;
    loop->flags |= UV_LOOP_REAP_CHILDREN;
  }
  /* This prevents uv__io_poll() from bailing out prematurely, being unaware
   * that we added an event here for it to react to. We will decrement this
   * again after the waitpid call succeeds. */
  loop->nfds++;
#endif

  process->pid = pid;
@@ -1080,6 +1105,8 @@ int uv_kill(int pid, int signum) {
void uv__process_close(uv_process_t* handle) {
  QUEUE_REMOVE(&handle->queue);
  uv__handle_stop(handle);
#ifdef UV_USE_SIGCHLD
  if (QUEUE_EMPTY(&handle->loop->process_handles))
    uv_signal_stop(&handle->loop->child_watcher);
#endif
}
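The new uv__process_init/UV_HANDLE_REAP machinery is internal; from the outside it is driven through uv_spawn. A minimal caller against the standard API (command and arguments are placeholders):

#include <uv.h>

static void on_exit_cb(uv_process_t* req, int64_t status, int signal) {
  uv_close((uv_handle_t*) req, NULL);
}

int main(void) {
  uv_process_t child;
  uv_process_options_t options = { 0 };
  char* args[] = { "echo", "hello", NULL };

  options.file = "echo";
  options.args = args;
  options.exit_cb = on_exit_cb;

  if (uv_spawn(uv_default_loop(), &child, &options))
    return 1;
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}
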
deps/libuv/src/unix/pthread-fixes.c (vendored, 58 lines)
@@ -1,58 +0,0 @@
/* Copyright (c) 2013, Sony Mobile Communications AB
 * Copyright (c) 2012, Google Inc.
   All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

     * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
     * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.
     * Neither the name of Google Inc. nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Android versions < 4.1 have a broken pthread_sigmask. */
#include "uv-common.h"

#include <errno.h>
#include <pthread.h>
#include <signal.h>

int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset) {
  static int workaround;
  int err;

  if (uv__load_relaxed(&workaround)) {
    return sigprocmask(how, set, oset);
  } else {
    err = pthread_sigmask(how, set, oset);
    if (err) {
      if (err == EINVAL && sigprocmask(how, set, oset) == 0) {
        uv__store_relaxed(&workaround, 1);
        return 0;
      } else {
        return -1;
      }
    }
  }

  return 0;
}
deps/libuv/src/unix/qnx.c (vendored, 5 lines)
@@ -88,6 +88,11 @@ uint64_t uv_get_constrained_memory(void) {
}


uint64_t uv_get_available_memory(void) {
  return uv_get_free_memory();
}


int uv_resident_set_memory(size_t* rss) {
  int fd;
  procfs_asinfo asinfo;
deps/libuv/src/unix/random-devurandom.c (vendored, 2 lines)
@@ -40,7 +40,7 @@ int uv__random_readpath(const char* path, void* buf, size_t buflen) {
  if (fd < 0)
    return fd;

  if (fstat(fd, &s)) {
  if (uv__fstat(fd, &s)) {
    uv__close(fd);
    return UV__ERR(errno);
  }
deps/libuv/src/unix/random-getrandom.c (vendored, 2 lines)
@@ -24,8 +24,6 @@
#ifdef __linux__

#include "linux-syscalls.h"

#define uv__random_getrandom_init() 0

#else /* !__linux__ */
deps/libuv/src/unix/signal.c (vendored, 2 lines)
@@ -279,6 +279,8 @@ static int uv__signal_loop_once_init(uv_loop_t* loop) {
int uv__signal_loop_fork(uv_loop_t* loop) {
  if (loop->signal_pipefd[0] == -1)
    return 0;
  uv__io_stop(loop, &loop->signal_io_watcher, POLLIN);
  uv__close(loop->signal_pipefd[0]);
  uv__close(loop->signal_pipefd[1]);
deps/libuv/src/unix/spinlock.h (vendored, 53 lines)
@@ -1,53 +0,0 @@
/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef UV_SPINLOCK_H_
#define UV_SPINLOCK_H_

#include "internal.h"  /* ACCESS_ONCE, UV_UNUSED */
#include "atomic-ops.h"

#define UV_SPINLOCK_INITIALIZER { 0 }

typedef struct {
  int lock;
} uv_spinlock_t;

UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock));
UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock));
UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock));
UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock));

UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)) {
  ACCESS_ONCE(int, spinlock->lock) = 0;
}

UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)) {
  while (!uv_spinlock_trylock(spinlock)) cpu_relax();
}

UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)) {
  ACCESS_ONCE(int, spinlock->lock) = 0;
}

UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)) {
  /* TODO(bnoordhuis) Maybe change to a ticket lock to guarantee fair queueing.
   * Not really critical until we have locks that are (frequently) contended
   * for by several threads.
   */
  return 0 == cmpxchgi(&spinlock->lock, 0, 1);
}

#endif  /* UV_SPINLOCK_H_ */
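Usage of this (now removed) header was straightforward; a sketch, assuming the libuv-internal includes above are available:

#include "spinlock.h"  /* the header shown above */

static uv_spinlock_t lock = UV_SPINLOCK_INITIALIZER;
static int counter;

void bump(void) {
  uv_spinlock_lock(&lock);    /* spins on cmpxchgi until acquired */
  counter++;
  uv_spinlock_unlock(&lock);  /* plain store releases the lock */
}
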
deps/libuv/src/unix/stream.c (vendored, 185 lines)
@@ -60,6 +60,16 @@ struct uv__stream_select_s {
};
|
||||
#endif /* defined(__APPLE__) */
|
||||
|
||||
union uv__cmsg {
|
||||
struct cmsghdr hdr;
|
||||
/* This cannot be larger because of the IBMi PASE limitation that
|
||||
* the total size of control messages cannot exceed 256 bytes.
|
||||
*/
|
||||
char pad[256];
|
||||
};
|
||||
|
||||
STATIC_ASSERT(256 == sizeof(union uv__cmsg));
|
||||
|
||||
static void uv__stream_connect(uv_stream_t*);
|
||||
static void uv__write(uv_stream_t* stream);
|
||||
static void uv__read(uv_stream_t* stream);
|
||||
@ -495,76 +505,34 @@ static int uv__emfile_trick(uv_loop_t* loop, int accept_fd) {
|
||||
}
|
||||
|
||||
|
||||
#if defined(UV_HAVE_KQUEUE)
|
||||
# define UV_DEC_BACKLOG(w) w->rcount--;
|
||||
#else
|
||||
# define UV_DEC_BACKLOG(w) /* no-op */
|
||||
#endif /* defined(UV_HAVE_KQUEUE) */
|
||||
|
||||
|
||||
void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
|
||||
uv_stream_t* stream;
|
||||
int err;
|
||||
int fd;
|
||||
|
||||
stream = container_of(w, uv_stream_t, io_watcher);
|
||||
assert(events & POLLIN);
|
||||
assert(stream->accepted_fd == -1);
|
||||
assert(!(stream->flags & UV_HANDLE_CLOSING));
|
||||
|
||||
uv__io_start(stream->loop, &stream->io_watcher, POLLIN);
|
||||
fd = uv__stream_fd(stream);
|
||||
err = uv__accept(fd);
|
||||
|
||||
/* connection_cb can close the server socket while we're
|
||||
* in the loop so check it on each iteration.
|
||||
*/
|
||||
while (uv__stream_fd(stream) != -1) {
|
||||
assert(stream->accepted_fd == -1);
|
||||
if (err == UV_EMFILE || err == UV_ENFILE)
|
||||
err = uv__emfile_trick(loop, fd); /* Shed load. */
|
||||
|
||||
#if defined(UV_HAVE_KQUEUE)
|
||||
if (w->rcount <= 0)
|
||||
return;
|
||||
#endif /* defined(UV_HAVE_KQUEUE) */
|
||||
if (err < 0)
|
||||
return;
|
||||
|
||||
err = uv__accept(uv__stream_fd(stream));
|
||||
if (err < 0) {
|
||||
if (err == UV_EAGAIN || err == UV__ERR(EWOULDBLOCK))
|
||||
return; /* Not an error. */
|
||||
stream->accepted_fd = err;
|
||||
stream->connection_cb(stream, 0);
|
||||
|
||||
if (err == UV_ECONNABORTED)
|
||||
continue; /* Ignore. Nothing we can do about that. */
|
||||
|
||||
if (err == UV_EMFILE || err == UV_ENFILE) {
|
||||
err = uv__emfile_trick(loop, uv__stream_fd(stream));
|
||||
if (err == UV_EAGAIN || err == UV__ERR(EWOULDBLOCK))
|
||||
break;
|
||||
}
|
||||
|
||||
stream->connection_cb(stream, err);
|
||||
continue;
|
||||
}
|
||||
|
||||
UV_DEC_BACKLOG(w)
|
||||
stream->accepted_fd = err;
|
||||
stream->connection_cb(stream, 0);
|
||||
|
||||
if (stream->accepted_fd != -1) {
|
||||
/* The user hasn't yet accepted called uv_accept() */
|
||||
uv__io_stop(loop, &stream->io_watcher, POLLIN);
|
||||
return;
|
||||
}
|
||||
|
||||
if (stream->type == UV_TCP &&
|
||||
(stream->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) {
|
||||
/* Give other processes a chance to accept connections. */
|
||||
struct timespec timeout = { 0, 1 };
|
||||
nanosleep(&timeout, NULL);
|
||||
}
|
||||
}
|
||||
if (stream->accepted_fd != -1)
|
||||
/* The user hasn't yet accepted called uv_accept() */
|
||||
uv__io_stop(loop, &stream->io_watcher, POLLIN);
|
||||
}
|
||||
|
||||
|
||||
#undef UV_DEC_BACKLOG
|
||||
|
||||
|
||||
int uv_accept(uv_stream_t* server, uv_stream_t* client) {
|
||||
int err;
|
||||
|
||||
@ -665,7 +633,7 @@ static void uv__drain(uv_stream_t* stream) {
|
||||
uv__stream_osx_interrupt_select(stream);
|
||||
}
|
||||
|
||||
if (!(stream->flags & UV_HANDLE_SHUTTING))
|
||||
if (!uv__is_stream_shutting(stream))
|
||||
return;
|
||||
|
||||
req = stream->shutdown_req;
|
||||
@ -674,7 +642,6 @@ static void uv__drain(uv_stream_t* stream) {
|
||||
if ((stream->flags & UV_HANDLE_CLOSING) ||
|
||||
!(stream->flags & UV_HANDLE_SHUT)) {
|
||||
stream->shutdown_req = NULL;
|
||||
stream->flags &= ~UV_HANDLE_SHUTTING;
|
||||
uv__req_unregister(stream->loop, req);
|
||||
|
||||
err = 0;
|
||||
@ -812,18 +779,14 @@ static int uv__try_write(uv_stream_t* stream,
|
||||
if (send_handle != NULL) {
|
||||
int fd_to_send;
|
||||
struct msghdr msg;
|
||||
struct cmsghdr *cmsg;
|
||||
union {
|
||||
char data[64];
|
||||
struct cmsghdr alias;
|
||||
} scratch;
|
||||
union uv__cmsg cmsg;
|
||||
|
||||
if (uv__is_closing(send_handle))
|
||||
return UV_EBADF;
|
||||
|
||||
fd_to_send = uv__handle_fd((uv_handle_t*) send_handle);
|
||||
|
||||
memset(&scratch, 0, sizeof(scratch));
|
||||
memset(&cmsg, 0, sizeof(cmsg));
|
||||
|
||||
assert(fd_to_send >= 0);
|
||||
|
||||
@ -833,20 +796,13 @@ static int uv__try_write(uv_stream_t* stream,
|
||||
msg.msg_iovlen = iovcnt;
|
||||
msg.msg_flags = 0;
|
||||
|
||||
msg.msg_control = &scratch.alias;
msg.msg_control = &cmsg.hdr;
msg.msg_controllen = CMSG_SPACE(sizeof(fd_to_send));

cmsg = CMSG_FIRSTHDR(&msg);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
cmsg->cmsg_len = CMSG_LEN(sizeof(fd_to_send));

/* silence aliasing warning */
{
void* pv = CMSG_DATA(cmsg);
int* pi = pv;
*pi = fd_to_send;
}
cmsg.hdr.cmsg_level = SOL_SOCKET;
cmsg.hdr.cmsg_type = SCM_RIGHTS;
cmsg.hdr.cmsg_len = CMSG_LEN(sizeof(fd_to_send));
memcpy(CMSG_DATA(&cmsg.hdr), &fd_to_send, sizeof(fd_to_send));

do
n = sendmsg(uv__stream_fd(stream), &msg, 0);
@@ -884,9 +840,16 @@ static void uv__write(uv_stream_t* stream) {
QUEUE* q;
uv_write_t* req;
ssize_t n;
int count;

assert(uv__stream_fd(stream) >= 0);

/* Prevent loop starvation when the consumer of this stream read as fast as
* (or faster than) we can write it. This `count` mechanism does not need to
* change even if we switch to edge-triggered I/O.
*/
count = 32;

for (;;) {
if (QUEUE_EMPTY(&stream->write_queue))
return;
@@ -905,10 +868,13 @@ static void uv__write(uv_stream_t* stream) {
req->send_handle = NULL;
if (uv__write_req_update(stream, req, n)) {
uv__write_req_finish(req);
return; /* TODO(bnoordhuis) Start trying to write the next request. */
if (count-- > 0)
continue; /* Start trying to write the next request. */

return;
}
} else if (n != UV_EAGAIN)
break;
goto error;

/* If this is a blocking stream, try again. */
if (stream->flags & UV_HANDLE_BLOCKING_WRITES)
@@ -923,6 +889,7 @@ static void uv__write(uv_stream_t* stream) {
return;
}

error:
req->error = n;
uv__write_req_finish(req);
uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
@@ -1010,57 +977,38 @@ static int uv__stream_queue_fd(uv_stream_t* stream, int fd) {
}


#if defined(__PASE__)
/* on IBMi PASE the control message length can not exceed 256. */
# define UV__CMSG_FD_COUNT 60
#else
# define UV__CMSG_FD_COUNT 64
#endif
#define UV__CMSG_FD_SIZE (UV__CMSG_FD_COUNT * sizeof(int))


static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) {
struct cmsghdr* cmsg;
int fd;
int err;
size_t i;
size_t count;

for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
char* start;
char* end;
int err;
void* pv;
int* pi;
unsigned int i;
unsigned int count;

if (cmsg->cmsg_type != SCM_RIGHTS) {
fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n",
cmsg->cmsg_type);
continue;
}

/* silence aliasing warning */
pv = CMSG_DATA(cmsg);
pi = pv;

/* Count available fds */
start = (char*) cmsg;
end = (char*) cmsg + cmsg->cmsg_len;
count = 0;
while (start + CMSG_LEN(count * sizeof(*pi)) < end)
count++;
assert(start + CMSG_LEN(count * sizeof(*pi)) == end);
assert(cmsg->cmsg_len >= CMSG_LEN(0));
count = cmsg->cmsg_len - CMSG_LEN(0);
assert(count % sizeof(fd) == 0);
count /= sizeof(fd);

for (i = 0; i < count; i++) {
memcpy(&fd, (char*) CMSG_DATA(cmsg) + i * sizeof(fd), sizeof(fd));
/* Already has accepted fd, queue now */
if (stream->accepted_fd != -1) {
err = uv__stream_queue_fd(stream, pi[i]);
err = uv__stream_queue_fd(stream, fd);
if (err != 0) {
/* Close rest */
for (; i < count; i++)
uv__close(pi[i]);
uv__close(fd);
return err;
}
} else {
stream->accepted_fd = pi[i];
stream->accepted_fd = fd;
}
}
}
@@ -1069,17 +1017,11 @@ static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) {
}


#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wgnu-folding-constant"
# pragma clang diagnostic ignored "-Wvla-extension"
#endif

static void uv__read(uv_stream_t* stream) {
uv_buf_t buf;
ssize_t nread;
struct msghdr msg;
char cmsg_space[CMSG_SPACE(UV__CMSG_FD_SIZE)];
union uv__cmsg cmsg;
int count;
int err;
int is_ipc;
@@ -1125,8 +1067,8 @@ static void uv__read(uv_stream_t* stream) {
msg.msg_name = NULL;
msg.msg_namelen = 0;
/* Set up to receive a descriptor even if one isn't in the message */
msg.msg_controllen = sizeof(cmsg_space);
msg.msg_control = cmsg_space;
msg.msg_controllen = sizeof(cmsg);
msg.msg_control = &cmsg.hdr;

do {
nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0);
@@ -1210,14 +1152,6 @@ static void uv__read(uv_stream_t* stream) {
}


#ifdef __clang__
# pragma clang diagnostic pop
#endif

#undef UV__CMSG_FD_COUNT
#undef UV__CMSG_FD_SIZE


int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
assert(stream->type == UV_TCP ||
stream->type == UV_TTY ||
@@ -1225,7 +1159,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {

if (!(stream->flags & UV_HANDLE_WRITABLE) ||
stream->flags & UV_HANDLE_SHUT ||
stream->flags & UV_HANDLE_SHUTTING ||
uv__is_stream_shutting(stream) ||
uv__is_closing(stream)) {
return UV_ENOTCONN;
}
@@ -1238,7 +1172,6 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
req->handle = stream;
req->cb = cb;
stream->shutdown_req = req;
stream->flags |= UV_HANDLE_SHUTTING;
stream->flags &= ~UV_HANDLE_WRITABLE;

if (QUEUE_EMPTY(&stream->write_queue))
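The control-message handling above replaces a VLA-based char buffer with a union of struct cmsghdr and a char array, which guarantees alignment without the old aliasing workarounds. A minimal standalone sketch of that SCM_RIGHTS pattern, assuming plain POSIX sockets (the names here are illustrative, not libuv API):

#include <string.h>
#include <sys/socket.h>

union example_cmsg {
  struct cmsghdr hdr;
  char pad[CMSG_SPACE(sizeof(int))]; /* room for one descriptor */
};

static int send_fd(int sock, int fd_to_send) {
  union example_cmsg cmsg;
  struct msghdr msg;
  struct iovec iov;
  char byte = 0;

  memset(&cmsg, 0, sizeof(cmsg));
  memset(&msg, 0, sizeof(msg));
  iov.iov_base = &byte;  /* at least one data byte must accompany the fd */
  iov.iov_len = 1;
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = &cmsg.hdr;
  msg.msg_controllen = CMSG_SPACE(sizeof(int));

  cmsg.hdr.cmsg_level = SOL_SOCKET;
  cmsg.hdr.cmsg_type = SCM_RIGHTS;
  cmsg.hdr.cmsg_len = CMSG_LEN(sizeof(int));
  memcpy(CMSG_DATA(&cmsg.hdr), &fd_to_send, sizeof(int));

  return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}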
7 deps/libuv/src/unix/sunos.c vendored
@@ -320,9 +320,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}

uv__metrics_inc_events(loop, nevents);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
uv__metrics_inc_events_waiting(loop, nevents);
}

if (have_signals != 0) {
@@ -415,6 +417,11 @@ uint64_t uv_get_constrained_memory(void) {
}


uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}


void uv_loadavg(double avg[3]) {
(void) getloadavg(avg, 3);
}
132 deps/libuv/src/unix/tcp.c vendored
@@ -28,16 +28,39 @@
#include <errno.h>


static int new_socket(uv_tcp_t* handle, int domain, unsigned long flags) {
struct sockaddr_storage saddr;
static int maybe_bind_socket(int fd) {
union uv__sockaddr s;
socklen_t slen;

slen = sizeof(s);
memset(&s, 0, sizeof(s));

if (getsockname(fd, &s.addr, &slen))
return UV__ERR(errno);

if (s.addr.sa_family == AF_INET)
if (s.in.sin_port != 0)
return 0; /* Already bound to a port. */

if (s.addr.sa_family == AF_INET6)
if (s.in6.sin6_port != 0)
return 0; /* Already bound to a port. */

/* Bind to an arbitrary port. */
if (bind(fd, &s.addr, slen))
return UV__ERR(errno);

return 0;
}


static int new_socket(uv_tcp_t* handle, int domain, unsigned int flags) {
int sockfd;
int err;

err = uv__socket(domain, SOCK_STREAM, 0);
if (err < 0)
return err;
sockfd = err;
sockfd = uv__socket(domain, SOCK_STREAM, 0);
if (sockfd < 0)
return sockfd;

err = uv__stream_open((uv_stream_t*) handle, sockfd, flags);
if (err) {
@@ -45,74 +68,44 @@ static int new_socket(uv_tcp_t* handle, int domain, unsigned long flags) {
return err;
}

if (flags & UV_HANDLE_BOUND) {
/* Bind this new socket to an arbitrary port */
slen = sizeof(saddr);
memset(&saddr, 0, sizeof(saddr));
if (getsockname(uv__stream_fd(handle), (struct sockaddr*) &saddr, &slen)) {
uv__close(sockfd);
return UV__ERR(errno);
}

if (bind(uv__stream_fd(handle), (struct sockaddr*) &saddr, slen)) {
uv__close(sockfd);
return UV__ERR(errno);
}
}
if (flags & UV_HANDLE_BOUND)
return maybe_bind_socket(sockfd);

return 0;
}


static int maybe_new_socket(uv_tcp_t* handle, int domain, unsigned long flags) {
struct sockaddr_storage saddr;
socklen_t slen;
static int maybe_new_socket(uv_tcp_t* handle, int domain, unsigned int flags) {
int sockfd;
int err;

if (domain == AF_UNSPEC) {
handle->flags |= flags;
return 0;
}
if (domain == AF_UNSPEC)
goto out;

if (uv__stream_fd(handle) != -1) {
sockfd = uv__stream_fd(handle);
if (sockfd == -1)
return new_socket(handle, domain, flags);

if (flags & UV_HANDLE_BOUND) {
if (!(flags & UV_HANDLE_BOUND))
goto out;

if (handle->flags & UV_HANDLE_BOUND) {
/* It is already bound to a port. */
handle->flags |= flags;
return 0;
}
if (handle->flags & UV_HANDLE_BOUND)
goto out; /* Already bound to a port. */

/* Query to see if tcp socket is bound. */
slen = sizeof(saddr);
memset(&saddr, 0, sizeof(saddr));
if (getsockname(uv__stream_fd(handle), (struct sockaddr*) &saddr, &slen))
return UV__ERR(errno);
err = maybe_bind_socket(sockfd);
if (err)
return err;

if ((saddr.ss_family == AF_INET6 &&
((struct sockaddr_in6*) &saddr)->sin6_port != 0) ||
(saddr.ss_family == AF_INET &&
((struct sockaddr_in*) &saddr)->sin_port != 0)) {
/* Handle is already bound to a port. */
handle->flags |= flags;
return 0;
}
out:

/* Bind to arbitrary port */
if (bind(uv__stream_fd(handle), (struct sockaddr*) &saddr, slen))
return UV__ERR(errno);
}

handle->flags |= flags;
return 0;
}

return new_socket(handle, domain, flags);
handle->flags |= flags;
return 0;
}


int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) {
int domain;
int err;

/* Use the lower 8 bits for the domain */
domain = flags & 0xFF;
@@ -129,9 +122,12 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) {
*/

if (domain != AF_UNSPEC) {
int err = maybe_new_socket(tcp, domain, 0);
err = new_socket(tcp, domain, 0);
if (err) {
QUEUE_REMOVE(&tcp->handle_queue);
if (tcp->io_watcher.fd != -1)
uv__close(tcp->io_watcher.fd);
tcp->io_watcher.fd = -1;
return err;
}
}
@@ -317,7 +313,7 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
struct linger l = { 1, 0 };

/* Disallow setting SO_LINGER to zero due to some platform inconsistencies */
if (handle->flags & UV_HANDLE_SHUTTING)
if (uv__is_stream_shutting(handle))
return UV_EINVAL;

fd = uv__stream_fd(handle);
@@ -338,24 +334,12 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {


int uv__tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
static int single_accept_cached = -1;
unsigned long flags;
int single_accept;
unsigned int flags;
int err;

if (tcp->delayed_error)
return tcp->delayed_error;

single_accept = uv__load_relaxed(&single_accept_cached);
if (single_accept == -1) {
const char* val = getenv("UV_TCP_SINGLE_ACCEPT");
single_accept = (val != NULL && atoi(val) != 0); /* Off by default. */
uv__store_relaxed(&single_accept_cached, single_accept);
}

if (single_accept)
tcp->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;

flags = 0;
#if defined(__MVS__)
/* on zOS the listen call does not bind automatically
@@ -460,10 +444,6 @@ int uv_tcp_keepalive(uv_tcp_t* handle, int on, unsigned int delay) {


int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
if (enable)
handle->flags &= ~UV_HANDLE_TCP_SINGLE_ACCEPT;
else
handle->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
return 0;
}
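maybe_bind_socket() above collapses the old getsockname()/bind() duplication: if getsockname() reports a zero port, rebinding the socket to its own portless local address makes the kernel pick an ephemeral port. A self-contained sketch of the idiom, assuming plain POSIX sockets (names are illustrative):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int bind_ephemeral_if_unbound(int fd) {
  union {
    struct sockaddr addr;
    struct sockaddr_in in;
    struct sockaddr_in6 in6;
  } s;
  socklen_t slen = sizeof(s);

  memset(&s, 0, sizeof(s));
  if (getsockname(fd, &s.addr, &slen))
    return -1;

  /* A nonzero port means a port was already assigned or bound. */
  if (s.addr.sa_family == AF_INET && s.in.sin_port != 0)
    return 0;
  if (s.addr.sa_family == AF_INET6 && s.in6.sin6_port != 0)
    return 0;

  /* Rebinding to the same portless address picks a free port. */
  return bind(fd, &s.addr, slen);
}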
231 deps/libuv/src/unix/thread.c vendored
@@ -41,127 +41,20 @@
#include <gnu/libc-version.h> /* gnu_get_libc_version() */
#endif

#if defined(__linux__)
# include <sched.h>
# define uv__cpu_set_t cpu_set_t
#elif defined(__FreeBSD__)
# include <sys/param.h>
# include <sys/cpuset.h>
# include <pthread_np.h>
# define uv__cpu_set_t cpuset_t
#endif


#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)

#if defined(PTHREAD_BARRIER_SERIAL_THREAD)
STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
#endif

/* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */
#if defined(_AIX) || \
defined(__OpenBSD__) || \
!defined(PTHREAD_BARRIER_SERIAL_THREAD)
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
struct _uv_barrier* b;
int rc;

if (barrier == NULL || count == 0)
return UV_EINVAL;

b = uv__malloc(sizeof(*b));
if (b == NULL)
return UV_ENOMEM;

b->in = 0;
b->out = 0;
b->threshold = count;

rc = uv_mutex_init(&b->mutex);
if (rc != 0)
goto error2;

rc = uv_cond_init(&b->cond);
if (rc != 0)
goto error;

barrier->b = b;
return 0;

error:
uv_mutex_destroy(&b->mutex);
error2:
uv__free(b);
return rc;
}


int uv_barrier_wait(uv_barrier_t* barrier) {
struct _uv_barrier* b;
int last;

if (barrier == NULL || barrier->b == NULL)
return UV_EINVAL;

b = barrier->b;
uv_mutex_lock(&b->mutex);

if (++b->in == b->threshold) {
b->in = 0;
b->out = b->threshold;
uv_cond_signal(&b->cond);
} else {
do
uv_cond_wait(&b->cond, &b->mutex);
while (b->in != 0);
}

last = (--b->out == 0);
uv_cond_signal(&b->cond);

uv_mutex_unlock(&b->mutex);
return last;
}


void uv_barrier_destroy(uv_barrier_t* barrier) {
struct _uv_barrier* b;

b = barrier->b;
uv_mutex_lock(&b->mutex);

assert(b->in == 0);
while (b->out != 0)
uv_cond_wait(&b->cond, &b->mutex);

if (b->in != 0)
abort();

uv_mutex_unlock(&b->mutex);
uv_mutex_destroy(&b->mutex);
uv_cond_destroy(&b->cond);

uv__free(barrier->b);
barrier->b = NULL;
}

#else

int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
return UV__ERR(pthread_barrier_init(barrier, NULL, count));
}


int uv_barrier_wait(uv_barrier_t* barrier) {
int rc;

rc = pthread_barrier_wait(barrier);
if (rc != 0)
if (rc != PTHREAD_BARRIER_SERIAL_THREAD)
abort();

return rc == PTHREAD_BARRIER_SERIAL_THREAD;
}


void uv_barrier_destroy(uv_barrier_t* barrier) {
if (pthread_barrier_destroy(barrier))
abort();
}

#endif


/* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
* too small to safely receive signals on.
*
@@ -284,6 +177,106 @@ int uv_thread_create_ex(uv_thread_t* tid,
return UV__ERR(err);
}

#if UV__CPU_AFFINITY_SUPPORTED

int uv_thread_setaffinity(uv_thread_t* tid,
char* cpumask,
char* oldmask,
size_t mask_size) {
int i;
int r;
uv__cpu_set_t cpuset;
int cpumasksize;

cpumasksize = uv_cpumask_size();
if (cpumasksize < 0)
return cpumasksize;
if (mask_size < (size_t)cpumasksize)
return UV_EINVAL;

if (oldmask != NULL) {
r = uv_thread_getaffinity(tid, oldmask, mask_size);
if (r < 0)
return r;
}

CPU_ZERO(&cpuset);
for (i = 0; i < cpumasksize; i++)
if (cpumask[i])
CPU_SET(i, &cpuset);

#if defined(__ANDROID__)
if (sched_setaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
r = errno;
else
r = 0;
#else
r = pthread_setaffinity_np(*tid, sizeof(cpuset), &cpuset);
#endif

return UV__ERR(r);
}


int uv_thread_getaffinity(uv_thread_t* tid,
char* cpumask,
size_t mask_size) {
int r;
int i;
uv__cpu_set_t cpuset;
int cpumasksize;

cpumasksize = uv_cpumask_size();
if (cpumasksize < 0)
return cpumasksize;
if (mask_size < (size_t)cpumasksize)
return UV_EINVAL;

CPU_ZERO(&cpuset);
#if defined(__ANDROID__)
if (sched_getaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
r = errno;
else
r = 0;
#else
r = pthread_getaffinity_np(*tid, sizeof(cpuset), &cpuset);
#endif
if (r)
return UV__ERR(r);
for (i = 0; i < cpumasksize; i++)
cpumask[i] = !!CPU_ISSET(i, &cpuset);

return 0;
}
#else
int uv_thread_setaffinity(uv_thread_t* tid,
char* cpumask,
char* oldmask,
size_t mask_size) {
return UV_ENOTSUP;
}


int uv_thread_getaffinity(uv_thread_t* tid,
char* cpumask,
size_t mask_size) {
return UV_ENOTSUP;
}
#endif /* defined(__linux__) || defined(UV_BSD_H) */

int uv_thread_getcpu(void) {
#if UV__CPU_AFFINITY_SUPPORTED
int cpu;

cpu = sched_getcpu();
if (cpu < 0)
return UV__ERR(errno);

return cpu;
#else
return UV_ENOTSUP;
#endif
}

uv_thread_t uv_thread_self(void) {
return pthread_self();
@@ -585,7 +578,7 @@ static void uv__custom_sem_post(uv_sem_t* sem_) {
uv_mutex_lock(&sem->mutex);
sem->value++;
if (sem->value == 1)
uv_cond_signal(&sem->cond);
uv_cond_signal(&sem->cond); /* Release one to replace us. */
uv_mutex_unlock(&sem->mutex);
}
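The barrier rewrite keeps the same public contract: uv_barrier_wait() returns non-zero in exactly one of the waiting threads. A small usage sketch against the public libuv API (the thread count and callback here are illustrative):

#include <stdio.h>
#include <uv.h>

static uv_barrier_t blocker;

static void worker(void* arg) {
  /* ... per-thread setup would go here ... */
  if (uv_barrier_wait(&blocker) > 0)
    printf("this thread was released last\n");
}

int main(void) {
  uv_thread_t t;
  uv_barrier_init(&blocker, 2);       /* main thread + one worker */
  uv_thread_create(&t, worker, NULL);
  uv_barrier_wait(&blocker);          /* rendezvous with the worker */
  uv_thread_join(&t);
  uv_barrier_destroy(&blocker);
  return 0;
}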
24 deps/libuv/src/unix/tty.c vendored
@@ -21,8 +21,8 @@

#include "uv.h"
#include "internal.h"
#include "spinlock.h"

#include <stdatomic.h>
#include <stdlib.h>
#include <assert.h>
#include <unistd.h>
@@ -64,7 +64,7 @@ static int isreallyatty(int file) {

static int orig_termios_fd = -1;
static struct termios orig_termios;
static uv_spinlock_t termios_spinlock = UV_SPINLOCK_INITIALIZER;
static _Atomic int termios_spinlock;

int uv__tcsetattr(int fd, int how, const struct termios *term) {
int rc;
@@ -81,7 +81,7 @@ int uv__tcsetattr(int fd, int how, const struct termios *term) {

static int uv__tty_is_slave(const int fd) {
int result;
#if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#if defined(__linux__) || defined(__FreeBSD__)
int dummy;

result = ioctl(fd, TIOCGPTN, &dummy) != 0;
@@ -113,7 +113,7 @@ static int uv__tty_is_slave(const int fd) {
}

/* Lookup stat structure behind the file descriptor. */
if (fstat(fd, &sb) != 0)
if (uv__fstat(fd, &sb) != 0)
abort();

/* Assert character device. */
@@ -280,6 +280,7 @@ static void uv__tty_make_raw(struct termios* tio) {

int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
struct termios tmp;
int expected;
int fd;
int rc;

@@ -296,12 +297,16 @@ int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
return UV__ERR(errno);

/* This is used for uv_tty_reset_mode() */
uv_spinlock_lock(&termios_spinlock);
do
expected = 0;
while (!atomic_compare_exchange_strong(&termios_spinlock, &expected, 1));

if (orig_termios_fd == -1) {
orig_termios = tty->orig_termios;
orig_termios_fd = fd;
}
uv_spinlock_unlock(&termios_spinlock);

atomic_store(&termios_spinlock, 0);
}

tmp = tty->orig_termios;
@@ -360,7 +365,7 @@ uv_handle_type uv_guess_handle(uv_file file) {
if (isatty(file))
return UV_TTY;

if (fstat(file, &s)) {
if (uv__fstat(file, &s)) {
#if defined(__PASE__)
/* On ibmi receiving RST from TCP instead of FIN immediately puts fd into
* an error state. fstat will return EINVAL, getsockname will also return
@@ -445,14 +450,15 @@ int uv_tty_reset_mode(void) {
int err;

saved_errno = errno;
if (!uv_spinlock_trylock(&termios_spinlock))

if (atomic_exchange(&termios_spinlock, 1))
return UV_EBUSY; /* In uv_tty_set_mode(). */

err = 0;
if (orig_termios_fd != -1)
err = uv__tcsetattr(orig_termios_fd, TCSANOW, &orig_termios);

uv_spinlock_unlock(&termios_spinlock);
atomic_store(&termios_spinlock, 0);
errno = saved_errno;

return err;
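The tty code drops the custom uv_spinlock_t for a plain C11 _Atomic int: lock via compare-and-swap, trylock via exchange, unlock via store. The same pattern in isolation, as a sketch:

#include <stdatomic.h>

static void spin_lock(_Atomic int* l) {
  int expected;
  do
    expected = 0;  /* CAS rewrites 'expected' on failure, so reset it */
  while (!atomic_compare_exchange_strong(l, &expected, 1));
}

static int spin_trylock(_Atomic int* l) {
  return atomic_exchange(l, 1) == 0;  /* old value 0 means we got it */
}

static void spin_unlock(_Atomic int* l) {
  atomic_store(l, 0);
}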
90 deps/libuv/src/unix/udp.c vendored
@@ -40,12 +40,6 @@
# define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
#endif

union uv__sockaddr {
struct sockaddr_in6 in6;
struct sockaddr_in in;
struct sockaddr addr;
};

static void uv__udp_run_completed(uv_udp_t* handle);
static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
static void uv__udp_recvmsg(uv_udp_t* handle);
@@ -54,36 +48,6 @@ static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
int domain,
unsigned int flags);

#if HAVE_MMSG

#define UV__MMSG_MAXWIDTH 20

static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf);
static void uv__udp_sendmmsg(uv_udp_t* handle);

static int uv__recvmmsg_avail;
static int uv__sendmmsg_avail;
static uv_once_t once = UV_ONCE_INIT;

static void uv__udp_mmsg_init(void) {
int ret;
int s;
s = uv__socket(AF_INET, SOCK_DGRAM, 0);
if (s < 0)
return;
ret = uv__sendmmsg(s, NULL, 0);
if (ret == 0 || errno != ENOSYS) {
uv__sendmmsg_avail = 1;
uv__recvmmsg_avail = 1;
} else {
ret = uv__recvmmsg(s, NULL, 0);
if (ret == 0 || errno != ENOSYS)
uv__recvmmsg_avail = 1;
}
uv__close(s);
}

#endif

void uv__udp_close(uv_udp_t* handle) {
uv__io_close(handle->loop, &handle->io_watcher);
@@ -183,11 +147,11 @@ static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
}
}

#if HAVE_MMSG
static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
struct sockaddr_in6 peers[UV__MMSG_MAXWIDTH];
struct iovec iov[UV__MMSG_MAXWIDTH];
struct uv__mmsghdr msgs[UV__MMSG_MAXWIDTH];
#if defined(__linux__) || defined(__FreeBSD__)
struct sockaddr_in6 peers[20];
struct iovec iov[ARRAY_SIZE(peers)];
struct mmsghdr msgs[ARRAY_SIZE(peers)];
ssize_t nread;
uv_buf_t chunk_buf;
size_t chunks;
@@ -212,7 +176,7 @@ static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
}

do
nread = uv__recvmmsg(handle->io_watcher.fd, msgs, chunks);
nread = recvmmsg(handle->io_watcher.fd, msgs, chunks, 0, NULL);
while (nread == -1 && errno == EINTR);

if (nread < 1) {
@@ -240,8 +204,10 @@ static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
handle->recv_cb(handle, 0, buf, NULL, UV_UDP_MMSG_FREE);
}
return nread;
#else /* __linux__ || ____FreeBSD__ */
return UV_ENOSYS;
#endif /* __linux__ || ____FreeBSD__ */
}
#endif

static void uv__udp_recvmsg(uv_udp_t* handle) {
struct sockaddr_storage peer;
@@ -268,14 +234,12 @@ static void uv__udp_recvmsg(uv_udp_t* handle) {
}
assert(buf.base != NULL);

#if HAVE_MMSG
if (uv_udp_using_recvmmsg(handle)) {
nread = uv__udp_recvmmsg(handle, &buf);
if (nread > 0)
count -= nread;
continue;
}
#endif

memset(&h, 0, sizeof(h));
memset(&peer, 0, sizeof(peer));
@@ -311,11 +275,11 @@ static void uv__udp_recvmsg(uv_udp_t* handle) {
&& handle->recv_cb != NULL);
}

#if HAVE_MMSG
static void uv__udp_sendmmsg(uv_udp_t* handle) {
static void uv__udp_sendmsg(uv_udp_t* handle) {
#if defined(__linux__) || defined(__FreeBSD__)
uv_udp_send_t* req;
struct uv__mmsghdr h[UV__MMSG_MAXWIDTH];
struct uv__mmsghdr *p;
struct mmsghdr h[20];
struct mmsghdr* p;
QUEUE* q;
ssize_t npkts;
size_t pkts;
@@ -326,7 +290,7 @@ static void uv__udp_sendmmsg(uv_udp_t* handle) {

write_queue_drain:
for (pkts = 0, q = QUEUE_HEAD(&handle->write_queue);
pkts < UV__MMSG_MAXWIDTH && q != &handle->write_queue;
pkts < ARRAY_SIZE(h) && q != &handle->write_queue;
++pkts, q = QUEUE_HEAD(q)) {
assert(q != NULL);
req = QUEUE_DATA(q, uv_udp_send_t, queue);
@@ -355,7 +319,7 @@ write_queue_drain:
}

do
npkts = uv__sendmmsg(handle->io_watcher.fd, h, pkts);
npkts = sendmmsg(handle->io_watcher.fd, h, pkts, 0);
while (npkts == -1 && errno == EINTR);

if (npkts < 1) {
@@ -401,24 +365,12 @@ write_queue_drain:
if (!QUEUE_EMPTY(&handle->write_queue))
goto write_queue_drain;
uv__io_feed(handle->loop, &handle->io_watcher);
return;
}
#endif

static void uv__udp_sendmsg(uv_udp_t* handle) {
#else /* __linux__ || ____FreeBSD__ */
uv_udp_send_t* req;
struct msghdr h;
QUEUE* q;
ssize_t size;

#if HAVE_MMSG
uv_once(&once, uv__udp_mmsg_init);
if (uv__sendmmsg_avail) {
uv__udp_sendmmsg(handle);
return;
}
#endif

while (!QUEUE_EMPTY(&handle->write_queue)) {
q = QUEUE_HEAD(&handle->write_queue);
assert(q != NULL);
@@ -466,6 +418,7 @@ static void uv__udp_sendmsg(uv_udp_t* handle) {
QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
uv__io_feed(handle->loop, &handle->io_watcher);
}
#endif /* __linux__ || ____FreeBSD__ */
}

/* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional
@@ -495,7 +448,8 @@ static int uv__set_reuse(int fd) {
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
return UV__ERR(errno);
}
#elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__)
#elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__) && \
!defined(__sun__)
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
return UV__ERR(errno);
#else
@@ -1061,11 +1015,9 @@ int uv__udp_init_ex(uv_loop_t* loop,


int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
#if HAVE_MMSG
if (handle->flags & UV_HANDLE_UDP_RECVMMSG) {
uv_once(&once, uv__udp_mmsg_init);
return uv__recvmmsg_avail;
}
#if defined(__linux__) || defined(__FreeBSD__)
if (handle->flags & UV_HANDLE_UDP_RECVMMSG)
return 1;
#endif
return 0;
}
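On Linux and FreeBSD the runtime ENOSYS probe is gone and sendmmsg()/recvmmsg() are now called directly. A hedged sketch of the retry-on-EINTR batching loop used above, assuming a platform that ships sendmmsg (msgs and n would come from draining a write queue):

#define _GNU_SOURCE
#include <errno.h>
#include <sys/socket.h>

static int send_batch(int fd, struct mmsghdr* msgs, unsigned int n) {
  int npkts;

  do
    npkts = sendmmsg(fd, msgs, n, 0);  /* submit the whole batch at once */
  while (npkts == -1 && errno == EINTR);

  return npkts;  /* messages actually sent, or -1 with errno set */
}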
70 deps/libuv/src/uv-common.c vendored
@@ -128,6 +128,39 @@ int uv_replace_allocator(uv_malloc_func malloc_func,
return 0;
}


void uv_os_free_passwd(uv_passwd_t* pwd) {
if (pwd == NULL)
return;

/* On unix, the memory for name, shell, and homedir are allocated in a single
* uv__malloc() call. The base of the pointer is stored in pwd->username, so
* that is the field that needs to be freed.
*/
uv__free(pwd->username);
#ifdef _WIN32
uv__free(pwd->homedir);
#endif
pwd->username = NULL;
pwd->shell = NULL;
pwd->homedir = NULL;
}


void uv_os_free_group(uv_group_t *grp) {
if (grp == NULL)
return;

/* The memory for is allocated in a single uv__malloc() call. The base of the
* pointer is stored in grp->members, so that is the only field that needs to
* be freed.
*/
uv__free(grp->members);
grp->members = NULL;
grp->groupname = NULL;
}


#define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t);

size_t uv_handle_size(uv_handle_type type) {
@@ -650,14 +683,22 @@ static unsigned int* uv__get_nbufs(uv_fs_t* req) {

void uv__fs_scandir_cleanup(uv_fs_t* req) {
uv__dirent_t** dents;
unsigned int* nbufs;
unsigned int i;
unsigned int n;

unsigned int* nbufs = uv__get_nbufs(req);
if (req->result >= 0) {
dents = req->ptr;
nbufs = uv__get_nbufs(req);

dents = req->ptr;
if (*nbufs > 0 && *nbufs != (unsigned int) req->result)
(*nbufs)--;
for (; *nbufs < (unsigned int) req->result; (*nbufs)++)
uv__fs_scandir_free(dents[*nbufs]);
i = 0;
if (*nbufs > 0)
i = *nbufs - 1;

n = (unsigned int) req->result;
for (; i < n; i++)
uv__fs_scandir_free(dents[i]);
}

uv__fs_scandir_free(req->ptr);
req->ptr = NULL;
@@ -879,12 +920,17 @@ void uv_os_free_environ(uv_env_item_t* envitems, int count) {


void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
#ifdef __linux__
(void) &count;
uv__free(cpu_infos);
#else
int i;

for (i = 0; i < count; i++)
uv__free(cpu_infos[i].model);

uv__free(cpu_infos);
#endif /* __linux__ */
}


@@ -898,7 +944,7 @@ __attribute__((destructor))
void uv_library_shutdown(void) {
static int was_shutdown;

if (uv__load_relaxed(&was_shutdown))
if (uv__exchange_int_relaxed(&was_shutdown, 1))
return;

uv__process_title_cleanup();
@@ -909,7 +955,6 @@ void uv_library_shutdown(void) {
#else
uv__threadpool_cleanup();
#endif
uv__store_relaxed(&was_shutdown, 1);
}


@@ -955,6 +1000,15 @@ void uv__metrics_set_provider_entry_time(uv_loop_t* loop) {
}


int uv_metrics_info(uv_loop_t* loop, uv_metrics_t* metrics) {
memcpy(metrics,
&uv__get_loop_metrics(loop)->metrics,
sizeof(*metrics));

return 0;
}


uint64_t uv_metrics_idle_time(uv_loop_t* loop) {
uv__loop_metrics_t* loop_metrics;
uint64_t entry_time;
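uv_metrics_info() is new public API in 1.45.0; it copies the loop's event counters out into a caller-supplied struct. A small usage sketch:

#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_metrics_t m;

  uv_run(loop, UV_RUN_NOWAIT);  /* spin the loop at least once */
  if (uv_metrics_info(loop, &m) == 0)
    printf("iterations=%llu events=%llu waiting=%llu\n",
           (unsigned long long) m.loop_count,
           (unsigned long long) m.events,
           (unsigned long long) m.events_waiting);
  return 0;
}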
80 deps/libuv/src/uv-common.h vendored
@@ -30,18 +30,17 @@
#include <assert.h>
#include <stdarg.h>
#include <stddef.h>

#if defined(_MSC_VER) && _MSC_VER < 1600
# include "uv/stdint-msvc2008.h"
#else
# include <stdint.h>
#endif
#include <stdint.h>

#include "uv.h"
#include "uv/tree.h"
#include "queue.h"
#include "strscpy.h"

#ifndef _MSC_VER
# include <stdatomic.h>
#endif

#if EDOM > 0
# define UV__ERR(x) (-(x))
#else
@@ -53,19 +52,25 @@ extern int snprintf(char*, size_t, const char*, ...);
#endif

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define ARRAY_END(a) ((a) + ARRAY_SIZE(a))

#define container_of(ptr, type, member) \
((type *) ((char *) (ptr) - offsetof(type, member)))

/* C11 defines static_assert to be a macro which calls _Static_assert. */
#if defined(static_assert)
#define STATIC_ASSERT(expr) static_assert(expr, #expr)
#else
#define STATIC_ASSERT(expr) \
void uv__static_assert(int static_assert_failed[1 - 2 * !(expr)])
#endif

#if defined(__GNUC__) && (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 7)
#define uv__load_relaxed(p) __atomic_load_n(p, __ATOMIC_RELAXED)
#define uv__store_relaxed(p, v) __atomic_store_n(p, v, __ATOMIC_RELAXED)
#ifdef _MSC_VER
#define uv__exchange_int_relaxed(p, v) \
InterlockedExchangeNoFence((LONG volatile*)(p), v)
#else
#define uv__load_relaxed(p) (*p)
#define uv__store_relaxed(p, v) do *p = v; while (0)
#define uv__exchange_int_relaxed(p, v) \
atomic_exchange_explicit((_Atomic int*)(p), v, memory_order_relaxed)
#endif

#define UV__UDP_DGRAM_MAXSIZE (64 * 1024)
@@ -83,7 +88,6 @@ enum {
/* Used by streams. */
UV_HANDLE_LISTENING = 0x00000040,
UV_HANDLE_CONNECTION = 0x00000080,
UV_HANDLE_SHUTTING = 0x00000100,
UV_HANDLE_SHUT = 0x00000200,
UV_HANDLE_READ_PARTIAL = 0x00000400,
UV_HANDLE_READ_EOF = 0x00000800,
@@ -263,6 +267,14 @@ void uv__threadpool_cleanup(void);
#define uv__is_closing(h) \
(((h)->flags & (UV_HANDLE_CLOSING | UV_HANDLE_CLOSED)) != 0)

#if defined(_WIN32)
# define uv__is_stream_shutting(h) \
(h->stream.conn.shutdown_req != NULL)
#else
# define uv__is_stream_shutting(h) \
(h->shutdown_req != NULL)
#endif

#define uv__handle_start(h) \
do { \
if (((h)->flags & UV_HANDLE_ACTIVE) != 0) break; \
@@ -347,6 +359,21 @@ void uv__threadpool_cleanup(void);
#define uv__get_loop_metrics(loop) \
(&uv__get_internal_fields(loop)->loop_metrics)

#define uv__metrics_inc_loop_count(loop) \
do { \
uv__get_loop_metrics(loop)->metrics.loop_count++; \
} while (0)

#define uv__metrics_inc_events(loop, e) \
do { \
uv__get_loop_metrics(loop)->metrics.events += (e); \
} while (0)

#define uv__metrics_inc_events_waiting(loop, e) \
do { \
uv__get_loop_metrics(loop)->metrics.events_waiting += (e); \
} while (0)

/* Allocator prototypes */
void *uv__calloc(size_t count, size_t size);
char *uv__strdup(const char* s);
@@ -360,6 +387,7 @@ typedef struct uv__loop_metrics_s uv__loop_metrics_t;
typedef struct uv__loop_internal_fields_s uv__loop_internal_fields_t;

struct uv__loop_metrics_s {
uv_metrics_t metrics;
uint64_t provider_entry_time;
uint64_t provider_idle_time;
uv_mutex_t lock;
@@ -368,9 +396,37 @@ struct uv__loop_metrics_s {
void uv__metrics_update_idle_time(uv_loop_t* loop);
void uv__metrics_set_provider_entry_time(uv_loop_t* loop);

#ifdef __linux__
struct uv__iou {
uint32_t* sqhead;
uint32_t* sqtail;
uint32_t* sqarray;
uint32_t sqmask;
uint32_t* sqflags;
uint32_t* cqhead;
uint32_t* cqtail;
uint32_t cqmask;
void* sq; /* pointer to munmap() on event loop teardown */
void* cqe; /* pointer to array of struct uv__io_uring_cqe */
void* sqe; /* pointer to array of struct uv__io_uring_sqe */
size_t sqlen;
size_t cqlen;
size_t maxlen;
size_t sqelen;
int ringfd;
uint32_t in_flight;
};
#endif /* __linux__ */

struct uv__loop_internal_fields_s {
unsigned int flags;
uv__loop_metrics_t loop_metrics;
int current_timeout;
#ifdef __linux__
struct uv__iou ctl;
struct uv__iou iou;
void* inv; /* used by uv__platform_invalidate_fd() */
#endif /* __linux__ */
};

#endif /* UV_COMMON_H_ */
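uv__exchange_int_relaxed() lets uv_library_shutdown() claim its one-shot guard atomically instead of a separate load and store. The same idiom in portable C11, as a sketch rather than the libuv macro itself:

#include <stdatomic.h>

static _Atomic int was_shutdown;

void shutdown_once(void) {
  /* The first caller exchanges 0 -> 1 and proceeds; every later
   * caller sees 1 and returns without doing the teardown twice. */
  if (atomic_exchange_explicit(&was_shutdown, 1, memory_order_relaxed))
    return;
  /* ... release global resources exactly once ... */
}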
70 deps/libuv/src/win/core.c vendored
@@ -245,6 +245,9 @@ int uv_loop_init(uv_loop_t* loop) {
err = uv_mutex_init(&lfields->loop_metrics.lock);
if (err)
goto fail_metrics_mutex_init;
memset(&lfields->loop_metrics.metrics,
0,
sizeof(lfields->loop_metrics.metrics));

/* To prevent uninitialized memory access, loop->time must be initialized
* to zero before calling uv_update_time for the first time.
@@ -279,9 +282,6 @@ int uv_loop_init(uv_loop_t* loop) {

memset(&loop->poll_peer_sockets, 0, sizeof loop->poll_peer_sockets);

loop->active_tcp_streams = 0;
loop->active_udp_streams = 0;

loop->timer_counter = 0;
loop->stop_flag = 0;

@@ -424,6 +424,7 @@ int uv_backend_timeout(const uv_loop_t* loop) {


static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
uv__loop_internal_fields_t* lfields;
DWORD bytes;
ULONG_PTR key;
OVERLAPPED* overlapped;
@@ -433,9 +434,10 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
uint64_t user_timeout;
int reset_timeout;

lfields = uv__get_internal_fields(loop);
timeout_time = loop->time + timeout;

if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
@@ -450,6 +452,12 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);

/* Store the current timeout in a location that's globally accessible so
* other locations like uv__work_done() can determine whether the queue
* of events in the callback were waiting when poll was called.
*/
lfields->current_timeout = timeout;

GetQueuedCompletionStatus(loop->iocp,
&bytes,
&key,
@@ -457,6 +465,8 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
timeout);

if (reset_timeout != 0) {
if (overlapped && timeout == 0)
uv__metrics_inc_events_waiting(loop, 1);
timeout = user_timeout;
reset_timeout = 0;
}
@@ -469,6 +479,8 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
uv__metrics_update_idle_time(loop);

if (overlapped) {
uv__metrics_inc_events(loop, 1);

/* Package was dequeued */
req = uv__overlapped_to_req(overlapped);
uv__insert_pending_req(loop, req);
@@ -503,6 +515,7 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {


static void uv__poll(uv_loop_t* loop, DWORD timeout) {
uv__loop_internal_fields_t* lfields;
BOOL success;
uv_req_t* req;
OVERLAPPED_ENTRY overlappeds[128];
@@ -511,11 +524,13 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
int repeat;
uint64_t timeout_time;
uint64_t user_timeout;
uint64_t actual_timeout;
int reset_timeout;

lfields = uv__get_internal_fields(loop);
timeout_time = loop->time + timeout;

if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
@@ -524,12 +539,20 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
}

for (repeat = 0; ; repeat++) {
actual_timeout = timeout;

/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);

/* Store the current timeout in a location that's globally accessible so
* other locations like uv__work_done() can determine whether the queue
* of events in the callback were waiting when poll was called.
*/
lfields->current_timeout = timeout;

success = pGetQueuedCompletionStatusEx(loop->iocp,
overlappeds,
ARRAY_SIZE(overlappeds),
@@ -543,9 +566,9 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
}

/* Placed here because on success the loop will break whether there is an
* empty package or not, or if GetQueuedCompletionStatus returned early then
* the timeout will be updated and the loop will run again. In either case
* the idle time will need to be updated.
* empty package or not, or if pGetQueuedCompletionStatusEx returned early
* then the timeout will be updated and the loop will run again. In either
* case the idle time will need to be updated.
*/
uv__metrics_update_idle_time(loop);

@@ -555,6 +578,10 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
* meant only to wake us up.
*/
if (overlappeds[i].lpOverlapped) {
uv__metrics_inc_events(loop, 1);
if (actual_timeout == 0)
uv__metrics_inc_events_waiting(loop, 1);

req = uv__overlapped_to_req(overlappeds[i].lpOverlapped);
uv__insert_pending_req(loop, req);
}
@@ -598,10 +625,17 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
if (!r)
uv_update_time(loop);

while (r != 0 && loop->stop_flag == 0) {
uv_update_time(loop);
/* Maintain backwards compatibility by processing timers before entering the
* while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed
* once, which should be done after polling in order to maintain proper
* execution order of the conceptual event loop. */
if (mode == UV_RUN_DEFAULT) {
if (r)
uv_update_time(loop);
uv__run_timers(loop);
}

while (r != 0 && loop->stop_flag == 0) {
can_sleep = loop->pending_reqs_tail == NULL && loop->idle_handles == NULL;

uv__process_reqs(loop);
@@ -612,6 +646,8 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
timeout = uv_backend_timeout(loop);

uv__metrics_inc_loop_count(loop);

if (pGetQueuedCompletionStatusEx)
uv__poll(loop, timeout);
else
@@ -632,18 +668,8 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
uv__check_invoke(loop);
uv__process_endgames(loop);

if (mode == UV_RUN_ONCE) {
/* UV_RUN_ONCE implies forward progress: at least one callback must have
* been invoked when it returns. uv__io_poll() can return without doing
* I/O (meaning: no callbacks) when its timeout expires - which means we
* have pending timers that satisfy the forward progress constraint.
*
* UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
* the check.
*/
uv_update_time(loop);
uv__run_timers(loop);
}
uv_update_time(loop);
uv__run_timers(loop);

r = uv__loop_alive(loop);
if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
67 deps/libuv/src/win/fs.c vendored
@@ -36,6 +36,8 @@
#include "handle-inl.h"
#include "fs-fd-hash-inl.h"

#include <winioctl.h>


#define UV_FS_FREE_PATHS 0x0002
#define UV_FS_FREE_PTR 0x0008
@@ -1706,11 +1708,36 @@ void fs__closedir(uv_fs_t* req) {

INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf,
int do_lstat) {
FILE_FS_DEVICE_INFORMATION device_info;
FILE_ALL_INFORMATION file_info;
FILE_FS_VOLUME_INFORMATION volume_info;
NTSTATUS nt_status;
IO_STATUS_BLOCK io_status;

nt_status = pNtQueryVolumeInformationFile(handle,
&io_status,
&device_info,
sizeof device_info,
FileFsDeviceInformation);

/* Buffer overflow (a warning status code) is expected here. */
if (NT_ERROR(nt_status)) {
SetLastError(pRtlNtStatusToDosError(nt_status));
return -1;
}

/* If it's NUL device set fields as reasonable as possible and return. */
if (device_info.DeviceType == FILE_DEVICE_NULL) {
memset(statbuf, 0, sizeof(uv_stat_t));
statbuf->st_mode = _S_IFCHR;
statbuf->st_mode |= (_S_IREAD | _S_IWRITE) | ((_S_IREAD | _S_IWRITE) >> 3) |
((_S_IREAD | _S_IWRITE) >> 6);
statbuf->st_nlink = 1;
statbuf->st_blksize = 4096;
statbuf->st_rdev = FILE_DEVICE_NULL << 16;
return 0;
}

nt_status = pNtQueryInformationFile(handle,
&io_status,
&file_info,
@@ -1915,6 +1942,37 @@ INLINE static void fs__stat_impl(uv_fs_t* req, int do_lstat) {
}


INLINE static int fs__fstat_handle(int fd, HANDLE handle, uv_stat_t* statbuf) {
DWORD file_type;

/* Each file type is processed differently. */
file_type = uv_guess_handle(fd);
switch (file_type) {
/* Disk files use the existing logic from fs__stat_handle. */
case UV_FILE:
return fs__stat_handle(handle, statbuf, 0);

/* Devices and pipes are processed identically. There is no more information
* for them from any API. Fields are set as reasonably as possible and the
* function returns. */
case UV_TTY:
case UV_NAMED_PIPE:
memset(statbuf, 0, sizeof(uv_stat_t));
statbuf->st_mode = file_type == UV_TTY ? _S_IFCHR : _S_IFIFO;
statbuf->st_nlink = 1;
statbuf->st_rdev = (file_type == UV_TTY ? FILE_DEVICE_CONSOLE : FILE_DEVICE_NAMED_PIPE) << 16;
statbuf->st_ino = (uint64_t) handle;
return 0;

/* If file type is unknown it is an error. */
case UV_UNKNOWN_HANDLE:
default:
SetLastError(ERROR_INVALID_HANDLE);
return -1;
}
}


static void fs__stat(uv_fs_t* req) {
fs__stat_prepare_path(req->file.pathw);
fs__stat_impl(req, 0);
@@ -1940,7 +1998,7 @@ static void fs__fstat(uv_fs_t* req) {
return;
}

if (fs__stat_handle(handle, &req->statbuf, 0) != 0) {
if (fs__fstat_handle(fd, handle, &req->statbuf) != 0) {
SET_REQ_WIN32_ERROR(req, GetLastError());
return;
}
@@ -2221,7 +2279,7 @@ static void fs__fchmod(uv_fs_t* req) {
SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(nt_status));
goto fchmod_cleanup;
}
/* Remeber to clear the flag later on */
/* Remember to clear the flag later on */
clear_archive_flag = 1;
} else {
clear_archive_flag = 0;
@@ -2604,7 +2662,10 @@ static void fs__readlink(uv_fs_t* req) {
}

if (fs__readlink_handle(handle, (char**) &req->ptr, NULL) != 0) {
SET_REQ_WIN32_ERROR(req, GetLastError());
DWORD error = GetLastError();
SET_REQ_WIN32_ERROR(req, error);
if (error == ERROR_NOT_A_REPARSE_POINT)
req->result = UV_EINVAL;
CloseHandle(handle);
return;
}
1 deps/libuv/src/win/internal.h vendored
@@ -267,7 +267,6 @@ void uv__util_init(void);

uint64_t uv__hrtime(unsigned int scale);
__declspec(noreturn) void uv_fatal_error(const int errorno, const char* syscall);
int uv__getpwuid_r(uv_passwd_t* pwd);
int uv__convert_utf16_to_utf8(const WCHAR* utf16, int utf16len, char** utf8);
int uv__convert_utf8_to_utf16(const char* utf8, int utf8len, WCHAR** utf16);
48 deps/libuv/src/win/pipe.c vendored
@@ -792,15 +792,17 @@ static DWORD WINAPI pipe_connect_thread_proc(void* parameter) {

/* We're here because CreateFile on a pipe returned ERROR_PIPE_BUSY. We wait
* up to 30 seconds for the pipe to become available with WaitNamedPipe. */
while (WaitNamedPipeW(handle->name, 30000)) {
while (WaitNamedPipeW(req->u.connect.name, 30000)) {
/* The pipe is now available, try to connect. */
pipeHandle = open_named_pipe(handle->name, &duplex_flags);
pipeHandle = open_named_pipe(req->u.connect.name, &duplex_flags);
if (pipeHandle != INVALID_HANDLE_VALUE)
break;

SwitchToThread();
}

uv__free(req->u.connect.name);
req->u.connect.name = NULL;
if (pipeHandle != INVALID_HANDLE_VALUE) {
SET_REQ_SUCCESS(req);
req->u.connect.pipeHandle = pipeHandle;
@@ -828,6 +830,7 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
req->cb = cb;
req->u.connect.pipeHandle = INVALID_HANDLE_VALUE;
req->u.connect.duplex_flags = 0;
req->u.connect.name = NULL;

if (handle->flags & UV_HANDLE_PIPESERVER) {
err = ERROR_INVALID_PARAMETER;
@@ -859,10 +862,19 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
pipeHandle = open_named_pipe(handle->name, &duplex_flags);
if (pipeHandle == INVALID_HANDLE_VALUE) {
if (GetLastError() == ERROR_PIPE_BUSY) {
req->u.connect.name = uv__malloc(nameSize);
if (!req->u.connect.name) {
uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
}

memcpy(req->u.connect.name, handle->name, nameSize);

/* Wait for the server to make a pipe instance available. */
if (!QueueUserWorkItem(&pipe_connect_thread_proc,
req,
WT_EXECUTELONGFUNCTION)) {
uv__free(req->u.connect.name);
req->u.connect.name = NULL;
err = GetLastError();
goto error;
}
@@ -1067,11 +1079,12 @@ int uv__pipe_accept(uv_pipe_t* server, uv_stream_t* client) {

err = uv__tcp_xfer_import(
(uv_tcp_t*) client, item->xfer_type, &item->xfer_info);

uv__free(item);

if (err != 0)
return err;

uv__free(item);

} else {
pipe_client = (uv_pipe_t*) client;
uv__pipe_connection_init(pipe_client);
@@ -1638,9 +1651,13 @@ static DWORD uv__pipe_get_ipc_remote_pid(uv_pipe_t* handle) {
/* If the both ends of the IPC pipe are owned by the same process,
* the remote end pid may not yet be set. If so, do it here.
* TODO: this is weird; it'd probably better to use a handshake. */
if (*pid == 0)
*pid = GetCurrentProcessId();

if (*pid == 0) {
GetNamedPipeClientProcessId(handle->handle, pid);
if (*pid == GetCurrentProcessId()) {
GetNamedPipeServerProcessId(handle->handle, pid);
}
}

return *pid;
}

@@ -2069,9 +2086,9 @@ void uv__process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
uv__queue_non_overlapped_write(handle);
}

if (handle->stream.conn.write_reqs_pending == 0)
if (handle->flags & UV_HANDLE_SHUTTING)
uv__pipe_shutdown(loop, handle, handle->stream.conn.shutdown_req);
if (handle->stream.conn.write_reqs_pending == 0 &&
uv__is_stream_shutting(handle))
uv__pipe_shutdown(loop, handle, handle->stream.conn.shutdown_req);

DECREASE_PENDING_REQ_COUNT(handle);
}
@@ -2126,7 +2143,10 @@ void uv__process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
if (REQ_SUCCESS(req)) {
pipeHandle = req->u.connect.pipeHandle;
duplex_flags = req->u.connect.duplex_flags;
err = uv__set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags);
if (handle->flags & UV_HANDLE_CLOSING)
err = UV_ECANCELED;
else
err = uv__set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags);
if (err)
CloseHandle(pipeHandle);
} else {
@@ -2149,7 +2169,6 @@ void uv__process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,

/* Clear the shutdown_req field so we don't go here again. */
handle->stream.conn.shutdown_req = NULL;
handle->flags &= ~UV_HANDLE_SHUTTING;
UNREGISTER_HANDLE_REQ(loop, handle, req);

if (handle->flags & UV_HANDLE_CLOSING) {
@@ -2342,7 +2361,10 @@ int uv_pipe_open(uv_pipe_t* pipe, uv_file file) {

if (pipe->ipc) {
assert(!(pipe->flags & UV_HANDLE_NON_OVERLAPPED_PIPE));
pipe->pipe.conn.ipc_remote_pid = uv_os_getppid();
GetNamedPipeClientProcessId(os_handle, &pipe->pipe.conn.ipc_remote_pid);
if (pipe->pipe.conn.ipc_remote_pid == GetCurrentProcessId()) {
GetNamedPipeServerProcessId(os_handle, &pipe->pipe.conn.ipc_remote_pid);
}
assert(pipe->pipe.conn.ipc_remote_pid != (DWORD)(uv_pid_t) -1);
}
return 0;
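The IPC remote-pid logic now asks the pipe itself instead of assuming the parent process. A sketch of that discovery order using the Win32 named-pipe APIs (helper name is illustrative):

#include <windows.h>

/* Ask for the client end's pid first; if that turns out to be us,
 * the peer must be the server end. */
static DWORD pipe_peer_pid(HANDLE pipe) {
  ULONG pid = 0;
  GetNamedPipeClientProcessId(pipe, &pid);
  if (pid == GetCurrentProcessId())
    GetNamedPipeServerProcessId(pipe, &pid);
  return pid;
}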
5 deps/libuv/src/win/poll.c vendored
@@ -425,9 +425,8 @@ int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
return uv_translate_sys_error(WSAGetLastError());

/* Try to obtain a base handle for the socket. This increases this chances that
* we find an AFD handle and are able to use the fast poll mechanism. This will
* always fail on windows XP/2k3, since they don't support the. SIO_BASE_HANDLE
* ioctl. */
* we find an AFD handle and are able to use the fast poll mechanism.
*/
#ifndef NDEBUG
base_socket = INVALID_SOCKET;
#endif
137 deps/libuv/src/win/process.c vendored
@ -32,6 +32,9 @@
|
||||
#include "internal.h"
|
||||
#include "handle-inl.h"
|
||||
#include "req-inl.h"
|
||||
#include <dbghelp.h>
|
||||
#include <shlobj.h>
|
||||
#include <psapi.h> /* GetModuleBaseNameW */
|
||||
|
||||
|
||||
#define SIGKILL 9
|
||||
@ -144,7 +147,6 @@ static void uv__process_init(uv_loop_t* loop, uv_process_t* handle) {
|
||||
handle->exit_signal = 0;
|
||||
handle->wait_handle = INVALID_HANDLE_VALUE;
|
||||
handle->process_handle = INVALID_HANDLE_VALUE;
|
||||
handle->child_stdio_buffer = NULL;
|
||||
handle->exit_cb_pending = 0;
|
||||
|
||||
UV_REQ_INIT(&handle->exit_req, UV_PROCESS_EXIT);
|
||||
@ -947,9 +949,11 @@ int uv_spawn(uv_loop_t* loop,
|
||||
STARTUPINFOW startup;
|
||||
PROCESS_INFORMATION info;
|
||||
DWORD process_flags;
|
||||
BYTE* child_stdio_buffer;
|
||||
|
||||
uv__process_init(loop, process);
|
||||
process->exit_cb = options->exit_cb;
|
||||
child_stdio_buffer = NULL;
|
||||
|
||||
if (options->flags & (UV_PROCESS_SETGID | UV_PROCESS_SETUID)) {
|
||||
return UV_ENOTSUP;
|
||||
@ -1040,7 +1044,7 @@ int uv_spawn(uv_loop_t* loop,
|
||||
}
|
||||
}
|
||||
|
||||
err = uv__stdio_create(loop, options, &process->child_stdio_buffer);
|
||||
err = uv__stdio_create(loop, options, &child_stdio_buffer);
|
||||
   if (err)
     goto done;
@@ -1059,12 +1063,12 @@ int uv_spawn(uv_loop_t* loop,
   startup.lpTitle = NULL;
   startup.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW;

-  startup.cbReserved2 = uv__stdio_size(process->child_stdio_buffer);
-  startup.lpReserved2 = (BYTE*) process->child_stdio_buffer;
+  startup.cbReserved2 = uv__stdio_size(child_stdio_buffer);
+  startup.lpReserved2 = (BYTE*) child_stdio_buffer;

-  startup.hStdInput = uv__stdio_handle(process->child_stdio_buffer, 0);
-  startup.hStdOutput = uv__stdio_handle(process->child_stdio_buffer, 1);
-  startup.hStdError = uv__stdio_handle(process->child_stdio_buffer, 2);
+  startup.hStdInput = uv__stdio_handle(child_stdio_buffer, 0);
+  startup.hStdOutput = uv__stdio_handle(child_stdio_buffer, 1);
+  startup.hStdError = uv__stdio_handle(child_stdio_buffer, 2);

   process_flags = CREATE_UNICODE_ENVIRONMENT;

@@ -1178,10 +1182,10 @@ int uv_spawn(uv_loop_t* loop,
   uv__free(env);
   uv__free(alloc_path);

-  if (process->child_stdio_buffer != NULL) {
+  if (child_stdio_buffer != NULL) {
     /* Clean up child stdio handles. */
-    uv__stdio_destroy(process->child_stdio_buffer);
-    process->child_stdio_buffer = NULL;
+    uv__stdio_destroy(child_stdio_buffer);
+    child_stdio_buffer = NULL;
   }

   return uv_translate_sys_error(err);
@@ -1193,7 +1197,120 @@ static int uv__kill(HANDLE process_handle, int signum) {
     return UV_EINVAL;
   }

+  /* Create a dump file for the targeted process, if the registry key
+   * `HKLM:Software\Microsoft\Windows\Windows Error Reporting\LocalDumps`
+   * exists. The location of the dumps can be influenced by the `DumpFolder`
+   * sub-key, which has a default value of `%LOCALAPPDATA%\CrashDumps`, see [0]
+   * for more detail. Note that if the dump folder does not exist, we attempt
+   * to create it, to match behavior with WER itself.
+   * [0]: https://learn.microsoft.com/en-us/windows/win32/wer/collecting-user-mode-dumps */
+  if (signum == SIGQUIT) {
+    HKEY registry_key;
+    DWORD pid, ret;
+    WCHAR basename[MAX_PATH];
+
+    /* Get target process name. */
+    GetModuleBaseNameW(process_handle, NULL, &basename[0], sizeof(basename));
+
+    /* Get PID of target process. */
+    pid = GetProcessId(process_handle);
+
+    /* Get LocalDumps directory path. */
+    ret = RegOpenKeyExW(
+        HKEY_LOCAL_MACHINE,
+        L"SOFTWARE\\Microsoft\\Windows\\Windows Error Reporting\\LocalDumps",
+        0,
+        KEY_QUERY_VALUE,
+        &registry_key);
+    if (ret == ERROR_SUCCESS) {
+      HANDLE hDumpFile = NULL;
+      WCHAR dump_folder[MAX_PATH], dump_name[MAX_PATH];
+      DWORD dump_folder_len = sizeof(dump_folder), key_type = 0;
+      ret = RegGetValueW(registry_key,
+                         NULL,
+                         L"DumpFolder",
+                         RRF_RT_ANY,
+                         &key_type,
+                         (PVOID) dump_folder,
+                         &dump_folder_len);
+      if (ret != ERROR_SUCCESS) {
+        /* Default value for `dump_folder` is `%LOCALAPPDATA%\CrashDumps`. */
+        WCHAR* localappdata;
+        SHGetKnownFolderPath(&FOLDERID_LocalAppData, 0, NULL, &localappdata);
+        _snwprintf_s(dump_folder,
+                     sizeof(dump_folder),
+                     _TRUNCATE,
+                     L"%ls\\CrashDumps",
+                     localappdata);
+        CoTaskMemFree(localappdata);
+      }
+      RegCloseKey(registry_key);
+
+      /* Create dump folder if it doesn't already exist. */
+      CreateDirectoryW(dump_folder, NULL);
+
+      /* Construct dump filename from process name and PID. */
+      _snwprintf_s(dump_name,
+                   sizeof(dump_name),
+                   _TRUNCATE,
+                   L"%ls\\%ls.%d.dmp",
+                   dump_folder,
+                   basename,
+                   pid);
+
+      hDumpFile = CreateFileW(dump_name,
+                              GENERIC_WRITE,
+                              0,
+                              NULL,
+                              CREATE_NEW,
+                              FILE_ATTRIBUTE_NORMAL,
+                              NULL);
+      if (hDumpFile != INVALID_HANDLE_VALUE) {
+        DWORD dump_options, sym_options;
+        FILE_DISPOSITION_INFO DeleteOnClose = { TRUE };
+
+        /* If something goes wrong while writing it out, delete the file. */
+        SetFileInformationByHandle(hDumpFile,
+                                   FileDispositionInfo,
+                                   &DeleteOnClose,
+                                   sizeof(DeleteOnClose));
+
+        /* Tell wine to dump ELF modules as well. */
+        sym_options = SymGetOptions();
+        SymSetOptions(sym_options | 0x40000000);
+
+        /* MiniDumpWithAvxXStateContext might be undef in server2012r2 or mingw < 12 */
+#ifndef MiniDumpWithAvxXStateContext
+#define MiniDumpWithAvxXStateContext 0x00200000
+#endif
+        /* We default to a fairly complete dump. In the future, we may want to
+         * allow clients to customize what kind of dump to create. */
+        dump_options = MiniDumpWithFullMemory |
+                       MiniDumpIgnoreInaccessibleMemory |
+                       MiniDumpWithAvxXStateContext;
+
+        if (MiniDumpWriteDump(process_handle,
+                              pid,
+                              hDumpFile,
+                              dump_options,
+                              NULL,
+                              NULL,
+                              NULL)) {
+          /* Don't delete the file on close if we successfully wrote it out. */
+          FILE_DISPOSITION_INFO DontDeleteOnClose = { FALSE };
+          SetFileInformationByHandle(hDumpFile,
+                                     FileDispositionInfo,
+                                     &DontDeleteOnClose,
+                                     sizeof(DontDeleteOnClose));
+        }
+        SymSetOptions(sym_options);
+        CloseHandle(hDumpFile);
+      }
+    }
+  }
+
   switch (signum) {
     case SIGQUIT:
     case SIGTERM:
     case SIGKILL:
     case SIGINT: {
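Note on the uv__kill() addition above: as of libuv 1.45.0, sending SIGQUIT to a
process on Windows first tries to write a minidump (only when the WER LocalDumps
registry key exists) and then falls through to the normal termination switch
below it. A minimal embedder-side sketch, assuming a previously spawned
uv_process_t (the `child` handle is a placeholder, not part of this commit):

/* Request a dump of a child, then terminate it (Windows, libuv >= 1.45.0).
 * The dump lands in the LocalDumps DumpFolder, which defaults to
 * %LOCALAPPDATA%\CrashDumps, as <image name>.<pid>.dmp. */
#include <uv.h>

static int dump_and_kill(uv_process_t* child) {
  /* SIGQUIT does not exist natively on Windows; libuv emulates it and, with
   * this change, writes the minidump before killing the target process. */
  return uv_process_kill(child, SIGQUIT);
}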
deps/libuv/src/win/stream.c (vendored, 3 changed lines)
@@ -204,7 +204,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
   uv_loop_t* loop = handle->loop;

   if (!(handle->flags & UV_HANDLE_WRITABLE) ||
-      handle->flags & UV_HANDLE_SHUTTING ||
+      uv__is_stream_shutting(handle) ||
       uv__is_closing(handle)) {
     return UV_ENOTCONN;
   }
@@ -214,7 +214,6 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
   req->cb = cb;

   handle->flags &= ~UV_HANDLE_WRITABLE;
-  handle->flags |= UV_HANDLE_SHUTTING;
   handle->stream.conn.shutdown_req = req;
   handle->reqs_pending++;
   REGISTER_HANDLE_REQ(loop, handle, req);
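The UV_HANDLE_SHUTTING flag dropped here (and in tcp.c and tty.c below) is
superseded by uv__is_stream_shutting(). Judging from the bookkeeping in this
diff, where uv_shutdown() stores the request in stream.conn.shutdown_req and
the completion paths null it out, the helper plausibly reduces to a pointer
test along these lines (an assumption; the real definition lives in libuv's
src/uv-common.h, which is not part of this excerpt):

/* Hypothetical sketch: a stream is "shutting" while a shutdown request is
 * queued but not yet processed, so the separate flag became redundant. */
#define uv__is_stream_shutting(h) ((h)->stream.conn.shutdown_req != NULL)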
deps/libuv/src/win/tcp.c (vendored, 44 changed lines)
@@ -29,14 +29,6 @@
 #include "req-inl.h"


-/*
- * Threshold of active tcp streams for which to preallocate tcp read buffers.
- * (Due to node slab allocator performing poorly under this pattern,
- * the optimization is temporarily disabled (threshold=0). This will be
- * revisited once node allocator is improved.)
- */
-const unsigned int uv_active_tcp_streams_threshold = 0;
-
 /*
  * Number of simultaneous pending AcceptEx calls.
  */
@@ -214,7 +206,6 @@ void uv__process_tcp_shutdown_req(uv_loop_t* loop, uv_tcp_t* stream, uv_shutdown
   assert(stream->flags & UV_HANDLE_CONNECTION);

   stream->stream.conn.shutdown_req = NULL;
-  stream->flags &= ~UV_HANDLE_SHUTTING;
   UNREGISTER_HANDLE_REQ(loop, stream, req);

   err = 0;
@@ -274,7 +265,6 @@ void uv__tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
   }

   uv__handle_close(handle);
-  loop->active_tcp_streams--;
 }


@@ -484,26 +474,9 @@ static void uv__tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
   req = &handle->read_req;
   memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));

-  /*
-   * Preallocate a read buffer if the number of active streams is below
-   * the threshold.
-   */
-  if (loop->active_tcp_streams < uv_active_tcp_streams_threshold) {
-    handle->flags &= ~UV_HANDLE_ZERO_READ;
-    handle->tcp.conn.read_buffer = uv_buf_init(NULL, 0);
-    handle->alloc_cb((uv_handle_t*) handle, 65536, &handle->tcp.conn.read_buffer);
-    if (handle->tcp.conn.read_buffer.base == NULL ||
-        handle->tcp.conn.read_buffer.len == 0) {
-      handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &handle->tcp.conn.read_buffer);
-      return;
-    }
-    assert(handle->tcp.conn.read_buffer.base != NULL);
-    buf = handle->tcp.conn.read_buffer;
-  } else {
-    handle->flags |= UV_HANDLE_ZERO_READ;
-    buf.base = (char*) &uv_zero_;
-    buf.len = 0;
-  }
+  handle->flags |= UV_HANDLE_ZERO_READ;
+  buf.base = (char*) &uv_zero_;
+  buf.len = 0;

   /* Prepare the overlapped structure. */
   memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
@@ -550,7 +523,7 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
   struct linger l = { 1, 0 };

   /* Disallow setting SO_LINGER to zero due to some platform inconsistencies */
-  if (handle->flags & UV_HANDLE_SHUTTING)
+  if (uv__is_stream_shutting(handle))
     return UV_EINVAL;

   if (0 != setsockopt(handle->socket, SOL_SOCKET, SO_LINGER, (const char*)&l, sizeof(l)))
@@ -654,7 +627,6 @@ int uv__tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {


 int uv__tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
-  uv_loop_t* loop = server->loop;
   int err = 0;
   int family;

@@ -716,8 +688,6 @@ int uv__tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
     }
   }

-  loop->active_tcp_streams++;
-
   return err;
 }

@@ -1163,7 +1133,7 @@ void uv__process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
       closesocket(handle->socket);
       handle->socket = INVALID_SOCKET;
     }
-    if (handle->flags & UV_HANDLE_SHUTTING)
+    if (uv__is_stream_shutting(handle))
       uv__process_tcp_shutdown_req(loop,
                                    handle,
                                    handle->stream.conn.shutdown_req);
@@ -1248,7 +1218,6 @@ void uv__process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
                     0) == 0) {
       uv__connection_init((uv_stream_t*)handle);
       handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
-      loop->active_tcp_streams++;
     } else {
       err = WSAGetLastError();
     }
@@ -1331,7 +1300,6 @@ int uv__tcp_xfer_import(uv_tcp_t* tcp,
     tcp->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
   }

-  tcp->loop->active_tcp_streams++;
   return 0;
 }

@@ -1432,7 +1400,7 @@ static void uv__tcp_try_cancel_reqs(uv_tcp_t* tcp) {
                     uv_tcp_non_ifs_lsp_ipv4;

   /* If there are non-ifs LSPs then try to obtain a base handle for the socket.
-   * This will always fail on Windows XP/3k. */
+   */
   if (non_ifs_lsp) {
     DWORD bytes;
     if (WSAIoctl(socket,
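With uv_active_tcp_streams_threshold gone, uv__tcp_queue_read() above always
arms a zero-length read: libuv waits for the socket to become readable and only
then asks the application for a buffer, so no memory is parked on idle
connections. A hedged sketch of the alloc callback pattern this favors (names
are illustrative, not from this commit):

/* alloc_cb now only fires when data is ready, so allocating per call is
 * reasonable; returning len == 0 makes libuv report UV_ENOBUFS. */
#include <stdlib.h>
#include <uv.h>

static void on_alloc(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
  buf->base = malloc(suggested);
  buf->len = buf->base == NULL ? 0 : suggested;
}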
deps/libuv/src/win/thread.c (vendored, 139 changed lines)
@@ -180,6 +180,81 @@ int uv_thread_create_ex(uv_thread_t* tid,
   return UV_EIO;
 }

+int uv_thread_setaffinity(uv_thread_t* tid,
+                          char* cpumask,
+                          char* oldmask,
+                          size_t mask_size) {
+  int i;
+  HANDLE hproc;
+  DWORD_PTR procmask;
+  DWORD_PTR sysmask;
+  DWORD_PTR threadmask;
+  DWORD_PTR oldthreadmask;
+  int cpumasksize;
+
+  cpumasksize = uv_cpumask_size();
+  assert(cpumasksize > 0);
+  if (mask_size < (size_t)cpumasksize)
+    return UV_EINVAL;
+
+  hproc = GetCurrentProcess();
+  if (!GetProcessAffinityMask(hproc, &procmask, &sysmask))
+    return uv_translate_sys_error(GetLastError());
+
+  threadmask = 0;
+  for (i = 0; i < cpumasksize; i++) {
+    if (cpumask[i]) {
+      if (procmask & (1 << i))
+        threadmask |= 1 << i;
+      else
+        return UV_EINVAL;
+    }
+  }
+
+  oldthreadmask = SetThreadAffinityMask(*tid, threadmask);
+  if (oldthreadmask == 0)
+    return uv_translate_sys_error(GetLastError());
+
+  if (oldmask != NULL) {
+    for (i = 0; i < cpumasksize; i++)
+      oldmask[i] = (oldthreadmask >> i) & 1;
+  }
+
+  return 0;
+}
+
+int uv_thread_getaffinity(uv_thread_t* tid,
+                          char* cpumask,
+                          size_t mask_size) {
+  int i;
+  HANDLE hproc;
+  DWORD_PTR procmask;
+  DWORD_PTR sysmask;
+  DWORD_PTR threadmask;
+  int cpumasksize;
+
+  cpumasksize = uv_cpumask_size();
+  assert(cpumasksize > 0);
+  if (mask_size < (size_t)cpumasksize)
+    return UV_EINVAL;
+
+  hproc = GetCurrentProcess();
+  if (!GetProcessAffinityMask(hproc, &procmask, &sysmask))
+    return uv_translate_sys_error(GetLastError());
+
+  threadmask = SetThreadAffinityMask(*tid, procmask);
+  if (threadmask == 0 || SetThreadAffinityMask(*tid, threadmask) == 0)
+    return uv_translate_sys_error(GetLastError());
+
+  for (i = 0; i < cpumasksize; i++)
+    cpumask[i] = (threadmask >> i) & 1;
+
+  return 0;
+}
+
+int uv_thread_getcpu(void) {
+  return GetCurrentProcessorNumber();
+}
+
 uv_thread_t uv_thread_self(void) {
   uv_thread_t key;
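Usage sketch for the affinity API introduced above (new public functions in
libuv 1.45.0); error handling is abbreviated and the choice of CPU 0 is
illustrative:

#include <stdlib.h>
#include <uv.h>

static int pin_current_thread_to_cpu0(void) {
  uv_thread_t self = uv_thread_self();
  int size = uv_cpumask_size();   /* one flag byte per CPU, as in the loop above */
  char* mask;
  int r;

  if (size <= 0)
    return size;                  /* negative uv error code, e.g. UV_ENOTSUP */

  mask = calloc(size, 1);
  if (mask == NULL)
    return UV_ENOMEM;

  mask[0] = 1;                    /* allow CPU 0 only */
  r = uv_thread_setaffinity(&self, mask, NULL, size);
  free(mask);
  return r;
}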
@@ -374,6 +449,7 @@ void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
     abort();
 }

+
 int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
   if (SleepConditionVariableCS(&cond->cond_var, mutex, (DWORD)(timeout / 1e6)))
     return 0;
@@ -383,69 +459,6 @@ int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
 }


-int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
-  int err;
-
-  barrier->n = count;
-  barrier->count = 0;
-
-  err = uv_mutex_init(&barrier->mutex);
-  if (err)
-    return err;
-
-  err = uv_sem_init(&barrier->turnstile1, 0);
-  if (err)
-    goto error2;
-
-  err = uv_sem_init(&barrier->turnstile2, 1);
-  if (err)
-    goto error;
-
-  return 0;
-
-error:
-  uv_sem_destroy(&barrier->turnstile1);
-error2:
-  uv_mutex_destroy(&barrier->mutex);
-  return err;
-
-}
-
-
-void uv_barrier_destroy(uv_barrier_t* barrier) {
-  uv_sem_destroy(&barrier->turnstile2);
-  uv_sem_destroy(&barrier->turnstile1);
-  uv_mutex_destroy(&barrier->mutex);
-}
-
-
-int uv_barrier_wait(uv_barrier_t* barrier) {
-  int serial_thread;
-
-  uv_mutex_lock(&barrier->mutex);
-  if (++barrier->count == barrier->n) {
-    uv_sem_wait(&barrier->turnstile2);
-    uv_sem_post(&barrier->turnstile1);
-  }
-  uv_mutex_unlock(&barrier->mutex);
-
-  uv_sem_wait(&barrier->turnstile1);
-  uv_sem_post(&barrier->turnstile1);
-
-  uv_mutex_lock(&barrier->mutex);
-  serial_thread = (--barrier->count == 0);
-  if (serial_thread) {
-    uv_sem_wait(&barrier->turnstile1);
-    uv_sem_post(&barrier->turnstile2);
-  }
-  uv_mutex_unlock(&barrier->mutex);
-
-  uv_sem_wait(&barrier->turnstile2);
-  uv_sem_post(&barrier->turnstile2);
-  return serial_thread;
-}
-
-
 int uv_key_create(uv_key_t* key) {
   key->tls_index = TlsAlloc();
   if (key->tls_index == TLS_OUT_OF_INDEXES)
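The two-turnstile barrier deleted above does not leave uv_barrier without an
implementation: this commit's file list includes a new src/thread-common.c,
which presumably hosts the shared replacement used by both Unix and Windows.
The public contract is unchanged; a minimal usage sketch:

#include <uv.h>

/* uv_barrier_wait() returns a value > 0 in exactly one (arbitrary) thread;
 * by convention that "serial" thread tears the barrier down. */
static void rendezvous(uv_barrier_t* b) {
  if (uv_barrier_wait(b) > 0)
    uv_barrier_destroy(b);
}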
deps/libuv/src/win/tty.c (vendored, 25 changed lines)
@@ -23,12 +23,7 @@
 #include <io.h>
 #include <string.h>
 #include <stdlib.h>
-
-#if defined(_MSC_VER) && _MSC_VER < 1600
-# include "uv/stdint-msvc2008.h"
-#else
-# include <stdint.h>
-#endif
+#include <stdint.h>

 #ifndef COMMON_LVB_REVERSE_VIDEO
 # define COMMON_LVB_REVERSE_VIDEO 0x4000
@@ -175,14 +170,14 @@ void uv__console_init(void) {
                          0);
   if (uv__tty_console_handle != INVALID_HANDLE_VALUE) {
     CONSOLE_SCREEN_BUFFER_INFO sb_info;
-    QueueUserWorkItem(uv__tty_console_resize_message_loop_thread,
-                      NULL,
-                      WT_EXECUTELONGFUNCTION);
     uv_mutex_init(&uv__tty_console_resize_mutex);
     if (GetConsoleScreenBufferInfo(uv__tty_console_handle, &sb_info)) {
       uv__tty_console_width = sb_info.dwSize.X;
       uv__tty_console_height = sb_info.srWindow.Bottom - sb_info.srWindow.Top + 1;
     }
+    QueueUserWorkItem(uv__tty_console_resize_message_loop_thread,
+                      NULL,
+                      WT_EXECUTELONGFUNCTION);
   }
 }

@@ -2239,11 +2234,11 @@ void uv__process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,


   handle->stream.conn.write_reqs_pending--;
-  if (handle->stream.conn.write_reqs_pending == 0)
-    if (handle->flags & UV_HANDLE_SHUTTING)
-      uv__process_tty_shutdown_req(loop,
-                                   handle,
-                                   handle->stream.conn.shutdown_req);
+  if (handle->stream.conn.write_reqs_pending == 0 &&
+      uv__is_stream_shutting(handle))
+    uv__process_tty_shutdown_req(loop,
+                                 handle,
+                                 handle->stream.conn.shutdown_req);

   DECREASE_PENDING_REQ_COUNT(handle);
 }
@@ -2274,7 +2269,6 @@ void uv__process_tty_shutdown_req(uv_loop_t* loop, uv_tty_t* stream, uv_shutdown
   assert(req);

   stream->stream.conn.shutdown_req = NULL;
-  stream->flags &= ~UV_HANDLE_SHUTTING;
   UNREGISTER_HANDLE_REQ(loop, stream, req);

   /* TTY shutdown is really just a no-op */
@@ -2429,7 +2423,6 @@ static void uv__tty_console_signal_resize(void) {
   height = sb_info.srWindow.Bottom - sb_info.srWindow.Top + 1;

   uv_mutex_lock(&uv__tty_console_resize_mutex);
-  assert(uv__tty_console_width != -1 && uv__tty_console_height != -1);
   if (width != uv__tty_console_width || height != uv__tty_console_height) {
     uv__tty_console_width = width;
     uv__tty_console_height = height;
deps/libuv/src/win/udp.c (vendored, 203 changed lines)
@@ -29,11 +29,6 @@
 #include "req-inl.h"


-/*
- * Threshold of active udp streams for which to preallocate udp read buffers.
- */
-const unsigned int uv_active_udp_streams_threshold = 0;
-
 /* A zero-size buffer for use by uv_udp_read */
 static char uv_zero_[] = "";

 int uv_udp_getpeername(const uv_udp_t* handle,
@@ -276,84 +271,35 @@ static void uv__udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
   req = &handle->recv_req;
   memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));

-  /*
-   * Preallocate a read buffer if the number of active streams is below
-   * the threshold.
-   */
-  if (loop->active_udp_streams < uv_active_udp_streams_threshold) {
-    handle->flags &= ~UV_HANDLE_ZERO_READ;
-
-    handle->recv_buffer = uv_buf_init(NULL, 0);
-    handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &handle->recv_buffer);
-    if (handle->recv_buffer.base == NULL || handle->recv_buffer.len == 0) {
-      handle->recv_cb(handle, UV_ENOBUFS, &handle->recv_buffer, NULL, 0);
-      return;
-    }
-    assert(handle->recv_buffer.base != NULL);
-
-    buf = handle->recv_buffer;
-    memset(&handle->recv_from, 0, sizeof handle->recv_from);
-    handle->recv_from_len = sizeof handle->recv_from;
-    flags = 0;
-
-    result = handle->func_wsarecvfrom(handle->socket,
-                                      (WSABUF*) &buf,
-                                      1,
-                                      &bytes,
-                                      &flags,
-                                      (struct sockaddr*) &handle->recv_from,
-                                      &handle->recv_from_len,
-                                      &req->u.io.overlapped,
-                                      NULL);
-
-    if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
-      /* Process the req without IOCP. */
-      handle->flags |= UV_HANDLE_READ_PENDING;
-      req->u.io.overlapped.InternalHigh = bytes;
-      handle->reqs_pending++;
-      uv__insert_pending_req(loop, req);
-    } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
-      /* The req will be processed with IOCP. */
-      handle->flags |= UV_HANDLE_READ_PENDING;
-      handle->reqs_pending++;
-    } else {
-      /* Make this req pending reporting an error. */
-      SET_REQ_ERROR(req, WSAGetLastError());
-      uv__insert_pending_req(loop, req);
-      handle->reqs_pending++;
-    }
-  } else {
-    handle->flags |= UV_HANDLE_ZERO_READ;
-
-    buf.base = (char*) uv_zero_;
-    buf.len = 0;
-    flags = MSG_PEEK;
-
-    result = handle->func_wsarecv(handle->socket,
-                                  (WSABUF*) &buf,
-                                  1,
-                                  &bytes,
-                                  &flags,
-                                  &req->u.io.overlapped,
-                                  NULL);
-
-    if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
-      /* Process the req without IOCP. */
-      handle->flags |= UV_HANDLE_READ_PENDING;
-      req->u.io.overlapped.InternalHigh = bytes;
-      handle->reqs_pending++;
-      uv__insert_pending_req(loop, req);
-    } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
-      /* The req will be processed with IOCP. */
-      handle->flags |= UV_HANDLE_READ_PENDING;
-      handle->reqs_pending++;
-    } else {
-      /* Make this req pending reporting an error. */
-      SET_REQ_ERROR(req, WSAGetLastError());
-      uv__insert_pending_req(loop, req);
-      handle->reqs_pending++;
-    }
-  }
+  handle->flags |= UV_HANDLE_ZERO_READ;
+
+  buf.base = (char*) uv_zero_;
+  buf.len = 0;
+  flags = MSG_PEEK;
+
+  result = handle->func_wsarecv(handle->socket,
+                                (WSABUF*) &buf,
+                                1,
+                                &bytes,
+                                &flags,
+                                &req->u.io.overlapped,
+                                NULL);
+
+  if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
+    /* Process the req without IOCP. */
+    handle->flags |= UV_HANDLE_READ_PENDING;
+    req->u.io.overlapped.InternalHigh = bytes;
+    handle->reqs_pending++;
+    uv__insert_pending_req(loop, req);
+  } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
+    /* The req will be processed with IOCP. */
+    handle->flags |= UV_HANDLE_READ_PENDING;
+    handle->reqs_pending++;
+  } else {
+    /* Make this req pending reporting an error. */
+    SET_REQ_ERROR(req, WSAGetLastError());
+    uv__insert_pending_req(loop, req);
+    handle->reqs_pending++;
+  }
 }

@@ -376,7 +322,6 @@ int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb,

   handle->flags |= UV_HANDLE_READING;
   INCREASE_ACTIVE_COUNT(loop, handle);
-  loop->active_udp_streams++;

   handle->recv_cb = recv_cb;
   handle->alloc_cb = alloc_cb;
@@ -393,7 +338,6 @@ int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb,
 int uv__udp_recv_stop(uv_udp_t* handle) {
   if (handle->flags & UV_HANDLE_READING) {
     handle->flags &= ~UV_HANDLE_READING;
-    handle->loop->active_udp_streams--;
     DECREASE_ACTIVE_COUNT(loop, handle);
   }

@@ -497,57 +441,68 @@ void uv__process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle,
     DWORD bytes, err, flags;
     struct sockaddr_storage from;
     int from_len;
+    int count;
+
+    /* Prevent loop starvation when the data comes in as fast as
+     * (or faster than) we can read it. */
+    count = 32;

-    /* Do a nonblocking receive.
-     * TODO: try to read multiple datagrams at once. FIONREAD maybe? */
-    buf = uv_buf_init(NULL, 0);
-    handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
-    if (buf.base == NULL || buf.len == 0) {
-      handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
-      goto done;
-    }
-    assert(buf.base != NULL);
-
-    memset(&from, 0, sizeof from);
-    from_len = sizeof from;
-
-    flags = 0;
-
-    if (WSARecvFrom(handle->socket,
-                    (WSABUF*)&buf,
-                    1,
-                    &bytes,
-                    &flags,
-                    (struct sockaddr*) &from,
-                    &from_len,
-                    NULL,
-                    NULL) != SOCKET_ERROR) {
-
-      /* Message received */
-      handle->recv_cb(handle, bytes, &buf, (const struct sockaddr*) &from, 0);
-    } else {
-      err = WSAGetLastError();
-      if (err == WSAEMSGSIZE) {
-        /* Message truncated */
-        handle->recv_cb(handle,
-                        bytes,
-                        &buf,
-                        (const struct sockaddr*) &from,
-                        UV_UDP_PARTIAL);
-      } else if (err == WSAEWOULDBLOCK) {
-        /* Kernel buffer empty */
-        handle->recv_cb(handle, 0, &buf, NULL, 0);
-      } else if (err == WSAECONNRESET || err == WSAENETRESET) {
-        /* WSAECONNRESET/WSANETRESET is ignored because this just indicates
-         * that a previous sendto operation failed.
-         */
-        handle->recv_cb(handle, 0, &buf, NULL, 0);
-      } else {
-        /* Any other error that we want to report back to the user. */
-        uv_udp_recv_stop(handle);
-        handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0);
-      }
-    }
+    do {
+      /* Do at most `count` nonblocking receive. */
+      buf = uv_buf_init(NULL, 0);
+      handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
+      if (buf.base == NULL || buf.len == 0) {
+        handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
+        goto done;
+      }
+
+      memset(&from, 0, sizeof from);
+      from_len = sizeof from;
+
+      flags = 0;
+
+      if (WSARecvFrom(handle->socket,
+                      (WSABUF*)&buf,
+                      1,
+                      &bytes,
+                      &flags,
+                      (struct sockaddr*) &from,
+                      &from_len,
+                      NULL,
+                      NULL) != SOCKET_ERROR) {
+        /* Message received */
+        err = ERROR_SUCCESS;
+        handle->recv_cb(handle, bytes, &buf, (const struct sockaddr*) &from, 0);
+      } else {
+        err = WSAGetLastError();
+        if (err == WSAEMSGSIZE) {
+          /* Message truncated */
+          handle->recv_cb(handle,
+                          bytes,
+                          &buf,
+                          (const struct sockaddr*) &from,
+                          UV_UDP_PARTIAL);
+        } else if (err == WSAEWOULDBLOCK) {
+          /* Kernel buffer empty */
+          handle->recv_cb(handle, 0, &buf, NULL, 0);
+        } else if (err == WSAECONNRESET || err == WSAENETRESET) {
+          /* WSAECONNRESET/WSANETRESET is ignored because this just indicates
+           * that a previous sendto operation failed.
+           */
+          handle->recv_cb(handle, 0, &buf, NULL, 0);
+        } else {
+          /* Any other error that we want to report back to the user. */
+          uv_udp_recv_stop(handle);
+          handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0);
+        }
+      }
+    }
+    while (err == ERROR_SUCCESS &&
+           count-- > 0 &&
+           /* The recv_cb callback may decide to pause or close the handle. */
+           (handle->flags & UV_HANDLE_READING) &&
+           !(handle->flags & UV_HANDLE_READ_PENDING));
   }

 done:
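The rewritten loop above may invoke recv_cb up to 32 times per wakeup, and it
reports two sentinel cases: nread == 0 with addr == NULL means the kernel
buffer is drained (not end-of-stream), and UV_UDP_PARTIAL flags a truncated
datagram (the WSAEMSGSIZE branch). A hedged callback sketch that matches this
contract (names illustrative):

#include <stdlib.h>
#include <uv.h>

static void on_recv(uv_udp_t* handle,
                    ssize_t nread,
                    const uv_buf_t* buf,
                    const struct sockaddr* addr,
                    unsigned flags) {
  if (nread > 0 && !(flags & UV_UDP_PARTIAL)) {
    /* process nread bytes starting at buf->base ... */
  }
  /* The loop allocates a fresh buffer each iteration, so always release it;
   * free(NULL) is harmless for the UV_ENOBUFS case. */
  free(buf->base);
}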
deps/libuv/src/win/util.c (vendored, 310 changed lines)
@@ -31,6 +31,7 @@
 #include "internal.h"

 /* clang-format off */
+#include <sysinfoapi.h>
 #include <winsock2.h>
 #include <winperf.h>
 #include <iphlpapi.h>
@@ -121,9 +122,6 @@ int uv_exepath(char* buffer, size_t* size_ptr) {
     goto error;
   }

-  /* utf16_len contains the length, *not* including the terminating null. */
-  utf16_buffer[utf16_len] = L'\0';
-
   /* Convert to UTF-8 */
   utf8_len = WideCharToMultiByte(CP_UTF8,
                                  0,
@@ -151,6 +149,51 @@ int uv_exepath(char* buffer, size_t* size_ptr) {
 }


+static int uv__cwd(WCHAR** buf, DWORD *len) {
+  WCHAR* p;
+  DWORD n;
+  DWORD t;
+
+  t = GetCurrentDirectoryW(0, NULL);
+  for (;;) {
+    if (t == 0)
+      return uv_translate_sys_error(GetLastError());
+
+    /* |t| is the size of the buffer _including_ nul. */
+    p = uv__malloc(t * sizeof(*p));
+    if (p == NULL)
+      return UV_ENOMEM;
+
+    /* |n| is the size of the buffer _excluding_ nul but _only on success_.
+     * If |t| was too small because another thread changed the working
+     * directory, |n| is the size the buffer should be _including_ nul.
+     * It therefore follows we must resize when n >= t and fail when n == 0.
+     */
+    n = GetCurrentDirectoryW(t, p);
+    if (n > 0)
+      if (n < t)
+        break;
+
+    uv__free(p);
+    t = n;
+  }
+
+  /* The returned directory should not have a trailing slash, unless it points
+   * at a drive root, like c:\. Remove it if needed.
+   */
+  t = n - 1;
+  if (p[t] == L'\\' && !(n == 3 && p[1] == L':')) {
+    p[t] = L'\0';
+    n = t;
+  }
+
+  *buf = p;
+  *len = n;
+
+  return 0;
+}
+
+
 int uv_cwd(char* buffer, size_t* size) {
   DWORD utf16_len;
   WCHAR *utf16_buffer;
@@ -160,30 +203,9 @@ int uv_cwd(char* buffer, size_t* size) {
     return UV_EINVAL;
   }

-  utf16_len = GetCurrentDirectoryW(0, NULL);
-  if (utf16_len == 0) {
-    return uv_translate_sys_error(GetLastError());
-  }
-  utf16_buffer = uv__malloc(utf16_len * sizeof(WCHAR));
-  if (utf16_buffer == NULL) {
-    return UV_ENOMEM;
-  }
-
-  utf16_len = GetCurrentDirectoryW(utf16_len, utf16_buffer);
-  if (utf16_len == 0) {
-    uv__free(utf16_buffer);
-    return uv_translate_sys_error(GetLastError());
-  }
-
-  /* utf16_len contains the length, *not* including the terminating null. */
-  utf16_buffer[utf16_len] = L'\0';
-
-  /* The returned directory should not have a trailing slash, unless it points
-   * at a drive root, like c:\. Remove it if needed. */
-  if (utf16_buffer[utf16_len - 1] == L'\\' &&
-      !(utf16_len == 3 && utf16_buffer[1] == L':')) {
-    utf16_len--;
-    utf16_buffer[utf16_len] = L'\0';
-  }
+  r = uv__cwd(&utf16_buffer, &utf16_len);
+  if (r < 0) {
+    return r;
+  }

   /* Check how much space we need */
@@ -226,8 +248,9 @@ int uv_cwd(char* buffer, size_t* size) {

 int uv_chdir(const char* dir) {
   WCHAR *utf16_buffer;
-  size_t utf16_len, new_utf16_len;
+  DWORD utf16_len;
   WCHAR drive_letter, env_var[4];
+  int r;

   if (dir == NULL) {
     return UV_EINVAL;
@@ -262,32 +285,22 @@ int uv_chdir(const char* dir) {
     return uv_translate_sys_error(GetLastError());
   }

+  /* uv__cwd() will return a new buffer. */
+  uv__free(utf16_buffer);
+  utf16_buffer = NULL;
+
   /* Windows stores the drive-local path in an "hidden" environment variable,
    * which has the form "=C:=C:\Windows". SetCurrentDirectory does not update
    * this, so we'll have to do it. */
-  new_utf16_len = GetCurrentDirectoryW(utf16_len, utf16_buffer);
-  if (new_utf16_len > utf16_len ) {
-    uv__free(utf16_buffer);
-    utf16_buffer = uv__malloc(new_utf16_len * sizeof(WCHAR));
-    if (utf16_buffer == NULL) {
-      /* When updating the environment variable fails, return UV_OK anyway.
-       * We did successfully change current working directory, only updating
-       * hidden env variable failed. */
-      return 0;
-    }
-    new_utf16_len = GetCurrentDirectoryW(new_utf16_len, utf16_buffer);
-  }
-  if (utf16_len == 0) {
-    uv__free(utf16_buffer);
+  r = uv__cwd(&utf16_buffer, &utf16_len);
+  if (r == UV_ENOMEM) {
+    /* When updating the environment variable fails, return UV_OK anyway.
+     * We did successfully change current working directory, only updating
+     * hidden env variable failed. */
+    return 0;
+  }

-  /* The returned directory should not have a trailing slash, unless it points
-   * at a drive root, like c:\. Remove it if needed. */
-  if (utf16_buffer[utf16_len - 1] == L'\\' &&
-      !(utf16_len == 3 && utf16_buffer[1] == L':')) {
-    utf16_len--;
-    utf16_buffer[utf16_len] = L'\0';
+  if (r < 0) {
+    return r;
   }

   if (utf16_len < 2 || utf16_buffer[1] != L':') {
@@ -330,7 +343,7 @@ uint64_t uv_get_free_memory(void) {
   memory_status.dwLength = sizeof(memory_status);

   if (!GlobalMemoryStatusEx(&memory_status)) {
-    return -1;
+    return 0;
   }

   return (uint64_t)memory_status.ullAvailPhys;
@@ -342,7 +355,7 @@ uint64_t uv_get_total_memory(void) {
   memory_status.dwLength = sizeof(memory_status);

   if (!GlobalMemoryStatusEx(&memory_status)) {
-    return -1;
+    return 0;
   }

   return (uint64_t)memory_status.ullTotalPhys;
@@ -354,6 +367,11 @@ uint64_t uv_get_constrained_memory(void) {
 }


+uint64_t uv_get_available_memory(void) {
+  return uv_get_free_memory();
+}
+
+
 uv_pid_t uv_os_getpid(void) {
   return GetCurrentProcessId();
 }
@@ -487,11 +505,43 @@ int uv_get_process_title(char* buffer, size_t size) {
 }


+/* https://github.com/libuv/libuv/issues/1674 */
+int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts) {
+  FILETIME ft;
+  int64_t t;
+
+  if (ts == NULL)
+    return UV_EFAULT;
+
+  switch (clock_id) {
+    case UV_CLOCK_MONOTONIC:
+      uv__once_init();
+      t = uv__hrtime(UV__NANOSEC);
+      ts->tv_sec = t / 1000000000;
+      ts->tv_nsec = t % 1000000000;
+      return 0;
+    case UV_CLOCK_REALTIME:
+      GetSystemTimePreciseAsFileTime(&ft);
+      /* In 100-nanosecond increments from 1601-01-01 UTC because why not? */
+      t = (int64_t) ft.dwHighDateTime << 32 | ft.dwLowDateTime;
+      /* Convert to UNIX epoch, 1970-01-01. Still in 100 ns increments. */
+      t -= 116444736000000000ll;
+      /* Now convert to seconds and nanoseconds. */
+      ts->tv_sec = t / 10000000;
+      ts->tv_nsec = t % 10000000 * 100;
+      return 0;
+  }
+
+  return UV_EINVAL;
+}
+
+
 uint64_t uv_hrtime(void) {
   uv__once_init();
   return uv__hrtime(UV__NANOSEC);
 }


 uint64_t uv__hrtime(unsigned int scale) {
   LARGE_INTEGER counter;
   double scaled_freq;
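A quick check of the magic number in uv_clock_gettime() above: the FILETIME
epoch is 1601-01-01 and the UNIX epoch is 1970-01-01, which are 134774 days
apart, so in 100 ns ticks:

  134774 days x 86400 s/day x 10^7 ticks/s = 116444736000000000

which is exactly the constant subtracted before the seconds/nanoseconds split.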
@@ -678,71 +728,6 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos_ptr, int* cpu_count_ptr) {
 }


-static int is_windows_version_or_greater(DWORD os_major,
-                                         DWORD os_minor,
-                                         WORD service_pack_major,
-                                         WORD service_pack_minor) {
-  OSVERSIONINFOEX osvi;
-  DWORDLONG condition_mask = 0;
-  int op = VER_GREATER_EQUAL;
-
-  /* Initialize the OSVERSIONINFOEX structure. */
-  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
-  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
-  osvi.dwMajorVersion = os_major;
-  osvi.dwMinorVersion = os_minor;
-  osvi.wServicePackMajor = service_pack_major;
-  osvi.wServicePackMinor = service_pack_minor;
-
-  /* Initialize the condition mask. */
-  VER_SET_CONDITION(condition_mask, VER_MAJORVERSION, op);
-  VER_SET_CONDITION(condition_mask, VER_MINORVERSION, op);
-  VER_SET_CONDITION(condition_mask, VER_SERVICEPACKMAJOR, op);
-  VER_SET_CONDITION(condition_mask, VER_SERVICEPACKMINOR, op);
-
-  /* Perform the test. */
-  return (int) VerifyVersionInfo(
-    &osvi,
-    VER_MAJORVERSION | VER_MINORVERSION |
-    VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR,
-    condition_mask);
-}
-
-
-static int address_prefix_match(int family,
-                                struct sockaddr* address,
-                                struct sockaddr* prefix_address,
-                                int prefix_len) {
-  uint8_t* address_data;
-  uint8_t* prefix_address_data;
-  int i;
-
-  assert(address->sa_family == family);
-  assert(prefix_address->sa_family == family);
-
-  if (family == AF_INET6) {
-    address_data = (uint8_t*) &(((struct sockaddr_in6 *) address)->sin6_addr);
-    prefix_address_data =
-      (uint8_t*) &(((struct sockaddr_in6 *) prefix_address)->sin6_addr);
-  } else {
-    address_data = (uint8_t*) &(((struct sockaddr_in *) address)->sin_addr);
-    prefix_address_data =
-      (uint8_t*) &(((struct sockaddr_in *) prefix_address)->sin_addr);
-  }
-
-  for (i = 0; i < prefix_len >> 3; i++) {
-    if (address_data[i] != prefix_address_data[i])
-      return 0;
-  }
-
-  if (prefix_len % 8)
-    return prefix_address_data[i] ==
-           (address_data[i] & (0xff << (8 - prefix_len % 8)));
-
-  return 1;
-}
-
-
 int uv_interface_addresses(uv_interface_address_t** addresses_ptr,
                            int* count_ptr) {
   IP_ADAPTER_ADDRESSES* win_address_buf;
@@ -755,26 +740,13 @@ int uv_interface_addresses(uv_interface_address_t** addresses_ptr,
   uv_interface_address_t* uv_address;

   int count;

-  int is_vista_or_greater;
   ULONG flags;

   *addresses_ptr = NULL;
   *count_ptr = 0;

-  is_vista_or_greater = is_windows_version_or_greater(6, 0, 0, 0);
-  if (is_vista_or_greater) {
-    flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST |
-      GAA_FLAG_SKIP_DNS_SERVER;
-  } else {
-    /* We need at least XP SP1. */
-    if (!is_windows_version_or_greater(5, 1, 1, 0))
-      return UV_ENOTSUP;
-
-    flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST |
-      GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_INCLUDE_PREFIX;
-  }
-
+  flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST |
+          GAA_FLAG_SKIP_DNS_SERVER;

   /* Fetch the size of the adapters reported by windows, and then get the list
    * itself. */
@@ -938,37 +910,8 @@ int uv_interface_addresses(uv_interface_address_t** addresses_ptr,

     sa = unicast_address->Address.lpSockaddr;

-    /* XP has no OnLinkPrefixLength field. */
-    if (is_vista_or_greater) {
-      prefix_len =
-        ((IP_ADAPTER_UNICAST_ADDRESS_LH*) unicast_address)->OnLinkPrefixLength;
-    } else {
-      /* Prior to Windows Vista the FirstPrefix pointed to the list with
-       * single prefix for each IP address assigned to the adapter.
-       * Order of FirstPrefix does not match order of FirstUnicastAddress,
-       * so we need to find corresponding prefix.
-       */
-      IP_ADAPTER_PREFIX* prefix;
-      prefix_len = 0;
-
-      for (prefix = adapter->FirstPrefix; prefix; prefix = prefix->Next) {
-        /* We want the longest matching prefix. */
-        if (prefix->Address.lpSockaddr->sa_family != sa->sa_family ||
-            prefix->PrefixLength <= prefix_len)
-          continue;
-
-        if (address_prefix_match(sa->sa_family, sa,
-                                 prefix->Address.lpSockaddr, prefix->PrefixLength)) {
-          prefix_len = prefix->PrefixLength;
-        }
-      }
-
-      /* If there is no matching prefix information, return a single-host
-       * subnet mask (e.g. 255.255.255.255 for IPv4).
-       */
-      if (!prefix_len)
-        prefix_len = (sa->sa_family == AF_INET6) ? 128 : 32;
-    }
+    prefix_len =
+        ((IP_ADAPTER_UNICAST_ADDRESS_LH*) unicast_address)->OnLinkPrefixLength;

     memset(uv_address, 0, sizeof *uv_address);

@@ -1093,8 +1036,8 @@ int uv_os_homedir(char* buffer, size_t* size) {
   if (r != UV_ENOENT)
     return r;

-  /* USERPROFILE is not set, so call uv__getpwuid_r() */
-  r = uv__getpwuid_r(&pwd);
+  /* USERPROFILE is not set, so call uv_os_get_passwd() */
+  r = uv_os_get_passwd(&pwd);

   if (r != 0) {
     return r;
@@ -1181,17 +1124,6 @@ int uv_os_tmpdir(char* buffer, size_t* size) {
 }


-void uv_os_free_passwd(uv_passwd_t* pwd) {
-  if (pwd == NULL)
-    return;
-
-  uv__free(pwd->username);
-  uv__free(pwd->homedir);
-  pwd->username = NULL;
-  pwd->homedir = NULL;
-}
-
-
 /*
  * Converts a UTF-16 string into a UTF-8 one. The resulting string is
  * null-terminated.
@@ -1288,7 +1220,7 @@ int uv__convert_utf8_to_utf16(const char* utf8, int utf8len, WCHAR** utf16) {
 }


-int uv__getpwuid_r(uv_passwd_t* pwd) {
+static int uv__getpwuid_r(uv_passwd_t* pwd) {
   HANDLE token;
   wchar_t username[UNLEN + 1];
   wchar_t *path;
@@ -1366,6 +1298,16 @@ int uv_os_get_passwd(uv_passwd_t* pwd) {
 }


+int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid) {
+  return UV_ENOTSUP;
+}
+
+
+int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) {
+  return UV_ENOTSUP;
+}
+
+
 int uv_os_environ(uv_env_item_t** envitems, int* count) {
   wchar_t* env;
   wchar_t* penv;
@@ -1769,6 +1711,22 @@ int uv_os_uname(uv_utsname_t* buffer) {
   RegCloseKey(registry_key);

   if (r == ERROR_SUCCESS) {
+    /* Windows 11 shares dwMajorVersion with Windows 10
+     * this workaround tries to disambiguate that by checking
+     * if the dwBuildNumber is from Windows 11 releases (>= 22000).
+     *
+     * This workaround replaces the ProductName key value
+     * from "Windows 10 *" to "Windows 11 *" */
+    if (os_info.dwMajorVersion == 10 &&
+        os_info.dwBuildNumber >= 22000 &&
+        product_name_w_size >= ARRAY_SIZE(L"Windows 10")) {
+      /* If ProductName starts with "Windows 10" */
+      if (wcsncmp(product_name_w, L"Windows 10", ARRAY_SIZE(L"Windows 10") - 1) == 0) {
+        /* Bump 10 to 11 */
+        product_name_w[9] = '1';
+      }
+    }
+
     version_size = WideCharToMultiByte(CP_UTF8,
                                        0,
                                        product_name_w,
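Usage sketch for the Windows 11 rename above: after this change uv_os_uname()
reports "Windows 11 ..." in the version field on builds >= 22000 (hedged; field
layout per the public uv_utsname_t):

#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_utsname_t u;
  if (uv_os_uname(&u) == 0)
    printf("%s %s (%s)\n", u.sysname, u.release, u.version);
  return 0;
}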