Update libuv to 1.45.0 and clean up #include directives, plus assorted smaller changes.

git-svn-id: https://www.unprompted.com/svn/projects/tildefriends/trunk@4308 ed5197a5-7fde-0310-b194-c3ffbd925b24
2023-05-21 21:36:51 +00:00
parent 1ccb9183b4
commit f421606e21
299 changed files with 7167 additions and 4918 deletions


@ -131,6 +131,7 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) {
void uv__io_poll(uv_loop_t* loop, int timeout) {
uv__loop_internal_fields_t* lfields;
struct pollfd events[1024];
struct pollfd pqry;
struct pollfd* pe;
@ -154,6 +155,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
return;
}
lfields = uv__get_internal_fields(loop);
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
@ -217,7 +220,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
@ -232,6 +235,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
/* Store the current timeout in a location that's globally accessible so
* other locations like uv__work_done() can determine whether the queue
* of events in the callback were waiting when poll was called.
*/
lfields->current_timeout = timeout;
nfds = pollset_poll(loop->backend_fd,
events,
ARRAY_SIZE(events),
@ -321,9 +330,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
nevents++;
}
uv__metrics_inc_events(loop, nevents);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
uv__metrics_inc_events_waiting(loop, nevents);
}
if (have_signals != 0) {
@ -389,6 +400,11 @@ uint64_t uv_get_constrained_memory(void) {
}
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
void uv_loadavg(double avg[3]) {
perfstat_cpu_total_t ps_total;
int result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
@ -425,7 +441,7 @@ static char* uv__rawname(const char* cp, char (*dst)[FILENAME_MAX+1]) {
static int uv__path_is_a_directory(char* filename) {
struct stat statbuf;
if (stat(filename, &statbuf) < 0)
if (uv__stat(filename, &statbuf) < 0)
return -1; /* failed: not a directory, assume it is a file */
if (statbuf.st_type == VDIR)


@ -24,9 +24,9 @@
#include "uv.h"
#include "internal.h"
#include "atomic-ops.h"
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h> /* snprintf() */
#include <assert.h>
#include <stdlib.h>
@ -40,6 +40,7 @@
static void uv__async_send(uv_loop_t* loop);
static int uv__async_start(uv_loop_t* loop);
static void uv__cpu_relax(void);
int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
@ -52,6 +53,7 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
handle->async_cb = async_cb;
handle->pending = 0;
handle->u.fd = 0; /* This will be used as a busy flag. */
QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue);
uv__handle_start(handle);
@ -61,46 +63,54 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
int uv_async_send(uv_async_t* handle) {
_Atomic int* pending;
_Atomic int* busy;
pending = (_Atomic int*) &handle->pending;
busy = (_Atomic int*) &handle->u.fd;
/* Do a cheap read first. */
if (ACCESS_ONCE(int, handle->pending) != 0)
if (atomic_load_explicit(pending, memory_order_relaxed) != 0)
return 0;
/* Tell the other thread we're busy with the handle. */
if (cmpxchgi(&handle->pending, 0, 1) != 0)
return 0;
/* Set the loop to busy. */
atomic_fetch_add(busy, 1);
/* Wake up the other thread's event loop. */
uv__async_send(handle->loop);
if (atomic_exchange(pending, 1) == 0)
uv__async_send(handle->loop);
/* Tell the other thread we're done. */
if (cmpxchgi(&handle->pending, 1, 2) != 1)
abort();
/* Set the loop to not-busy. */
atomic_fetch_add(busy, -1);
return 0;
}
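The rewrite above replaces the old cmpxchgi() handshake with C11 atomics: a single atomic_exchange() on the pending flag coalesces concurrent sends into one wakeup, and the repurposed u.fd field is a reference count of in-flight senders that uv__async_spin() (below) waits on before the handle may be closed. A minimal standalone sketch of the same pattern, with illustrative names rather than libuv API:

#include <stdatomic.h>

static _Atomic int pending; /* 1 => a wakeup is already queued */
static _Atomic int busy;    /* count of threads currently inside send() */

void async_send(void (*wakeup)(void)) {
  atomic_fetch_add(&busy, 1);           /* tell the closer we are in here */
  if (atomic_exchange(&pending, 1) == 0)
    wakeup();                           /* only the first sender pays for the wakeup */
  atomic_fetch_add(&busy, -1);          /* done touching the handle */
}

void async_close_wait(void) {           /* event-loop thread only */
  atomic_store(&pending, 1);            /* refuse new wakeups */
  while (atomic_load(&busy) != 0)
    ;                                   /* uv__cpu_relax() in the real code */
}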
/* Only call this from the event loop thread. */
static int uv__async_spin(uv_async_t* handle) {
/* Wait for the busy flag to clear before closing.
* Only call this from the event loop thread. */
static void uv__async_spin(uv_async_t* handle) {
_Atomic int* pending;
_Atomic int* busy;
int i;
int rc;
pending = (_Atomic int*) &handle->pending;
busy = (_Atomic int*) &handle->u.fd;
/* Set the pending flag first, so no new events will be added by other
* threads after this function returns. */
atomic_store(pending, 1);
for (;;) {
/* 997 is not completely chosen at random. It's a prime number, acyclical
* by nature, and should therefore hopefully dampen sympathetic resonance.
/* 997 is not completely chosen at random. It's a prime number, acyclic by
* nature, and should therefore hopefully dampen sympathetic resonance.
*/
for (i = 0; i < 997; i++) {
/* rc=0 -- handle is not pending.
* rc=1 -- handle is pending, other thread is still working with it.
* rc=2 -- handle is pending, other thread is done.
*/
rc = cmpxchgi(&handle->pending, 2, 0);
if (rc != 1)
return rc;
if (atomic_load(busy) == 0)
return;
/* Other thread is busy with this handle, spin until it's done. */
cpu_relax();
uv__cpu_relax();
}
/* Yield the CPU. We may have preempted the other thread while it's
@ -125,6 +135,7 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
QUEUE queue;
QUEUE* q;
uv_async_t* h;
_Atomic int *pending;
assert(w == &loop->async_io_watcher);
@ -154,8 +165,10 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&loop->async_handles, q);
if (0 == uv__async_spin(h))
continue; /* Not pending. */
/* Atomically fetch and clear pending flag */
pending = (_Atomic int*) &h->pending;
if (atomic_exchange(pending, 0) == 0)
continue;
if (h->async_cb == NULL)
continue;
@ -227,20 +240,28 @@ static int uv__async_start(uv_loop_t* loop) {
}
int uv__async_fork(uv_loop_t* loop) {
if (loop->async_io_watcher.fd == -1) /* never started */
return 0;
uv__async_stop(loop);
return uv__async_start(loop);
}
void uv__async_stop(uv_loop_t* loop) {
QUEUE queue;
QUEUE* q;
uv_async_t* h;
if (loop->async_io_watcher.fd == -1)
return;
/* Make sure no other thread is accessing the async handle fd after the loop
* cleanup.
*/
QUEUE_MOVE(&loop->async_handles, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
h = QUEUE_DATA(q, uv_async_t, queue);
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&loop->async_handles, q);
uv__async_spin(h);
}
if (loop->async_wfd != -1) {
if (loop->async_wfd != loop->async_io_watcher.fd)
uv__close(loop->async_wfd);
@ -251,3 +272,58 @@ void uv__async_stop(uv_loop_t* loop) {
uv__close(loop->async_io_watcher.fd);
loop->async_io_watcher.fd = -1;
}
int uv__async_fork(uv_loop_t* loop) {
QUEUE queue;
QUEUE* q;
uv_async_t* h;
if (loop->async_io_watcher.fd == -1) /* never started */
return 0;
QUEUE_MOVE(&loop->async_handles, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
h = QUEUE_DATA(q, uv_async_t, queue);
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&loop->async_handles, q);
/* The state of any thread that set pending is now likely corrupt in this
* child because the user called fork, so just clear these flags and move
* on. Calling most libc functions after `fork` is declared to be undefined
* behavior anyways, unless async-signal-safe, for multithreaded programs
* like libuv, and nothing interesting in pthreads is async-signal-safe.
*/
h->pending = 0;
/* This is the busy flag, and we just abruptly lost all other threads. */
h->u.fd = 0;
}
/* Recreate these, since they still exist, but belong to the wrong pid now. */
if (loop->async_wfd != -1) {
if (loop->async_wfd != loop->async_io_watcher.fd)
uv__close(loop->async_wfd);
loop->async_wfd = -1;
}
uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
uv__close(loop->async_io_watcher.fd);
loop->async_io_watcher.fd = -1;
return uv__async_start(loop);
}
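uv__async_fork() backs the public uv_loop_fork() API; since only the calling thread survives fork(), the pending and busy flags are simply reset before the wakeup fd is recreated via uv__async_start(). A hedged usage sketch from the child's side:

uv_loop_t* loop = uv_default_loop();

if (fork() == 0) {
  /* Child: the loop's fds and flags still describe the parent's state. */
  if (uv_loop_fork(loop) != 0)
    abort();
  /* The loop, including its async handles, is now usable in the child. */
}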
static void uv__cpu_relax(void) {
#if defined(__i386__) || defined(__x86_64__)
__asm__ __volatile__ ("rep; nop" ::: "memory"); /* a.k.a. PAUSE */
#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
__asm__ __volatile__ ("yield" ::: "memory");
#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
__asm volatile ("" : : : "memory");
#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
__asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
#endif
}


@ -1,64 +0,0 @@
/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef UV_ATOMIC_OPS_H_
#define UV_ATOMIC_OPS_H_
#include "internal.h" /* UV_UNUSED */
#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#include <atomic.h>
#endif
UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval));
UV_UNUSED(static void cpu_relax(void));
/* Prefer hand-rolled assembly over the gcc builtins because the latter also
* issue full memory barriers.
*/
UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) {
#if defined(__i386__) || defined(__x86_64__)
int out;
__asm__ __volatile__ ("lock; cmpxchg %2, %1;"
: "=a" (out), "+m" (*(volatile int*) ptr)
: "r" (newval), "0" (oldval)
: "memory");
return out;
#elif defined(__MVS__)
/* Use hand-rolled assembly because codegen from builtin __plo_CSST results in
* a runtime bug.
*/
__asm(" cs %0,%2,%1 \n " : "+r"(oldval), "+m"(*ptr) : "r"(newval) :);
return oldval;
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
return atomic_cas_uint((uint_t *)ptr, (uint_t)oldval, (uint_t)newval);
#else
return __sync_val_compare_and_swap(ptr, oldval, newval);
#endif
}
UV_UNUSED(static void cpu_relax(void)) {
#if defined(__i386__) || defined(__x86_64__)
__asm__ __volatile__ ("rep; nop" ::: "memory"); /* a.k.a. PAUSE */
#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
__asm__ __volatile__ ("yield" ::: "memory");
#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
__asm volatile ("" : : : "memory");
#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
__asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
#endif
}
#endif /* UV_ATOMIC_OPS_H_ */
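atomic-ops.h is deleted outright: cmpxchgi() callers now use C11 <stdatomic.h> (see async.c above), and cpu_relax() survives as the private uv__cpu_relax(). For reference, the removed compare-and-swap maps onto the standard roughly like this (illustrative, not libuv code):

#include <stdatomic.h>

/* Rough C11 equivalent of the removed cmpxchgi(ptr, oldval, newval). */
static int cmpxchg_c11(_Atomic int* ptr, int oldval, int newval) {
  /* On failure, oldval is overwritten with the value actually observed. */
  atomic_compare_exchange_strong(ptr, &oldval, newval);
  return oldval; /* the observed value, exactly as cmpxchgi returned it */
}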


@ -41,12 +41,13 @@
#include <sys/uio.h> /* writev */
#include <sys/resource.h> /* getrusage */
#include <pwd.h>
#include <grp.h>
#include <sys/utsname.h>
#include <sys/time.h>
#include <time.h> /* clock_gettime */
#ifdef __sun
# include <sys/filio.h>
# include <sys/types.h>
# include <sys/wait.h>
#endif
@ -66,13 +67,14 @@ extern char** environ;
#if defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__NetBSD__) || \
defined(__OpenBSD__)
# include <sys/sysctl.h>
# include <sys/filio.h>
# include <sys/wait.h>
# include <sys/param.h>
# if defined(__FreeBSD__)
# include <sys/cpuset.h>
# define uv__accept4 accept4
# endif
# if defined(__NetBSD__)
@ -107,6 +109,35 @@ STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
/* https://github.com/libuv/libuv/issues/1674 */
int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts) {
struct timespec t;
int r;
if (ts == NULL)
return UV_EFAULT;
switch (clock_id) {
default:
return UV_EINVAL;
case UV_CLOCK_MONOTONIC:
r = clock_gettime(CLOCK_MONOTONIC, &t);
break;
case UV_CLOCK_REALTIME:
r = clock_gettime(CLOCK_REALTIME, &t);
break;
}
if (r)
return UV__ERR(errno);
ts->tv_sec = t.tv_sec;
ts->tv_nsec = t.tv_nsec;
return 0;
}
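uv_clock_gettime() is a new public API in libuv 1.45.0. A minimal caller, assuming only what the hunk above shows:

uv_timespec64_t ts;

if (uv_clock_gettime(UV_CLOCK_MONOTONIC, &ts) == 0)
  printf("%lld.%09u s\n", (long long) ts.tv_sec, (unsigned) ts.tv_nsec);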
uint64_t uv_hrtime(void) {
return uv__hrtime(UV_CLOCK_PRECISE);
}
@ -232,10 +263,10 @@ int uv__getiovmax(void) {
#if defined(IOV_MAX)
return IOV_MAX;
#elif defined(_SC_IOV_MAX)
static int iovmax_cached = -1;
static _Atomic int iovmax_cached = -1;
int iovmax;
iovmax = uv__load_relaxed(&iovmax_cached);
iovmax = atomic_load_explicit(&iovmax_cached, memory_order_relaxed);
if (iovmax != -1)
return iovmax;
@ -247,7 +278,7 @@ int uv__getiovmax(void) {
if (iovmax == -1)
iovmax = 1;
uv__store_relaxed(&iovmax_cached, iovmax);
atomic_store_explicit(&iovmax_cached, iovmax, memory_order_relaxed);
return iovmax;
#else
@ -360,6 +391,7 @@ static int uv__backend_timeout(const uv_loop_t* loop) {
(uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
QUEUE_EMPTY(&loop->pending_queue) &&
QUEUE_EMPTY(&loop->idle_handles) &&
(loop->flags & UV_LOOP_REAP_CHILDREN) == 0 &&
loop->closing_handles == NULL)
return uv__next_timeout(loop);
return 0;
@ -388,10 +420,17 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
if (!r)
uv__update_time(loop);
while (r != 0 && loop->stop_flag == 0) {
uv__update_time(loop);
/* Maintain backwards compatibility by processing timers before entering the
* while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed
* once, which should be done after polling in order to maintain proper
* execution order of the conceptual event loop. */
if (mode == UV_RUN_DEFAULT) {
if (r)
uv__update_time(loop);
uv__run_timers(loop);
}
while (r != 0 && loop->stop_flag == 0) {
can_sleep =
QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->idle_handles);
@ -403,6 +442,8 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
timeout = uv__backend_timeout(loop);
uv__metrics_inc_loop_count(loop);
uv__io_poll(loop, timeout);
/* Process immediate callbacks (e.g. write_cb) a small fixed number of
@ -420,18 +461,8 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
uv__run_check(loop);
uv__run_closing_handles(loop);
if (mode == UV_RUN_ONCE) {
/* UV_RUN_ONCE implies forward progress: at least one callback must have
* been invoked when it returns. uv__io_poll() can return without doing
* I/O (meaning: no callbacks) when its timeout expires - which means we
* have pending timers that satisfy the forward progress constraint.
*
* UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
* the check.
*/
uv__update_time(loop);
uv__run_timers(loop);
}
uv__update_time(loop);
uv__run_timers(loop);
r = uv__loop_alive(loop);
if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
@ -867,11 +898,6 @@ void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
w->fd = fd;
w->events = 0;
w->pevents = 0;
#if defined(UV_HAVE_KQUEUE)
w->rcount = 0;
w->wcount = 0;
#endif /* defined(UV_HAVE_KQUEUE) */
}
@ -991,6 +1017,15 @@ int uv_getrusage(uv_rusage_t* rusage) {
rusage->ru_nivcsw = usage.ru_nivcsw;
#endif
/* Most platforms report ru_maxrss in kilobytes; macOS and Solaris are
* the outliers because of course they are.
*/
#if defined(__APPLE__) && !TARGET_OS_IPHONE
rusage->ru_maxrss /= 1024; /* macOS reports bytes. */
#elif defined(__sun)
rusage->ru_maxrss /= getpagesize() / 1024; /* Solaris reports pages. */
#endif
return 0;
}
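With the normalization above, ru_maxrss comes back in kilobytes on every platform, so callers can drop their own #ifdef ladders:

uv_rusage_t ru;

if (uv_getrusage(&ru) == 0)
  printf("peak RSS: %llu KB\n", (unsigned long long) ru.ru_maxrss);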
@ -1090,8 +1125,8 @@ int uv_os_homedir(char* buffer, size_t* size) {
if (r != UV_ENOENT)
return r;
/* HOME is not set, so call uv__getpwuid_r() */
r = uv__getpwuid_r(&pwd);
/* HOME is not set, so call uv_os_get_passwd() */
r = uv_os_get_passwd(&pwd);
if (r != 0) {
return r;
@ -1164,11 +1199,10 @@ return_buffer:
}
int uv__getpwuid_r(uv_passwd_t* pwd) {
static int uv__getpwuid_r(uv_passwd_t *pwd, uid_t uid) {
struct passwd pw;
struct passwd* result;
char* buf;
uid_t uid;
size_t bufsize;
size_t name_size;
size_t homedir_size;
@ -1178,8 +1212,6 @@ int uv__getpwuid_r(uv_passwd_t* pwd) {
if (pwd == NULL)
return UV_EINVAL;
uid = geteuid();
/* Calling sysconf(_SC_GETPW_R_SIZE_MAX) would get the suggested size, but it
* is frequently 1024 or 4096, so we can just use that directly. The pwent
* will not usually be large. */
@ -1238,24 +1270,93 @@ int uv__getpwuid_r(uv_passwd_t* pwd) {
}
void uv_os_free_passwd(uv_passwd_t* pwd) {
if (pwd == NULL)
return;
int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) {
struct group gp;
struct group* result;
char* buf;
char* gr_mem;
size_t bufsize;
size_t name_size;
long members;
size_t mem_size;
int r;
/*
The memory for name, shell, and homedir are allocated in a single
uv__malloc() call. The base of the pointer is stored in pwd->username, so
that is the field that needs to be freed.
*/
uv__free(pwd->username);
pwd->username = NULL;
pwd->shell = NULL;
pwd->homedir = NULL;
if (grp == NULL)
return UV_EINVAL;
/* Calling sysconf(_SC_GETGR_R_SIZE_MAX) would get the suggested size, but it
* is frequently 1024 or 4096, so we can just use that directly. The pwent
* will not usually be large. */
for (bufsize = 2000;; bufsize *= 2) {
buf = uv__malloc(bufsize);
if (buf == NULL)
return UV_ENOMEM;
do
r = getgrgid_r(gid, &gp, buf, bufsize, &result);
while (r == EINTR);
if (r != 0 || result == NULL)
uv__free(buf);
if (r != ERANGE)
break;
}
if (r != 0)
return UV__ERR(r);
if (result == NULL)
return UV_ENOENT;
/* Allocate memory for the groupname and members. */
name_size = strlen(gp.gr_name) + 1;
members = 0;
mem_size = sizeof(char*);
for (r = 0; gp.gr_mem[r] != NULL; r++) {
mem_size += strlen(gp.gr_mem[r]) + 1 + sizeof(char*);
members++;
}
gr_mem = uv__malloc(name_size + mem_size);
if (gr_mem == NULL) {
uv__free(buf);
return UV_ENOMEM;
}
/* Copy the members */
grp->members = (char**) gr_mem;
grp->members[members] = NULL;
gr_mem = (char*) &grp->members[members + 1];
for (r = 0; r < members; r++) {
grp->members[r] = gr_mem;
strcpy(gr_mem, gp.gr_mem[r]);
gr_mem += strlen(gr_mem) + 1;
}
assert(gr_mem == (char*)grp->members + mem_size);
/* Copy the groupname */
grp->groupname = gr_mem;
memcpy(grp->groupname, gp.gr_name, name_size);
gr_mem += name_size;
/* Copy the gid */
grp->gid = gp.gr_gid;
uv__free(buf);
return 0;
}
int uv_os_get_passwd(uv_passwd_t* pwd) {
return uv__getpwuid_r(pwd);
return uv__getpwuid_r(pwd, geteuid());
}
int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid) {
return uv__getpwuid_r(pwd, uid);
}
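uv_os_get_passwd2() and uv_os_get_group() are new in 1.45.0 and let callers look up arbitrary uids/gids instead of only the effective user. A hedged sketch (uv_os_free_group() is the matching release call added alongside them):

uv_passwd_t pwd;
uv_group_t grp;

if (uv_os_get_passwd2(&pwd, getuid()) == 0) {
  printf("user: %s\n", pwd.username);
  uv_os_free_passwd(&pwd);
}

if (uv_os_get_group(&grp, getgid()) == 0) {
  printf("group: %s\n", grp.groupname);
  uv_os_free_group(&grp);
}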
@ -1416,6 +1517,13 @@ uv_pid_t uv_os_getppid(void) {
return getppid();
}
int uv_cpumask_size(void) {
#if UV__CPU_AFFINITY_SUPPORTED
return CPU_SETSIZE;
#else
return UV_ENOTSUP;
#endif
}
int uv_os_getpriority(uv_pid_t pid, int* priority) {
int r;


@ -51,3 +51,7 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
uint64_t uv_get_constrained_memory(void) {
return 0; /* Memory constraints are unknown. */
}
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
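uv_get_available_memory() is another 1.45.0 addition; on platforms like this one with no dedicated probe it simply falls back to uv_get_free_memory(), so treat the result as an upper-bound hint:

printf("available: %llu bytes\n",
       (unsigned long long) uv_get_available_memory());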


@ -27,7 +27,6 @@
struct CFArrayCallBacks;
struct CFRunLoopSourceContext;
struct FSEventStreamContext;
struct CFRange;
typedef double CFAbsoluteTime;
typedef double CFTimeInterval;
@ -43,23 +42,13 @@ typedef unsigned CFStringEncoding;
typedef void* CFAllocatorRef;
typedef void* CFArrayRef;
typedef void* CFBundleRef;
typedef void* CFDataRef;
typedef void* CFDictionaryRef;
typedef void* CFMutableDictionaryRef;
typedef struct CFRange CFRange;
typedef void* CFRunLoopRef;
typedef void* CFRunLoopSourceRef;
typedef void* CFStringRef;
typedef void* CFTypeRef;
typedef void* FSEventStreamRef;
typedef uint32_t IOOptionBits;
typedef unsigned int io_iterator_t;
typedef unsigned int io_object_t;
typedef unsigned int io_service_t;
typedef unsigned int io_registry_entry_t;
typedef void (*FSEventStreamCallback)(const FSEventStreamRef,
void*,
size_t,
@ -80,11 +69,6 @@ struct FSEventStreamContext {
void* pad[3];
};
struct CFRange {
CFIndex location;
CFIndex length;
};
static const CFStringEncoding kCFStringEncodingUTF8 = 0x8000100;
static const OSStatus noErr = 0;


@ -33,13 +33,10 @@
#include <sys/sysctl.h>
#include <unistd.h> /* sysconf */
#include "darwin-stub.h"
static uv_once_t once = UV_ONCE_INIT;
static uint64_t (*time_func)(void);
static mach_timebase_info_data_t timebase;
typedef unsigned char UInt8;
int uv__platform_loop_init(uv_loop_t* loop) {
loop->cf_state = NULL;
@ -110,7 +107,7 @@ uint64_t uv_get_free_memory(void) {
if (host_statistics(mach_host_self(), HOST_VM_INFO,
(host_info_t)&info, &count) != KERN_SUCCESS) {
return UV_EINVAL; /* FIXME(bnoordhuis) Translate error. */
return 0;
}
return (uint64_t) info.free_count * sysconf(_SC_PAGESIZE);
@ -123,7 +120,7 @@ uint64_t uv_get_total_memory(void) {
size_t size = sizeof(info);
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return UV__ERR(errno);
return 0;
return (uint64_t) info;
}
@ -134,6 +131,11 @@ uint64_t uv_get_constrained_memory(void) {
}
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
void uv_loadavg(double avg[3]) {
struct loadavg info;
size_t size = sizeof(info);
@ -183,159 +185,17 @@ int uv_uptime(double* uptime) {
return 0;
}
static int uv__get_cpu_speed(uint64_t* speed) {
/* IOKit */
void (*pIOObjectRelease)(io_object_t);
kern_return_t (*pIOMasterPort)(mach_port_t, mach_port_t*);
CFMutableDictionaryRef (*pIOServiceMatching)(const char*);
kern_return_t (*pIOServiceGetMatchingServices)(mach_port_t,
CFMutableDictionaryRef,
io_iterator_t*);
io_service_t (*pIOIteratorNext)(io_iterator_t);
CFTypeRef (*pIORegistryEntryCreateCFProperty)(io_registry_entry_t,
CFStringRef,
CFAllocatorRef,
IOOptionBits);
/* CoreFoundation */
CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef,
const char*,
CFStringEncoding);
CFStringEncoding (*pCFStringGetSystemEncoding)(void);
UInt8 *(*pCFDataGetBytePtr)(CFDataRef);
CFIndex (*pCFDataGetLength)(CFDataRef);
void (*pCFDataGetBytes)(CFDataRef, CFRange, UInt8*);
void (*pCFRelease)(CFTypeRef);
void* core_foundation_handle;
void* iokit_handle;
int err;
kern_return_t kr;
mach_port_t mach_port;
io_iterator_t it;
io_object_t service;
mach_port = 0;
err = UV_ENOENT;
core_foundation_handle = dlopen("/System/Library/Frameworks/"
"CoreFoundation.framework/"
"CoreFoundation",
RTLD_LAZY | RTLD_LOCAL);
iokit_handle = dlopen("/System/Library/Frameworks/IOKit.framework/"
"IOKit",
RTLD_LAZY | RTLD_LOCAL);
if (core_foundation_handle == NULL || iokit_handle == NULL)
goto out;
#define V(handle, symbol) \
do { \
*(void **)(&p ## symbol) = dlsym((handle), #symbol); \
if (p ## symbol == NULL) \
goto out; \
} \
while (0)
V(iokit_handle, IOMasterPort);
V(iokit_handle, IOServiceMatching);
V(iokit_handle, IOServiceGetMatchingServices);
V(iokit_handle, IOIteratorNext);
V(iokit_handle, IOObjectRelease);
V(iokit_handle, IORegistryEntryCreateCFProperty);
V(core_foundation_handle, CFStringCreateWithCString);
V(core_foundation_handle, CFStringGetSystemEncoding);
V(core_foundation_handle, CFDataGetBytePtr);
V(core_foundation_handle, CFDataGetLength);
V(core_foundation_handle, CFDataGetBytes);
V(core_foundation_handle, CFRelease);
#undef V
#define S(s) pCFStringCreateWithCString(NULL, (s), kCFStringEncodingUTF8)
kr = pIOMasterPort(MACH_PORT_NULL, &mach_port);
assert(kr == KERN_SUCCESS);
CFMutableDictionaryRef classes_to_match
= pIOServiceMatching("IOPlatformDevice");
kr = pIOServiceGetMatchingServices(mach_port, classes_to_match, &it);
assert(kr == KERN_SUCCESS);
service = pIOIteratorNext(it);
CFStringRef device_type_str = S("device_type");
CFStringRef clock_frequency_str = S("clock-frequency");
while (service != 0) {
CFDataRef data;
data = pIORegistryEntryCreateCFProperty(service,
device_type_str,
NULL,
0);
if (data) {
const UInt8* raw = pCFDataGetBytePtr(data);
if (strncmp((char*)raw, "cpu", 3) == 0 ||
strncmp((char*)raw, "processor", 9) == 0) {
CFDataRef freq_ref;
freq_ref = pIORegistryEntryCreateCFProperty(service,
clock_frequency_str,
NULL,
0);
if (freq_ref) {
const UInt8* freq_ref_ptr = pCFDataGetBytePtr(freq_ref);
CFIndex len = pCFDataGetLength(freq_ref);
if (len == 8)
memcpy(speed, freq_ref_ptr, 8);
else if (len == 4) {
uint32_t v;
memcpy(&v, freq_ref_ptr, 4);
*speed = v;
} else {
*speed = 0;
}
pCFRelease(freq_ref);
pCFRelease(data);
break;
}
}
pCFRelease(data);
}
service = pIOIteratorNext(it);
}
pIOObjectRelease(it);
err = 0;
if (device_type_str != NULL)
pCFRelease(device_type_str);
if (clock_frequency_str != NULL)
pCFRelease(clock_frequency_str);
out:
if (core_foundation_handle != NULL)
dlclose(core_foundation_handle);
if (iokit_handle != NULL)
dlclose(iokit_handle);
mach_port_deallocate(mach_task_self(), mach_port);
return err;
}
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
multiplier = ((uint64_t)1000L / ticks);
char model[512];
uint64_t cpuspeed;
size_t size;
unsigned int i;
natural_t numcpus;
mach_msg_type_number_t msg_type;
processor_cpu_load_info_data_t *info;
uv_cpu_info_t* cpu_info;
uint64_t cpuspeed;
int err;
size = sizeof(model);
if (sysctlbyname("machdep.cpu.brand_string", &model, &size, NULL, 0) &&
@ -343,9 +203,13 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
return UV__ERR(errno);
}
err = uv__get_cpu_speed(&cpuspeed);
if (err < 0)
return err;
cpuspeed = 0;
size = sizeof(cpuspeed);
sysctlbyname("hw.cpufrequency", &cpuspeed, &size, NULL, 0);
if (cpuspeed == 0)
/* If sysctl hw.cputype == CPU_TYPE_ARM64, the correct value is unavailable
* from Apple, but we can hard-code it here to a plausible value. */
cpuspeed = 2400000000;
if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &numcpus,
(processor_info_array_t*)&info,


@ -1,422 +0,0 @@
/* Copyright libuv contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <errno.h>
#include <sys/epoll.h>
int uv__epoll_init(uv_loop_t* loop) {
int fd;
fd = epoll_create1(O_CLOEXEC);
/* epoll_create1() can fail either because it's not implemented (old kernel)
* or because it doesn't understand the O_CLOEXEC flag.
*/
if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
fd = epoll_create(256);
if (fd != -1)
uv__cloexec(fd, 1);
}
loop->backend_fd = fd;
if (fd == -1)
return UV__ERR(errno);
return 0;
}
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
struct epoll_event* events;
struct epoll_event dummy;
uintptr_t i;
uintptr_t nfds;
assert(loop->watchers != NULL);
assert(fd >= 0);
events = (struct epoll_event*) loop->watchers[loop->nwatchers];
nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
if (events != NULL)
/* Invalidate events with same file descriptor */
for (i = 0; i < nfds; i++)
if (events[i].data.fd == fd)
events[i].data.fd = -1;
/* Remove the file descriptor from the epoll.
* This avoids a problem where the same file description remains open
* in another process, causing repeated junk epoll events.
*
* We pass in a dummy epoll_event, to work around a bug in old kernels.
*/
if (loop->backend_fd >= 0) {
/* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
* has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
*/
memset(&dummy, 0, sizeof(dummy));
epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
}
}
int uv__io_check_fd(uv_loop_t* loop, int fd) {
struct epoll_event e;
int rc;
memset(&e, 0, sizeof(e));
e.events = POLLIN;
e.data.fd = -1;
rc = 0;
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
if (errno != EEXIST)
rc = UV__ERR(errno);
if (rc == 0)
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
abort();
return rc;
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
/* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
* effectively infinite on 32 bits architectures. To avoid blocking
* indefinitely, we cap the timeout and poll again if necessary.
*
* Note that "30 minutes" is a simplification because it depends on
* the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200,
* that being the largest value I have seen in the wild (and only once.)
*/
static const int max_safe_timeout = 1789569;
static int no_epoll_pwait_cached;
static int no_epoll_wait_cached;
int no_epoll_pwait;
int no_epoll_wait;
struct epoll_event events[1024];
struct epoll_event* pe;
struct epoll_event e;
int real_timeout;
QUEUE* q;
uv__io_t* w;
sigset_t sigset;
uint64_t sigmask;
uint64_t base;
int have_signals;
int nevents;
int count;
int nfds;
int fd;
int op;
int i;
int user_timeout;
int reset_timeout;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
return;
}
memset(&e, 0, sizeof(e));
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);
e.events = w->pevents;
e.data.fd = w->fd;
if (w->events == 0)
op = EPOLL_CTL_ADD;
else
op = EPOLL_CTL_MOD;
/* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
* events, skip the syscall and squelch the events after epoll_wait().
*/
if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
if (errno != EEXIST)
abort();
assert(op == EPOLL_CTL_ADD);
/* We've reactivated a file descriptor that's been watched before. */
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
abort();
}
w->events = w->pevents;
}
sigmask = 0;
if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
sigemptyset(&sigset);
sigaddset(&sigset, SIGPROF);
sigmask |= 1 << (SIGPROF - 1);
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
real_timeout = timeout;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
} else {
reset_timeout = 0;
user_timeout = 0;
}
/* You could argue there is a dependency between these two but
* ultimately we don't care about their ordering with respect
* to one another. Worst case, we make a few system calls that
* could have been avoided because another thread already knows
* they fail with ENOSYS. Hardly the end of the world.
*/
no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached);
no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);
for (;;) {
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
/* See the comment for max_safe_timeout for an explanation of why
* this is necessary. Executive summary: kernel bug workaround.
*/
if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
timeout = max_safe_timeout;
if (sigmask != 0 && no_epoll_pwait != 0)
if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
abort();
if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
nfds = epoll_pwait(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout,
&sigset);
if (nfds == -1 && errno == ENOSYS) {
uv__store_relaxed(&no_epoll_pwait_cached, 1);
no_epoll_pwait = 1;
}
} else {
nfds = epoll_wait(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout);
if (nfds == -1 && errno == ENOSYS) {
uv__store_relaxed(&no_epoll_wait_cached, 1);
no_epoll_wait = 1;
}
}
if (sigmask != 0 && no_epoll_pwait != 0)
if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
abort();
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
assert(timeout != -1);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == -1)
continue;
if (timeout == 0)
return;
/* We may have been inside the system call for longer than |timeout|
* milliseconds so we need to update the timestamp to avoid drift.
*/
goto update_timeout;
}
if (nfds == -1) {
if (errno == ENOSYS) {
/* epoll_wait() or epoll_pwait() failed, try the other system call. */
assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
continue;
}
if (errno != EINTR)
abort();
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == -1)
continue;
if (timeout == 0)
return;
/* Interrupted by a signal. Update timeout and poll again. */
goto update_timeout;
}
have_signals = 0;
nevents = 0;
{
/* Squelch a -Waddress-of-packed-member warning with gcc >= 9. */
union {
struct epoll_event* events;
uv__io_t* watchers;
} x;
x.events = events;
assert(loop->watchers != NULL);
loop->watchers[loop->nwatchers] = x.watchers;
loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
}
for (i = 0; i < nfds; i++) {
pe = events + i;
fd = pe->data.fd;
/* Skip invalidated events, see uv__platform_invalidate_fd */
if (fd == -1)
continue;
assert(fd >= 0);
assert((unsigned) fd < loop->nwatchers);
w = loop->watchers[fd];
if (w == NULL) {
/* File descriptor that we've stopped watching, disarm it.
*
* Ignore all errors because we may be racing with another thread
* when the file descriptor is closed.
*/
epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
continue;
}
/* Give users only events they're interested in. Prevents spurious
* callbacks when previous callback invocation in this loop has stopped
* the current watcher. Also, filters out events that users has not
* requested us to watch.
*/
pe->events &= w->pevents | POLLERR | POLLHUP;
/* Work around an epoll quirk where it sometimes reports just the
* EPOLLERR or EPOLLHUP event. In order to force the event loop to
* move forward, we merge in the read/write events that the watcher
* is interested in; uv__read() and uv__write() will then deal with
* the error or hangup in the usual fashion.
*
* Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
* reads the available data, calls uv_read_stop(), then sometime later
* calls uv_read_start() again. By then, libuv has forgotten about the
* hangup and the kernel won't report EPOLLIN again because there's
* nothing left to read. If anything, libuv is to blame here. The
* current hack is just a quick bandaid; to properly fix it, libuv
* needs to remember the error/hangup event. We should get that for
* free when we switch over to edge-triggered I/O.
*/
if (pe->events == POLLERR || pe->events == POLLHUP)
pe->events |=
w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
if (pe->events != 0) {
/* Run signal watchers last. This also affects child process watchers
* because those are implemented in terms of signal watchers.
*/
if (w == &loop->signal_io_watcher) {
have_signals = 1;
} else {
uv__metrics_update_idle_time(loop);
w->cb(loop, w, pe->events);
}
nevents++;
}
}
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (have_signals != 0) {
uv__metrics_update_idle_time(loop);
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
if (have_signals != 0)
return; /* Event loop should cycle now so don't poll again. */
if (nevents != 0) {
if (nfds == ARRAY_SIZE(events) && --count != 0) {
/* Poll for more events but don't block this time. */
timeout = 0;
continue;
}
return;
}
if (timeout == 0)
return;
if (timeout == -1)
continue;
update_timeout:
assert(timeout > 0);
real_timeout -= (loop->time - base);
if (real_timeout <= 0)
return;
timeout = real_timeout;
}
}


@ -91,7 +91,7 @@ uint64_t uv_get_free_memory(void) {
size_t size = sizeof(freecount);
if (sysctlbyname("vm.stats.vm.v_free_count", &freecount, &size, NULL, 0))
return UV__ERR(errno);
return 0;
return (uint64_t) freecount * sysconf(_SC_PAGESIZE);
@ -105,7 +105,7 @@ uint64_t uv_get_total_memory(void) {
size_t size = sizeof(info);
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return UV__ERR(errno);
return 0;
return (uint64_t) info;
}
@ -116,6 +116,11 @@ uint64_t uv_get_constrained_memory(void) {
}
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
void uv_loadavg(double avg[3]) {
struct loadavg info;
size_t size = sizeof(info);
@ -264,30 +269,6 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
}
int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if __FreeBSD__ >= 11 && !defined(__DragonFly__)
return sendmmsg(fd,
(struct mmsghdr*) mmsg,
vlen,
0 /* flags */);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if __FreeBSD__ >= 11 && !defined(__DragonFly__)
return recvmmsg(fd,
(struct mmsghdr*) mmsg,
vlen,
0 /* flags */,
NULL /* timeout */);
#else
return errno = ENOSYS, -1;
#endif
}
ssize_t
uv__fs_copy_file_range(int fd_in,
off_t* off_in,


@ -48,7 +48,6 @@
#if defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__OpenBSD__) || \
defined(__NetBSD__)
# define HAVE_PREADV 1
@ -57,10 +56,11 @@
#endif
#if defined(__linux__)
# include "sys/utsname.h"
# include <sys/sendfile.h>
# include <sys/utsname.h>
#endif
#if defined(__linux__) || defined(__sun)
#if defined(__sun)
# include <sys/sendfile.h>
# include <sys/sysmacros.h>
#endif
@ -79,7 +79,6 @@
#if defined(__APPLE__) || \
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__OpenBSD__) || \
defined(__NetBSD__)
# include <sys/param.h>
@ -256,7 +255,6 @@ static ssize_t uv__fs_futime(uv_fs_t* req) {
#elif defined(__APPLE__) \
|| defined(__DragonFly__) \
|| defined(__FreeBSD__) \
|| defined(__FreeBSD_kernel__) \
|| defined(__NetBSD__) \
|| defined(__OpenBSD__) \
|| defined(__sun)
@ -311,7 +309,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
static uv_once_t once = UV_ONCE_INIT;
int r;
#ifdef O_CLOEXEC
static int no_cloexec_support;
static _Atomic int no_cloexec_support;
#endif
static const char pattern[] = "XXXXXX";
static const size_t pattern_size = sizeof(pattern) - 1;
@ -336,7 +334,8 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
uv_once(&once, uv__mkostemp_initonce);
#ifdef O_CLOEXEC
if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) {
if (atomic_load_explicit(&no_cloexec_support, memory_order_relaxed) == 0 &&
uv__mkostemp != NULL) {
r = uv__mkostemp(path, O_CLOEXEC);
if (r >= 0)
@ -349,7 +348,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
/* We set the static variable so that next calls don't even
try to use mkostemp. */
uv__store_relaxed(&no_cloexec_support, 1);
atomic_store_explicit(&no_cloexec_support, 1, memory_order_relaxed);
}
#endif /* O_CLOEXEC */
@ -459,7 +458,7 @@ static ssize_t uv__fs_preadv(uv_file fd,
static ssize_t uv__fs_read(uv_fs_t* req) {
#if defined(__linux__)
static int no_preadv;
static _Atomic int no_preadv;
#endif
unsigned int iovmax;
ssize_t result;
@ -483,19 +482,19 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
# if defined(__linux__)
if (uv__load_relaxed(&no_preadv)) retry:
if (atomic_load_explicit(&no_preadv, memory_order_relaxed)) retry:
# endif
{
result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
}
# if defined(__linux__)
else {
result = uv__preadv(req->file,
(struct iovec*)req->bufs,
req->nbufs,
req->off);
result = preadv(req->file,
(struct iovec*) req->bufs,
req->nbufs,
req->off);
if (result == -1 && errno == ENOSYS) {
uv__store_relaxed(&no_preadv, 1);
atomic_store_explicit(&no_preadv, 1, memory_order_relaxed);
goto retry;
}
}
@ -516,7 +515,7 @@ done:
if (result == -1 && errno == EOPNOTSUPP) {
struct stat buf;
ssize_t rc;
rc = fstat(req->file, &buf);
rc = uv__fstat(req->file, &buf);
if (rc == 0 && S_ISDIR(buf.st_mode)) {
errno = EISDIR;
}
@ -527,19 +526,12 @@ done:
}
#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)
#define UV_CONST_DIRENT uv__dirent_t
#else
#define UV_CONST_DIRENT const uv__dirent_t
#endif
static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) {
static int uv__fs_scandir_filter(const uv__dirent_t* dent) {
return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
}
static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) {
static int uv__fs_scandir_sort(const uv__dirent_t** a, const uv__dirent_t** b) {
return strcmp((*a)->d_name, (*b)->d_name);
}
@ -715,7 +707,7 @@ static ssize_t uv__fs_readlink(uv_fs_t* req) {
/* We may not have a real PATH_MAX. Read size of link. */
struct stat st;
int ret;
ret = lstat(req->path, &st);
ret = uv__lstat(req->path, &st);
if (ret != 0)
return -1;
if (!S_ISLNK(st.st_mode)) {
@ -908,14 +900,14 @@ out:
#ifdef __linux__
static unsigned uv__kernel_version(void) {
static unsigned cached_version;
static _Atomic unsigned cached_version;
struct utsname u;
unsigned version;
unsigned major;
unsigned minor;
unsigned patch;
version = uv__load_relaxed(&cached_version);
version = atomic_load_explicit(&cached_version, memory_order_relaxed);
if (version != 0)
return version;
@ -926,7 +918,7 @@ static unsigned uv__kernel_version(void) {
return 0;
version = major * 65536 + minor * 256 + patch;
uv__store_relaxed(&cached_version, version);
atomic_store_explicit(&cached_version, version, memory_order_relaxed);
return version;
}
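uv__kernel_version() packs the release as major * 65536 + minor * 256 + patch and caches it in a relaxed atomic, so feature gates elsewhere in the file reduce to one integer compare. For example, Linux 5.10.3 encodes as 5 * 65536 + 10 * 256 + 3 = 330243:

/* Gate an io_uring-era feature on kernel >= 5.10.0 (illustrative). */
int have_feature = uv__kernel_version() >= 5 * 65536 + 10 * 256;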
@ -968,10 +960,10 @@ static int uv__is_cifs_or_smb(int fd) {
static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
int out_fd, size_t len) {
static int no_copy_file_range_support;
static _Atomic int no_copy_file_range_support;
ssize_t r;
if (uv__load_relaxed(&no_copy_file_range_support)) {
if (atomic_load_explicit(&no_copy_file_range_support, memory_order_relaxed)) {
errno = ENOSYS;
return -1;
}
@ -990,7 +982,7 @@ static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
errno = ENOSYS; /* Use fallback. */
break;
case ENOSYS:
uv__store_relaxed(&no_copy_file_range_support, 1);
atomic_store_explicit(&no_copy_file_range_support, 1, memory_order_relaxed);
break;
case EPERM:
/* It's been reported that CIFS spuriously fails.
@ -1061,10 +1053,7 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
return -1;
}
#elif defined(__APPLE__) || \
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__)
#elif defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__)
{
off_t len;
ssize_t r;
@ -1088,15 +1077,6 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
#endif
len = 0;
r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
#elif defined(__FreeBSD_kernel__)
len = 0;
r = bsd_sendfile(in_fd,
out_fd,
req->off,
req->bufsml[0].len,
NULL,
&len,
0);
#else
/* The darwin sendfile takes len as an input for the length to send,
* so make sure to initialize it with the caller's value. */
@ -1148,7 +1128,6 @@ static ssize_t uv__fs_utime(uv_fs_t* req) {
#elif defined(__APPLE__) \
|| defined(__DragonFly__) \
|| defined(__FreeBSD__) \
|| defined(__FreeBSD_kernel__) \
|| defined(__NetBSD__) \
|| defined(__OpenBSD__)
struct timeval tv[2];
@ -1190,7 +1169,6 @@ static ssize_t uv__fs_lutime(uv_fs_t* req) {
#elif defined(__APPLE__) || \
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__NetBSD__)
struct timeval tv[2];
tv[0] = uv__fs_to_timeval(req->atime);
@ -1241,10 +1219,10 @@ static ssize_t uv__fs_write(uv_fs_t* req) {
}
# if defined(__linux__)
else {
r = uv__pwritev(req->file,
(struct iovec*) req->bufs,
req->nbufs,
req->off);
r = pwritev(req->file,
(struct iovec*) req->bufs,
req->nbufs,
req->off);
if (r == -1 && errno == ENOSYS) {
no_pwritev = 1;
goto retry;
@ -1288,7 +1266,7 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
return srcfd;
/* Get the source file's mode. */
if (fstat(srcfd, &src_statsbuf)) {
if (uv__fstat(srcfd, &src_statsbuf)) {
err = UV__ERR(errno);
goto out;
}
@ -1316,7 +1294,7 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
destination are not the same file. If they are the same, bail out early. */
if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
/* Get the destination file's mode. */
if (fstat(dstfd, &dst_statsbuf)) {
if (uv__fstat(dstfd, &dst_statsbuf)) {
err = UV__ERR(errno);
goto out;
}
@ -1330,7 +1308,19 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
/* Truncate the file in case the destination already existed. */
if (ftruncate(dstfd, 0) != 0) {
err = UV__ERR(errno);
goto out;
/* ftruncate() on ceph-fuse fails with EACCES when the file is created
* with read only permissions. Since ftruncate() on a newly created
* file is a meaningless operation anyway, detect that condition
* and squelch the error.
*/
if (err != UV_EACCES)
goto out;
if (dst_statsbuf.st_size > 0)
goto out;
err = 0;
}
}
@ -1514,14 +1504,14 @@ static int uv__fs_statx(int fd,
uv_stat_t* buf) {
STATIC_ASSERT(UV_ENOSYS != -1);
#ifdef __linux__
static int no_statx;
static _Atomic int no_statx;
struct uv__statx statxbuf;
int dirfd;
int flags;
int mode;
int rc;
if (uv__load_relaxed(&no_statx))
if (atomic_load_explicit(&no_statx, memory_order_relaxed))
return UV_ENOSYS;
dirfd = AT_FDCWD;
@ -1555,30 +1545,11 @@ static int uv__fs_statx(int fd,
* implemented, rc might return 1 with 0 set as the error code in which
* case we return ENOSYS.
*/
uv__store_relaxed(&no_statx, 1);
atomic_store_explicit(&no_statx, 1, memory_order_relaxed);
return UV_ENOSYS;
}
buf->st_dev = makedev(statxbuf.stx_dev_major, statxbuf.stx_dev_minor);
buf->st_mode = statxbuf.stx_mode;
buf->st_nlink = statxbuf.stx_nlink;
buf->st_uid = statxbuf.stx_uid;
buf->st_gid = statxbuf.stx_gid;
buf->st_rdev = makedev(statxbuf.stx_rdev_major, statxbuf.stx_rdev_minor);
buf->st_ino = statxbuf.stx_ino;
buf->st_size = statxbuf.stx_size;
buf->st_blksize = statxbuf.stx_blksize;
buf->st_blocks = statxbuf.stx_blocks;
buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec;
buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec;
buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec;
buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec;
buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec;
buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec;
buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec;
buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec;
buf->st_flags = 0;
buf->st_gen = 0;
uv__statx_to_stat(&statxbuf, buf);
return 0;
#else
@ -1595,7 +1566,7 @@ static int uv__fs_stat(const char *path, uv_stat_t *buf) {
if (ret != UV_ENOSYS)
return ret;
ret = stat(path, &pbuf);
ret = uv__stat(path, &pbuf);
if (ret == 0)
uv__to_stat(&pbuf, buf);
@ -1611,7 +1582,7 @@ static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
if (ret != UV_ENOSYS)
return ret;
ret = lstat(path, &pbuf);
ret = uv__lstat(path, &pbuf);
if (ret == 0)
uv__to_stat(&pbuf, buf);
@ -1627,7 +1598,7 @@ static int uv__fs_fstat(int fd, uv_stat_t *buf) {
if (ret != UV_ENOSYS)
return ret;
ret = fstat(fd, &pbuf);
ret = uv__fstat(fd, &pbuf);
if (ret == 0)
uv__to_stat(&pbuf, buf);
@ -1822,6 +1793,9 @@ int uv_fs_chown(uv_loop_t* loop,
int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(CLOSE);
req->file = file;
if (cb != NULL)
if (uv__iou_fs_close(loop, req))
return 0;
POST;
}
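This is the new io_uring fast path, repeated for each request type below: when the caller passed a callback and uv__iou_fs_*() accepts the request, it has been submitted to the ring and the function returns immediately; otherwise execution falls through to POST, the existing threadpool path. On non-Linux builds the uv__iou_fs_*() names are #defined to 0 in internal.h (shown later in this diff), so the same source compiles everywhere. Schematically:

if (cb != NULL)                  /* async request? */
  if (uv__iou_fs_close(loop, req))
    return 0;                    /* queued on the ring; cb fires from the loop */
POST;                            /* sync call, or no io_uring: threadpool */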
@ -1869,6 +1843,9 @@ int uv_fs_lchown(uv_loop_t* loop,
int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(FDATASYNC);
req->file = file;
if (cb != NULL)
if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* IORING_FSYNC_DATASYNC */ 1))
return 0;
POST;
}
@ -1876,6 +1853,9 @@ int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(FSTAT);
req->file = file;
if (cb != NULL)
if (uv__iou_fs_statx(loop, req, /* is_fstat */ 1, /* is_lstat */ 0))
return 0;
POST;
}
@ -1883,6 +1863,9 @@ int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(FSYNC);
req->file = file;
if (cb != NULL)
if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* no flags */ 0))
return 0;
POST;
}
@ -1929,6 +1912,9 @@ int uv_fs_lutime(uv_loop_t* loop,
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
INIT(LSTAT);
PATH;
if (cb != NULL)
if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 1))
return 0;
POST;
}
@ -1990,6 +1976,9 @@ int uv_fs_open(uv_loop_t* loop,
PATH;
req->flags = flags;
req->mode = mode;
if (cb != NULL)
if (uv__iou_fs_open(loop, req))
return 0;
POST;
}
@ -2018,6 +2007,11 @@ int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
req->off = off;
if (cb != NULL)
if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 1))
return 0;
POST;
}
@ -2125,6 +2119,9 @@ int uv_fs_sendfile(uv_loop_t* loop,
int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
INIT(STAT);
PATH;
if (cb != NULL)
if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 0))
return 0;
POST;
}
@ -2188,6 +2185,11 @@ int uv_fs_write(uv_loop_t* loop,
memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
req->off = off;
if (cb != NULL)
if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 0))
return 0;
POST;
}
@ -2196,7 +2198,7 @@ void uv_fs_req_cleanup(uv_fs_t* req) {
if (req == NULL)
return;
/* Only necessary for asychronous requests, i.e., requests with a callback.
/* Only necessary for asynchronous requests, i.e., requests with a callback.
* Synchronous ones don't copy their arguments and have req->path and
* req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
* UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.


@ -132,7 +132,6 @@ static void (*pCFRunLoopWakeUp)(CFRunLoopRef);
static CFStringRef (*pCFStringCreateWithFileSystemRepresentation)(
CFAllocatorRef,
const char*);
static CFStringEncoding (*pCFStringGetSystemEncoding)(void);
static CFStringRef (*pkCFRunLoopDefaultMode);
static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef,
FSEventStreamCallback,
@ -141,7 +140,6 @@ static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef,
FSEventStreamEventId,
CFTimeInterval,
FSEventStreamCreateFlags);
static void (*pFSEventStreamFlushSync)(FSEventStreamRef);
static void (*pFSEventStreamInvalidate)(FSEventStreamRef);
static void (*pFSEventStreamRelease)(FSEventStreamRef);
static void (*pFSEventStreamScheduleWithRunLoop)(FSEventStreamRef,
@ -331,8 +329,9 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef,
/* Runs in CF thread */
static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) {
uv__cf_loop_state_t* state;
static int uv__fsevents_create_stream(uv__cf_loop_state_t* state,
uv_loop_t* loop,
CFArrayRef paths) {
FSEventStreamContext ctx;
FSEventStreamRef ref;
CFAbsoluteTime latency;
@ -373,10 +372,7 @@ static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) {
flags);
assert(ref != NULL);
state = loop->cf_state;
pFSEventStreamScheduleWithRunLoop(ref,
state->loop,
*pkCFRunLoopDefaultMode);
pFSEventStreamScheduleWithRunLoop(ref, state->loop, *pkCFRunLoopDefaultMode);
if (!pFSEventStreamStart(ref)) {
pFSEventStreamInvalidate(ref);
pFSEventStreamRelease(ref);
@ -389,11 +385,7 @@ static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) {
/* Runs in CF thread */
static void uv__fsevents_destroy_stream(uv_loop_t* loop) {
uv__cf_loop_state_t* state;
state = loop->cf_state;
static void uv__fsevents_destroy_stream(uv__cf_loop_state_t* state) {
if (state->fsevent_stream == NULL)
return;
@ -408,9 +400,9 @@ static void uv__fsevents_destroy_stream(uv_loop_t* loop) {
/* Runs in CF thread, when there're new fsevent handles to add to stream */
static void uv__fsevents_reschedule(uv_fs_event_t* handle,
static void uv__fsevents_reschedule(uv__cf_loop_state_t* state,
uv_loop_t* loop,
uv__cf_loop_signal_type_t type) {
uv__cf_loop_state_t* state;
QUEUE* q;
uv_fs_event_t* curr;
CFArrayRef cf_paths;
@ -419,7 +411,6 @@ static void uv__fsevents_reschedule(uv_fs_event_t* handle,
int err;
unsigned int path_count;
state = handle->loop->cf_state;
paths = NULL;
cf_paths = NULL;
err = 0;
@ -438,7 +429,7 @@ static void uv__fsevents_reschedule(uv_fs_event_t* handle,
uv_mutex_unlock(&state->fsevent_mutex);
/* Destroy previous FSEventStream */
uv__fsevents_destroy_stream(handle->loop);
uv__fsevents_destroy_stream(state);
/* Any failure below will be a memory failure */
err = UV_ENOMEM;
@ -478,7 +469,7 @@ static void uv__fsevents_reschedule(uv_fs_event_t* handle,
err = UV_ENOMEM;
goto final;
}
err = uv__fsevents_create_stream(handle->loop, cf_paths);
err = uv__fsevents_create_stream(state, loop, cf_paths);
}
final:
@ -563,10 +554,8 @@ static int uv__fsevents_global_init(void) {
V(core_foundation_handle, CFRunLoopStop);
V(core_foundation_handle, CFRunLoopWakeUp);
V(core_foundation_handle, CFStringCreateWithFileSystemRepresentation);
V(core_foundation_handle, CFStringGetSystemEncoding);
V(core_foundation_handle, kCFRunLoopDefaultMode);
V(core_services_handle, FSEventStreamCreate);
V(core_services_handle, FSEventStreamFlushSync);
V(core_services_handle, FSEventStreamInvalidate);
V(core_services_handle, FSEventStreamRelease);
V(core_services_handle, FSEventStreamScheduleWithRunLoop);
@ -767,7 +756,7 @@ static void uv__cf_loop_cb(void* arg) {
if (s->handle == NULL)
pCFRunLoopStop(state->loop);
else
uv__fsevents_reschedule(s->handle, s->type);
uv__fsevents_reschedule(state, loop, s->type);
uv__free(s);
}

View File

@ -84,6 +84,11 @@ uint64_t uv_get_constrained_memory(void) {
}
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
int uv_resident_set_memory(size_t* rss) {
area_info area;
ssize_t cookie;


@ -165,3 +165,8 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
uint64_t uv_get_constrained_memory(void) {
return 0; /* Memory constraints are unknown. */
}
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}


@ -249,6 +249,11 @@ uint64_t uv_get_constrained_memory(void) {
}
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
void uv_loadavg(double avg[3]) {
SSTS0200 rcvr;


@ -26,21 +26,34 @@
#include <assert.h>
#include <limits.h> /* _POSIX_PATH_MAX, PATH_MAX */
#include <stdint.h>
#include <stdlib.h> /* abort */
#include <string.h> /* strrchr */
#include <fcntl.h> /* O_CLOEXEC and O_NONBLOCK, if supported. */
#include <stdio.h>
#include <errno.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#define uv__msan_unpoison(p, n) \
do { \
(void) (p); \
(void) (n); \
} while (0)
#if defined(__has_feature)
# if __has_feature(memory_sanitizer)
# include <sanitizer/msan_interface.h>
# undef uv__msan_unpoison
# define uv__msan_unpoison __msan_unpoison
# endif
#endif
#if defined(__STRICT_ANSI__)
# define inline __inline
#endif
#if defined(__linux__)
# include "linux-syscalls.h"
#endif /* __linux__ */
#if defined(__MVS__)
# include "os390-syscalls.h"
#endif /* __MVS__ */
@ -79,13 +92,11 @@
# define UV__PATH_MAX 8192
#endif
#if defined(__ANDROID__)
int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset);
# ifdef pthread_sigmask
# undef pthread_sigmask
# endif
# define pthread_sigmask(how, set, oldset) uv__pthread_sigmask(how, set, oldset)
#endif
union uv__sockaddr {
struct sockaddr_in6 in6;
struct sockaddr_in in;
struct sockaddr addr;
};
#define ACCESS_ONCE(type, var) \
(*(volatile type*) &(var))
@ -166,12 +177,42 @@ struct uv__stream_queued_fds_s {
int fds[1];
};
#ifdef __linux__
struct uv__statx_timestamp {
int64_t tv_sec;
uint32_t tv_nsec;
int32_t unused0;
};
struct uv__statx {
uint32_t stx_mask;
uint32_t stx_blksize;
uint64_t stx_attributes;
uint32_t stx_nlink;
uint32_t stx_uid;
uint32_t stx_gid;
uint16_t stx_mode;
uint16_t unused0;
uint64_t stx_ino;
uint64_t stx_size;
uint64_t stx_blocks;
uint64_t stx_attributes_mask;
struct uv__statx_timestamp stx_atime;
struct uv__statx_timestamp stx_btime;
struct uv__statx_timestamp stx_ctime;
struct uv__statx_timestamp stx_mtime;
uint32_t stx_rdev_major;
uint32_t stx_rdev_minor;
uint32_t stx_dev_major;
uint32_t stx_dev_minor;
uint64_t unused1[14];
};
#endif /* __linux__ */
#if defined(_AIX) || \
defined(__APPLE__) || \
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__linux__) || \
defined(__OpenBSD__) || \
defined(__NetBSD__)
@ -258,10 +299,10 @@ int uv__signal_loop_fork(uv_loop_t* loop);
/* platform specific */
uint64_t uv__hrtime(uv_clocktype_t type);
int uv__kqueue_init(uv_loop_t* loop);
int uv__epoll_init(uv_loop_t* loop);
int uv__platform_loop_init(uv_loop_t* loop);
void uv__platform_loop_delete(uv_loop_t* loop);
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd);
int uv__process_init(uv_loop_t* loop);
/* various */
void uv__async_close(uv_async_t* handle);
@ -278,7 +319,6 @@ size_t uv__thread_stack_size(void);
void uv__udp_close(uv_udp_t* handle);
void uv__udp_finish_close(uv_udp_t* handle);
FILE* uv__open_file(const char* path);
int uv__getpwuid_r(uv_passwd_t* pwd);
int uv__search_path(const char* prog, char* buf, size_t* buflen);
void uv__wait_children(uv_loop_t* loop);
@ -289,6 +329,28 @@ int uv__random_getentropy(void* buf, size_t buflen);
int uv__random_readpath(const char* path, void* buf, size_t buflen);
int uv__random_sysctl(void* buf, size_t buflen);
/* io_uring */
#ifdef __linux__
int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop,
uv_fs_t* req,
uint32_t fsync_flags);
int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_read_or_write(uv_loop_t* loop,
uv_fs_t* req,
int is_read);
int uv__iou_fs_statx(uv_loop_t* loop,
uv_fs_t* req,
int is_fstat,
int is_lstat);
#else
#define uv__iou_fs_close(loop, req) 0
#define uv__iou_fs_fsync_or_fdatasync(loop, req, fsync_flags) 0
#define uv__iou_fs_open(loop, req) 0
#define uv__iou_fs_read_or_write(loop, req, is_read) 0
#define uv__iou_fs_statx(loop, req, is_fstat, is_lstat) 0
#endif
#if defined(__APPLE__)
int uv___stream_fd(const uv_stream_t* handle);
#define uv__stream_fd(handle) (uv___stream_fd((const uv_stream_t*) (handle)))
@ -322,8 +384,51 @@ UV_UNUSED(static char* uv__basename_r(const char* path)) {
return s + 1;
}
UV_UNUSED(static int uv__fstat(int fd, struct stat* s)) {
int rc;
rc = fstat(fd, s);
if (rc >= 0)
uv__msan_unpoison(s, sizeof(*s));
return rc;
}
UV_UNUSED(static int uv__lstat(const char* path, struct stat* s)) {
int rc;
rc = lstat(path, s);
if (rc >= 0)
uv__msan_unpoison(s, sizeof(*s));
return rc;
}
UV_UNUSED(static int uv__stat(const char* path, struct stat* s)) {
int rc;
rc = stat(path, s);
if (rc >= 0)
uv__msan_unpoison(s, sizeof(*s));
return rc;
}
#if defined(__linux__)
int uv__inotify_fork(uv_loop_t* loop, void* old_watchers);
ssize_t
uv__fs_copy_file_range(int fd_in,
off_t* off_in,
int fd_out,
off_t* off_out,
size_t len,
unsigned int flags);
int uv__statx(int dirfd,
const char* path,
int flags,
unsigned int mask,
struct uv__statx* statxbuf);
void uv__statx_to_stat(const struct uv__statx* statxbuf, uv_stat_t* buf);
ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags);
#endif
typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*);
@ -333,22 +438,6 @@ int uv__getsockpeername(const uv_handle_t* handle,
struct sockaddr* name,
int* namelen);
#if defined(__linux__) || \
defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__DragonFly__)
#define HAVE_MMSG 1
struct uv__mmsghdr {
struct msghdr msg_hdr;
unsigned int msg_len;
};
int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
#else
#define HAVE_MMSG 0
#endif
#if defined(__sun)
#if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L
size_t strnlen(const char* s, size_t maxlen);
@ -365,5 +454,10 @@ uv__fs_copy_file_range(int fd_in,
unsigned int flags);
#endif
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 1301000)
#define UV__CPU_AFFINITY_SUPPORTED 1
#else
#define UV__CPU_AFFINITY_SUPPORTED 0
#endif
#endif /* UV_UNIX_INTERNAL_H_ */

View File

@ -60,7 +60,7 @@ int uv__kqueue_init(uv_loop_t* loop) {
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
static int uv__has_forked_with_cfrunloop;
static _Atomic int uv__has_forked_with_cfrunloop;
#endif
int uv__io_fork(uv_loop_t* loop) {
@ -82,7 +82,9 @@ int uv__io_fork(uv_loop_t* loop) {
process. So we sidestep the issue by pretending like we never
started it in the first place.
*/
uv__store_relaxed(&uv__has_forked_with_cfrunloop, 1);
atomic_store_explicit(&uv__has_forked_with_cfrunloop,
1,
memory_order_relaxed);
uv__free(loop->cf_state);
loop->cf_state = NULL;
}
@ -109,7 +111,23 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) {
}
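/* Disarm a kevent we no longer watch. EBADF (the fd was closed, which
 * removes its events automatically) and ENOENT (already deleted) are
 * benign; anything else is a bug, hence the abort(). */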
static void uv__kqueue_delete(int kqfd, const struct kevent *ev) {
struct kevent change;
EV_SET(&change, ev->ident, ev->filter, EV_DELETE, 0, 0, 0);
if (0 == kevent(kqfd, &change, 1, NULL, 0, NULL))
return;
if (errno == EBADF || errno == ENOENT)
return;
abort();
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
uv__loop_internal_fields_t* lfields;
struct kevent events[1024];
struct kevent* ev;
struct timespec spec;
@ -138,6 +156,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
return;
}
lfields = uv__get_internal_fields(loop);
nevents = 0;
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
@ -205,7 +224,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
@ -228,6 +247,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (pset != NULL)
pthread_sigmask(SIG_BLOCK, pset, NULL);
/* Store the current timeout in a location that's globally accessible so
* other locations like uv__work_done() can determine whether the queue
* of events in the callback were waiting when poll was called.
*/
lfields->current_timeout = timeout;
nfds = kevent(loop->backend_fd,
events,
nevents,
@ -235,6 +260,9 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
ARRAY_SIZE(events),
timeout == -1 ? NULL : &spec);
if (nfds == -1)
assert(errno == EINTR);
if (pset != NULL)
pthread_sigmask(SIG_UNBLOCK, pset, NULL);
@ -242,36 +270,26 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
SAVE_ERRNO(uv__update_time(loop));
uv__update_time(loop);
if (nfds == 0) {
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
if (timeout == -1)
continue;
if (timeout > 0)
goto update_timeout;
if (nfds == 0 || nfds == -1) {
/* If kqueue is empty or interrupted, we might still have children ready
* to reap immediately. */
if (loop->flags & UV_LOOP_REAP_CHILDREN) {
loop->flags &= ~UV_LOOP_REAP_CHILDREN;
uv__wait_children(loop);
assert((reset_timeout == 0 ? timeout : user_timeout) == 0);
return; /* Equivalent to fall-through behavior. */
}
assert(timeout != -1);
return;
}
if (nfds == -1) {
if (errno != EINTR)
abort();
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == 0)
} else if (nfds == 0) {
/* Reached the user timeout value. */
assert(timeout != -1);
return;
if (timeout == -1)
continue;
}
/* Interrupted by a signal. Update timeout and poll again. */
goto update_timeout;
@ -307,15 +325,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
w = loop->watchers[fd];
if (w == NULL) {
/* File descriptor that we've stopped watching, disarm it.
* TODO: batch up. */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != EBADF && errno != ENOENT)
abort();
/* File descriptor that we've stopped watching, disarm it. */
uv__kqueue_delete(loop->backend_fd, ev);
continue;
}
@ -331,47 +342,27 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
revents = 0;
if (ev->filter == EVFILT_READ) {
if (w->pevents & POLLIN) {
if (w->pevents & POLLIN)
revents |= POLLIN;
w->rcount = ev->data;
} else {
/* TODO batch up */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != ENOENT)
abort();
}
else
uv__kqueue_delete(loop->backend_fd, ev);
if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
revents |= UV__POLLRDHUP;
}
if (ev->filter == EV_OOBAND) {
if (w->pevents & UV__POLLPRI) {
if (w->pevents & UV__POLLPRI)
revents |= UV__POLLPRI;
w->rcount = ev->data;
} else {
/* TODO batch up */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != ENOENT)
abort();
}
else
uv__kqueue_delete(loop->backend_fd, ev);
}
if (ev->filter == EVFILT_WRITE) {
if (w->pevents & POLLOUT) {
if (w->pevents & POLLOUT)
revents |= POLLOUT;
w->wcount = ev->data;
} else {
/* TODO batch up */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != ENOENT)
abort();
}
else
uv__kqueue_delete(loop->backend_fd, ev);
}
if (ev->flags & EV_ERROR)
@ -398,9 +389,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
uv__wait_children(loop);
}
uv__metrics_inc_events(loop, nevents);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
uv__metrics_inc_events_waiting(loop, nevents);
}
if (have_signals != 0) {
@ -423,13 +416,13 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
return;
}
update_timeout:
if (timeout == 0)
return;
if (timeout == -1)
continue;
update_timeout:
assert(timeout > 0);
diff = loop->time - base;
@ -541,13 +534,14 @@ int uv_fs_event_start(uv_fs_event_t* handle,
handle->realpath_len = 0;
handle->cf_flags = flags;
if (fstat(fd, &statbuf))
if (uv__fstat(fd, &statbuf))
goto fallback;
/* FSEvents works only with directories */
if (!(statbuf.st_mode & S_IFDIR))
goto fallback;
if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) {
if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop,
memory_order_relaxed)) {
int r;
/* The fallback fd is no longer needed */
uv__close_nocheckstdio(fd);
@ -582,7 +576,8 @@ int uv_fs_event_stop(uv_fs_event_t* handle) {
uv__handle_stop(handle);
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop))
if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop,
memory_order_relaxed))
if (handle->cf_cb != NULL)
r = uv__fsevents_close(handle);
#endif

View File

@ -1,834 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their
* EPOLL* counterparts. We use the POLL* variants in this file because that
* is what libuv uses elsewhere.
*/
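/* Illustrative sketch, not part of the original source: on Linux the
 * correspondence can be checked at compile time, e.g.
 *   STATIC_ASSERT(POLLIN == EPOLLIN);
 *   STATIC_ASSERT(POLLOUT == EPOLLOUT);
 *   STATIC_ASSERT(POLLERR == EPOLLERR);
 *   STATIC_ASSERT(POLLHUP == EPOLLHUP);
 */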
#include "uv.h"
#include "internal.h"
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <net/if.h>
#include <sys/epoll.h>
#include <sys/param.h>
#include <sys/prctl.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#define HAVE_IFADDRS_H 1
#if defined(__ANDROID_API__) && __ANDROID_API__ < 24
# undef HAVE_IFADDRS_H
#endif
#ifdef __UCLIBC__
# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32
# undef HAVE_IFADDRS_H
# endif
#endif
#ifdef HAVE_IFADDRS_H
# include <ifaddrs.h>
# include <sys/socket.h>
# include <net/ethernet.h>
# include <netpacket/packet.h>
#endif /* HAVE_IFADDRS_H */
/* Available from 2.6.32 onwards. */
#ifndef CLOCK_MONOTONIC_COARSE
# define CLOCK_MONOTONIC_COARSE 6
#endif
/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't
* include that file because it conflicts with <time.h>. We'll just have to
* define it ourselves.
*/
#ifndef CLOCK_BOOTTIME
# define CLOCK_BOOTTIME 7
#endif
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci);
static int read_times(FILE* statfile_fp,
unsigned int numcpus,
uv_cpu_info_t* ci);
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
static uint64_t read_cpufreq(unsigned int cpunum);
int uv__platform_loop_init(uv_loop_t* loop) {
loop->inotify_fd = -1;
loop->inotify_watchers = NULL;
return uv__epoll_init(loop);
}
int uv__io_fork(uv_loop_t* loop) {
int err;
void* old_watchers;
old_watchers = loop->inotify_watchers;
uv__close(loop->backend_fd);
loop->backend_fd = -1;
uv__platform_loop_delete(loop);
err = uv__platform_loop_init(loop);
if (err)
return err;
return uv__inotify_fork(loop, old_watchers);
}
void uv__platform_loop_delete(uv_loop_t* loop) {
if (loop->inotify_fd == -1) return;
uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN);
uv__close(loop->inotify_fd);
loop->inotify_fd = -1;
}
uint64_t uv__hrtime(uv_clocktype_t type) {
static clock_t fast_clock_id = -1;
struct timespec t;
clock_t clock_id;
/* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has
* millisecond granularity or better. CLOCK_MONOTONIC_COARSE is
* serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may
* decide to make a costly system call.
*/
/* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE
* when it has microsecond granularity or better (unlikely).
*/
clock_id = CLOCK_MONOTONIC;
if (type != UV_CLOCK_FAST)
goto done;
clock_id = uv__load_relaxed(&fast_clock_id);
if (clock_id != -1)
goto done;
clock_id = CLOCK_MONOTONIC;
if (0 == clock_getres(CLOCK_MONOTONIC_COARSE, &t))
if (t.tv_nsec <= 1 * 1000 * 1000)
clock_id = CLOCK_MONOTONIC_COARSE;
uv__store_relaxed(&fast_clock_id, clock_id);
done:
if (clock_gettime(clock_id, &t))
return 0; /* Not really possible. */
return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
}
int uv_resident_set_memory(size_t* rss) {
char buf[1024];
const char* s;
ssize_t n;
long val;
int fd;
int i;
do
fd = open("/proc/self/stat", O_RDONLY);
while (fd == -1 && errno == EINTR);
if (fd == -1)
return UV__ERR(errno);
do
n = read(fd, buf, sizeof(buf) - 1);
while (n == -1 && errno == EINTR);
uv__close(fd);
if (n == -1)
return UV__ERR(errno);
buf[n] = '\0';
s = strchr(buf, ' ');
if (s == NULL)
goto err;
s += 1;
if (*s != '(')
goto err;
s = strchr(s, ')');
if (s == NULL)
goto err;
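/* Skip ahead to field 24 of /proc/self/stat: rss, measured in pages. */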
for (i = 1; i <= 22; i++) {
s = strchr(s + 1, ' ');
if (s == NULL)
goto err;
}
errno = 0;
val = strtol(s, NULL, 10);
if (errno != 0)
goto err;
if (val < 0)
goto err;
*rss = val * getpagesize();
return 0;
err:
return UV_EINVAL;
}
int uv_uptime(double* uptime) {
static volatile int no_clock_boottime;
char buf[128];
struct timespec now;
int r;
/* Try /proc/uptime first, then fall back to clock_gettime(). */
if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
if (1 == sscanf(buf, "%lf", uptime))
return 0;
/* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
* (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
* is suspended.
*/
if (no_clock_boottime) {
retry_clock_gettime:
r = clock_gettime(CLOCK_MONOTONIC, &now);
}
else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
no_clock_boottime = 1;
goto retry_clock_gettime;
}
if (r)
return UV__ERR(errno);
*uptime = now.tv_sec;
return 0;
}
static int uv__cpu_num(FILE* statfile_fp, unsigned int* numcpus) {
unsigned int num;
char buf[1024];
if (!fgets(buf, sizeof(buf), statfile_fp))
return UV_EIO;
num = 0;
while (fgets(buf, sizeof(buf), statfile_fp)) {
if (strncmp(buf, "cpu", 3))
break;
num++;
}
if (num == 0)
return UV_EIO;
*numcpus = num;
return 0;
}
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int numcpus;
uv_cpu_info_t* ci;
int err;
FILE* statfile_fp;
*cpu_infos = NULL;
*count = 0;
statfile_fp = uv__open_file("/proc/stat");
if (statfile_fp == NULL)
return UV__ERR(errno);
err = uv__cpu_num(statfile_fp, &numcpus);
if (err < 0)
goto out;
err = UV_ENOMEM;
ci = uv__calloc(numcpus, sizeof(*ci));
if (ci == NULL)
goto out;
err = read_models(numcpus, ci);
if (err == 0)
err = read_times(statfile_fp, numcpus, ci);
if (err) {
uv_free_cpu_info(ci, numcpus);
goto out;
}
/* read_models() on x86 also reads the CPU speed from /proc/cpuinfo.
* We don't check for errors here. Worst case, the field is left zero.
*/
if (ci[0].speed == 0)
read_speeds(numcpus, ci);
*cpu_infos = ci;
*count = numcpus;
err = 0;
out:
if (fclose(statfile_fp))
if (errno != EINTR && errno != EINPROGRESS)
abort();
return err;
}
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
unsigned int num;
for (num = 0; num < numcpus; num++)
ci[num].speed = read_cpufreq(num) / 1000;
}
/* Also reads the CPU frequency on ppc and x86. The other architectures only
* have a BogoMIPS field, which may not be very accurate.
*
* Note: simply returns on error; uv_cpu_info() takes care of the cleanup.
*/
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
#if defined(__PPC__)
static const char model_marker[] = "cpu\t\t: ";
static const char speed_marker[] = "clock\t\t: ";
#else
static const char model_marker[] = "model name\t: ";
static const char speed_marker[] = "cpu MHz\t\t: ";
#endif
const char* inferred_model;
unsigned int model_idx;
unsigned int speed_idx;
unsigned int part_idx;
char buf[1024];
char* model;
FILE* fp;
int model_id;
/* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
(void) &model_marker;
(void) &speed_marker;
(void) &speed_idx;
(void) &part_idx;
(void) &model;
(void) &buf;
(void) &fp;
(void) &model_id;
model_idx = 0;
speed_idx = 0;
part_idx = 0;
#if defined(__arm__) || \
defined(__i386__) || \
defined(__mips__) || \
defined(__aarch64__) || \
defined(__PPC__) || \
defined(__x86_64__)
fp = uv__open_file("/proc/cpuinfo");
if (fp == NULL)
return UV__ERR(errno);
while (fgets(buf, sizeof(buf), fp)) {
if (model_idx < numcpus) {
if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
model = buf + sizeof(model_marker) - 1;
model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */
if (model == NULL) {
fclose(fp);
return UV_ENOMEM;
}
ci[model_idx++].model = model;
continue;
}
}
#if defined(__arm__) || defined(__mips__) || defined(__aarch64__)
if (model_idx < numcpus) {
#if defined(__arm__)
/* Fallback for pre-3.8 kernels. */
static const char model_marker[] = "Processor\t: ";
#elif defined(__aarch64__)
static const char part_marker[] = "CPU part\t: ";
/* Adapted from: https://github.com/karelzak/util-linux */
struct vendor_part {
const int id;
const char* name;
};
static const struct vendor_part arm_chips[] = {
{ 0x811, "ARM810" },
{ 0x920, "ARM920" },
{ 0x922, "ARM922" },
{ 0x926, "ARM926" },
{ 0x940, "ARM940" },
{ 0x946, "ARM946" },
{ 0x966, "ARM966" },
{ 0xa20, "ARM1020" },
{ 0xa22, "ARM1022" },
{ 0xa26, "ARM1026" },
{ 0xb02, "ARM11 MPCore" },
{ 0xb36, "ARM1136" },
{ 0xb56, "ARM1156" },
{ 0xb76, "ARM1176" },
{ 0xc05, "Cortex-A5" },
{ 0xc07, "Cortex-A7" },
{ 0xc08, "Cortex-A8" },
{ 0xc09, "Cortex-A9" },
{ 0xc0d, "Cortex-A17" }, /* Originally A12 */
{ 0xc0f, "Cortex-A15" },
{ 0xc0e, "Cortex-A17" },
{ 0xc14, "Cortex-R4" },
{ 0xc15, "Cortex-R5" },
{ 0xc17, "Cortex-R7" },
{ 0xc18, "Cortex-R8" },
{ 0xc20, "Cortex-M0" },
{ 0xc21, "Cortex-M1" },
{ 0xc23, "Cortex-M3" },
{ 0xc24, "Cortex-M4" },
{ 0xc27, "Cortex-M7" },
{ 0xc60, "Cortex-M0+" },
{ 0xd01, "Cortex-A32" },
{ 0xd03, "Cortex-A53" },
{ 0xd04, "Cortex-A35" },
{ 0xd05, "Cortex-A55" },
{ 0xd06, "Cortex-A65" },
{ 0xd07, "Cortex-A57" },
{ 0xd08, "Cortex-A72" },
{ 0xd09, "Cortex-A73" },
{ 0xd0a, "Cortex-A75" },
{ 0xd0b, "Cortex-A76" },
{ 0xd0c, "Neoverse-N1" },
{ 0xd0d, "Cortex-A77" },
{ 0xd0e, "Cortex-A76AE" },
{ 0xd13, "Cortex-R52" },
{ 0xd20, "Cortex-M23" },
{ 0xd21, "Cortex-M33" },
{ 0xd41, "Cortex-A78" },
{ 0xd42, "Cortex-A78AE" },
{ 0xd4a, "Neoverse-E1" },
{ 0xd4b, "Cortex-A78C" },
};
if (strncmp(buf, part_marker, sizeof(part_marker) - 1) == 0) {
model = buf + sizeof(part_marker) - 1;
errno = 0;
model_id = strtol(model, NULL, 16);
if ((errno != 0) || model_id < 0) {
fclose(fp);
return UV_EINVAL;
}
for (part_idx = 0; part_idx < ARRAY_SIZE(arm_chips); part_idx++) {
if (model_id == arm_chips[part_idx].id) {
model = uv__strdup(arm_chips[part_idx].name);
if (model == NULL) {
fclose(fp);
return UV_ENOMEM;
}
ci[model_idx++].model = model;
break;
}
}
}
#else /* defined(__mips__) */
static const char model_marker[] = "cpu model\t\t: ";
#endif
if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
model = buf + sizeof(model_marker) - 1;
model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */
if (model == NULL) {
fclose(fp);
return UV_ENOMEM;
}
ci[model_idx++].model = model;
continue;
}
}
#else /* !__arm__ && !__mips__ && !__aarch64__ */
if (speed_idx < numcpus) {
if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
continue;
}
}
#endif /* __arm__ || __mips__ || __aarch64__ */
}
fclose(fp);
#endif /* __arm__ || __i386__ || __mips__ || __PPC__ || __x86_64__ || __aarch64__ */
/* Now we want to make sure that all the models contain *something* because
* it's not safe to leave them as null. Copy the last entry unless there
* isn't one, in which case we simply put "unknown" into everything.
*/
inferred_model = "unknown";
if (model_idx > 0)
inferred_model = ci[model_idx - 1].model;
while (model_idx < numcpus) {
model = uv__strndup(inferred_model, strlen(inferred_model));
if (model == NULL)
return UV_ENOMEM;
ci[model_idx++].model = model;
}
return 0;
}
static int read_times(FILE* statfile_fp,
unsigned int numcpus,
uv_cpu_info_t* ci) {
struct uv_cpu_times_s ts;
unsigned int ticks;
unsigned int multiplier;
uint64_t user;
uint64_t nice;
uint64_t sys;
uint64_t idle;
uint64_t dummy;
uint64_t irq;
uint64_t num;
uint64_t len;
char buf[1024];
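/* sysconf(_SC_CLK_TCK) is the number of clock ticks per second; the
 * multiplier converts the tick counts in /proc/stat to milliseconds. */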
ticks = (unsigned int)sysconf(_SC_CLK_TCK);
assert(ticks != (unsigned int) -1);
assert(ticks != 0);
multiplier = ((uint64_t)1000L / ticks);
rewind(statfile_fp);
if (!fgets(buf, sizeof(buf), statfile_fp))
abort();
num = 0;
while (fgets(buf, sizeof(buf), statfile_fp)) {
if (num >= numcpus)
break;
if (strncmp(buf, "cpu", 3))
break;
/* skip "cpu<num> " marker */
{
unsigned int n;
int r = sscanf(buf, "cpu%u ", &n);
assert(r == 1);
(void) r; /* silence build warning */
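/* sizeof("cpu0") covers "cpu", one digit and the trailing space (the NUL
 * stands in for the space); each extra decimal digit adds one. */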
for (len = sizeof("cpu0"); n /= 10; len++);
}
/* Line contains user, nice, system, idle, iowait, irq, softirq, steal,
* guest, guest_nice but we're only interested in the first four + irq.
*
* Don't use %*s to skip fields or %ll to read straight into the uint64_t
* fields, they're not allowed in C89 mode.
*/
if (6 != sscanf(buf + len,
"%" PRIu64 " %" PRIu64 " %" PRIu64
"%" PRIu64 " %" PRIu64 " %" PRIu64,
&user,
&nice,
&sys,
&idle,
&dummy,
&irq))
abort();
ts.user = user * multiplier;
ts.nice = nice * multiplier;
ts.sys = sys * multiplier;
ts.idle = idle * multiplier;
ts.irq = irq * multiplier;
ci[num++].cpu_times = ts;
}
assert(num == numcpus);
return 0;
}
static uint64_t read_cpufreq(unsigned int cpunum) {
uint64_t val;
char buf[1024];
FILE* fp;
snprintf(buf,
sizeof(buf),
"/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq",
cpunum);
fp = uv__open_file(buf);
if (fp == NULL)
return 0;
if (fscanf(fp, "%" PRIu64, &val) != 1)
val = 0;
fclose(fp);
return val;
}
#ifdef HAVE_IFADDRS_H
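/* Filter for getifaddrs() entries; nonzero means skip this entry.
 * PF_PACKET entries describe the raw link-layer device, and exclude_type
 * selects whether those or the ordinary address entries are dropped. */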
static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
return 1;
if (ent->ifa_addr == NULL)
return 1;
/*
* On Linux getifaddrs returns information related to the raw underlying
* devices. We're not interested in this information yet.
*/
if (ent->ifa_addr->sa_family == PF_PACKET)
return exclude_type;
return !exclude_type;
}
#endif
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
#ifndef HAVE_IFADDRS_H
*count = 0;
*addresses = NULL;
return UV_ENOSYS;
#else
struct ifaddrs *addrs, *ent;
uv_interface_address_t* address;
int i;
struct sockaddr_ll *sll;
*count = 0;
*addresses = NULL;
if (getifaddrs(&addrs))
return UV__ERR(errno);
/* Count the number of interfaces */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
continue;
(*count)++;
}
if (*count == 0) {
freeifaddrs(addrs);
return 0;
}
/* Make sure the memory is initialized to zero using calloc() */
*addresses = uv__calloc(*count, sizeof(**addresses));
if (!(*addresses)) {
freeifaddrs(addrs);
return UV_ENOMEM;
}
address = *addresses;
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
continue;
address->name = uv__strdup(ent->ifa_name);
if (ent->ifa_addr->sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
} else {
address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
}
if (ent->ifa_netmask->sa_family == AF_INET6) {
address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
} else {
address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
}
address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
address++;
}
/* Fill in physical addresses for each interface */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
continue;
address = *addresses;
for (i = 0; i < (*count); i++) {
size_t namelen = strlen(ent->ifa_name);
/* Alias interfaces share the same physical address */
if (strncmp(address->name, ent->ifa_name, namelen) == 0 &&
(address->name[namelen] == 0 || address->name[namelen] == ':')) {
sll = (struct sockaddr_ll*)ent->ifa_addr;
memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr));
}
address++;
}
}
freeifaddrs(addrs);
return 0;
#endif
}
void uv_free_interface_addresses(uv_interface_address_t* addresses,
int count) {
int i;
for (i = 0; i < count; i++) {
uv__free(addresses[i].name);
}
uv__free(addresses);
}
void uv__set_process_title(const char* title) {
#if defined(PR_SET_NAME)
prctl(PR_SET_NAME, title); /* Only copies first 16 characters. */
#endif
}
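/* Parse a field like "MemTotal:" out of /proc/meminfo and return its value
 * in bytes (the file reports kB). Returns 0 when unknown or unreadable. */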
static uint64_t uv__read_proc_meminfo(const char* what) {
uint64_t rc;
char* p;
char buf[4096]; /* Large enough to hold all of /proc/meminfo. */
if (uv__slurp("/proc/meminfo", buf, sizeof(buf)))
return 0;
p = strstr(buf, what);
if (p == NULL)
return 0;
p += strlen(what);
rc = 0;
sscanf(p, "%" PRIu64 " kB", &rc);
return rc * 1024;
}
uint64_t uv_get_free_memory(void) {
struct sysinfo info;
uint64_t rc;
rc = uv__read_proc_meminfo("MemAvailable:");
if (rc != 0)
return rc;
if (0 == sysinfo(&info))
return (uint64_t) info.freeram * info.mem_unit;
return 0;
}
uint64_t uv_get_total_memory(void) {
struct sysinfo info;
uint64_t rc;
rc = uv__read_proc_meminfo("MemTotal:");
if (rc != 0)
return rc;
if (0 == sysinfo(&info))
return (uint64_t) info.totalram * info.mem_unit;
return 0;
}
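/* Read one integer from a cgroup control file (v1 layout), e.g.
 * /sys/fs/cgroup/memory/memory.limit_in_bytes. Returns 0 on any failure. */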
static uint64_t uv__read_cgroups_uint64(const char* cgroup, const char* param) {
char filename[256];
char buf[32]; /* Large enough to hold an encoded uint64_t. */
uint64_t rc;
rc = 0;
snprintf(filename, sizeof(filename), "/sys/fs/cgroup/%s/%s", cgroup, param);
if (0 == uv__slurp(filename, buf, sizeof(buf)))
sscanf(buf, "%" PRIu64, &rc);
return rc;
}
uint64_t uv_get_constrained_memory(void) {
/*
* This might return 0 if there was a problem getting the memory limit from
* cgroups. This is OK because a return value of 0 signifies that the memory
* limit is unknown.
*/
return uv__read_cgroups_uint64("memory", "memory.limit_in_bytes");
}
void uv_loadavg(double avg[3]) {
struct sysinfo info;
char buf[128]; /* Large enough to hold all of /proc/loadavg. */
if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf)))
if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]))
return;
if (sysinfo(&info) < 0)
return;
avg[0] = (double) info.loads[0] / 65536.0;
avg[1] = (double) info.loads[1] / 65536.0;
avg[2] = (double) info.loads[2] / 65536.0;
}

View File

@ -1,327 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "uv/tree.h"
#include "internal.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <sys/inotify.h>
#include <sys/types.h>
#include <unistd.h>
struct watcher_list {
RB_ENTRY(watcher_list) entry;
QUEUE watchers;
int iterating;
char* path;
int wd;
};
struct watcher_root {
struct watcher_list* rbh_root;
};
#define CAST(p) ((struct watcher_root*)(p))
static int compare_watchers(const struct watcher_list* a,
const struct watcher_list* b) {
if (a->wd < b->wd) return -1;
if (a->wd > b->wd) return 1;
return 0;
}
RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers)
static void uv__inotify_read(uv_loop_t* loop,
uv__io_t* w,
unsigned int revents);
static void maybe_free_watcher_list(struct watcher_list* w,
uv_loop_t* loop);
static int init_inotify(uv_loop_t* loop) {
int fd;
if (loop->inotify_fd != -1)
return 0;
fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
if (fd < 0)
return UV__ERR(errno);
loop->inotify_fd = fd;
uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd);
uv__io_start(loop, &loop->inotify_read_watcher, POLLIN);
return 0;
}
int uv__inotify_fork(uv_loop_t* loop, void* old_watchers) {
/* Open the inotify_fd, and re-arm all the inotify watchers. */
int err;
struct watcher_list* tmp_watcher_list_iter;
struct watcher_list* watcher_list;
struct watcher_list tmp_watcher_list;
QUEUE queue;
QUEUE* q;
uv_fs_event_t* handle;
char* tmp_path;
if (old_watchers != NULL) {
/* We must restore the old watcher list to be able to close items
* out of it.
*/
loop->inotify_watchers = old_watchers;
QUEUE_INIT(&tmp_watcher_list.watchers);
/* Note that the queue we use is shared with the start and stop()
* functions, making QUEUE_FOREACH unsafe to use. So we use the
* QUEUE_MOVE trick to safely iterate. Also don't free the watcher
* list until we're done iterating. c.f. uv__inotify_read.
*/
RB_FOREACH_SAFE(watcher_list, watcher_root,
CAST(&old_watchers), tmp_watcher_list_iter) {
watcher_list->iterating = 1;
QUEUE_MOVE(&watcher_list->watchers, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
/* It's critical to keep a copy of path here, because it
* will be set to NULL by stop() and then deallocated by
* maybe_free_watcher_list
*/
tmp_path = uv__strdup(handle->path);
assert(tmp_path != NULL);
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&watcher_list->watchers, q);
uv_fs_event_stop(handle);
QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers);
handle->path = tmp_path;
}
watcher_list->iterating = 0;
maybe_free_watcher_list(watcher_list, loop);
}
QUEUE_MOVE(&tmp_watcher_list.watchers, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
QUEUE_REMOVE(q);
handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
tmp_path = handle->path;
handle->path = NULL;
err = uv_fs_event_start(handle, handle->cb, tmp_path, 0);
uv__free(tmp_path);
if (err)
return err;
}
}
return 0;
}
static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
struct watcher_list w;
w.wd = wd;
return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w);
}
static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
/* if the watcher_list->watchers is being iterated over, we can't free it. */
if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) {
/* No watchers left for this path. Clean up. */
RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w);
inotify_rm_watch(loop->inotify_fd, w->wd);
uv__free(w);
}
}
static void uv__inotify_read(uv_loop_t* loop,
uv__io_t* dummy,
unsigned int events) {
const struct inotify_event* e;
struct watcher_list* w;
uv_fs_event_t* h;
QUEUE queue;
QUEUE* q;
const char* path;
ssize_t size;
const char *p;
/* needs to be large enough for sizeof(inotify_event) + strlen(path) */
char buf[4096];
for (;;) {
do
size = read(loop->inotify_fd, buf, sizeof(buf));
while (size == -1 && errno == EINTR);
if (size == -1) {
assert(errno == EAGAIN || errno == EWOULDBLOCK);
break;
}
assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */
/* Now we have one or more inotify_event structs. */
for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
e = (const struct inotify_event*) p;
events = 0;
if (e->mask & (IN_ATTRIB|IN_MODIFY))
events |= UV_CHANGE;
if (e->mask & ~(IN_ATTRIB|IN_MODIFY))
events |= UV_RENAME;
w = find_watcher(loop, e->wd);
if (w == NULL)
continue; /* Stale event, no watchers left. */
/* inotify does not return the filename when monitoring a single file
* for modifications. Repurpose the filename for API compatibility.
* I'm not convinced this is a good thing, maybe it should go.
*/
path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);
/* We're about to iterate over the queue and call user's callbacks.
* What can go wrong?
* A callback could call uv_fs_event_stop()
* and the queue can change under our feet.
* So we use the QUEUE_MOVE() trick to safely iterate over the queue.
* And we don't free the watcher_list until we're done iterating.
*
* First,
* tell uv_fs_event_stop() (that could be called from a user's callback)
* not to free watcher_list.
*/
w->iterating = 1;
QUEUE_MOVE(&w->watchers, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
h = QUEUE_DATA(q, uv_fs_event_t, watchers);
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&w->watchers, q);
h->cb(h, path, events, 0);
}
/* done iterating, time to (maybe) free empty watcher_list */
w->iterating = 0;
maybe_free_watcher_list(w, loop);
}
}
}
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
return 0;
}
int uv_fs_event_start(uv_fs_event_t* handle,
uv_fs_event_cb cb,
const char* path,
unsigned int flags) {
struct watcher_list* w;
size_t len;
int events;
int err;
int wd;
if (uv__is_active(handle))
return UV_EINVAL;
err = init_inotify(handle->loop);
if (err)
return err;
events = IN_ATTRIB
| IN_CREATE
| IN_MODIFY
| IN_DELETE
| IN_DELETE_SELF
| IN_MOVE_SELF
| IN_MOVED_FROM
| IN_MOVED_TO;
wd = inotify_add_watch(handle->loop->inotify_fd, path, events);
if (wd == -1)
return UV__ERR(errno);
w = find_watcher(handle->loop, wd);
if (w)
goto no_insert;
len = strlen(path) + 1;
w = uv__malloc(sizeof(*w) + len);
if (w == NULL)
return UV_ENOMEM;
w->wd = wd;
w->path = memcpy(w + 1, path, len);
QUEUE_INIT(&w->watchers);
w->iterating = 0;
RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w);
no_insert:
uv__handle_start(handle);
QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
handle->path = w->path;
handle->cb = cb;
handle->wd = wd;
return 0;
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
struct watcher_list* w;
if (!uv__is_active(handle))
return 0;
w = find_watcher(handle->loop, handle->wd);
assert(w != NULL);
handle->wd = -1;
handle->path = NULL;
uv__handle_stop(handle);
QUEUE_REMOVE(&handle->watchers);
maybe_free_watcher_list(w, handle->loop);
return 0;
}
void uv__fs_event_close(uv_fs_event_t* handle) {
uv_fs_event_stop(handle);
}

View File

@ -1,264 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "linux-syscalls.h"
#include <unistd.h>
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <errno.h>
#if defined(__arm__)
# if defined(__thumb__) || defined(__ARM_EABI__)
# define UV_SYSCALL_BASE 0
# else
# define UV_SYSCALL_BASE 0x900000
# endif
#endif /* __arm__ */
#ifndef __NR_recvmmsg
# if defined(__x86_64__)
# define __NR_recvmmsg 299
# elif defined(__arm__)
# define __NR_recvmmsg (UV_SYSCALL_BASE + 365)
# endif
#endif /* __NR_recvmmsg */
#ifndef __NR_sendmmsg
# if defined(__x86_64__)
# define __NR_sendmmsg 307
# elif defined(__arm__)
# define __NR_sendmmsg (UV_SYSCALL_BASE + 374)
# endif
#endif /* __NR_sendmmsg */
#ifndef __NR_utimensat
# if defined(__x86_64__)
# define __NR_utimensat 280
# elif defined(__i386__)
# define __NR_utimensat 320
# elif defined(__arm__)
# define __NR_utimensat (UV_SYSCALL_BASE + 348)
# endif
#endif /* __NR_utimensat */
#ifndef __NR_preadv
# if defined(__x86_64__)
# define __NR_preadv 295
# elif defined(__i386__)
# define __NR_preadv 333
# elif defined(__arm__)
# define __NR_preadv (UV_SYSCALL_BASE + 361)
# endif
#endif /* __NR_preadv */
#ifndef __NR_pwritev
# if defined(__x86_64__)
# define __NR_pwritev 296
# elif defined(__i386__)
# define __NR_pwritev 334
# elif defined(__arm__)
# define __NR_pwritev (UV_SYSCALL_BASE + 362)
# endif
#endif /* __NR_pwritev */
#ifndef __NR_dup3
# if defined(__x86_64__)
# define __NR_dup3 292
# elif defined(__i386__)
# define __NR_dup3 330
# elif defined(__arm__)
# define __NR_dup3 (UV_SYSCALL_BASE + 358)
# endif
#endif /* __NR_dup3 */
#ifndef __NR_copy_file_range
# if defined(__x86_64__)
# define __NR_copy_file_range 326
# elif defined(__i386__)
# define __NR_copy_file_range 377
# elif defined(__s390__)
# define __NR_copy_file_range 375
# elif defined(__arm__)
# define __NR_copy_file_range (UV_SYSCALL_BASE + 391)
# elif defined(__aarch64__)
# define __NR_copy_file_range 285
# elif defined(__powerpc__)
# define __NR_copy_file_range 379
# elif defined(__arc__)
# define __NR_copy_file_range 285
# endif
#endif /* __NR_copy_file_range */
#ifndef __NR_statx
# if defined(__x86_64__)
# define __NR_statx 332
# elif defined(__i386__)
# define __NR_statx 383
# elif defined(__aarch64__)
# define __NR_statx 397
# elif defined(__arm__)
# define __NR_statx (UV_SYSCALL_BASE + 397)
# elif defined(__ppc__)
# define __NR_statx 383
# elif defined(__s390__)
# define __NR_statx 379
# endif
#endif /* __NR_statx */
#ifndef __NR_getrandom
# if defined(__x86_64__)
# define __NR_getrandom 318
# elif defined(__i386__)
# define __NR_getrandom 355
# elif defined(__aarch64__)
# define __NR_getrandom 384
# elif defined(__arm__)
# define __NR_getrandom (UV_SYSCALL_BASE + 384)
# elif defined(__ppc__)
# define __NR_getrandom 359
# elif defined(__s390__)
# define __NR_getrandom 349
# endif
#endif /* __NR_getrandom */
struct uv__mmsghdr;
int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if defined(__i386__)
unsigned long args[4];
int rc;
args[0] = (unsigned long) fd;
args[1] = (unsigned long) mmsg;
args[2] = (unsigned long) vlen;
args[3] = /* flags */ 0;
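/* i386 has no dedicated sendmmsg syscall; it is multiplexed through
 * socketcall(2) (syscall 102) with call number 20 == SYS_SENDMMSG. */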
/* socketcall() raises EINVAL when SYS_SENDMMSG is not supported. */
rc = syscall(/* __NR_socketcall */ 102, 20 /* SYS_SENDMMSG */, args);
if (rc == -1)
if (errno == EINVAL)
errno = ENOSYS;
return rc;
#elif defined(__NR_sendmmsg)
return syscall(__NR_sendmmsg, fd, mmsg, vlen, /* flags */ 0);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if defined(__i386__)
unsigned long args[5];
int rc;
args[0] = (unsigned long) fd;
args[1] = (unsigned long) mmsg;
args[2] = (unsigned long) vlen;
args[3] = /* flags */ 0;
args[4] = /* timeout */ 0;
/* socketcall() raises EINVAL when SYS_RECVMMSG is not supported. */
rc = syscall(/* __NR_socketcall */ 102, 19 /* SYS_RECVMMSG */, args);
if (rc == -1)
if (errno == EINVAL)
errno = ENOSYS;
return rc;
#elif defined(__NR_recvmmsg)
return syscall(__NR_recvmmsg, fd, mmsg, vlen, /* flags */ 0, /* timeout */ 0);
#else
return errno = ENOSYS, -1;
#endif
}
ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
#if !defined(__NR_preadv) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
return errno = ENOSYS, -1;
#else
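/* The kernel takes the 64-bit offset as two long words (low, high) so the
 * same call works on 32-bit architectures. */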
return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
#endif
}
ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
#if !defined(__NR_pwritev) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
return errno = ENOSYS, -1;
#else
return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
#endif
}
int uv__dup3(int oldfd, int newfd, int flags) {
#if !defined(__NR_dup3) || defined(__ANDROID_API__) && __ANDROID_API__ < 21
return errno = ENOSYS, -1;
#else
return syscall(__NR_dup3, oldfd, newfd, flags);
#endif
}
ssize_t
uv__fs_copy_file_range(int fd_in,
off_t* off_in,
int fd_out,
off_t* off_out,
size_t len,
unsigned int flags)
{
#ifdef __NR_copy_file_range
return syscall(__NR_copy_file_range,
fd_in,
off_in,
fd_out,
off_out,
len,
flags);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__statx(int dirfd,
const char* path,
int flags,
unsigned int mask,
struct uv__statx* statxbuf) {
#if !defined(__NR_statx) || defined(__ANDROID_API__) && __ANDROID_API__ < 30
return errno = ENOSYS, -1;
#else
return syscall(__NR_statx, dirfd, path, flags, mask, statxbuf);
#endif
}
ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags) {
#if !defined(__NR_getrandom) || defined(__ANDROID_API__) && __ANDROID_API__ < 28
return errno = ENOSYS, -1;
#else
return syscall(__NR_getrandom, buf, buflen, flags);
#endif
}

View File

@ -1,78 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_LINUX_SYSCALL_H_
#define UV_LINUX_SYSCALL_H_
#include <stdint.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/socket.h>
struct uv__statx_timestamp {
int64_t tv_sec;
uint32_t tv_nsec;
int32_t unused0;
};
struct uv__statx {
uint32_t stx_mask;
uint32_t stx_blksize;
uint64_t stx_attributes;
uint32_t stx_nlink;
uint32_t stx_uid;
uint32_t stx_gid;
uint16_t stx_mode;
uint16_t unused0;
uint64_t stx_ino;
uint64_t stx_size;
uint64_t stx_blocks;
uint64_t stx_attributes_mask;
struct uv__statx_timestamp stx_atime;
struct uv__statx_timestamp stx_btime;
struct uv__statx_timestamp stx_ctime;
struct uv__statx_timestamp stx_mtime;
uint32_t stx_rdev_major;
uint32_t stx_rdev_minor;
uint32_t stx_dev_major;
uint32_t stx_dev_minor;
uint64_t unused1[14];
};
ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
int uv__dup3(int oldfd, int newfd, int flags);
ssize_t
uv__fs_copy_file_range(int fd_in,
off_t* off_in,
int fd_out,
off_t* off_out,
size_t len,
unsigned int flags);
int uv__statx(int dirfd,
const char* path,
int flags,
unsigned int mask,
struct uv__statx* statxbuf);
ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags);
#endif /* UV_LINUX_SYSCALL_H_ */

deps/libuv/src/unix/linux.c (vendored, new file, 2341 lines)

File diff suppressed because it is too large.

View File

@ -45,6 +45,9 @@ int uv_loop_init(uv_loop_t* loop) {
err = uv_mutex_init(&lfields->loop_metrics.lock);
if (err)
goto fail_metrics_mutex_init;
memset(&lfields->loop_metrics.metrics,
0,
sizeof(lfields->loop_metrics.metrics));
heap_init((struct heap*) &loop->timer_heap);
QUEUE_INIT(&loop->wq);
@ -79,12 +82,9 @@ int uv_loop_init(uv_loop_t* loop) {
goto fail_platform_init;
uv__signal_global_once_init();
err = uv_signal_init(loop, &loop->child_watcher);
err = uv__process_init(loop);
if (err)
goto fail_signal_init;
uv__handle_unref(&loop->child_watcher);
loop->child_watcher.flags |= UV_HANDLE_INTERNAL;
QUEUE_INIT(&loop->process_handles);
err = uv_rwlock_init(&loop->cloexec_lock);

View File

@ -103,7 +103,7 @@ uint64_t uv_get_free_memory(void) {
int which[] = {CTL_VM, VM_UVMEXP};
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return UV__ERR(errno);
return 0;
return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
}
@ -120,7 +120,7 @@ uint64_t uv_get_total_memory(void) {
size_t size = sizeof(info);
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return UV__ERR(errno);
return 0;
return (uint64_t) info;
}
@ -131,6 +131,11 @@ uint64_t uv_get_constrained_memory(void) {
}
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
int uv_resident_set_memory(size_t* rss) {
kvm_t *kd = NULL;
struct kinfo_proc2 *kinfo = NULL;

View File

@ -116,7 +116,7 @@ uint64_t uv_get_free_memory(void) {
int which[] = {CTL_VM, VM_UVMEXP};
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return UV__ERR(errno);
return 0;
return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
}
@ -128,7 +128,7 @@ uint64_t uv_get_total_memory(void) {
size_t size = sizeof(info);
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return UV__ERR(errno);
return 0;
return (uint64_t) info;
}
@ -139,6 +139,11 @@ uint64_t uv_get_constrained_memory(void) {
}
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
int uv_resident_set_memory(size_t* rss) {
struct kinfo_proc kinfo;
size_t page_size = getpagesize();

View File

@ -198,6 +198,11 @@ uint64_t uv_get_constrained_memory(void) {
}
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
int uv_resident_set_memory(size_t* rss) {
char* ascb;
char* rax;
@ -803,6 +808,7 @@ static int os390_message_queue_handler(uv__os390_epoll* ep) {
void uv__io_poll(uv_loop_t* loop, int timeout) {
static const int max_safe_timeout = 1789569;
uv__loop_internal_fields_t* lfields;
struct epoll_event events[1024];
struct epoll_event* pe;
struct epoll_event e;
@ -825,6 +831,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
return;
}
lfields = uv__get_internal_fields(loop);
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
uv_stream_t* stream;
@ -872,7 +880,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int nevents = 0;
have_signals = 0;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
@ -891,6 +899,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
timeout = max_safe_timeout;
/* Store the current timeout in a location that's globally accessible so
* other locations like uv__work_done() can determine whether the queue
* of events in the callback were waiting when poll was called.
*/
lfields->current_timeout = timeout;
nfds = epoll_wait(loop->ep, events,
ARRAY_SIZE(events), timeout);
@ -998,9 +1012,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
}
}
uv__metrics_inc_events(loop, nevents);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
uv__metrics_inc_events_waiting(loop, nevents);
}
if (have_signals != 0) {

View File

@ -357,7 +357,7 @@ int uv_pipe_chmod(uv_pipe_t* handle, int mode) {
}
/* stat must be used as fstat has a bug on Darwin */
if (stat(name_buffer, &pipe_stat) == -1) {
if (uv__stat(name_buffer, &pipe_stat) == -1) {
uv__free(name_buffer);
return -errno;
}

View File

@ -23,13 +23,14 @@
#include "internal.h"
#include <stdint.h>
#include <stdlib.h>
#include <time.h>
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
uint64_t uv__hrtime(uv_clocktype_t type) {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
struct timespec t;
if (clock_gettime(CLOCK_MONOTONIC, &t))
abort();
return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
}

View File

@ -132,6 +132,7 @@ static void uv__pollfds_del(uv_loop_t* loop, int fd) {
void uv__io_poll(uv_loop_t* loop, int timeout) {
uv__loop_internal_fields_t* lfields;
sigset_t* pset;
sigset_t set;
uint64_t time_base;
@ -152,6 +153,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
return;
}
lfields = uv__get_internal_fields(loop);
/* Take queued watchers and add their fds to our poll fds array. */
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
@ -179,7 +182,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
assert(timeout >= -1);
time_base = loop->time;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
@ -198,6 +201,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
/* Store the current timeout in a location that's globally accessible so
* other locations like uv__work_done() can determine whether the queue
* of events in the callback were waiting when poll was called.
*/
lfields->current_timeout = timeout;
if (pset != NULL)
if (pthread_sigmask(SIG_BLOCK, pset, NULL))
abort();
@ -292,9 +301,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
}
}
uv__metrics_inc_events(loop, nevents);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
uv__metrics_inc_events_waiting(loop, nevents);
}
if (have_signals != 0) {

View File

@ -55,7 +55,7 @@
extern char **environ;
#endif
#if defined(__linux__) || defined(__GLIBC__)
#if defined(__linux__)
# include <grp.h>
#endif
@ -79,8 +79,28 @@ static void uv__chld(uv_signal_t* handle, int signum) {
assert(signum == SIGCHLD);
uv__wait_children(handle->loop);
}
int uv__process_init(uv_loop_t* loop) {
int err;
err = uv_signal_init(loop, &loop->child_watcher);
if (err)
return err;
uv__handle_unref(&loop->child_watcher);
loop->child_watcher.flags |= UV_HANDLE_INTERNAL;
return 0;
}
#else
int uv__process_init(uv_loop_t* loop) {
memset(&loop->child_watcher, 0, sizeof(loop->child_watcher));
return 0;
}
#endif
void uv__wait_children(uv_loop_t* loop) {
uv_process_t* process;
int exit_status;
@ -105,6 +125,7 @@ void uv__wait_children(uv_loop_t* loop) {
continue;
options = 0;
process->flags &= ~UV_HANDLE_REAP;
loop->nfds--;
#else
options = WNOHANG;
#endif
@ -665,7 +686,7 @@ static int uv__spawn_resolve_and_spawn(const uv_process_options_t* options,
if (options->file == NULL)
return ENOENT;
/* The environment for the child process is that of the parent unless overriden
/* The environment for the child process is that of the parent unless overridden
* by options->env */
char** env = environ;
if (options->env != NULL)
@ -1012,6 +1033,10 @@ int uv_spawn(uv_loop_t* loop,
process->flags |= UV_HANDLE_REAP;
loop->flags |= UV_LOOP_REAP_CHILDREN;
}
/* This prevents uv__io_poll() from bailing out prematurely, being unaware
* that we added an event here for it to react to. We will decrement this
* again after the waitpid call succeeds. */
loop->nfds++;
#endif
process->pid = pid;
@ -1080,6 +1105,8 @@ int uv_kill(int pid, int signum) {
void uv__process_close(uv_process_t* handle) {
QUEUE_REMOVE(&handle->queue);
uv__handle_stop(handle);
#ifdef UV_USE_SIGCHLD
if (QUEUE_EMPTY(&handle->loop->process_handles))
uv_signal_stop(&handle->loop->child_watcher);
#endif
}

View File

@ -1,58 +0,0 @@
/* Copyright (c) 2013, Sony Mobile Communications AB
* Copyright (c) 2012, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Android versions < 4.1 have a broken pthread_sigmask. */
#include "uv-common.h"
#include <errno.h>
#include <pthread.h>
#include <signal.h>
int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset) {
static int workaround;
int err;
if (uv__load_relaxed(&workaround)) {
return sigprocmask(how, set, oset);
} else {
err = pthread_sigmask(how, set, oset);
if (err) {
if (err == EINVAL && sigprocmask(how, set, oset) == 0) {
uv__store_relaxed(&workaround, 1);
return 0;
} else {
return -1;
}
}
}
return 0;
}

View File

@ -88,6 +88,11 @@ uint64_t uv_get_constrained_memory(void) {
}
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
int uv_resident_set_memory(size_t* rss) {
int fd;
procfs_asinfo asinfo;

View File

@ -40,7 +40,7 @@ int uv__random_readpath(const char* path, void* buf, size_t buflen) {
if (fd < 0)
return fd;
if (fstat(fd, &s)) {
if (uv__fstat(fd, &s)) {
uv__close(fd);
return UV__ERR(errno);
}

View File

@ -24,8 +24,6 @@
#ifdef __linux__
#include "linux-syscalls.h"
#define uv__random_getrandom_init() 0
#else /* !__linux__ */

View File

@ -279,6 +279,8 @@ static int uv__signal_loop_once_init(uv_loop_t* loop) {
int uv__signal_loop_fork(uv_loop_t* loop) {
if (loop->signal_pipefd[0] == -1)
return 0;
uv__io_stop(loop, &loop->signal_io_watcher, POLLIN);
uv__close(loop->signal_pipefd[0]);
uv__close(loop->signal_pipefd[1]);

View File

@ -1,53 +0,0 @@
/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef UV_SPINLOCK_H_
#define UV_SPINLOCK_H_
#include "internal.h" /* ACCESS_ONCE, UV_UNUSED */
#include "atomic-ops.h"
#define UV_SPINLOCK_INITIALIZER { 0 }
typedef struct {
int lock;
} uv_spinlock_t;
UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock));
UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock));
UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock));
UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock));
UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)) {
ACCESS_ONCE(int, spinlock->lock) = 0;
}
UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)) {
while (!uv_spinlock_trylock(spinlock)) cpu_relax();
}
UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)) {
ACCESS_ONCE(int, spinlock->lock) = 0;
}
UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)) {
/* TODO(bnoordhuis) Maybe change to a ticket lock to guarantee fair queueing.
* Not really critical until we have locks that are (frequently) contended
* for by several threads.
*/
return 0 == cmpxchgi(&spinlock->lock, 0, 1);
}
#endif /* UV_SPINLOCK_H_ */
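
The deleted header above was the last holdout of libuv's private ACCESS_ONCE/cmpxchgi atomics. Its one remaining user, the termios lock, is rewritten later in this commit (see the tty.c hunks below) directly against C11 <stdatomic.h>. A minimal sketch of that replacement pattern, assuming a C11 compiler; the names here are hypothetical:

#include <stdatomic.h>

static _Atomic int lock;  /* zero-initialized, i.e. unlocked */

/* Trylock: atomic_exchange() stores 1 unconditionally and returns the old
 * value, so it succeeds only when the lock was free, and a failed attempt
 * leaves a held lock unchanged. */
static int trylock(void) {
  return 0 == atomic_exchange(&lock, 1);
}

/* Blocking acquire: retry the compare-and-swap until it wins. */
static void lock_acquire(void) {
  int expected;
  do
    expected = 0;
  while (!atomic_compare_exchange_strong(&lock, &expected, 1));
}

static void unlock(void) {
  atomic_store(&lock, 0);
}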

View File

@ -60,6 +60,16 @@ struct uv__stream_select_s {
};
#endif /* defined(__APPLE__) */
union uv__cmsg {
struct cmsghdr hdr;
/* This cannot be larger because of the IBMi PASE limitation that
* the total size of control messages cannot exceed 256 bytes.
*/
char pad[256];
};
STATIC_ASSERT(256 == sizeof(union uv__cmsg));
static void uv__stream_connect(uv_stream_t*);
static void uv__write(uv_stream_t* stream);
static void uv__read(uv_stream_t* stream);
@ -495,76 +505,34 @@ static int uv__emfile_trick(uv_loop_t* loop, int accept_fd) {
}
#if defined(UV_HAVE_KQUEUE)
# define UV_DEC_BACKLOG(w) w->rcount--;
#else
# define UV_DEC_BACKLOG(w) /* no-op */
#endif /* defined(UV_HAVE_KQUEUE) */
void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
uv_stream_t* stream;
int err;
int fd;
stream = container_of(w, uv_stream_t, io_watcher);
assert(events & POLLIN);
assert(stream->accepted_fd == -1);
assert(!(stream->flags & UV_HANDLE_CLOSING));
uv__io_start(stream->loop, &stream->io_watcher, POLLIN);
fd = uv__stream_fd(stream);
err = uv__accept(fd);
/* connection_cb can close the server socket while we're
* in the loop so check it on each iteration.
*/
while (uv__stream_fd(stream) != -1) {
assert(stream->accepted_fd == -1);
if (err == UV_EMFILE || err == UV_ENFILE)
err = uv__emfile_trick(loop, fd); /* Shed load. */
#if defined(UV_HAVE_KQUEUE)
if (w->rcount <= 0)
return;
#endif /* defined(UV_HAVE_KQUEUE) */
if (err < 0)
return;
err = uv__accept(uv__stream_fd(stream));
if (err < 0) {
if (err == UV_EAGAIN || err == UV__ERR(EWOULDBLOCK))
return; /* Not an error. */
stream->accepted_fd = err;
stream->connection_cb(stream, 0);
if (err == UV_ECONNABORTED)
continue; /* Ignore. Nothing we can do about that. */
if (err == UV_EMFILE || err == UV_ENFILE) {
err = uv__emfile_trick(loop, uv__stream_fd(stream));
if (err == UV_EAGAIN || err == UV__ERR(EWOULDBLOCK))
break;
}
stream->connection_cb(stream, err);
continue;
}
UV_DEC_BACKLOG(w)
stream->accepted_fd = err;
stream->connection_cb(stream, 0);
if (stream->accepted_fd != -1) {
/* The user hasn't yet called uv_accept() */
uv__io_stop(loop, &stream->io_watcher, POLLIN);
return;
}
if (stream->type == UV_TCP &&
(stream->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) {
/* Give other processes a chance to accept connections. */
struct timespec timeout = { 0, 1 };
nanosleep(&timeout, NULL);
}
}
if (stream->accepted_fd != -1)
/* The user hasn't yet called uv_accept() */
uv__io_stop(loop, &stream->io_watcher, POLLIN);
}
#undef UV_DEC_BACKLOG
int uv_accept(uv_stream_t* server, uv_stream_t* client) {
int err;
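
For context: uv__emfile_trick(), which is not shown in this hunk, sheds load once the process runs out of descriptors. libuv keeps a spare fd in reserve; on EMFILE/ENFILE it closes that spare, accepts and immediately closes the queued connections so clients get a prompt close instead of a stalled handshake, then re-reserves the slot. A rough standalone sketch of the idea, not libuv's exact code (names hypothetical, error handling trimmed):

#include <fcntl.h>
#include <unistd.h>
#include <sys/socket.h>

static int reserved_fd = -1;  /* opened once at startup, e.g. open("/", O_RDONLY) */

static void shed_load(int server_fd) {
  int fd;

  close(reserved_fd);  /* free one descriptor slot so accept() can succeed */
  while ((fd = accept(server_fd, NULL, NULL)) >= 0)
    close(fd);         /* drain the backlog, closing each connection at once */
  reserved_fd = open("/", O_RDONLY);  /* take the reserve slot back */
}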
@ -665,7 +633,7 @@ static void uv__drain(uv_stream_t* stream) {
uv__stream_osx_interrupt_select(stream);
}
if (!(stream->flags & UV_HANDLE_SHUTTING))
if (!uv__is_stream_shutting(stream))
return;
req = stream->shutdown_req;
@ -674,7 +642,6 @@ static void uv__drain(uv_stream_t* stream) {
if ((stream->flags & UV_HANDLE_CLOSING) ||
!(stream->flags & UV_HANDLE_SHUT)) {
stream->shutdown_req = NULL;
stream->flags &= ~UV_HANDLE_SHUTTING;
uv__req_unregister(stream->loop, req);
err = 0;
@ -812,18 +779,14 @@ static int uv__try_write(uv_stream_t* stream,
if (send_handle != NULL) {
int fd_to_send;
struct msghdr msg;
struct cmsghdr *cmsg;
union {
char data[64];
struct cmsghdr alias;
} scratch;
union uv__cmsg cmsg;
if (uv__is_closing(send_handle))
return UV_EBADF;
fd_to_send = uv__handle_fd((uv_handle_t*) send_handle);
memset(&scratch, 0, sizeof(scratch));
memset(&cmsg, 0, sizeof(cmsg));
assert(fd_to_send >= 0);
@ -833,20 +796,13 @@ static int uv__try_write(uv_stream_t* stream,
msg.msg_iovlen = iovcnt;
msg.msg_flags = 0;
msg.msg_control = &scratch.alias;
msg.msg_control = &cmsg.hdr;
msg.msg_controllen = CMSG_SPACE(sizeof(fd_to_send));
cmsg = CMSG_FIRSTHDR(&msg);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
cmsg->cmsg_len = CMSG_LEN(sizeof(fd_to_send));
/* silence aliasing warning */
{
void* pv = CMSG_DATA(cmsg);
int* pi = pv;
*pi = fd_to_send;
}
cmsg.hdr.cmsg_level = SOL_SOCKET;
cmsg.hdr.cmsg_type = SCM_RIGHTS;
cmsg.hdr.cmsg_len = CMSG_LEN(sizeof(fd_to_send));
memcpy(CMSG_DATA(&cmsg.hdr), &fd_to_send, sizeof(fd_to_send));
do
n = sendmsg(uv__stream_fd(stream), &msg, 0);
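
The rewrite above drops the CMSG_FIRSTHDR pointer dance and the aliasing workaround in favor of the fixed uv__cmsg union plus a memcpy. As a self-contained illustration of the same descriptor-passing shape, a hedged sketch that sends one fd over a connected AF_UNIX stream socket (hypothetical helper, sized for a single fd rather than 256 bytes):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_fd(int sock, int fd_to_send) {
  union {
    struct cmsghdr hdr;
    char pad[CMSG_SPACE(sizeof(int))];  /* correctly aligned cmsg storage */
  } cmsg;
  struct msghdr msg;
  struct iovec iov;
  char byte = 0;

  iov.iov_base = &byte;  /* sendmsg() wants at least one byte of real data */
  iov.iov_len = 1;

  memset(&msg, 0, sizeof(msg));
  memset(&cmsg, 0, sizeof(cmsg));
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = &cmsg.hdr;
  msg.msg_controllen = CMSG_SPACE(sizeof(int));

  cmsg.hdr.cmsg_level = SOL_SOCKET;
  cmsg.hdr.cmsg_type = SCM_RIGHTS;
  cmsg.hdr.cmsg_len = CMSG_LEN(sizeof(int));
  memcpy(CMSG_DATA(&cmsg.hdr), &fd_to_send, sizeof(int));

  return sendmsg(sock, &msg, 0);
}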
@ -884,9 +840,16 @@ static void uv__write(uv_stream_t* stream) {
QUEUE* q;
uv_write_t* req;
ssize_t n;
int count;
assert(uv__stream_fd(stream) >= 0);
/* Prevent loop starvation when the consumer of this stream reads as fast as
* (or faster than) we can write it. This `count` mechanism does not need to
* change even if we switch to edge-triggered I/O.
*/
count = 32;
for (;;) {
if (QUEUE_EMPTY(&stream->write_queue))
return;
@ -905,10 +868,13 @@ static void uv__write(uv_stream_t* stream) {
req->send_handle = NULL;
if (uv__write_req_update(stream, req, n)) {
uv__write_req_finish(req);
return; /* TODO(bnoordhuis) Start trying to write the next request. */
if (count-- > 0)
continue; /* Start trying to write the next request. */
return;
}
} else if (n != UV_EAGAIN)
break;
goto error;
/* If this is a blocking stream, try again. */
if (stream->flags & UV_HANDLE_BLOCKING_WRITES)
@ -923,6 +889,7 @@ static void uv__write(uv_stream_t* stream) {
return;
}
error:
req->error = n;
uv__write_req_finish(req);
uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
@ -1010,57 +977,38 @@ static int uv__stream_queue_fd(uv_stream_t* stream, int fd) {
}
#if defined(__PASE__)
/* On IBMi PASE the control message length cannot exceed 256 bytes. */
# define UV__CMSG_FD_COUNT 60
#else
# define UV__CMSG_FD_COUNT 64
#endif
#define UV__CMSG_FD_SIZE (UV__CMSG_FD_COUNT * sizeof(int))
static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) {
struct cmsghdr* cmsg;
int fd;
int err;
size_t i;
size_t count;
for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
char* start;
char* end;
int err;
void* pv;
int* pi;
unsigned int i;
unsigned int count;
if (cmsg->cmsg_type != SCM_RIGHTS) {
fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n",
cmsg->cmsg_type);
continue;
}
/* silence aliasing warning */
pv = CMSG_DATA(cmsg);
pi = pv;
/* Count available fds */
start = (char*) cmsg;
end = (char*) cmsg + cmsg->cmsg_len;
count = 0;
while (start + CMSG_LEN(count * sizeof(*pi)) < end)
count++;
assert(start + CMSG_LEN(count * sizeof(*pi)) == end);
assert(cmsg->cmsg_len >= CMSG_LEN(0));
count = cmsg->cmsg_len - CMSG_LEN(0);
assert(count % sizeof(fd) == 0);
count /= sizeof(fd);
for (i = 0; i < count; i++) {
memcpy(&fd, (char*) CMSG_DATA(cmsg) + i * sizeof(fd), sizeof(fd));
/* Already has accepted fd, queue now */
if (stream->accepted_fd != -1) {
err = uv__stream_queue_fd(stream, pi[i]);
err = uv__stream_queue_fd(stream, fd);
if (err != 0) {
/* Close rest */
for (; i < count; i++)
uv__close(pi[i]);
uv__close(fd);
return err;
}
} else {
stream->accepted_fd = pi[i];
stream->accepted_fd = fd;
}
}
}
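
On the receive side the fd count is now derived arithmetically (payload = cmsg_len minus the CMSG_LEN(0) header) and each descriptor is memcpy'd out rather than read through an aliased int pointer. A hedged recv-side counterpart to the sketch above, with a hypothetical callback:

#include <string.h>
#include <sys/socket.h>

/* Invoke cb(fd) for every SCM_RIGHTS descriptor in a received msghdr. */
static void for_each_fd(struct msghdr* msg, void (*cb)(int)) {
  struct cmsghdr* cmsg;
  size_t i;
  size_t count;
  int fd;

  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
    if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
      continue;
    count = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);  /* whole ints */
    for (i = 0; i < count; i++) {
      memcpy(&fd, (char*) CMSG_DATA(cmsg) + i * sizeof(int), sizeof(int));
      cb(fd);
    }
  }
}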
@ -1069,17 +1017,11 @@ static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) {
}
#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wgnu-folding-constant"
# pragma clang diagnostic ignored "-Wvla-extension"
#endif
static void uv__read(uv_stream_t* stream) {
uv_buf_t buf;
ssize_t nread;
struct msghdr msg;
char cmsg_space[CMSG_SPACE(UV__CMSG_FD_SIZE)];
union uv__cmsg cmsg;
int count;
int err;
int is_ipc;
@ -1125,8 +1067,8 @@ static void uv__read(uv_stream_t* stream) {
msg.msg_name = NULL;
msg.msg_namelen = 0;
/* Set up to receive a descriptor even if one isn't in the message */
msg.msg_controllen = sizeof(cmsg_space);
msg.msg_control = cmsg_space;
msg.msg_controllen = sizeof(cmsg);
msg.msg_control = &cmsg.hdr;
do {
nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0);
@ -1210,14 +1152,6 @@ static void uv__read(uv_stream_t* stream) {
}
#ifdef __clang__
# pragma clang diagnostic pop
#endif
#undef UV__CMSG_FD_COUNT
#undef UV__CMSG_FD_SIZE
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
assert(stream->type == UV_TCP ||
stream->type == UV_TTY ||
@ -1225,7 +1159,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
if (!(stream->flags & UV_HANDLE_WRITABLE) ||
stream->flags & UV_HANDLE_SHUT ||
stream->flags & UV_HANDLE_SHUTTING ||
uv__is_stream_shutting(stream) ||
uv__is_closing(stream)) {
return UV_ENOTCONN;
}
@ -1238,7 +1172,6 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
req->handle = stream;
req->cb = cb;
stream->shutdown_req = req;
stream->flags |= UV_HANDLE_SHUTTING;
stream->flags &= ~UV_HANDLE_WRITABLE;
if (QUEUE_EMPTY(&stream->write_queue))

View File

@ -320,9 +320,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}
uv__metrics_inc_events(loop, nevents);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
uv__metrics_inc_events_waiting(loop, nevents);
}
if (have_signals != 0) {
@ -415,6 +417,11 @@ uint64_t uv_get_constrained_memory(void) {
}
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
void uv_loadavg(double avg[3]) {
(void) getloadavg(avg, 3);
}

View File

@ -28,16 +28,39 @@
#include <errno.h>
static int new_socket(uv_tcp_t* handle, int domain, unsigned long flags) {
struct sockaddr_storage saddr;
static int maybe_bind_socket(int fd) {
union uv__sockaddr s;
socklen_t slen;
slen = sizeof(s);
memset(&s, 0, sizeof(s));
if (getsockname(fd, &s.addr, &slen))
return UV__ERR(errno);
if (s.addr.sa_family == AF_INET)
if (s.in.sin_port != 0)
return 0; /* Already bound to a port. */
if (s.addr.sa_family == AF_INET6)
if (s.in6.sin6_port != 0)
return 0; /* Already bound to a port. */
/* Bind to an arbitrary port. */
if (bind(fd, &s.addr, slen))
return UV__ERR(errno);
return 0;
}
static int new_socket(uv_tcp_t* handle, int domain, unsigned int flags) {
int sockfd;
int err;
err = uv__socket(domain, SOCK_STREAM, 0);
if (err < 0)
return err;
sockfd = err;
sockfd = uv__socket(domain, SOCK_STREAM, 0);
if (sockfd < 0)
return sockfd;
err = uv__stream_open((uv_stream_t*) handle, sockfd, flags);
if (err) {
@ -45,74 +68,44 @@ static int new_socket(uv_tcp_t* handle, int domain, unsigned long flags) {
return err;
}
if (flags & UV_HANDLE_BOUND) {
/* Bind this new socket to an arbitrary port */
slen = sizeof(saddr);
memset(&saddr, 0, sizeof(saddr));
if (getsockname(uv__stream_fd(handle), (struct sockaddr*) &saddr, &slen)) {
uv__close(sockfd);
return UV__ERR(errno);
}
if (bind(uv__stream_fd(handle), (struct sockaddr*) &saddr, slen)) {
uv__close(sockfd);
return UV__ERR(errno);
}
}
if (flags & UV_HANDLE_BOUND)
return maybe_bind_socket(sockfd);
return 0;
}
static int maybe_new_socket(uv_tcp_t* handle, int domain, unsigned long flags) {
struct sockaddr_storage saddr;
socklen_t slen;
static int maybe_new_socket(uv_tcp_t* handle, int domain, unsigned int flags) {
int sockfd;
int err;
if (domain == AF_UNSPEC) {
handle->flags |= flags;
return 0;
}
if (domain == AF_UNSPEC)
goto out;
if (uv__stream_fd(handle) != -1) {
sockfd = uv__stream_fd(handle);
if (sockfd == -1)
return new_socket(handle, domain, flags);
if (flags & UV_HANDLE_BOUND) {
if (!(flags & UV_HANDLE_BOUND))
goto out;
if (handle->flags & UV_HANDLE_BOUND) {
/* It is already bound to a port. */
handle->flags |= flags;
return 0;
}
if (handle->flags & UV_HANDLE_BOUND)
goto out; /* Already bound to a port. */
/* Query to see if tcp socket is bound. */
slen = sizeof(saddr);
memset(&saddr, 0, sizeof(saddr));
if (getsockname(uv__stream_fd(handle), (struct sockaddr*) &saddr, &slen))
return UV__ERR(errno);
err = maybe_bind_socket(sockfd);
if (err)
return err;
if ((saddr.ss_family == AF_INET6 &&
((struct sockaddr_in6*) &saddr)->sin6_port != 0) ||
(saddr.ss_family == AF_INET &&
((struct sockaddr_in*) &saddr)->sin_port != 0)) {
/* Handle is already bound to a port. */
handle->flags |= flags;
return 0;
}
out:
/* Bind to arbitrary port */
if (bind(uv__stream_fd(handle), (struct sockaddr*) &saddr, slen))
return UV__ERR(errno);
}
handle->flags |= flags;
return 0;
}
return new_socket(handle, domain, flags);
handle->flags |= flags;
return 0;
}
int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) {
int domain;
int err;
/* Use the lower 8 bits for the domain */
domain = flags & 0xFF;
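
maybe_bind_socket() above leans on a getsockname() detail: for an unbound socket the kernel reports the wildcard address with port 0, and that very structure can be fed straight back into bind() to grab an ephemeral port. A standalone demonstration of the trick (assuming Linux-ish semantics; error checks omitted):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void) {
  struct sockaddr_in addr;
  socklen_t len = sizeof(addr);
  int fd = socket(AF_INET, SOCK_STREAM, 0);

  memset(&addr, 0, sizeof(addr));
  getsockname(fd, (struct sockaddr*) &addr, &len);  /* family set, port 0 */
  bind(fd, (struct sockaddr*) &addr, len);          /* kernel picks a port */

  len = sizeof(addr);
  getsockname(fd, (struct sockaddr*) &addr, &len);
  printf("bound to port %d\n", ntohs(addr.sin_port));
  return 0;
}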
@ -129,9 +122,12 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) {
*/
if (domain != AF_UNSPEC) {
int err = maybe_new_socket(tcp, domain, 0);
err = new_socket(tcp, domain, 0);
if (err) {
QUEUE_REMOVE(&tcp->handle_queue);
if (tcp->io_watcher.fd != -1)
uv__close(tcp->io_watcher.fd);
tcp->io_watcher.fd = -1;
return err;
}
}
@ -317,7 +313,7 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
struct linger l = { 1, 0 };
/* Disallow setting SO_LINGER to zero due to some platform inconsistencies */
if (handle->flags & UV_HANDLE_SHUTTING)
if (uv__is_stream_shutting(handle))
return UV_EINVAL;
fd = uv__stream_fd(handle);
@ -338,24 +334,12 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
int uv__tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
static int single_accept_cached = -1;
unsigned long flags;
int single_accept;
unsigned int flags;
int err;
if (tcp->delayed_error)
return tcp->delayed_error;
single_accept = uv__load_relaxed(&single_accept_cached);
if (single_accept == -1) {
const char* val = getenv("UV_TCP_SINGLE_ACCEPT");
single_accept = (val != NULL && atoi(val) != 0); /* Off by default. */
uv__store_relaxed(&single_accept_cached, single_accept);
}
if (single_accept)
tcp->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
flags = 0;
#if defined(__MVS__)
/* on zOS the listen call does not bind automatically
@ -460,10 +444,6 @@ int uv_tcp_keepalive(uv_tcp_t* handle, int on, unsigned int delay) {
int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
if (enable)
handle->flags &= ~UV_HANDLE_TCP_SINGLE_ACCEPT;
else
handle->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
return 0;
}

View File

@ -41,127 +41,20 @@
#include <gnu/libc-version.h> /* gnu_get_libc_version() */
#endif
#if defined(__linux__)
# include <sched.h>
# define uv__cpu_set_t cpu_set_t
#elif defined(__FreeBSD__)
# include <sys/param.h>
# include <sys/cpuset.h>
# include <pthread_np.h>
# define uv__cpu_set_t cpuset_t
#endif
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
#if defined(PTHREAD_BARRIER_SERIAL_THREAD)
STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
#endif
/* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */
#if defined(_AIX) || \
defined(__OpenBSD__) || \
!defined(PTHREAD_BARRIER_SERIAL_THREAD)
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
struct _uv_barrier* b;
int rc;
if (barrier == NULL || count == 0)
return UV_EINVAL;
b = uv__malloc(sizeof(*b));
if (b == NULL)
return UV_ENOMEM;
b->in = 0;
b->out = 0;
b->threshold = count;
rc = uv_mutex_init(&b->mutex);
if (rc != 0)
goto error2;
rc = uv_cond_init(&b->cond);
if (rc != 0)
goto error;
barrier->b = b;
return 0;
error:
uv_mutex_destroy(&b->mutex);
error2:
uv__free(b);
return rc;
}
int uv_barrier_wait(uv_barrier_t* barrier) {
struct _uv_barrier* b;
int last;
if (barrier == NULL || barrier->b == NULL)
return UV_EINVAL;
b = barrier->b;
uv_mutex_lock(&b->mutex);
if (++b->in == b->threshold) {
b->in = 0;
b->out = b->threshold;
uv_cond_signal(&b->cond);
} else {
do
uv_cond_wait(&b->cond, &b->mutex);
while (b->in != 0);
}
last = (--b->out == 0);
uv_cond_signal(&b->cond);
uv_mutex_unlock(&b->mutex);
return last;
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
struct _uv_barrier* b;
b = barrier->b;
uv_mutex_lock(&b->mutex);
assert(b->in == 0);
while (b->out != 0)
uv_cond_wait(&b->cond, &b->mutex);
if (b->in != 0)
abort();
uv_mutex_unlock(&b->mutex);
uv_mutex_destroy(&b->mutex);
uv_cond_destroy(&b->cond);
uv__free(barrier->b);
barrier->b = NULL;
}
#else
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
return UV__ERR(pthread_barrier_init(barrier, NULL, count));
}
int uv_barrier_wait(uv_barrier_t* barrier) {
int rc;
rc = pthread_barrier_wait(barrier);
if (rc != 0)
if (rc != PTHREAD_BARRIER_SERIAL_THREAD)
abort();
return rc == PTHREAD_BARRIER_SERIAL_THREAD;
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
if (pthread_barrier_destroy(barrier))
abort();
}
#endif
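
The hand-rolled in/out/threshold barrier leaves this file, but the public uv_barrier API it implemented is unchanged for callers. A minimal usage sketch (error handling omitted); uv_barrier_wait() returns a positive value in exactly one "serializer" thread, which is the conventional place to destroy the barrier:

#include <uv.h>

static uv_barrier_t barrier;

static void worker(void* arg) {
  (void) arg;
  /* ... per-thread setup ... */
  if (uv_barrier_wait(&barrier) > 0)
    uv_barrier_destroy(&barrier);  /* exactly one waiter gets the serial role */
}

int main(void) {
  uv_thread_t threads[4];
  int i;

  uv_barrier_init(&barrier, 4);
  for (i = 0; i < 4; i++)
    uv_thread_create(&threads[i], worker, NULL);
  for (i = 0; i < 4; i++)
    uv_thread_join(&threads[i]);
  return 0;
}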
/* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
* too small to safely receive signals on.
*
@ -284,6 +177,106 @@ int uv_thread_create_ex(uv_thread_t* tid,
return UV__ERR(err);
}
#if UV__CPU_AFFINITY_SUPPORTED
int uv_thread_setaffinity(uv_thread_t* tid,
char* cpumask,
char* oldmask,
size_t mask_size) {
int i;
int r;
uv__cpu_set_t cpuset;
int cpumasksize;
cpumasksize = uv_cpumask_size();
if (cpumasksize < 0)
return cpumasksize;
if (mask_size < (size_t)cpumasksize)
return UV_EINVAL;
if (oldmask != NULL) {
r = uv_thread_getaffinity(tid, oldmask, mask_size);
if (r < 0)
return r;
}
CPU_ZERO(&cpuset);
for (i = 0; i < cpumasksize; i++)
if (cpumask[i])
CPU_SET(i, &cpuset);
#if defined(__ANDROID__)
if (sched_setaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
r = errno;
else
r = 0;
#else
r = pthread_setaffinity_np(*tid, sizeof(cpuset), &cpuset);
#endif
return UV__ERR(r);
}
int uv_thread_getaffinity(uv_thread_t* tid,
char* cpumask,
size_t mask_size) {
int r;
int i;
uv__cpu_set_t cpuset;
int cpumasksize;
cpumasksize = uv_cpumask_size();
if (cpumasksize < 0)
return cpumasksize;
if (mask_size < (size_t)cpumasksize)
return UV_EINVAL;
CPU_ZERO(&cpuset);
#if defined(__ANDROID__)
if (sched_getaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
r = errno;
else
r = 0;
#else
r = pthread_getaffinity_np(*tid, sizeof(cpuset), &cpuset);
#endif
if (r)
return UV__ERR(r);
for (i = 0; i < cpumasksize; i++)
cpumask[i] = !!CPU_ISSET(i, &cpuset);
return 0;
}
#else
int uv_thread_setaffinity(uv_thread_t* tid,
char* cpumask,
char* oldmask,
size_t mask_size) {
return UV_ENOTSUP;
}
int uv_thread_getaffinity(uv_thread_t* tid,
char* cpumask,
size_t mask_size) {
return UV_ENOTSUP;
}
#endif /* defined(__linux__) || defined(UV_BSD_H) */
int uv_thread_getcpu(void) {
#if UV__CPU_AFFINITY_SUPPORTED
int cpu;
cpu = sched_getcpu();
if (cpu < 0)
return UV__ERR(errno);
return cpu;
#else
return UV_ENOTSUP;
#endif
}
uv_thread_t uv_thread_self(void) {
return pthread_self();
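
The new affinity entry points take a byte-per-CPU mask whose length must be at least uv_cpumask_size(). A hedged usage sketch that pins the calling thread to CPU 0 (hypothetical helper; only meaningful where UV__CPU_AFFINITY_SUPPORTED holds):

#include <stdlib.h>
#include <uv.h>

int pin_self_to_cpu0(void) {
  uv_thread_t self = uv_thread_self();
  int size = uv_cpumask_size();
  char* mask;
  int r;

  if (size < 0)
    return size;           /* e.g. UV_ENOTSUP on unsupported platforms */

  mask = calloc(size, 1);  /* one byte per possible CPU, all clear */
  if (mask == NULL)
    return UV_ENOMEM;

  mask[0] = 1;             /* allow CPU 0 only */
  r = uv_thread_setaffinity(&self, mask, NULL, size);
  free(mask);
  return r;
}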
@ -585,7 +578,7 @@ static void uv__custom_sem_post(uv_sem_t* sem_) {
uv_mutex_lock(&sem->mutex);
sem->value++;
if (sem->value == 1)
uv_cond_signal(&sem->cond);
uv_cond_signal(&sem->cond); /* Release one to replace us. */
uv_mutex_unlock(&sem->mutex);
}

View File

@ -21,8 +21,8 @@
#include "uv.h"
#include "internal.h"
#include "spinlock.h"
#include <stdatomic.h>
#include <stdlib.h>
#include <assert.h>
#include <unistd.h>
@ -64,7 +64,7 @@ static int isreallyatty(int file) {
static int orig_termios_fd = -1;
static struct termios orig_termios;
static uv_spinlock_t termios_spinlock = UV_SPINLOCK_INITIALIZER;
static _Atomic int termios_spinlock;
int uv__tcsetattr(int fd, int how, const struct termios *term) {
int rc;
@ -81,7 +81,7 @@ int uv__tcsetattr(int fd, int how, const struct termios *term) {
static int uv__tty_is_slave(const int fd) {
int result;
#if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#if defined(__linux__) || defined(__FreeBSD__)
int dummy;
result = ioctl(fd, TIOCGPTN, &dummy) != 0;
@ -113,7 +113,7 @@ static int uv__tty_is_slave(const int fd) {
}
/* Lookup stat structure behind the file descriptor. */
if (fstat(fd, &sb) != 0)
if (uv__fstat(fd, &sb) != 0)
abort();
/* Assert character device. */
@ -280,6 +280,7 @@ static void uv__tty_make_raw(struct termios* tio) {
int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
struct termios tmp;
int expected;
int fd;
int rc;
@ -296,12 +297,16 @@ int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
return UV__ERR(errno);
/* This is used for uv_tty_reset_mode() */
uv_spinlock_lock(&termios_spinlock);
do
expected = 0;
while (!atomic_compare_exchange_strong(&termios_spinlock, &expected, 1));
if (orig_termios_fd == -1) {
orig_termios = tty->orig_termios;
orig_termios_fd = fd;
}
uv_spinlock_unlock(&termios_spinlock);
atomic_store(&termios_spinlock, 0);
}
tmp = tty->orig_termios;
@ -360,7 +365,7 @@ uv_handle_type uv_guess_handle(uv_file file) {
if (isatty(file))
return UV_TTY;
if (fstat(file, &s)) {
if (uv__fstat(file, &s)) {
#if defined(__PASE__)
/* On ibmi receiving RST from TCP instead of FIN immediately puts fd into
* an error state. fstat will return EINVAL, getsockname will also return
@ -445,14 +450,15 @@ int uv_tty_reset_mode(void) {
int err;
saved_errno = errno;
if (!uv_spinlock_trylock(&termios_spinlock))
if (atomic_exchange(&termios_spinlock, 1))
return UV_EBUSY; /* In uv_tty_set_mode(). */
err = 0;
if (orig_termios_fd != -1)
err = uv__tcsetattr(orig_termios_fd, TCSANOW, &orig_termios);
uv_spinlock_unlock(&termios_spinlock);
atomic_store(&termios_spinlock, 0);
errno = saved_errno;
return err;
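
With the spinlock gone, uv_tty_set_mode() and uv_tty_reset_mode() now coordinate through the _Atomic int above; nothing changes for callers. A minimal raw-mode round trip, assuming fd 0 is a TTY (error handling omitted):

#include <uv.h>

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_tty_t tty;

  uv_tty_init(loop, &tty, 0, 1);           /* fd 0 = stdin */
  uv_tty_set_mode(&tty, UV_TTY_MODE_RAW);  /* saves orig_termios on first use */

  /* ... read keystrokes, e.g. via uv_read_start() ... */

  uv_tty_reset_mode();                     /* restore the saved termios */
  uv_close((uv_handle_t*) &tty, NULL);
  return uv_run(loop, UV_RUN_DEFAULT);
}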

View File

@ -40,12 +40,6 @@
# define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
#endif
union uv__sockaddr {
struct sockaddr_in6 in6;
struct sockaddr_in in;
struct sockaddr addr;
};
static void uv__udp_run_completed(uv_udp_t* handle);
static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
static void uv__udp_recvmsg(uv_udp_t* handle);
@ -54,36 +48,6 @@ static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
int domain,
unsigned int flags);
#if HAVE_MMSG
#define UV__MMSG_MAXWIDTH 20
static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf);
static void uv__udp_sendmmsg(uv_udp_t* handle);
static int uv__recvmmsg_avail;
static int uv__sendmmsg_avail;
static uv_once_t once = UV_ONCE_INIT;
static void uv__udp_mmsg_init(void) {
int ret;
int s;
s = uv__socket(AF_INET, SOCK_DGRAM, 0);
if (s < 0)
return;
ret = uv__sendmmsg(s, NULL, 0);
if (ret == 0 || errno != ENOSYS) {
uv__sendmmsg_avail = 1;
uv__recvmmsg_avail = 1;
} else {
ret = uv__recvmmsg(s, NULL, 0);
if (ret == 0 || errno != ENOSYS)
uv__recvmmsg_avail = 1;
}
uv__close(s);
}
#endif
void uv__udp_close(uv_udp_t* handle) {
uv__io_close(handle->loop, &handle->io_watcher);
@ -183,11 +147,11 @@ static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
}
}
#if HAVE_MMSG
static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
struct sockaddr_in6 peers[UV__MMSG_MAXWIDTH];
struct iovec iov[UV__MMSG_MAXWIDTH];
struct uv__mmsghdr msgs[UV__MMSG_MAXWIDTH];
#if defined(__linux__) || defined(__FreeBSD__)
struct sockaddr_in6 peers[20];
struct iovec iov[ARRAY_SIZE(peers)];
struct mmsghdr msgs[ARRAY_SIZE(peers)];
ssize_t nread;
uv_buf_t chunk_buf;
size_t chunks;
@ -212,7 +176,7 @@ static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
}
do
nread = uv__recvmmsg(handle->io_watcher.fd, msgs, chunks);
nread = recvmmsg(handle->io_watcher.fd, msgs, chunks, 0, NULL);
while (nread == -1 && errno == EINTR);
if (nread < 1) {
@ -240,8 +204,10 @@ static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
handle->recv_cb(handle, 0, buf, NULL, UV_UDP_MMSG_FREE);
}
return nread;
#else /* __linux__ || __FreeBSD__ */
return UV_ENOSYS;
#endif /* __linux__ || __FreeBSD__ */
}
#endif
static void uv__udp_recvmsg(uv_udp_t* handle) {
struct sockaddr_storage peer;
@ -268,14 +234,12 @@ static void uv__udp_recvmsg(uv_udp_t* handle) {
}
assert(buf.base != NULL);
#if HAVE_MMSG
if (uv_udp_using_recvmmsg(handle)) {
nread = uv__udp_recvmmsg(handle, &buf);
if (nread > 0)
count -= nread;
continue;
}
#endif
memset(&h, 0, sizeof(h));
memset(&peer, 0, sizeof(peer));
@ -311,11 +275,11 @@ static void uv__udp_recvmsg(uv_udp_t* handle) {
&& handle->recv_cb != NULL);
}
#if HAVE_MMSG
static void uv__udp_sendmmsg(uv_udp_t* handle) {
static void uv__udp_sendmsg(uv_udp_t* handle) {
#if defined(__linux__) || defined(__FreeBSD__)
uv_udp_send_t* req;
struct uv__mmsghdr h[UV__MMSG_MAXWIDTH];
struct uv__mmsghdr *p;
struct mmsghdr h[20];
struct mmsghdr* p;
QUEUE* q;
ssize_t npkts;
size_t pkts;
@ -326,7 +290,7 @@ static void uv__udp_sendmmsg(uv_udp_t* handle) {
write_queue_drain:
for (pkts = 0, q = QUEUE_HEAD(&handle->write_queue);
pkts < UV__MMSG_MAXWIDTH && q != &handle->write_queue;
pkts < ARRAY_SIZE(h) && q != &handle->write_queue;
++pkts, q = QUEUE_HEAD(q)) {
assert(q != NULL);
req = QUEUE_DATA(q, uv_udp_send_t, queue);
@ -355,7 +319,7 @@ write_queue_drain:
}
do
npkts = uv__sendmmsg(handle->io_watcher.fd, h, pkts);
npkts = sendmmsg(handle->io_watcher.fd, h, pkts, 0);
while (npkts == -1 && errno == EINTR);
if (npkts < 1) {
@ -401,24 +365,12 @@ write_queue_drain:
if (!QUEUE_EMPTY(&handle->write_queue))
goto write_queue_drain;
uv__io_feed(handle->loop, &handle->io_watcher);
return;
}
#endif
static void uv__udp_sendmsg(uv_udp_t* handle) {
#else /* __linux__ || __FreeBSD__ */
uv_udp_send_t* req;
struct msghdr h;
QUEUE* q;
ssize_t size;
#if HAVE_MMSG
uv_once(&once, uv__udp_mmsg_init);
if (uv__sendmmsg_avail) {
uv__udp_sendmmsg(handle);
return;
}
#endif
while (!QUEUE_EMPTY(&handle->write_queue)) {
q = QUEUE_HEAD(&handle->write_queue);
assert(q != NULL);
@ -466,6 +418,7 @@ static void uv__udp_sendmsg(uv_udp_t* handle) {
QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
uv__io_feed(handle->loop, &handle->io_watcher);
}
#endif /* __linux__ || __FreeBSD__ */
}
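
As the #ifdef above shows, the runtime uv__sendmmsg/uv__recvmmsg probing is gone: on Linux and FreeBSD the kernel's batching syscalls are now called directly, and every other platform takes the plain sendmsg() path. For reference, a bare recvmmsg() batch receive looks roughly like this (Linux, _GNU_SOURCE; the buffers here are throwaway for brevity):

#define _GNU_SOURCE
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Drain up to 8 datagrams from fd in one syscall; returns the number of
 * messages received, or -1. msgs[i].msg_len reports each datagram's size. */
static int batch_recv(int fd) {
  char bufs[8][1500];
  struct iovec iov[8];
  struct mmsghdr msgs[8];
  int i;

  memset(msgs, 0, sizeof(msgs));
  for (i = 0; i < 8; i++) {
    iov[i].iov_base = bufs[i];
    iov[i].iov_len = sizeof(bufs[i]);
    msgs[i].msg_hdr.msg_iov = &iov[i];
    msgs[i].msg_hdr.msg_iovlen = 1;
  }
  return recvmmsg(fd, msgs, 8, 0, NULL);
}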
/* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional
@ -495,7 +448,8 @@ static int uv__set_reuse(int fd) {
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
return UV__ERR(errno);
}
#elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__)
#elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__) && \
!defined(__sun__)
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
return UV__ERR(errno);
#else
@ -1061,11 +1015,9 @@ int uv__udp_init_ex(uv_loop_t* loop,
int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
#if HAVE_MMSG
if (handle->flags & UV_HANDLE_UDP_RECVMMSG) {
uv_once(&once, uv__udp_mmsg_init);
return uv__recvmmsg_avail;
}
#if defined(__linux__) || defined(__FreeBSD__)
if (handle->flags & UV_HANDLE_UDP_RECVMMSG)
return 1;
#endif
return 0;
}
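
uv_udp_using_recvmmsg() thus reduces to a compile-time platform check plus the UV_HANDLE_UDP_RECVMMSG flag. Opting in from application code is unchanged: request UV_UDP_RECVMMSG at init time and hand libuv one large buffer that it slices into per-datagram chunks. A hedged sketch (bind/recv_start calls elided):

#include <stdlib.h>
#include <uv.h>

static void on_alloc(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
  /* One big allocation; with recvmmsg, libuv parcels it out per datagram. */
  buf->base = malloc(64 * 1024);
  buf->len = 64 * 1024;
}

static void on_recv(uv_udp_t* handle, ssize_t nread, const uv_buf_t* buf,
                    const struct sockaddr* addr, unsigned int flags) {
  /* ... consume nread bytes at buf->base ... */
  if (flags & UV_UDP_MMSG_CHUNK)
    return;          /* other datagrams still reference this buffer */
  free(buf->base);   /* final callback for the buffer (UV_UDP_MMSG_FREE) */
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_udp_t udp;

  uv_udp_init_ex(loop, &udp, AF_INET | UV_UDP_RECVMMSG);
  /* ... uv_udp_bind(), then uv_udp_recv_start(&udp, on_alloc, on_recv) ... */
  return uv_run(loop, UV_RUN_DEFAULT);
}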