libuv 1.42.0.

git-svn-id: https://www.unprompted.com/svn/projects/tildefriends/trunk@3650 ed5197a5-7fde-0310-b194-c3ffbd925b24
2021-07-27 22:08:18 +00:00
parent 5197eb91f7
commit da51e87774
183 changed files with 4013 additions and 1768 deletions

deps/libuv/src/idna.c vendored

@@ -19,6 +19,7 @@
#include "uv.h"
#include "idna.h"
#include <assert.h>
#include <string.h>
static unsigned uv__utf8_decode1_slow(const char** p,
@@ -32,7 +33,7 @@ static unsigned uv__utf8_decode1_slow(const char** p,
if (a > 0xF7)
return -1;
switch (*p - pe) {
switch (pe - *p) {
default:
if (a > 0xEF) {
min = 0x10000;
@@ -62,6 +63,8 @@ static unsigned uv__utf8_decode1_slow(const char** p,
a = 0;
break;
}
/* Fall through. */
case 0:
return -1; /* Invalid continuation byte. */
}
@@ -88,6 +91,8 @@ static unsigned uv__utf8_decode1_slow(const char** p,
unsigned uv__utf8_decode1(const char** p, const char* pe) {
unsigned a;
assert(*p < pe);
a = (unsigned char) *(*p)++;
if (a < 128)
@@ -96,9 +101,6 @@ unsigned uv__utf8_decode1(const char** p, const char* pe) {
return uv__utf8_decode1_slow(p, pe, a);
}
#define foreach_codepoint(c, p, pe) \
for (; (void) (*p <= pe && (c = uv__utf8_decode1(p, pe))), *p <= pe;)
static int uv__idna_toascii_label(const char* s, const char* se,
char** d, char* de) {
static const char alphabet[] = "abcdefghijklmnopqrstuvwxyz0123456789";
@@ -121,15 +123,22 @@ static int uv__idna_toascii_label(const char* s, const char* se,
ss = s;
todo = 0;
foreach_codepoint(c, &s, se) {
/* Note: after this loop we've visited all UTF-8 characters and know
* they're legal so we no longer need to check for decode errors.
*/
while (s < se) {
c = uv__utf8_decode1(&s, se);
if (c == -1u)
return UV_EINVAL;
if (c < 128)
h++;
else if (c == (unsigned) -1)
return UV_EINVAL;
else
todo++;
}
/* Only write "xn--" when there are non-ASCII characters. */
if (todo > 0) {
if (*d < de) *(*d)++ = 'x';
if (*d < de) *(*d)++ = 'n';
@@ -137,9 +146,13 @@ static int uv__idna_toascii_label(const char* s, const char* se,
if (*d < de) *(*d)++ = '-';
}
/* Write ASCII characters. */
x = 0;
s = ss;
foreach_codepoint(c, &s, se) {
while (s < se) {
c = uv__utf8_decode1(&s, se);
assert(c != -1u);
if (c > 127)
continue;
@@ -166,10 +179,15 @@ static int uv__idna_toascii_label(const char* s, const char* se,
while (todo > 0) {
m = -1;
s = ss;
foreach_codepoint(c, &s, se)
while (s < se) {
c = uv__utf8_decode1(&s, se);
assert(c != -1u);
if (c >= n)
if (c < m)
m = c;
}
x = m - n;
y = h + 1;
@@ -181,7 +199,10 @@ static int uv__idna_toascii_label(const char* s, const char* se,
n = m;
s = ss;
foreach_codepoint(c, &s, se) {
while (s < se) {
c = uv__utf8_decode1(&s, se);
assert(c != -1u);
if (c < n)
if (++delta == 0)
return UV_E2BIG; /* Overflow. */
@@ -245,8 +266,6 @@ static int uv__idna_toascii_label(const char* s, const char* se,
return 0;
}
#undef foreach_codepoint
long uv__idna_toascii(const char* s, const char* se, char* d, char* de) {
const char* si;
const char* st;
@@ -256,10 +275,14 @@ long uv__idna_toascii(const char* s, const char* se, char* d, char* de) {
ds = d;
for (si = s; si < se; /* empty */) {
si = s;
while (si < se) {
st = si;
c = uv__utf8_decode1(&si, se);
if (c == -1u)
return UV_EINVAL;
if (c != '.')
if (c != 0x3002) /* 。 */
if (c != 0xFF0E) /* ． */
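
The key fix in the first idna.c hunk is the switch operand: `pe` points one past the last byte and `*p` is the read cursor, so the bytes still available are `pe - *p`; the old `*p - pe` is always negative, which sent truncated sequences down the default branch. A minimal sketch of the pointer arithmetic, not part of the diff:

#include <assert.h>
#include <stdio.h>

int main(void) {
  const char buf[] = "\xE2\x82";  /* truncated 3-byte UTF-8 sequence */
  const char* p = buf;            /* read cursor */
  const char* pe = buf + 2;       /* one past the last byte */
  assert(pe - p == 2);            /* bytes remaining: the fixed operand */
  assert(p - pe == -2);           /* the old operand: always negative */
  printf("%td bytes remaining\n", pe - p);
  return 0;
}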


@@ -141,8 +141,9 @@ static int inet_ntop6(const unsigned char *src, char *dst, size_t size) {
if (best.base != -1 && (best.base + best.len) == ARRAY_SIZE(words))
*tp++ = ':';
*tp++ = '\0';
if (UV_E2BIG == uv__strscpy(dst, tmp, size))
if ((size_t) (tp - tmp) > size)
return UV_ENOSPC;
uv__strscpy(dst, tmp, size);
return 0;
}


@@ -161,7 +161,6 @@ static void post(QUEUE* q, enum uv__work_kind kind) {
void uv__threadpool_cleanup(void) {
#ifndef _WIN32
unsigned int i;
if (nthreads == 0)
@@ -181,7 +180,6 @@ void uv__threadpool_cleanup(void) {
threads = NULL;
nthreads = 0;
#endif
}


@@ -58,6 +58,7 @@ static int timer_less_than(const struct heap_node* ha,
int uv_timer_init(uv_loop_t* loop, uv_timer_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_TIMER);
handle->timer_cb = NULL;
handle->timeout = 0;
handle->repeat = 0;
return 0;
}


@@ -214,7 +214,7 @@ static int uv__async_start(uv_loop_t* loop) {
pipefd[0] = err;
pipefd[1] = -1;
#else
err = uv__make_pipe(pipefd, UV__F_NONBLOCK);
err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
if (err < 0)
return err;
#endif


@@ -52,9 +52,11 @@ UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) {
UV_UNUSED(static void cpu_relax(void)) {
#if defined(__i386__) || defined(__x86_64__)
__asm__ __volatile__ ("rep; nop"); /* a.k.a. PAUSE */
__asm__ __volatile__ ("rep; nop" ::: "memory"); /* a.k.a. PAUSE */
#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
__asm__ volatile("yield");
__asm__ __volatile__ ("yield" ::: "memory");
#elif defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__)
__asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
#endif
}
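
The atomic-ops.h hunk adds a "memory" clobber so each relax instruction doubles as a compiler barrier, and adds a PPC64 variant. A hedged sketch of why the clobber matters in a spin loop (without it the compiler may cache the flag in a register and never re-read it):

static inline void relax(void) {
#if defined(__i386__) || defined(__x86_64__)
  __asm__ __volatile__ ("rep; nop" ::: "memory");  /* PAUSE + barrier */
#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
  __asm__ __volatile__ ("yield" ::: "memory");
#endif
}

void spin_until_nonzero(int* flag) {  /* flag is written by another thread */
  while (*flag == 0)                  /* clobber forces *flag to be reloaded */
    relax();
}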


@@ -42,8 +42,8 @@ static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
return 1;
#if !defined(__CYGWIN__) && !defined(__MSYS__)
/*
* If `exclude_type` is `UV__EXCLUDE_IFPHYS`, just see whether `sa_family`
* equals to `AF_LINK` or not. Otherwise, the result depends on the operation
* If `exclude_type` is `UV__EXCLUDE_IFPHYS`, return whether `sa_family`
* equals `AF_LINK`. Otherwise, the result depends on the operating
* system with `AF_LINK` or `PF_INET`.
*/
if (exclude_type == UV__EXCLUDE_IFPHYS)
@@ -53,7 +53,7 @@ static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
defined(__HAIKU__)
/*
* On BSD getifaddrs returns information related to the raw underlying
* devices. We're not interested in this information.
* devices. We're not interested in this information.
*/
if (ent->ifa_addr->sa_family == AF_LINK)
return 1;


@@ -88,6 +88,10 @@ extern char** environ;
# define uv__accept4 accept4
#endif
#if defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
# include <sanitizer/linux_syscall_hooks.h>
#endif
static int uv__run_pending(uv_loop_t* loop);
/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
@@ -539,7 +543,13 @@ int uv__close_nocancel(int fd) {
return close$NOCANCEL$UNIX2003(fd);
#endif
#pragma GCC diagnostic pop
#elif defined(__linux__)
#elif defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
long rc;
__sanitizer_syscall_pre_close(fd);
rc = syscall(SYS_close, fd);
__sanitizer_syscall_post_close(rc, fd);
return rc;
#elif defined(__linux__) && !defined(__SANITIZE_THREAD__)
return syscall(SYS_close, fd);
#else
return close(fd);
@@ -574,7 +584,7 @@ int uv__close(int fd) {
return uv__close_nocheckstdio(fd);
}
#if UV__NONBLOCK_IS_IOCTL
int uv__nonblock_ioctl(int fd, int set) {
int r;
@@ -589,7 +599,6 @@ int uv__nonblock_ioctl(int fd, int set) {
}
#if !defined(__CYGWIN__) && !defined(__MSYS__) && !defined(__HAIKU__)
int uv__cloexec_ioctl(int fd, int set) {
int r;
@@ -925,13 +934,12 @@ void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
if (w->pevents == 0) {
QUEUE_REMOVE(&w->watcher_queue);
QUEUE_INIT(&w->watcher_queue);
w->events = 0;
if (loop->watchers[w->fd] != NULL) {
assert(loop->watchers[w->fd] == w);
if (w == loop->watchers[w->fd]) {
assert(loop->nfds > 0);
loop->watchers[w->fd] = NULL;
loop->nfds--;
w->events = 0;
}
}
else if (QUEUE_EMPTY(&w->watcher_queue))
@@ -1175,7 +1183,9 @@ int uv__getpwuid_r(uv_passwd_t* pwd) {
if (buf == NULL)
return UV_ENOMEM;
r = getpwuid_r(uid, &pw, buf, bufsize, &result);
do
r = getpwuid_r(uid, &pw, buf, bufsize, &result);
while (r == EINTR);
if (r != ERANGE)
break;
@@ -1185,7 +1195,7 @@ int uv__getpwuid_r(uv_passwd_t* pwd) {
if (r != 0) {
uv__free(buf);
return -r;
return UV__ERR(r);
}
if (result == NULL) {
@@ -1571,7 +1581,7 @@ int uv__search_path(const char* prog, char* buf, size_t* buflen) {
buf[*buflen] = '\0';
return 0;
}
}
/* Case iii). Search PATH environment variable */
cloned_path = NULL;
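
The getpwuid_r() hunks above wrap the call in an EINTR retry and return UV__ERR(r) instead of -r. A self-contained sketch of the full retry pattern, assuming nothing from libuv (ERANGE means the buffer was too small, EINTR means the call was interrupted):

#include <errno.h>
#include <pwd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

int lookup_self(struct passwd* pw, char** bufp) {
  struct passwd* result;
  size_t bufsize;
  char* buf;
  int r;

  buf = NULL;
  bufsize = 2000;                   /* initial guess; doubled on ERANGE */
  for (;;) {
    free(buf);
    buf = malloc(bufsize);
    if (buf == NULL)
      return ENOMEM;
    do
      r = getpwuid_r(getuid(), pw, buf, bufsize, &result);
    while (r == EINTR);             /* retry interrupted calls */
    if (r != ERANGE)
      break;
    bufsize *= 2;                   /* buffer too small; grow and retry */
  }
  if (r != 0 || result == NULL) {
    free(buf);
    return r != 0 ? r : ENOENT;     /* ENOENT: uid not found */
  }
  *bufp = buf;                      /* pw fields point into buf; caller frees */
  return 0;
}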


@@ -33,9 +33,7 @@
#include <sys/sysctl.h>
#include <unistd.h> /* sysconf */
#if !TARGET_OS_IPHONE
#include "darwin-stub.h"
#endif
static uv_once_t once = UV_ONCE_INIT;
static uint64_t (*time_func)(void);
@@ -223,10 +221,10 @@ static int uv__get_cpu_speed(uint64_t* speed) {
err = UV_ENOENT;
core_foundation_handle = dlopen("/System/Library/Frameworks/"
"CoreFoundation.framework/"
"Versions/A/CoreFoundation",
"CoreFoundation",
RTLD_LAZY | RTLD_LOCAL);
iokit_handle = dlopen("/System/Library/Frameworks/IOKit.framework/"
"Versions/A/IOKit",
"IOKit",
RTLD_LAZY | RTLD_LOCAL);
if (core_foundation_handle == NULL || iokit_handle == NULL)
@@ -304,6 +302,12 @@ static int uv__get_cpu_speed(uint64_t* speed) {
pIOObjectRelease(it);
err = 0;
if (device_type_str != NULL)
pCFRelease(device_type_str);
if (clock_frequency_str != NULL)
pCFRelease(clock_frequency_str);
out:
if (core_foundation_handle != NULL)
dlclose(core_foundation_handle);

deps/libuv/src/unix/epoll.c vendored Normal file

@@ -0,0 +1,422 @@
/* Copyright libuv contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <errno.h>
#include <sys/epoll.h>
int uv__epoll_init(uv_loop_t* loop) {
int fd;
fd = epoll_create1(O_CLOEXEC);
/* epoll_create1() can fail either because it's not implemented (old kernel)
* or because it doesn't understand the O_CLOEXEC flag.
*/
if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
fd = epoll_create(256);
if (fd != -1)
uv__cloexec(fd, 1);
}
loop->backend_fd = fd;
if (fd == -1)
return UV__ERR(errno);
return 0;
}
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
struct epoll_event* events;
struct epoll_event dummy;
uintptr_t i;
uintptr_t nfds;
assert(loop->watchers != NULL);
assert(fd >= 0);
events = (struct epoll_event*) loop->watchers[loop->nwatchers];
nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
if (events != NULL)
/* Invalidate events with same file descriptor */
for (i = 0; i < nfds; i++)
if (events[i].data.fd == fd)
events[i].data.fd = -1;
/* Remove the file descriptor from the epoll.
* This avoids a problem where the same file description remains open
* in another process, causing repeated junk epoll events.
*
* We pass in a dummy epoll_event, to work around a bug in old kernels.
*/
if (loop->backend_fd >= 0) {
/* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
* has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
*/
memset(&dummy, 0, sizeof(dummy));
epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
}
}
int uv__io_check_fd(uv_loop_t* loop, int fd) {
struct epoll_event e;
int rc;
memset(&e, 0, sizeof(e));
e.events = POLLIN;
e.data.fd = -1;
rc = 0;
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
if (errno != EEXIST)
rc = UV__ERR(errno);
if (rc == 0)
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
abort();
return rc;
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
/* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
* effectively infinite on 32 bits architectures. To avoid blocking
* indefinitely, we cap the timeout and poll again if necessary.
*
* Note that "30 minutes" is a simplification because it depends on
* the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200,
* that being the largest value I have seen in the wild (and only once.)
*/
static const int max_safe_timeout = 1789569;
static int no_epoll_pwait_cached;
static int no_epoll_wait_cached;
int no_epoll_pwait;
int no_epoll_wait;
struct epoll_event events[1024];
struct epoll_event* pe;
struct epoll_event e;
int real_timeout;
QUEUE* q;
uv__io_t* w;
sigset_t sigset;
uint64_t sigmask;
uint64_t base;
int have_signals;
int nevents;
int count;
int nfds;
int fd;
int op;
int i;
int user_timeout;
int reset_timeout;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
return;
}
memset(&e, 0, sizeof(e));
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);
e.events = w->pevents;
e.data.fd = w->fd;
if (w->events == 0)
op = EPOLL_CTL_ADD;
else
op = EPOLL_CTL_MOD;
/* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
* events, skip the syscall and squelch the events after epoll_wait().
*/
if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
if (errno != EEXIST)
abort();
assert(op == EPOLL_CTL_ADD);
/* We've reactivated a file descriptor that's been watched before. */
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
abort();
}
w->events = w->pevents;
}
sigmask = 0;
if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
sigemptyset(&sigset);
sigaddset(&sigset, SIGPROF);
sigmask |= 1 << (SIGPROF - 1);
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
real_timeout = timeout;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
} else {
reset_timeout = 0;
user_timeout = 0;
}
/* You could argue there is a dependency between these two but
* ultimately we don't care about their ordering with respect
* to one another. Worst case, we make a few system calls that
* could have been avoided because another thread already knows
* they fail with ENOSYS. Hardly the end of the world.
*/
no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached);
no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);
for (;;) {
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
/* See the comment for max_safe_timeout for an explanation of why
* this is necessary. Executive summary: kernel bug workaround.
*/
if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
timeout = max_safe_timeout;
if (sigmask != 0 && no_epoll_pwait != 0)
if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
abort();
if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
nfds = epoll_pwait(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout,
&sigset);
if (nfds == -1 && errno == ENOSYS) {
uv__store_relaxed(&no_epoll_pwait_cached, 1);
no_epoll_pwait = 1;
}
} else {
nfds = epoll_wait(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout);
if (nfds == -1 && errno == ENOSYS) {
uv__store_relaxed(&no_epoll_wait_cached, 1);
no_epoll_wait = 1;
}
}
if (sigmask != 0 && no_epoll_pwait != 0)
if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
abort();
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
assert(timeout != -1);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == -1)
continue;
if (timeout == 0)
return;
/* We may have been inside the system call for longer than |timeout|
* milliseconds so we need to update the timestamp to avoid drift.
*/
goto update_timeout;
}
if (nfds == -1) {
if (errno == ENOSYS) {
/* epoll_wait() or epoll_pwait() failed, try the other system call. */
assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
continue;
}
if (errno != EINTR)
abort();
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == -1)
continue;
if (timeout == 0)
return;
/* Interrupted by a signal. Update timeout and poll again. */
goto update_timeout;
}
have_signals = 0;
nevents = 0;
{
/* Squelch a -Waddress-of-packed-member warning with gcc >= 9. */
union {
struct epoll_event* events;
uv__io_t* watchers;
} x;
x.events = events;
assert(loop->watchers != NULL);
loop->watchers[loop->nwatchers] = x.watchers;
loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
}
for (i = 0; i < nfds; i++) {
pe = events + i;
fd = pe->data.fd;
/* Skip invalidated events, see uv__platform_invalidate_fd */
if (fd == -1)
continue;
assert(fd >= 0);
assert((unsigned) fd < loop->nwatchers);
w = loop->watchers[fd];
if (w == NULL) {
/* File descriptor that we've stopped watching, disarm it.
*
* Ignore all errors because we may be racing with another thread
* when the file descriptor is closed.
*/
epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
continue;
}
/* Give users only events they're interested in. Prevents spurious
* callbacks when previous callback invocation in this loop has stopped
* the current watcher. Also, filters out events that users has not
* requested us to watch.
*/
pe->events &= w->pevents | POLLERR | POLLHUP;
/* Work around an epoll quirk where it sometimes reports just the
* EPOLLERR or EPOLLHUP event. In order to force the event loop to
* move forward, we merge in the read/write events that the watcher
* is interested in; uv__read() and uv__write() will then deal with
* the error or hangup in the usual fashion.
*
* Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
* reads the available data, calls uv_read_stop(), then sometime later
* calls uv_read_start() again. By then, libuv has forgotten about the
* hangup and the kernel won't report EPOLLIN again because there's
* nothing left to read. If anything, libuv is to blame here. The
* current hack is just a quick bandaid; to properly fix it, libuv
* needs to remember the error/hangup event. We should get that for
* free when we switch over to edge-triggered I/O.
*/
if (pe->events == POLLERR || pe->events == POLLHUP)
pe->events |=
w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
if (pe->events != 0) {
/* Run signal watchers last. This also affects child process watchers
* because those are implemented in terms of signal watchers.
*/
if (w == &loop->signal_io_watcher) {
have_signals = 1;
} else {
uv__metrics_update_idle_time(loop);
w->cb(loop, w, pe->events);
}
nevents++;
}
}
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (have_signals != 0) {
uv__metrics_update_idle_time(loop);
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
if (have_signals != 0)
return; /* Event loop should cycle now so don't poll again. */
if (nevents != 0) {
if (nfds == ARRAY_SIZE(events) && --count != 0) {
/* Poll for more events but don't block this time. */
timeout = 0;
continue;
}
return;
}
if (timeout == 0)
return;
if (timeout == -1)
continue;
update_timeout:
assert(timeout > 0);
real_timeout -= (loop->time - base);
if (real_timeout <= 0)
return;
timeout = real_timeout;
}
}
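
About the 1789569 constant in uv__io_poll(): the affected kernels are said to compute the jiffies conversion with a 32-bit intermediate, so the product timeout_ms * CONFIG_HZ must stay below 2^31; with CONFIG_HZ=1200 that caps the timeout at INT32_MAX / 1200 milliseconds. A quick check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  printf("%d\n", INT32_MAX / 1200);  /* prints 1789569, i.e. ~29.8 minutes */
  /* 1789570 * 1200 = 2147484000 already exceeds INT32_MAX (2147483647) */
  return 0;
}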


@@ -265,8 +265,11 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if __FreeBSD__ >= 11
return sendmmsg(fd, mmsg, vlen, /* flags */ 0);
#if __FreeBSD__ >= 11 && !defined(__DragonFly__)
return sendmmsg(fd,
(struct mmsghdr*) mmsg,
vlen,
0 /* flags */);
#else
return errno = ENOSYS, -1;
#endif
@@ -274,8 +277,12 @@ int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if __FreeBSD__ >= 11
return recvmmsg(fd, mmsg, vlen, 0 /* flags */, NULL /* timeout */);
#if __FreeBSD__ >= 11 && !defined(__DragonFly__)
return recvmmsg(fd,
(struct mmsghdr*) mmsg,
vlen,
0 /* flags */,
NULL /* timeout */);
#else
return errno = ENOSYS, -1;
#endif


@@ -56,8 +56,13 @@
# define HAVE_PREADV 0
#endif
#if defined(__linux__)
# include "sys/utsname.h"
#endif
#if defined(__linux__) || defined(__sun)
# include <sys/sendfile.h>
# include <sys/sysmacros.h>
#endif
#if defined(__APPLE__)
@@ -212,14 +217,30 @@ static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) {
struct timespec ts;
ts.tv_sec = time;
ts.tv_nsec = (uint64_t)(time * 1000000) % 1000000 * 1000;
ts.tv_nsec = (time - ts.tv_sec) * 1e9;
/* TODO(bnoordhuis) Remove this. utimesat() has nanosecond resolution but we
* stick to microsecond resolution for the sake of consistency with other
* platforms. I'm the original author of this compatibility hack but I'm
* less convinced it's useful nowadays.
*/
ts.tv_nsec -= ts.tv_nsec % 1000;
if (ts.tv_nsec < 0) {
ts.tv_nsec += 1e9;
ts.tv_sec -= 1;
}
return ts;
}
UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
struct timeval tv;
tv.tv_sec = time;
tv.tv_usec = (uint64_t)(time * 1000000) % 1000000;
tv.tv_usec = (time - tv.tv_sec) * 1e6;
if (tv.tv_usec < 0) {
tv.tv_usec += 1e6;
tv.tv_sec -= 1;
}
return tv;
}
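
The rewritten conversions handle pre-epoch (negative) times: assignment to tv_sec truncates toward zero, leaving a negative fractional part that must be folded into the previous second. A standalone sketch of the same normalization:

#include <stdio.h>
#include <time.h>

static struct timespec to_timespec(double t) {
  struct timespec ts;
  ts.tv_sec = t;                        /* truncates toward zero */
  ts.tv_nsec = (t - ts.tv_sec) * 1e9;   /* negative when t < 0 */
  ts.tv_nsec -= ts.tv_nsec % 1000;      /* keep microsecond granularity */
  if (ts.tv_nsec < 0) {                 /* normalize: 0 <= tv_nsec < 1e9 */
    ts.tv_nsec += 1000000000;
    ts.tv_sec -= 1;
  }
  return ts;
}

int main(void) {
  struct timespec ts = to_timespec(-1.5);
  printf("tv_sec=%lld tv_nsec=%ld\n", (long long) ts.tv_sec, ts.tv_nsec);
  /* tv_sec=-2 tv_nsec=500000000: (-2) + 0.5s == -1.5s */
  return 0;
}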
@@ -227,9 +248,6 @@ static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__) \
|| defined(_AIX71) \
|| defined(__HAIKU__)
/* utimesat() has nanosecond resolution but we stick to microseconds
* for the sake of consistency with other platforms.
*/
struct timespec ts[2];
ts[0] = uv__fs_to_timespec(req->atime);
ts[1] = uv__fs_to_timespec(req->mtime);
@@ -887,6 +905,50 @@ out:
}
#ifdef __linux__
static unsigned uv__kernel_version(void) {
static unsigned cached_version;
struct utsname u;
unsigned version;
unsigned major;
unsigned minor;
unsigned patch;
version = uv__load_relaxed(&cached_version);
if (version != 0)
return version;
if (-1 == uname(&u))
return 0;
if (3 != sscanf(u.release, "%u.%u.%u", &major, &minor, &patch))
return 0;
version = major * 65536 + minor * 256 + patch;
uv__store_relaxed(&cached_version, version);
return version;
}
/* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
* in copy_file_range() when it shouldn't. There is no workaround except to
* fall back to a regular copy.
*/
static int uv__is_buggy_cephfs(int fd) {
struct statfs s;
if (-1 == fstatfs(fd, &s))
return 0;
if (s.f_type != /* CephFS */ 0xC36400)
return 0;
return uv__kernel_version() < /* 4.20.0 */ 0x041400;
}
#endif /* __linux__ */
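
uv__kernel_version() packs the release into one comparable integer (one byte per component), which is what lets the CephFS check read `< 0x041400` as "before 4.20.0". Spelled out:

#include <assert.h>

int main(void) {
  unsigned v = 4 * 65536 + 20 * 256 + 0;  /* major, minor, patch of 4.20.0 */
  assert(v == 0x041400);                  /* the constant used above */
  return 0;
}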
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
int in_fd;
int out_fd;
@@ -903,14 +965,25 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
#ifdef __linux__
{
static int copy_file_range_support = 1;
static int no_copy_file_range_support;
if (copy_file_range_support) {
r = uv__fs_copy_file_range(in_fd, NULL, out_fd, &off, req->bufsml[0].len, 0);
if (uv__load_relaxed(&no_copy_file_range_support) == 0) {
r = uv__fs_copy_file_range(in_fd, &off, out_fd, NULL, req->bufsml[0].len, 0);
if (r == -1 && errno == ENOSYS) {
/* ENOSYS - it will never work */
errno = 0;
uv__store_relaxed(&no_copy_file_range_support, 1);
} else if (r == -1 && errno == EACCES && uv__is_buggy_cephfs(in_fd)) {
/* EACCES - pre-4.20 kernels have a bug where CephFS uses the RADOS
copy-from command when it shouldn't */
errno = 0;
uv__store_relaxed(&no_copy_file_range_support, 1);
} else if (r == -1 && (errno == ENOTSUP || errno == EXDEV)) {
/* ENOTSUP - it could work on another file system type */
/* EXDEV - it will not work when in_fd and out_fd are not on the same
mounted filesystem (pre Linux 5.3) */
errno = 0;
copy_file_range_support = 0;
} else {
goto ok;
}
@@ -1010,9 +1083,6 @@ static ssize_t uv__fs_utime(uv_fs_t* req) {
|| defined(_AIX71) \
|| defined(__sun) \
|| defined(__HAIKU__)
/* utimesat() has nanosecond resolution but we stick to microseconds
* for the sake of consistency with other platforms.
*/
struct timespec ts[2];
ts[0] = uv__fs_to_timespec(req->atime);
ts[1] = uv__fs_to_timespec(req->mtime);
@@ -1220,7 +1290,7 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
if (fstatfs(dstfd, &s) == -1)
goto out;
if (s.f_type != /* CIFS */ 0xFF534D42u)
if ((unsigned) s.f_type != /* CIFS */ 0xFF534D42u)
goto out;
}
@@ -1340,7 +1410,8 @@ static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
dst->st_birthtim.tv_nsec = src->st_ctimensec;
dst->st_flags = 0;
dst->st_gen = 0;
#elif !defined(_AIX) && ( \
#elif !defined(_AIX) && \
!defined(__MVS__) && ( \
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__OpenBSD__) || \
@@ -1420,8 +1491,9 @@ static int uv__fs_statx(int fd,
case -1:
/* EPERM happens when a seccomp filter rejects the system call.
* Has been observed with libseccomp < 2.3.3 and docker < 18.04.
* EOPNOTSUPP is used on DVS exported filesystems
*/
if (errno != EINVAL && errno != EPERM && errno != ENOSYS)
if (errno != EINVAL && errno != EPERM && errno != ENOSYS && errno != EOPNOTSUPP)
return -1;
/* Fall through. */
default:
@@ -1434,12 +1506,12 @@ static int uv__fs_statx(int fd,
return UV_ENOSYS;
}
buf->st_dev = 256 * statxbuf.stx_dev_major + statxbuf.stx_dev_minor;
buf->st_dev = makedev(statxbuf.stx_dev_major, statxbuf.stx_dev_minor);
buf->st_mode = statxbuf.stx_mode;
buf->st_nlink = statxbuf.stx_nlink;
buf->st_uid = statxbuf.stx_uid;
buf->st_gid = statxbuf.stx_gid;
buf->st_rdev = statxbuf.stx_rdev_major;
buf->st_rdev = makedev(statxbuf.stx_rdev_major, statxbuf.stx_rdev_minor);
buf->st_ino = statxbuf.stx_ino;
buf->st_size = statxbuf.stx_size;
buf->st_blksize = statxbuf.stx_blksize;
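
The statx hunk swaps the hand-rolled `256 * major + minor` packing for makedev(), which stays correct when the minor number does not fit in 8 bits. A small sketch (glibc's makedev lives in sys/sysmacros.h):

#include <assert.h>
#include <sys/sysmacros.h>
#include <sys/types.h>

int main(void) {
  dev_t d = makedev(8, 300);  /* minor 300 does not fit in 8 bits */
  assert(major(d) == 8);
  assert(minor(d) == 300);    /* 256*8 + 300 == 256*9 + 44: the old packing collides */
  return 0;
}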


@@ -595,8 +595,7 @@ out:
static int uv__fsevents_loop_init(uv_loop_t* loop) {
CFRunLoopSourceContext ctx;
uv__cf_loop_state_t* state;
pthread_attr_t attr_storage;
pthread_attr_t* attr;
pthread_attr_t attr;
int err;
if (loop->cf_state != NULL)
@@ -641,25 +640,19 @@ static int uv__fsevents_loop_init(uv_loop_t* loop) {
goto fail_signal_source_create;
}
/* In the unlikely event that pthread_attr_init() fails, create the thread
* with the default stack size. We'll use a little more address space but
* that in itself is not a fatal error.
*/
attr = &attr_storage;
if (pthread_attr_init(attr))
attr = NULL;
if (pthread_attr_init(&attr))
abort();
if (attr != NULL)
if (pthread_attr_setstacksize(attr, 4 * PTHREAD_STACK_MIN))
abort();
if (pthread_attr_setstacksize(&attr, uv__thread_stack_size()))
abort();
loop->cf_state = state;
/* uv_thread_t is an alias for pthread_t. */
err = UV__ERR(pthread_create(&loop->cf_thread, attr, uv__cf_loop_runner, loop));
err = UV__ERR(pthread_create(&loop->cf_thread, &attr, uv__cf_loop_runner, loop));
if (attr != NULL)
pthread_attr_destroy(attr);
if (pthread_attr_destroy(&attr))
abort();
if (err)
goto fail_thread_create;


@@ -21,9 +21,6 @@
/* Expose glibc-specific EAI_* error codes. Needs to be defined before we
* include any headers.
*/
#ifndef _GNU_SOURCE
# define _GNU_SOURCE
#endif
#include "uv.h"
#include "internal.h"


@@ -26,7 +26,6 @@
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <sys/types.h>
@@ -166,7 +165,7 @@ static void iconv_a2e(const char* src, unsigned char dst[], size_t length) {
srclen = strlen(src);
if (srclen > length)
abort();
srclen = length;
for (i = 0; i < srclen; i++)
dst[i] = a2e[src[i]];
/* padding the remaining part with spaces */
@@ -360,6 +359,10 @@ static int get_ibmi_physical_address(const char* line, char (*phys_addr)[6]) {
if (rc != 0)
return rc;
if (err.bytes_available > 0) {
return -1;
}
/* convert ebcdic loca_adapter_address to ascii first */
iconv_e2a(rcvr.loca_adapter_address, mac_addr,
sizeof(rcvr.loca_adapter_address));
@@ -443,9 +446,42 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
}
address->is_internal = cur->ifa_flags & IFF_LOOPBACK ? 1 : 0;
if (!address->is_internal) {
int rc = get_ibmi_physical_address(address->name, &address->phys_addr);
if (rc != 0)
r = rc;
int rc = -1;
size_t name_len = strlen(address->name);
/* To get the associated MAC address, we must convert the address to a
* line description. Normally, the name field contains the line
* description name, but for VLANs it has the VLAN appended with a
* period. Since object names can also contain periods and numbers, there
* is no way to know if a returned name is for a VLAN or not. eg.
* *LIND ETH1.1 and *LIND ETH1, VLAN 1 both have the same name: ETH1.1
*
* Instead, we apply the same heuristic used by some of the XPF ioctls:
* - names > 10 *must* contain a VLAN
* - assume names <= 10 do not contain a VLAN and try directly
* - if >10 or QDCRLIND returned an error, try to strip off a VLAN
* and try again
* - if we still get an error or couldn't find a period, leave the MAC as
* 00:00:00:00:00:00
*/
if (name_len <= 10) {
/* Assume name does not contain a VLAN ID */
rc = get_ibmi_physical_address(address->name, &address->phys_addr);
}
if (name_len > 10 || rc != 0) {
/* The interface name must contain a VLAN ID suffix. Attempt to strip
* it off so we can get the line description to pass to QDCRLIND.
*/
char* temp_name = uv__strdup(address->name);
char* dot = strrchr(temp_name, '.');
if (dot != NULL) {
*dot = '\0';
if (strlen(temp_name) <= 10) {
rc = get_ibmi_physical_address(temp_name, &address->phys_addr);
}
}
uv__free(temp_name);
}
}
address++;
@@ -498,4 +534,4 @@ int uv_get_process_title(char* buffer, size_t size) {
}
void uv__process_title_cleanup(void) {
}
}


@@ -62,6 +62,17 @@
# include <AvailabilityMacros.h>
#endif
/*
* Define common detection for active Thread Sanitizer
* - clang uses __has_feature(thread_sanitizer)
* - gcc-7+ uses __SANITIZE_THREAD__
*/
#if defined(__has_feature)
# if __has_feature(thread_sanitizer)
# define __SANITIZE_THREAD__ 1
# endif
#endif
#if defined(PATH_MAX)
# define UV__PATH_MAX PATH_MAX
#else
@@ -165,9 +176,11 @@ struct uv__stream_queued_fds_s {
defined(__NetBSD__)
#define uv__cloexec uv__cloexec_ioctl
#define uv__nonblock uv__nonblock_ioctl
#define UV__NONBLOCK_IS_IOCTL 1
#else
#define uv__cloexec uv__cloexec_fcntl
#define uv__nonblock uv__nonblock_fcntl
#define UV__NONBLOCK_IS_IOCTL 0
#endif
/* On Linux, uv__nonblock_fcntl() and uv__nonblock_ioctl() do not commute
@@ -246,6 +259,7 @@ int uv__signal_loop_fork(uv_loop_t* loop);
/* platform specific */
uint64_t uv__hrtime(uv_clocktype_t type);
int uv__kqueue_init(uv_loop_t* loop);
int uv__epoll_init(uv_loop_t* loop);
int uv__platform_loop_init(uv_loop_t* loop);
void uv__platform_loop_delete(uv_loop_t* loop);
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd);
@@ -261,6 +275,7 @@ void uv__prepare_close(uv_prepare_t* handle);
void uv__process_close(uv_process_t* handle);
void uv__stream_close(uv_stream_t* handle);
void uv__tcp_close(uv_tcp_t* handle);
size_t uv__thread_stack_size(void);
void uv__udp_close(uv_udp_t* handle);
void uv__udp_finish_close(uv_udp_t* handle);
uv_handle_type uv__handle_type(int fd);
@@ -282,12 +297,6 @@ int uv___stream_fd(const uv_stream_t* handle);
#define uv__stream_fd(handle) ((handle)->io_watcher.fd)
#endif /* defined(__APPLE__) */
#ifdef O_NONBLOCK
# define UV__F_NONBLOCK O_NONBLOCK
#else
# define UV__F_NONBLOCK 1
#endif
int uv__make_pipe(int fds[2], int flags);
#if defined(__APPLE__)
@@ -327,7 +336,8 @@ int uv__getsockpeername(const uv_handle_t* handle,
#if defined(__linux__) || \
defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__)
defined(__FreeBSD_kernel__) || \
defined(__DragonFly__)
#define HAVE_MMSG 1
struct uv__mmsghdr {
struct msghdr msg_hdr;
@@ -340,5 +350,11 @@ int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
#define HAVE_MMSG 0
#endif
#if defined(__sun)
#if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L
size_t strnlen(const char* s, size_t maxlen);
#endif
#endif
#endif /* UV_UNIX_INTERNAL_H_ */


@@ -82,29 +82,12 @@ static int read_times(FILE* statfile_fp,
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
static uint64_t read_cpufreq(unsigned int cpunum);
int uv__platform_loop_init(uv_loop_t* loop) {
int fd;
fd = epoll_create1(O_CLOEXEC);
/* epoll_create1() can fail either because it's not implemented (old kernel)
* or because it doesn't understand the O_CLOEXEC flag.
*/
if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
fd = epoll_create(256);
if (fd != -1)
uv__cloexec(fd, 1);
}
loop->backend_fd = fd;
loop->inotify_fd = -1;
loop->inotify_watchers = NULL;
if (fd == -1)
return UV__ERR(errno);
return 0;
return uv__epoll_init(loop);
}
@@ -134,380 +117,6 @@ void uv__platform_loop_delete(uv_loop_t* loop) {
}
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
struct epoll_event* events;
struct epoll_event dummy;
uintptr_t i;
uintptr_t nfds;
assert(loop->watchers != NULL);
assert(fd >= 0);
events = (struct epoll_event*) loop->watchers[loop->nwatchers];
nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
if (events != NULL)
/* Invalidate events with same file descriptor */
for (i = 0; i < nfds; i++)
if (events[i].data.fd == fd)
events[i].data.fd = -1;
/* Remove the file descriptor from the epoll.
* This avoids a problem where the same file description remains open
* in another process, causing repeated junk epoll events.
*
* We pass in a dummy epoll_event, to work around a bug in old kernels.
*/
if (loop->backend_fd >= 0) {
/* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
* has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
*/
memset(&dummy, 0, sizeof(dummy));
epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
}
}
int uv__io_check_fd(uv_loop_t* loop, int fd) {
struct epoll_event e;
int rc;
memset(&e, 0, sizeof(e));
e.events = POLLIN;
e.data.fd = -1;
rc = 0;
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
if (errno != EEXIST)
rc = UV__ERR(errno);
if (rc == 0)
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
abort();
return rc;
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
/* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
* effectively infinite on 32 bits architectures. To avoid blocking
* indefinitely, we cap the timeout and poll again if necessary.
*
* Note that "30 minutes" is a simplification because it depends on
* the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200,
* that being the largest value I have seen in the wild (and only once.)
*/
static const int max_safe_timeout = 1789569;
static int no_epoll_pwait_cached;
static int no_epoll_wait_cached;
int no_epoll_pwait;
int no_epoll_wait;
struct epoll_event events[1024];
struct epoll_event* pe;
struct epoll_event e;
int real_timeout;
QUEUE* q;
uv__io_t* w;
sigset_t sigset;
uint64_t sigmask;
uint64_t base;
int have_signals;
int nevents;
int count;
int nfds;
int fd;
int op;
int i;
int user_timeout;
int reset_timeout;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
return;
}
memset(&e, 0, sizeof(e));
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);
e.events = w->pevents;
e.data.fd = w->fd;
if (w->events == 0)
op = EPOLL_CTL_ADD;
else
op = EPOLL_CTL_MOD;
/* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
* events, skip the syscall and squelch the events after epoll_wait().
*/
if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
if (errno != EEXIST)
abort();
assert(op == EPOLL_CTL_ADD);
/* We've reactivated a file descriptor that's been watched before. */
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
abort();
}
w->events = w->pevents;
}
sigmask = 0;
if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
sigemptyset(&sigset);
sigaddset(&sigset, SIGPROF);
sigmask |= 1 << (SIGPROF - 1);
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
real_timeout = timeout;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
} else {
reset_timeout = 0;
user_timeout = 0;
}
/* You could argue there is a dependency between these two but
* ultimately we don't care about their ordering with respect
* to one another. Worst case, we make a few system calls that
* could have been avoided because another thread already knows
* they fail with ENOSYS. Hardly the end of the world.
*/
no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached);
no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);
for (;;) {
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
/* See the comment for max_safe_timeout for an explanation of why
* this is necessary. Executive summary: kernel bug workaround.
*/
if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
timeout = max_safe_timeout;
if (sigmask != 0 && no_epoll_pwait != 0)
if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
abort();
if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
nfds = epoll_pwait(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout,
&sigset);
if (nfds == -1 && errno == ENOSYS) {
uv__store_relaxed(&no_epoll_pwait_cached, 1);
no_epoll_pwait = 1;
}
} else {
nfds = epoll_wait(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout);
if (nfds == -1 && errno == ENOSYS) {
uv__store_relaxed(&no_epoll_wait_cached, 1);
no_epoll_wait = 1;
}
}
if (sigmask != 0 && no_epoll_pwait != 0)
if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
abort();
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
assert(timeout != -1);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == -1)
continue;
if (timeout == 0)
return;
/* We may have been inside the system call for longer than |timeout|
* milliseconds so we need to update the timestamp to avoid drift.
*/
goto update_timeout;
}
if (nfds == -1) {
if (errno == ENOSYS) {
/* epoll_wait() or epoll_pwait() failed, try the other system call. */
assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
continue;
}
if (errno != EINTR)
abort();
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == -1)
continue;
if (timeout == 0)
return;
/* Interrupted by a signal. Update timeout and poll again. */
goto update_timeout;
}
have_signals = 0;
nevents = 0;
{
/* Squelch a -Waddress-of-packed-member warning with gcc >= 9. */
union {
struct epoll_event* events;
uv__io_t* watchers;
} x;
x.events = events;
assert(loop->watchers != NULL);
loop->watchers[loop->nwatchers] = x.watchers;
loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
}
for (i = 0; i < nfds; i++) {
pe = events + i;
fd = pe->data.fd;
/* Skip invalidated events, see uv__platform_invalidate_fd */
if (fd == -1)
continue;
assert(fd >= 0);
assert((unsigned) fd < loop->nwatchers);
w = loop->watchers[fd];
if (w == NULL) {
/* File descriptor that we've stopped watching, disarm it.
*
* Ignore all errors because we may be racing with another thread
* when the file descriptor is closed.
*/
epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
continue;
}
/* Give users only events they're interested in. Prevents spurious
* callbacks when previous callback invocation in this loop has stopped
* the current watcher. Also, filters out events that users has not
* requested us to watch.
*/
pe->events &= w->pevents | POLLERR | POLLHUP;
/* Work around an epoll quirk where it sometimes reports just the
* EPOLLERR or EPOLLHUP event. In order to force the event loop to
* move forward, we merge in the read/write events that the watcher
* is interested in; uv__read() and uv__write() will then deal with
* the error or hangup in the usual fashion.
*
* Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
* reads the available data, calls uv_read_stop(), then sometime later
* calls uv_read_start() again. By then, libuv has forgotten about the
* hangup and the kernel won't report EPOLLIN again because there's
* nothing left to read. If anything, libuv is to blame here. The
* current hack is just a quick bandaid; to properly fix it, libuv
* needs to remember the error/hangup event. We should get that for
* free when we switch over to edge-triggered I/O.
*/
if (pe->events == POLLERR || pe->events == POLLHUP)
pe->events |=
w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
if (pe->events != 0) {
/* Run signal watchers last. This also affects child process watchers
* because those are implemented in terms of signal watchers.
*/
if (w == &loop->signal_io_watcher) {
have_signals = 1;
} else {
uv__metrics_update_idle_time(loop);
w->cb(loop, w, pe->events);
}
nevents++;
}
}
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (have_signals != 0) {
uv__metrics_update_idle_time(loop);
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
if (have_signals != 0)
return; /* Event loop should cycle now so don't poll again. */
if (nevents != 0) {
if (nfds == ARRAY_SIZE(events) && --count != 0) {
/* Poll for more events but don't block this time. */
timeout = 0;
continue;
}
return;
}
if (timeout == 0)
return;
if (timeout == -1)
continue;
update_timeout:
assert(timeout > 0);
real_timeout -= (loop->time - base);
if (real_timeout <= 0)
return;
timeout = real_timeout;
}
}
uint64_t uv__hrtime(uv_clocktype_t type) {
static clock_t fast_clock_id = -1;
@@ -602,22 +211,53 @@ err:
return UV_EINVAL;
}
static int uv__slurp(const char* filename, char* buf, size_t len) {
ssize_t n;
int fd;
assert(len > 0);
fd = uv__open_cloexec(filename, O_RDONLY);
if (fd < 0)
return fd;
do
n = read(fd, buf, len - 1);
while (n == -1 && errno == EINTR);
if (uv__close_nocheckstdio(fd))
abort();
if (n < 0)
return UV__ERR(errno);
buf[n] = '\0';
return 0;
}
int uv_uptime(double* uptime) {
static volatile int no_clock_boottime;
char buf[128];
struct timespec now;
int r;
/* Try /proc/uptime first, then fallback to clock_gettime(). */
if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
if (1 == sscanf(buf, "%lf", uptime))
return 0;
/* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
* (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
* is suspended.
*/
if (no_clock_boottime) {
retry: r = clock_gettime(CLOCK_MONOTONIC, &now);
retry_clock_gettime: r = clock_gettime(CLOCK_MONOTONIC, &now);
}
else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
no_clock_boottime = 1;
goto retry;
goto retry_clock_gettime;
}
if (r)
@@ -709,14 +349,19 @@ static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
}
/* Also reads the CPU frequency on x86. The other architectures only have
* a BogoMIPS field, which may not be very accurate.
/* Also reads the CPU frequency on ppc and x86. The other architectures only
* have a BogoMIPS field, which may not be very accurate.
*
* Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
*/
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
#if defined(__PPC__)
static const char model_marker[] = "cpu\t\t: ";
static const char speed_marker[] = "clock\t\t: ";
#else
static const char model_marker[] = "model name\t: ";
static const char speed_marker[] = "cpu MHz\t\t: ";
#endif
const char* inferred_model;
unsigned int model_idx;
unsigned int speed_idx;
@@ -738,6 +383,7 @@ static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
#if defined(__arm__) || \
defined(__i386__) || \
defined(__mips__) || \
defined(__PPC__) || \
defined(__x86_64__)
fp = uv__open_file("/proc/cpuinfo");
if (fp == NULL)
@@ -786,7 +432,7 @@ static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
}
fclose(fp);
#endif /* __arm__ || __i386__ || __mips__ || __x86_64__ */
#endif /* __arm__ || __i386__ || __mips__ || __PPC__ || __x86_64__ */
/* Now we want to make sure that all the models contain *something* because
* it's not safe to leave them as null. Copy the last entry unless there
@@ -824,9 +470,9 @@ static int read_times(FILE* statfile_fp,
char buf[1024];
ticks = (unsigned int)sysconf(_SC_CLK_TCK);
multiplier = ((uint64_t)1000L / ticks);
assert(ticks != (unsigned int) -1);
assert(ticks != 0);
multiplier = ((uint64_t)1000L / ticks);
rewind(statfile_fp);
@@ -1025,32 +671,6 @@ void uv__set_process_title(const char* title) {
}
static int uv__slurp(const char* filename, char* buf, size_t len) {
ssize_t n;
int fd;
assert(len > 0);
fd = uv__open_cloexec(filename, O_RDONLY);
if (fd < 0)
return fd;
do
n = read(fd, buf, len - 1);
while (n == -1 && errno == EINTR);
if (uv__close_nocheckstdio(fd))
abort();
if (n < 0)
return UV__ERR(errno);
buf[n] = '\0';
return 0;
}
static uint64_t uv__read_proc_meminfo(const char* what) {
uint64_t rc;
char* p;
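
The uv_uptime() hunk above now tries /proc/uptime first (via the relocated uv__slurp()) and only then falls back to CLOCK_BOOTTIME, then CLOCK_MONOTONIC. A standalone Linux sketch of that fallback order:

#define _GNU_SOURCE   /* CLOCK_BOOTTIME on older glibc */
#include <stdio.h>
#include <time.h>

int uptime_seconds(double* out) {
  struct timespec now;
  FILE* fp;

  fp = fopen("/proc/uptime", "r");
  if (fp != NULL) {
    int ok = (1 == fscanf(fp, "%lf", out));
    fclose(fp);
    if (ok)
      return 0;
  }
  /* CLOCK_BOOTTIME counts time spent suspended; CLOCK_MONOTONIC (the
   * pre-2.6.39 fallback) does not. */
  if (clock_gettime(CLOCK_BOOTTIME, &now) != 0 &&
      clock_gettime(CLOCK_MONOTONIC, &now) != 0)
    return -1;
  *out = now.tv_sec + now.tv_nsec / 1e9;
  return 0;
}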


@@ -178,7 +178,7 @@ static void uv__inotify_read(uv_loop_t* loop,
/* needs to be large enough for sizeof(inotify_event) + strlen(path) */
char buf[4096];
while (1) {
for (;;) {
do
size = read(loop->inotify_fd, buf, sizeof(buf));
while (size == -1 && errno == EINTR);


@@ -194,37 +194,37 @@ int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
#if defined(__NR_preadv)
return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
#else
#if !defined(__NR_preadv) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
return errno = ENOSYS, -1;
#else
return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
#endif
}
ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
#if defined(__NR_pwritev)
return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
#else
#if !defined(__NR_pwritev) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
return errno = ENOSYS, -1;
#else
return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
#endif
}
int uv__dup3(int oldfd, int newfd, int flags) {
#if defined(__NR_dup3)
return syscall(__NR_dup3, oldfd, newfd, flags);
#else
#if !defined(__NR_dup3) || defined(__ANDROID_API__) && __ANDROID_API__ < 21
return errno = ENOSYS, -1;
#else
return syscall(__NR_dup3, oldfd, newfd, flags);
#endif
}
ssize_t
uv__fs_copy_file_range(int fd_in,
ssize_t* off_in,
off_t* off_in,
int fd_out,
ssize_t* off_out,
off_t* off_out,
size_t len,
unsigned int flags)
{
@@ -247,21 +247,18 @@ int uv__statx(int dirfd,
int flags,
unsigned int mask,
struct uv__statx* statxbuf) {
/* __NR_statx make Android box killed by SIGSYS.
* That looks like a seccomp2 sandbox filter rejecting the system call.
*/
#if defined(__NR_statx) && !defined(__ANDROID__)
return syscall(__NR_statx, dirfd, path, flags, mask, statxbuf);
#else
#if !defined(__NR_statx) || defined(__ANDROID_API__) && __ANDROID_API__ < 30
return errno = ENOSYS, -1;
#else
return syscall(__NR_statx, dirfd, path, flags, mask, statxbuf);
#endif
}
ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags) {
#if defined(__NR_getrandom)
return syscall(__NR_getrandom, buf, buflen, flags);
#else
#if !defined(__NR_getrandom) || defined(__ANDROID_API__) && __ANDROID_API__ < 28
return errno = ENOSYS, -1;
#else
return syscall(__NR_getrandom, buf, buflen, flags);
#endif
}
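
The wrappers above now also gate on __ANDROID_API__: Bionic only gained libc wrappers for preadv/pwritev at API 24, dup3 at 21, getrandom at 28, and statx at 30, and issuing the raw syscall on older Android risks rejection by the seccomp sandbox, so they report ENOSYS instead. The comma-expression idiom they share, in isolation:

#include <errno.h>
#include <sys/types.h>

ssize_t not_available(void) {
  /* comma expression: assign errno, then evaluate to -1 */
  return errno = ENOSYS, -1;
}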


@@ -22,9 +22,6 @@
#ifndef UV_LINUX_SYSCALL_H_
#define UV_LINUX_SYSCALL_H_
#undef _GNU_SOURCE
#define _GNU_SOURCE
#include <stdint.h>
#include <signal.h>
#include <sys/types.h>
@@ -66,9 +63,9 @@ ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset)
int uv__dup3(int oldfd, int newfd, int flags);
ssize_t
uv__fs_copy_file_range(int fd_in,
ssize_t* off_in,
off_t* off_in,
int fd_out,
ssize_t* off_out,
off_t* off_out,
size_t len,
unsigned int flags);
int uv__statx(int dirfd,

deps/libuv/src/unix/os390-proctitle.c vendored Normal file

@@ -0,0 +1,136 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <stdlib.h>
#include <string.h>
static uv_mutex_t process_title_mutex;
static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
static char* process_title = NULL;
static void* args_mem = NULL;
static void init_process_title_mutex_once(void) {
uv_mutex_init(&process_title_mutex);
}
char** uv_setup_args(int argc, char** argv) {
char** new_argv;
size_t size;
char* s;
int i;
if (argc <= 0)
return argv;
/* Calculate how much memory we need for the argv strings. */
size = 0;
for (i = 0; i < argc; i++)
size += strlen(argv[i]) + 1;
/* Add space for the argv pointers. */
size += (argc + 1) * sizeof(char*);
new_argv = uv__malloc(size);
if (new_argv == NULL)
return argv;
/* Copy over the strings and set up the pointer table. */
s = (char*) &new_argv[argc + 1];
for (i = 0; i < argc; i++) {
size = strlen(argv[i]) + 1;
memcpy(s, argv[i], size);
new_argv[i] = s;
s += size;
}
new_argv[i] = NULL;
args_mem = new_argv;
process_title = uv__strdup(argv[0]);
return new_argv;
}
int uv_set_process_title(const char* title) {
char* new_title;
/* If uv_setup_args wasn't called or failed, we can't continue. */
if (args_mem == NULL)
return UV_ENOBUFS;
/* We cannot free this pointer when libuv shuts down,
* the process may still be using it.
*/
new_title = uv__strdup(title);
if (new_title == NULL)
return UV_ENOMEM;
uv_once(&process_title_mutex_once, init_process_title_mutex_once);
uv_mutex_lock(&process_title_mutex);
if (process_title != NULL)
uv__free(process_title);
process_title = new_title;
uv_mutex_unlock(&process_title_mutex);
return 0;
}
int uv_get_process_title(char* buffer, size_t size) {
size_t len;
if (buffer == NULL || size == 0)
return UV_EINVAL;
/* If uv_setup_args wasn't called or failed, we can't continue. */
if (args_mem == NULL || process_title == NULL)
return UV_ENOBUFS;
uv_once(&process_title_mutex_once, init_process_title_mutex_once);
uv_mutex_lock(&process_title_mutex);
len = strlen(process_title);
if (size <= len) {
uv_mutex_unlock(&process_title_mutex);
return UV_ENOBUFS;
}
strcpy(buffer, process_title);
uv_mutex_unlock(&process_title_mutex);
return 0;
}
void uv__process_title_cleanup(void) {
uv__free(args_mem); /* Keep valgrind happy. */
args_mem = NULL;
}
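
os390-proctitle.c is new: it gives z/OS the uv_setup_args()-backed process-title handling other platforms already have. A typical use of the API it implements (the calls are portable):

#include <stdio.h>
#include <uv.h>

int main(int argc, char** argv) {
  char title[64];

  argv = uv_setup_args(argc, argv);  /* must run before any title calls */
  if (uv_set_process_title("demo-title") == 0 &&
      uv_get_process_title(title, sizeof(title)) == 0)
    printf("%s\n", title);
  return 0;
}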


@@ -27,12 +27,6 @@
#include <termios.h>
#include <sys/msg.h>
#define CW_INTRPT 1
#define CW_CONDVAR 32
#pragma linkage(BPX4CTW, OS)
#pragma linkage(BPX1CTW, OS)
static QUEUE global_epoll_queue;
static uv_mutex_t global_epoll_lock;
static uv_once_t once = UV_ONCE_INIT;
@@ -55,7 +49,7 @@ int scandir(const char* maindir, struct dirent*** namelist,
if (!mdir)
return -1;
while (1) {
for (;;) {
dirent = readdir(mdir);
if (!dirent)
break;
@@ -381,46 +375,6 @@ void epoll_queue_close(uv__os390_epoll* lst) {
}
int nanosleep(const struct timespec* req, struct timespec* rem) {
unsigned nano;
unsigned seconds;
unsigned events;
unsigned secrem;
unsigned nanorem;
int rv;
int err;
int rsn;
nano = (int)req->tv_nsec;
seconds = req->tv_sec;
events = CW_CONDVAR | CW_INTRPT;
secrem = 0;
nanorem = 0;
#if defined(_LP64)
BPX4CTW(&seconds, &nano, &events, &secrem, &nanorem, &rv, &err, &rsn);
#else
BPX1CTW(&seconds, &nano, &events, &secrem, &nanorem, &rv, &err, &rsn);
#endif
/* Don't clobber errno unless BPX1CTW/BPX4CTW errored.
* Don't leak EAGAIN, that just means the timeout expired.
*/
if (rv == -1)
if (err == EAGAIN)
rv = 0;
else
errno = err;
if (rem != NULL && (rv == 0 || err == EINTR)) {
rem->tv_nsec = nanorem;
rem->tv_sec = secrem;
}
return rv;
}
char* mkdtemp(char* path) {
static const char* tempchars =
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
@@ -550,15 +504,6 @@ ssize_t os390_readlink(const char* path, char* buf, size_t len) {
}
size_t strnlen(const char* str, size_t maxlen) {
char* p = memchr(str, 0, maxlen);
if (p == NULL)
return maxlen;
else
return p - str;
}
int sem_init(UV_PLATFORM_SEM_T* semid, int pshared, unsigned int value) {
UNREACHABLE();
}


@@ -28,6 +28,7 @@
#include <dirent.h>
#include <poll.h>
#include <pthread.h>
#include "zos-base.h"
#define EPOLL_CTL_ADD 1
#define EPOLL_CTL_DEL 2
@@ -57,7 +58,6 @@ int epoll_wait(uv__os390_epoll* ep, struct epoll_event *events, int maxevents, i
int epoll_file_close(int fd);
/* utility functions */
int nanosleep(const struct timespec* req, struct timespec* rem);
int scandir(const char* maindir, struct dirent*** namelist,
int (*filter)(const struct dirent *),
int (*compar)(const struct dirent **,


@@ -28,6 +28,8 @@
#include <builtins.h>
#include <termios.h>
#include <sys/msg.h>
#include <sys/resource.h>
#include "zos-base.h"
#if defined(__clang__)
#include "csrsic.h"
#else
@@ -61,12 +63,6 @@
/* Address of the rsm control and enumeration area. */
#define CVTRCEP_OFFSET 0x490
/*
Number of frames currently available to system.
Excluded are frames backing perm storage, frames offline, and bad frames.
*/
#define RCEPOOL_OFFSET 0x004
/* Total number of frames currently on all available frame queues. */
#define RCEAFC_OFFSET 0x088
@@ -144,102 +140,8 @@ uint64_t uv__hrtime(uv_clocktype_t type) {
}
/*
Get the exe path using the thread entry information
in the address space.
*/
static int getexe(const int pid, char* buf, size_t len) {
struct {
int pid;
int thid[2];
char accesspid;
char accessthid;
char asid[2];
char loginname[8];
char flag;
char len;
} Input_data;
union {
struct {
char gthb[4];
int pid;
int thid[2];
char accesspid;
char accessthid[3];
int lenused;
int offsetProcess;
int offsetConTTY;
int offsetPath;
int offsetCommand;
int offsetFileData;
int offsetThread;
} Output_data;
char buf[2048];
} Output_buf;
struct Output_path_type {
char gthe[4];
short int len;
char path[1024];
};
int Input_length;
int Output_length;
void* Input_address;
void* Output_address;
struct Output_path_type* Output_path;
int rv;
int rc;
int rsn;
Input_length = PGTH_LEN;
Output_length = sizeof(Output_buf);
Output_address = &Output_buf;
Input_address = &Input_data;
memset(&Input_data, 0, sizeof Input_data);
Input_data.flag |= PGTHAPATH;
Input_data.pid = pid;
Input_data.accesspid = PGTH_CURRENT;
#ifdef _LP64
BPX4GTH(&Input_length,
&Input_address,
&Output_length,
&Output_address,
&rv,
&rc,
&rsn);
#else
BPX1GTH(&Input_length,
&Input_address,
&Output_length,
&Output_address,
&rv,
&rc,
&rsn);
#endif
if (rv == -1) {
errno = rc;
return -1;
}
/* Check highest byte to ensure data availability */
assert(((Output_buf.Output_data.offsetPath >>24) & 0xFF) == 'A');
/* Get the offset from the lowest 3 bytes */
Output_path = (struct Output_path_type*) ((char*) (&Output_buf) +
(Output_buf.Output_data.offsetPath & 0x00FFFFFF));
if (Output_path->len >= len) {
errno = ENOBUFS;
return -1;
}
uv__strscpy(buf, Output_path->path, len);
return 0;
static int getexe(char* buf, size_t len) {
return uv__strscpy(buf, __getargv()[0], len);
}
@@ -259,8 +161,7 @@ int uv_exepath(char* buffer, size_t* size) {
if (buffer == NULL || size == NULL || *size == 0)
return UV_EINVAL;
pid = getpid();
res = getexe(pid, args, sizeof(args));
res = getexe(args, sizeof(args));
if (res < 0)
return UV_EINVAL;
@@ -275,25 +176,25 @@ uint64_t uv_get_free_memory(void) {
data_area_ptr rcep = {0};
cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);
rcep.assign = *(data_area_ptr_assign_type*)(cvt.deref + CVTRCEP_OFFSET);
freeram = *((uint64_t*)(rcep.deref + RCEAFC_OFFSET)) * 4;
freeram = (uint64_t)*((uint32_t*)(rcep.deref + RCEAFC_OFFSET)) * 4096;
return freeram;
}
uint64_t uv_get_total_memory(void) {
uint64_t totalram;
data_area_ptr cvt = {0};
data_area_ptr rcep = {0};
cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);
rcep.assign = *(data_area_ptr_assign_type*)(cvt.deref + CVTRCEP_OFFSET);
totalram = *((uint64_t*)(rcep.deref + RCEPOOL_OFFSET)) * 4;
return totalram;
/* Use CVTRLSTG to get the size of actual real storage online at IPL in K. */
return (uint64_t)((int)((char *__ptr32 *__ptr32 *)0)[4][214]) * 1024;
}
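
Editor's note, not part of this diff: the bare pointer walk above is dense, so here is an annotated sketch of what it computes. The PSA/CVT offsets are assumptions taken from IBM's documented MVS data areas, not from this commit, and the `__ptr32` qualifier is the XL C extension for 31-bit pointers; the code is only meaningful on z/OS, where page zero (the PSA) is mapped and readable.

#include <stdint.h>

/* Hedged decode of ((char *__ptr32 *__ptr32 *)0)[4][214] * 1024. */
static uint64_t total_memory_decoded(void) {
  char* __ptr32* __ptr32* psa;  /* the PSA sits at address zero */
  char* __ptr32* cvt;
  uint32_t rlstg_kb;

  psa = 0;
  cvt = psa[4];                                /* PSA+0x10 (FLCCVT): address of the CVT */
  rlstg_kb = (uint32_t) (uintptr_t) cvt[214];  /* CVT+0x358 (CVTRLSTG): real storage at IPL, in KB */
  return (uint64_t) rlstg_kb * 1024;           /* KB -> bytes */
}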
uint64_t uv_get_constrained_memory(void) {
return 0; /* Memory constraints are unknown. */
struct rlimit rl;
/* RLIMIT_MEMLIMIT return value is in megabytes rather than bytes. */
if (getrlimit(RLIMIT_MEMLIMIT, &rl) == 0)
return rl.rlim_cur * 1024 * 1024;
return 0; /* There is no memory limit set. */
}
@ -733,6 +634,10 @@ static int os390_message_queue_handler(uv__os390_epoll* ep) {
/* Some event that we are not interested in. */
return 0;
/* `__rfim_utok` is treated as text when it should be treated as binary while
* running in ASCII mode, resulting in an unwanted autoconversion.
*/
__a2e_l(msg.__rfim_utok, sizeof(msg.__rfim_utok));
handle = *(uv_fs_event_t**)(msg.__rfim_utok);
handle->cb(handle, uv__basename_r(handle->path), events, 0);
return 1;
@ -959,9 +864,6 @@ update_timeout:
}
}
void uv__set_process_title(const char* title) {
/* do nothing */
}
int uv__io_fork(uv_loop_t* loop) {
/*

View File

@ -379,3 +379,57 @@ int uv_pipe_chmod(uv_pipe_t* handle, int mode) {
return r != -1 ? 0 : UV__ERR(errno);
}
int uv_pipe(uv_os_fd_t fds[2], int read_flags, int write_flags) {
uv_os_fd_t temp[2];
int err;
#if defined(__FreeBSD__) || defined(__linux__)
int flags = O_CLOEXEC;
if ((read_flags & UV_NONBLOCK_PIPE) && (write_flags & UV_NONBLOCK_PIPE))
flags |= UV_FS_O_NONBLOCK;
if (pipe2(temp, flags))
return UV__ERR(errno);
if (flags & UV_FS_O_NONBLOCK) {
fds[0] = temp[0];
fds[1] = temp[1];
return 0;
}
#else
if (pipe(temp))
return UV__ERR(errno);
if ((err = uv__cloexec(temp[0], 1)))
goto fail;
if ((err = uv__cloexec(temp[1], 1)))
goto fail;
#endif
if (read_flags & UV_NONBLOCK_PIPE)
if ((err = uv__nonblock(temp[0], 1)))
goto fail;
if (write_flags & UV_NONBLOCK_PIPE)
if ((err = uv__nonblock(temp[1], 1)))
goto fail;
fds[0] = temp[0];
fds[1] = temp[1];
return 0;
fail:
uv__close(temp[0]);
uv__close(temp[1]);
return err;
}
int uv__make_pipe(int fds[2], int flags) {
return uv_pipe(fds,
flags & UV_NONBLOCK_PIPE,
flags & UV_NONBLOCK_PIPE);
}
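
Editor's note, not part of this diff: a minimal caller of the new public `uv_pipe()` API, assuming a POSIX target for the write()/close() calls. It hands back raw descriptors (close-on-exec on every platform), not uv_pipe_t handles.

#include <uv.h>
#include <unistd.h>

/* Sketch: non-blocking read end, blocking write end. */
int pipe_demo(void) {
  uv_file fds[2];
  int err;

  err = uv_pipe(fds, UV_NONBLOCK_PIPE, 0);
  if (err != 0)
    return err;                  /* a negative libuv error code */
  (void) write(fds[1], "x", 1);  /* fds[0] is the read end, fds[1] the write end */
  close(fds[0]);
  close(fds[1]);
  return 0;
}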

View File

@ -79,9 +79,10 @@ int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
* Workaround for e.g. kqueue fds not supporting ioctls.
*/
err = uv__nonblock(fd, 1);
#if UV__NONBLOCK_IS_IOCTL
if (err == UV_ENOTTY)
if (uv__nonblock == uv__nonblock_ioctl)
err = uv__nonblock_fcntl(fd, 1);
err = uv__nonblock_fcntl(fd, 1);
#endif
if (err)
return err;
@ -116,12 +117,21 @@ int uv_poll_stop(uv_poll_t* handle) {
int uv_poll_start(uv_poll_t* handle, int pevents, uv_poll_cb poll_cb) {
uv__io_t** watchers;
uv__io_t* w;
int events;
assert((pevents & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT |
UV_PRIORITIZED)) == 0);
assert(!uv__is_closing(handle));
watchers = handle->loop->watchers;
w = &handle->io_watcher;
if (uv__fd_exists(handle->loop, w->fd))
if (watchers[w->fd] != w)
return UV_EEXIST;
uv__poll_stop(handle);
if (pevents == 0)

View File

@ -44,6 +44,10 @@ extern char **environ;
# include <grp.h>
#endif
#if defined(__MVS__)
# include "zos-base.h"
#endif
static void uv__chld(uv_signal_t* handle, int signum) {
uv_process_t* process;
@ -111,68 +115,6 @@ static void uv__chld(uv_signal_t* handle, int signum) {
assert(QUEUE_EMPTY(&pending));
}
static int uv__make_socketpair(int fds[2]) {
#if defined(__FreeBSD__) || defined(__linux__)
if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, fds))
return UV__ERR(errno);
return 0;
#else
int err;
if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds))
return UV__ERR(errno);
err = uv__cloexec(fds[0], 1);
if (err == 0)
err = uv__cloexec(fds[1], 1);
if (err != 0) {
uv__close(fds[0]);
uv__close(fds[1]);
return UV__ERR(errno);
}
return 0;
#endif
}
int uv__make_pipe(int fds[2], int flags) {
#if defined(__FreeBSD__) || defined(__linux__)
if (pipe2(fds, flags | O_CLOEXEC))
return UV__ERR(errno);
return 0;
#else
if (pipe(fds))
return UV__ERR(errno);
if (uv__cloexec(fds[0], 1))
goto fail;
if (uv__cloexec(fds[1], 1))
goto fail;
if (flags & UV__F_NONBLOCK) {
if (uv__nonblock(fds[0], 1))
goto fail;
if (uv__nonblock(fds[1], 1))
goto fail;
}
return 0;
fail:
uv__close(fds[0]);
uv__close(fds[1]);
return UV__ERR(errno);
#endif
}
/*
* Used for initializing stdio streams like options.stdin_stream. Returns
* zero on success. See also the cleanup section in uv_spawn().
@ -192,7 +134,7 @@ static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) {
if (container->data.stream->type != UV_NAMED_PIPE)
return UV_EINVAL;
else
return uv__make_socketpair(fds);
return uv_socketpair(SOCK_STREAM, 0, fds, 0, 0);
case UV_INHERIT_FD:
case UV_INHERIT_STREAM:
@ -259,6 +201,12 @@ static void uv__write_int(int fd, int val) {
}
static void uv__write_errno(int error_fd) {
uv__write_int(error_fd, UV__ERR(errno));
_exit(127);
}
#if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH))
/* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be
* avoided. Since this isn't called on those targets, the function
@ -287,10 +235,8 @@ static void uv__process_child_init(const uv_process_options_t* options,
if (use_fd < 0 || use_fd >= fd)
continue;
pipes[fd][1] = fcntl(use_fd, F_DUPFD, stdio_count);
if (pipes[fd][1] == -1) {
uv__write_int(error_fd, UV__ERR(errno));
_exit(127);
}
if (pipes[fd][1] == -1)
uv__write_errno(error_fd);
}
for (fd = 0; fd < stdio_count; fd++) {
@ -307,10 +253,8 @@ static void uv__process_child_init(const uv_process_options_t* options,
use_fd = open("/dev/null", fd == 0 ? O_RDONLY : O_RDWR);
close_fd = use_fd;
if (use_fd < 0) {
uv__write_int(error_fd, UV__ERR(errno));
_exit(127);
}
if (use_fd < 0)
uv__write_errno(error_fd);
}
}
@ -319,10 +263,8 @@ static void uv__process_child_init(const uv_process_options_t* options,
else
fd = dup2(use_fd, fd);
if (fd == -1) {
uv__write_int(error_fd, UV__ERR(errno));
_exit(127);
}
if (fd == -1)
uv__write_errno(error_fd);
if (fd <= 2)
uv__nonblock_fcntl(fd, 0);
@ -338,10 +280,8 @@ static void uv__process_child_init(const uv_process_options_t* options,
uv__close(use_fd);
}
if (options->cwd != NULL && chdir(options->cwd)) {
uv__write_int(error_fd, UV__ERR(errno));
_exit(127);
}
if (options->cwd != NULL && chdir(options->cwd))
uv__write_errno(error_fd);
if (options->flags & (UV_PROCESS_SETUID | UV_PROCESS_SETGID)) {
/* When dropping privileges from root, the `setgroups` call will
@ -354,15 +294,11 @@ static void uv__process_child_init(const uv_process_options_t* options,
SAVE_ERRNO(setgroups(0, NULL));
}
if ((options->flags & UV_PROCESS_SETGID) && setgid(options->gid)) {
uv__write_int(error_fd, UV__ERR(errno));
_exit(127);
}
if ((options->flags & UV_PROCESS_SETGID) && setgid(options->gid))
uv__write_errno(error_fd);
if ((options->flags & UV_PROCESS_SETUID) && setuid(options->uid)) {
uv__write_int(error_fd, UV__ERR(errno));
_exit(127);
}
if ((options->flags & UV_PROCESS_SETUID) && setuid(options->uid))
uv__write_errno(error_fd);
if (options->env != NULL) {
environ = options->env;
@ -385,22 +321,23 @@ static void uv__process_child_init(const uv_process_options_t* options,
if (SIG_ERR != signal(n, SIG_DFL))
continue;
uv__write_int(error_fd, UV__ERR(errno));
_exit(127);
uv__write_errno(error_fd);
}
/* Reset signal mask. */
sigemptyset(&set);
err = pthread_sigmask(SIG_SETMASK, &set, NULL);
if (err != 0) {
uv__write_int(error_fd, UV__ERR(err));
_exit(127);
}
if (err != 0)
uv__write_errno(error_fd);
#ifdef __MVS__
execvpe(options->file, options->args, environ);
#else
execvp(options->file, options->args);
uv__write_int(error_fd, UV__ERR(errno));
_exit(127);
#endif
uv__write_errno(error_fd);
}
#endif

View File

@ -84,10 +84,7 @@ char** uv_setup_args(int argc, char** argv) {
}
new_argv[i] = NULL;
/* argv is not adjacent on z/os, we use just argv[0] on that platform. */
#ifndef __MVS__
pt.cap = argv[i - 1] + size - argv[0];
#endif
args_mem = new_argv;
process_title = pt;
@ -119,6 +116,7 @@ int uv_set_process_title(const char* title) {
memcpy(pt->str, title, len);
memset(pt->str + len, '\0', pt->cap - len);
pt->len = len;
uv__set_process_title(pt->str);
uv_mutex_unlock(&process_title_mutex);

View File

@ -265,7 +265,7 @@ static int uv__signal_loop_once_init(uv_loop_t* loop) {
if (loop->signal_pipefd[0] != -1)
return 0;
err = uv__make_pipe(loop->signal_pipefd, UV__F_NONBLOCK);
err = uv__make_pipe(loop->signal_pipefd, UV_NONBLOCK_PIPE);
if (err)
return err;

View File

@ -164,7 +164,7 @@ static void uv__stream_osx_select(void* arg) {
else
max_fd = s->int_fd;
while (1) {
for (;;) {
/* Terminate on semaphore */
if (uv_sem_trywait(&s->close_sem) == 0)
break;
@ -195,7 +195,7 @@ static void uv__stream_osx_select(void* arg) {
/* Empty socketpair's buffer in case of interruption */
if (FD_ISSET(s->int_fd, s->sread))
while (1) {
for (;;) {
r = read(s->int_fd, buf, sizeof(buf));
if (r == sizeof(buf))
@ -799,33 +799,21 @@ static int uv__handle_fd(uv_handle_t* handle) {
}
}
static void uv__write(uv_stream_t* stream) {
static int uv__try_write(uv_stream_t* stream,
const uv_buf_t bufs[],
unsigned int nbufs,
uv_stream_t* send_handle) {
struct iovec* iov;
QUEUE* q;
uv_write_t* req;
int iovmax;
int iovcnt;
ssize_t n;
int err;
start:
assert(uv__stream_fd(stream) >= 0);
if (QUEUE_EMPTY(&stream->write_queue))
return;
q = QUEUE_HEAD(&stream->write_queue);
req = QUEUE_DATA(q, uv_write_t, queue);
assert(req->handle == stream);
/*
* Cast to iovec. We had to have our own uv_buf_t instead of iovec
* because Windows's WSABUF is not an iovec.
*/
assert(sizeof(uv_buf_t) == sizeof(struct iovec));
iov = (struct iovec*) &(req->bufs[req->write_index]);
iovcnt = req->nbufs - req->write_index;
iov = (struct iovec*) bufs;
iovcnt = nbufs;
iovmax = uv__getiovmax();
@ -837,8 +825,7 @@ start:
* Now do the actual writev. Note that we've been updating the pointers
* inside the iov each time we write. So there is no need to offset it.
*/
if (req->send_handle) {
if (send_handle != NULL) {
int fd_to_send;
struct msghdr msg;
struct cmsghdr *cmsg;
@ -847,12 +834,10 @@ start:
struct cmsghdr alias;
} scratch;
if (uv__is_closing(req->send_handle)) {
err = UV_EBADF;
goto error;
}
if (uv__is_closing(send_handle))
return UV_EBADF;
fd_to_send = uv__handle_fd((uv_handle_t*) req->send_handle);
fd_to_send = uv__handle_fd((uv_handle_t*) send_handle);
memset(&scratch, 0, sizeof(scratch));
@ -882,44 +867,68 @@ start:
do
n = sendmsg(uv__stream_fd(stream), &msg, 0);
while (n == -1 && RETRY_ON_WRITE_ERROR(errno));
/* Ensure the handle isn't sent again in case this is a partial write. */
if (n >= 0)
req->send_handle = NULL;
} else {
do
n = uv__writev(uv__stream_fd(stream), iov, iovcnt);
while (n == -1 && RETRY_ON_WRITE_ERROR(errno));
}
if (n == -1 && !IS_TRANSIENT_WRITE_ERROR(errno, req->send_handle)) {
err = UV__ERR(errno);
goto error;
if (n >= 0)
return n;
if (IS_TRANSIENT_WRITE_ERROR(errno, send_handle))
return UV_EAGAIN;
return UV__ERR(errno);
}
static void uv__write(uv_stream_t* stream) {
QUEUE* q;
uv_write_t* req;
ssize_t n;
assert(uv__stream_fd(stream) >= 0);
for (;;) {
if (QUEUE_EMPTY(&stream->write_queue))
return;
q = QUEUE_HEAD(&stream->write_queue);
req = QUEUE_DATA(q, uv_write_t, queue);
assert(req->handle == stream);
n = uv__try_write(stream,
&(req->bufs[req->write_index]),
req->nbufs - req->write_index,
req->send_handle);
/* Ensure the handle isn't sent again in case this is a partial write. */
if (n >= 0) {
req->send_handle = NULL;
if (uv__write_req_update(stream, req, n)) {
uv__write_req_finish(req);
return; /* TODO(bnoordhuis) Start trying to write the next request. */
}
} else if (n != UV_EAGAIN)
break;
/* If this is a blocking stream, try again. */
if (stream->flags & UV_HANDLE_BLOCKING_WRITES)
continue;
/* We're not done. */
uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
/* Notify select() thread about state change */
uv__stream_osx_interrupt_select(stream);
return;
}
if (n >= 0 && uv__write_req_update(stream, req, n)) {
uv__write_req_finish(req);
return; /* TODO(bnoordhuis) Start trying to write the next request. */
}
/* If this is a blocking stream, try again. */
if (stream->flags & UV_HANDLE_BLOCKING_WRITES)
goto start;
/* We're not done. */
uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
/* Notify select() thread about state change */
uv__stream_osx_interrupt_select(stream);
return;
error:
req->error = err;
req->error = n;
// XXX(jwn): this must call uv__stream_flush_write_queue(stream, n) here, since we won't generate any more events
uv__write_req_finish(req);
uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
if (!uv__io_active(&stream->io_watcher, POLLIN))
uv__handle_stop(stream);
uv__stream_osx_interrupt_select(stream);
}
@ -1001,9 +1010,9 @@ uv_handle_type uv__handle_type(int fd) {
static void uv__stream_eof(uv_stream_t* stream, const uv_buf_t* buf) {
stream->flags |= UV_HANDLE_READ_EOF;
stream->flags &= ~UV_HANDLE_READING;
stream->flags &= ~UV_HANDLE_READABLE;
uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
if (!uv__io_active(&stream->io_watcher, POLLOUT))
uv__handle_stop(stream);
uv__handle_stop(stream);
uv__stream_osx_interrupt_select(stream);
stream->read_cb(stream, UV_EOF, buf);
}
@ -1188,12 +1197,12 @@ static void uv__read(uv_stream_t* stream) {
#endif
} else {
/* Error. User should call uv_close(). */
stream->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
stream->read_cb(stream, UV__ERR(errno), &buf);
if (stream->flags & UV_HANDLE_READING) {
stream->flags &= ~UV_HANDLE_READING;
uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
if (!uv__io_active(&stream->io_watcher, POLLOUT))
uv__handle_stop(stream);
uv__handle_stop(stream);
uv__stream_osx_interrupt_select(stream);
}
}
@ -1276,6 +1285,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
req->cb = cb;
stream->shutdown_req = req;
stream->flags |= UV_HANDLE_SHUTTING;
stream->flags &= ~UV_HANDLE_WRITABLE;
uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
uv__stream_osx_interrupt_select(stream);
@ -1390,14 +1400,9 @@ static void uv__stream_connect(uv_stream_t* stream) {
}
int uv_write2(uv_write_t* req,
uv_stream_t* stream,
const uv_buf_t bufs[],
unsigned int nbufs,
uv_stream_t* send_handle,
uv_write_cb cb) {
int empty_queue;
static int uv__check_before_write(uv_stream_t* stream,
unsigned int nbufs,
uv_stream_t* send_handle) {
assert(nbufs > 0);
assert((stream->type == UV_TCP ||
stream->type == UV_NAMED_PIPE ||
@ -1410,7 +1415,7 @@ int uv_write2(uv_write_t* req,
if (!(stream->flags & UV_HANDLE_WRITABLE))
return UV_EPIPE;
if (send_handle) {
if (send_handle != NULL) {
if (stream->type != UV_NAMED_PIPE || !((uv_pipe_t*)stream)->ipc)
return UV_EINVAL;
@ -1430,6 +1435,22 @@ int uv_write2(uv_write_t* req,
#endif
}
return 0;
}
int uv_write2(uv_write_t* req,
uv_stream_t* stream,
const uv_buf_t bufs[],
unsigned int nbufs,
uv_stream_t* send_handle,
uv_write_cb cb) {
int empty_queue;
int err;
err = uv__check_before_write(stream, nbufs, send_handle);
if (err < 0)
return err;
/* It's legal for write_queue_size > 0 even when the write_queue is empty;
* it means there are error-state requests in the write_completed_queue that
* will touch up write_queue_size later, see also uv__write_req_finish().
@ -1498,72 +1519,37 @@ int uv_write(uv_write_t* req,
}
void uv_try_write_cb(uv_write_t* req, int status) {
/* Should not be called */
abort();
}
int uv_try_write(uv_stream_t* stream,
const uv_buf_t bufs[],
unsigned int nbufs) {
int r;
int has_pollout;
size_t written;
size_t req_size;
uv_write_t req;
return uv_try_write2(stream, bufs, nbufs, NULL);
}
int uv_try_write2(uv_stream_t* stream,
const uv_buf_t bufs[],
unsigned int nbufs,
uv_stream_t* send_handle) {
int err;
/* Connecting or already writing some data */
if (stream->connect_req != NULL || stream->write_queue_size != 0)
return UV_EAGAIN;
has_pollout = uv__io_active(&stream->io_watcher, POLLOUT);
err = uv__check_before_write(stream, nbufs, NULL);
if (err < 0)
return err;
r = uv_write(&req, stream, bufs, nbufs, uv_try_write_cb);
if (r != 0)
return r;
/* Remove not written bytes from write queue size */
written = uv__count_bufs(bufs, nbufs);
if (req.bufs != NULL)
req_size = uv__write_req_size(&req);
else
req_size = 0;
written -= req_size;
stream->write_queue_size -= req_size;
/* Unqueue request, regardless of immediateness */
QUEUE_REMOVE(&req.queue);
uv__req_unregister(stream->loop, &req);
if (req.bufs != req.bufsml)
uv__free(req.bufs);
req.bufs = NULL;
/* Do not poll for writable, if we wasn't before calling this */
if (!has_pollout) {
uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
uv__stream_osx_interrupt_select(stream);
}
if (written == 0 && req_size != 0)
return req.error < 0 ? req.error : UV_EAGAIN;
else
return written;
return uv__try_write(stream, bufs, nbufs, send_handle);
}
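
Editor's note, not part of this diff: `uv_try_write2()` is the new public entry point this refactor enables. A minimal sketch of using it to pass a connection over an IPC pipe, falling back to a queued write when the kernel buffer is full; note that on Windows, uv_try_write2() with a send handle always reports UV_EAGAIN, as a later hunk in this commit shows.

#include <uv.h>

int try_send_handle(uv_pipe_t* ipc, uv_tcp_t* conn) {
  uv_buf_t buf;
  int n;

  buf = uv_buf_init(".", 1);  /* at least one byte rides along with the handle */
  n = uv_try_write2((uv_stream_t*) ipc, &buf, 1, (uv_stream_t*) conn);
  if (n == UV_EAGAIN)
    return 0;  /* nothing written; queue a uv_write2() instead (not shown) */
  return n;    /* bytes written, or a negative error code */
}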
int uv_read_start(uv_stream_t* stream,
uv_alloc_cb alloc_cb,
uv_read_cb read_cb) {
int uv__read_start(uv_stream_t* stream,
uv_alloc_cb alloc_cb,
uv_read_cb read_cb) {
assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE ||
stream->type == UV_TTY);
if (stream->flags & UV_HANDLE_CLOSING)
return UV_EINVAL;
if (!(stream->flags & UV_HANDLE_READABLE))
return UV_ENOTCONN;
/* The UV_HANDLE_READING flag is independent of the state of the tcp - it
* just expresses the desired state of the user.
*/
@ -1593,8 +1579,7 @@ int uv_read_stop(uv_stream_t* stream) {
stream->flags &= ~UV_HANDLE_READING;
uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
if (!uv__io_active(&stream->io_watcher, POLLOUT))
uv__handle_stop(stream);
uv__handle_stop(stream);
uv__stream_osx_interrupt_select(stream);
stream->read_cb = NULL;

View File

@ -865,3 +865,14 @@ void uv_free_interface_addresses(uv_interface_address_t* addresses,
uv__free(addresses);
}
#if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L
size_t strnlen(const char* s, size_t maxlen) {
const char* end;
end = memchr(s, '\0', maxlen);
if (end == NULL)
return maxlen;
return end - s;
}
#endif

View File

@ -214,14 +214,15 @@ int uv__tcp_connect(uv_connect_t* req,
if (handle->connect_req != NULL)
return UV_EALREADY; /* FIXME(bnoordhuis) UV_EINVAL or maybe UV_EBUSY. */
if (handle->delayed_error != 0)
goto out;
err = maybe_new_socket(handle,
addr->sa_family,
UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
if (err)
return err;
handle->delayed_error = 0;
do {
errno = 0;
r = connect(uv__stream_fd(handle), addr, addrlen);
@ -249,6 +250,8 @@ int uv__tcp_connect(uv_connect_t* req,
return UV__ERR(errno);
}
out:
uv__req_init(handle->loop, req, UV_CONNECT);
req->cb = cb;
req->handle = (uv_stream_t*) handle;
@ -459,3 +462,49 @@ int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
void uv__tcp_close(uv_tcp_t* handle) {
uv__stream_close((uv_stream_t*)handle);
}
int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int flags1) {
uv_os_sock_t temp[2];
int err;
#if defined(__FreeBSD__) || defined(__linux__)
int flags;
flags = type | SOCK_CLOEXEC;
if ((flags0 & UV_NONBLOCK_PIPE) && (flags1 & UV_NONBLOCK_PIPE))
flags |= SOCK_NONBLOCK;
if (socketpair(AF_UNIX, flags, protocol, temp))
return UV__ERR(errno);
if (flags & UV_FS_O_NONBLOCK) {
fds[0] = temp[0];
fds[1] = temp[1];
return 0;
}
#else
if (socketpair(AF_UNIX, type, protocol, temp))
return UV__ERR(errno);
if ((err = uv__cloexec(temp[0], 1)))
goto fail;
if ((err = uv__cloexec(temp[1], 1)))
goto fail;
#endif
if (flags0 & UV_NONBLOCK_PIPE)
if ((err = uv__nonblock(temp[0], 1)))
goto fail;
if (flags1 & UV_NONBLOCK_PIPE)
if ((err = uv__nonblock(temp[1], 1)))
goto fail;
fds[0] = temp[0];
fds[1] = temp[1];
return 0;
fail:
uv__close(temp[0]);
uv__close(temp[1]);
return err;
}
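
Editor's note, not part of this diff: a minimal sketch of calling the new API. SOCK_STREAM comes in through uv.h, and on Windows the same call is emulated with a loopback TCP pair later in this commit.

#include <uv.h>

int socketpair_demo(uv_os_sock_t fds[2]) {
  /* Both ends close-on-exec; both ends non-blocking. */
  return uv_socketpair(SOCK_STREAM, 0, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE);
}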

View File

@ -107,8 +107,7 @@ int uv_barrier_wait(uv_barrier_t* barrier) {
}
last = (--b->out == 0);
if (!last)
uv_cond_signal(&b->cond); /* Not needed for last thread. */
uv_cond_signal(&b->cond);
uv_mutex_unlock(&b->mutex);
return last;
@ -122,9 +121,10 @@ void uv_barrier_destroy(uv_barrier_t* barrier) {
uv_mutex_lock(&b->mutex);
assert(b->in == 0);
assert(b->out == 0);
while (b->out != 0)
uv_cond_wait(&b->cond, &b->mutex);
if (b->in != 0 || b->out != 0)
if (b->in != 0)
abort();
uv_mutex_unlock(&b->mutex);
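
Editor's note, not part of this diff: the point of waiting on `b->out` in uv_barrier_destroy() is to make the common teardown pattern race-free. A sketch of that pattern, in which exactly one waiter sees a nonzero return and destroys the barrier:

#include <uv.h>

static uv_barrier_t barrier;  /* uv_barrier_init(&barrier, nthreads) elsewhere */

static void worker(void* arg) {
  /* ... per-thread work ... */
  if (uv_barrier_wait(&barrier) > 0)
    uv_barrier_destroy(&barrier);  /* safe: destroy now waits for stragglers */
}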
@ -168,7 +168,7 @@ void uv_barrier_destroy(uv_barrier_t* barrier) {
* On Linux, threads created by musl have a much smaller stack than threads
* created by glibc (80 vs. 2048 or 4096 kB.) Follow glibc for consistency.
*/
static size_t thread_stack_size(void) {
size_t uv__thread_stack_size(void) {
#if defined(__APPLE__) || defined(__linux__)
struct rlimit lim;
@ -234,7 +234,7 @@ int uv_thread_create_ex(uv_thread_t* tid,
attr = NULL;
if (stack_size == 0) {
stack_size = thread_stack_size();
stack_size = uv__thread_stack_size();
} else {
pagesize = (size_t)getpagesize();
/* Round up to the nearest page boundary. */

View File

@ -242,6 +242,24 @@ static void uv__tty_make_raw(struct termios* tio) {
tio->c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
tio->c_cflag &= ~(CSIZE | PARENB);
tio->c_cflag |= CS8;
/*
* By default, most software expects a pending read to block until at
* least one byte becomes available. As per termio(7I), this requires
* setting the MIN and TIME parameters appropriately.
*
* As a somewhat unfortunate artifact of history, the MIN and TIME slots
* in the control character array overlap with the EOF and EOL slots used
* for canonical mode processing. Because the EOF character needs to be
* the ASCII EOT value (aka Control-D), it has the byte value 4. When
* switching to raw mode, this is interpreted as a MIN value of 4; i.e.,
* reads will block until at least four bytes have been input.
*
* Other platforms with a distinct MIN slot like Linux and FreeBSD appear
* to default to a MIN value of 1, so we'll force that value here:
*/
tio->c_cc[VMIN] = 1;
tio->c_cc[VTIME] = 0;
#else
cfmakeraw(tio);
#endif /* #ifdef __sun */
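
Editor's note, not part of this diff: from the application side nothing changes except the observed behavior. A sketch of entering raw mode, after which a single keypress on Solaris-derived systems now wakes the read instead of the fourth one:

#include <uv.h>

int raw_tty_demo(uv_loop_t* loop, uv_tty_t* tty) {
  int err;

  err = uv_tty_init(loop, tty, 0, 0);  /* fd 0: stdin */
  if (err != 0)
    return err;
  return uv_tty_set_mode(tty, UV_TTY_MODE_RAW);
}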

View File

@ -32,8 +32,6 @@
#endif
#include <sys/un.h>
#define UV__UDP_DGRAM_MAXSIZE (64 * 1024)
#if defined(IPV6_JOIN_GROUP) && !defined(IPV6_ADD_MEMBERSHIP)
# define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
#endif
@ -504,6 +502,28 @@ static int uv__set_reuse(int fd) {
return 0;
}
/*
* The Linux kernel suppresses some ICMP error messages by default for UDP
* sockets. Setting IP_RECVERR/IPV6_RECVERR on the socket enables full ICMP
* error reporting, hopefully resulting in faster failover to working name
* servers.
*/
static int uv__set_recverr(int fd, sa_family_t ss_family) {
#if defined(__linux__)
int yes;
yes = 1;
if (ss_family == AF_INET) {
if (setsockopt(fd, IPPROTO_IP, IP_RECVERR, &yes, sizeof(yes)))
return UV__ERR(errno);
} else if (ss_family == AF_INET6) {
if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &yes, sizeof(yes)))
return UV__ERR(errno);
}
#endif
return 0;
}
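
Editor's note, not part of this diff: callers opt in through the new bind flag. A sketch follows; since the setsockopt calls above are compiled out on non-Linux Unixes, the flag degrades to a no-op there.

#include <uv.h>

int recverr_bind_demo(uv_loop_t* loop, uv_udp_t* handle) {
  struct sockaddr_in addr;
  int err;

  err = uv_udp_init(loop, handle);
  if (err != 0)
    return err;
  uv_ip4_addr("0.0.0.0", 0, &addr);
  return uv_udp_bind(handle, (const struct sockaddr*) &addr,
                     UV_UDP_LINUX_RECVERR);
}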
int uv__udp_bind(uv_udp_t* handle,
const struct sockaddr* addr,
@ -514,7 +534,7 @@ int uv__udp_bind(uv_udp_t* handle,
int fd;
/* Check for bad flags. */
if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR))
if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR | UV_UDP_LINUX_RECVERR))
return UV_EINVAL;
/* Cannot set IPv6-only mode on non-IPv6 socket. */
@ -530,6 +550,12 @@ int uv__udp_bind(uv_udp_t* handle,
handle->io_watcher.fd = fd;
}
if (flags & UV_UDP_LINUX_RECVERR) {
err = uv__set_recverr(fd, addr->sa_family);
if (err)
return err;
}
if (flags & UV_UDP_REUSEADDR) {
err = uv__set_reuse(fd);
if (err)

View File

@ -832,6 +832,25 @@ void uv_loop_delete(uv_loop_t* loop) {
}
int uv_read_start(uv_stream_t* stream,
uv_alloc_cb alloc_cb,
uv_read_cb read_cb) {
if (stream == NULL || alloc_cb == NULL || read_cb == NULL)
return UV_EINVAL;
if (stream->flags & UV_HANDLE_CLOSING)
return UV_EINVAL;
if (stream->flags & UV_HANDLE_READING)
return UV_EALREADY;
if (!(stream->flags & UV_HANDLE_READABLE))
return UV_ENOTCONN;
return uv__read_start(stream, alloc_cb, read_cb);
}
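
Editor's note, not part of this diff: one observable effect of hoisting these checks into uv.c is that the error contract is now identical across platforms. For example (a sketch), a second uv_read_start() on a stream that is already reading reports UV_EALREADY everywhere:

#include <uv.h>
#include <stdlib.h>

static void on_alloc(uv_handle_t* h, size_t hint, uv_buf_t* buf) {
  buf->base = malloc(hint);
  buf->len = buf->base == NULL ? 0 : hint;
}

static void on_read(uv_stream_t* s, ssize_t nread, const uv_buf_t* buf) {
  free(buf->base);
  if (nread < 0)
    uv_close((uv_handle_t*) s, NULL);
}

int read_twice_demo(uv_stream_t* stream) {
  int err;

  err = uv_read_start(stream, on_alloc, on_read);
  if (err != 0)
    return err;
  return uv_read_start(stream, on_alloc, on_read);  /* UV_EALREADY */
}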
void uv_os_free_environ(uv_env_item_t* envitems, int count) {
int i;
@ -853,7 +872,11 @@ void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
}
#ifdef __GNUC__ /* Also covers __clang__ and __INTEL_COMPILER. */
/* Also covers __clang__ and __INTEL_COMPILER. Disabled on Windows because
* threads have already been forcibly terminated by the operating system
* by the time destructors run, ergo, it's not safe to try to clean them up.
*/
#if defined(__GNUC__) && !defined(_WIN32)
__attribute__((destructor))
#endif
void uv_library_shutdown(void) {

View File

@ -68,6 +68,8 @@ extern int snprintf(char*, size_t, const char*, ...);
#define uv__store_relaxed(p, v) do *p = v; while (0)
#endif
#define UV__UDP_DGRAM_MAXSIZE (64 * 1024)
/* Handle flags. Some flags are specific to Windows or UNIX. */
enum {
/* Used by all handles. */
@ -106,8 +108,7 @@ enum {
UV_HANDLE_TCP_KEEPALIVE = 0x02000000,
UV_HANDLE_TCP_SINGLE_ACCEPT = 0x04000000,
UV_HANDLE_TCP_ACCEPT_STATE_CHANGING = 0x08000000,
UV_HANDLE_TCP_SOCKET_CLOSED = 0x10000000,
UV_HANDLE_SHARED_TCP_SOCKET = 0x20000000,
UV_HANDLE_SHARED_TCP_SOCKET = 0x10000000,
/* Only used by uv_udp_t handles. */
UV_HANDLE_UDP_PROCESSING = 0x01000000,
@ -136,6 +137,10 @@ int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap);
void uv__loop_close(uv_loop_t* loop);
int uv__read_start(uv_stream_t* stream,
uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
int uv__tcp_bind(uv_tcp_t* tcp,
const struct sockaddr* addr,
unsigned int addrlen,

View File

@ -39,10 +39,11 @@ static char INLINE uv__atomic_exchange_set(char volatile* target) {
return _InterlockedOr8(target, 1);
}
#else /* GCC */
#else /* GCC, Clang in mingw mode */
/* Mingw-32 version, hopefully this works for 64-bit gcc as well. */
static inline char uv__atomic_exchange_set(char volatile* target) {
#if defined(__i386__) || defined(__x86_64__)
/* Mingw-32 version, hopefully this works for 64-bit gcc as well. */
const char one = 1;
char old_value;
__asm__ __volatile__ ("lock xchgb %0, %1\n\t"
@ -50,6 +51,9 @@ static inline char uv__atomic_exchange_set(char volatile* target) {
: "0"(one), "m"(*target)
: "memory");
return old_value;
#else
return __sync_fetch_and_or(target, 1);
#endif
}
#endif

View File

@ -105,7 +105,6 @@ int uv_translate_sys_error(int sys_errno) {
case ERROR_SYMLINK_NOT_SUPPORTED: return UV_EINVAL;
case WSAEINVAL: return UV_EINVAL;
case WSAEPFNOSUPPORT: return UV_EINVAL;
case WSAESOCKTNOSUPPORT: return UV_EINVAL;
case ERROR_BEGINNING_OF_MEDIA: return UV_EIO;
case ERROR_BUS_RESET: return UV_EIO;
case ERROR_CRC: return UV_EIO;
@ -168,6 +167,7 @@ int uv_translate_sys_error(int sys_errno) {
case ERROR_NOT_SAME_DEVICE: return UV_EXDEV;
case ERROR_INVALID_FUNCTION: return UV_EISDIR;
case ERROR_META_EXPANSION_TOO_LONG: return UV_E2BIG;
case WSAESOCKTNOSUPPORT: return UV_ESOCKTNOSUPPORT;
default: return UV_UNKNOWN;
}
}

View File

@ -92,30 +92,24 @@
return; \
}
#define MILLIONu (1000U * 1000U)
#define BILLIONu (1000U * 1000U * 1000U)
#define MILLION ((int64_t) 1000 * 1000)
#define BILLION ((int64_t) 1000 * 1000 * 1000)
#define FILETIME_TO_UINT(filetime) \
(*((uint64_t*) &(filetime)) - (uint64_t) 116444736 * BILLIONu)
#define FILETIME_TO_TIME_T(filetime) \
(FILETIME_TO_UINT(filetime) / (10u * MILLIONu))
#define FILETIME_TO_TIME_NS(filetime, secs) \
((FILETIME_TO_UINT(filetime) - (secs * (uint64_t) 10 * MILLIONu)) * 100U)
#define FILETIME_TO_TIMESPEC(ts, filetime) \
do { \
(ts).tv_sec = (long) FILETIME_TO_TIME_T(filetime); \
(ts).tv_nsec = (long) FILETIME_TO_TIME_NS(filetime, (ts).tv_sec); \
} while(0)
static void uv__filetime_to_timespec(uv_timespec_t *ts, int64_t filetime) {
filetime -= 116444736 * BILLION;
ts->tv_sec = (long) (filetime / (10 * MILLION));
ts->tv_nsec = (long) ((filetime - ts->tv_sec * 10 * MILLION) * 100U);
if (ts->tv_nsec < 0) {
ts->tv_sec -= 1;
ts->tv_nsec += 1e9;
}
}
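
Editor's note, not part of this diff: the magic constant is the Windows-to-Unix epoch offset expressed in FILETIME ticks. A one-line sanity check of the arithmetic:

#include <assert.h>
#include <stdint.h>

static void check_epoch_constant(void) {
  /* 1601-01-01 to 1970-01-01 is 11644473600 seconds; FILETIME counts
   * 100 ns ticks, i.e. 10^7 per second. */
  assert(INT64_C(11644473600) * INT64_C(10000000) ==
         INT64_C(116444736) * INT64_C(1000000000));
}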
#define TIME_T_TO_FILETIME(time, filetime_ptr) \
do { \
uint64_t bigtime = ((uint64_t) ((time) * (uint64_t) 10 * MILLIONu)) + \
(uint64_t) 116444736 * BILLIONu; \
(filetime_ptr)->dwLowDateTime = bigtime & 0xFFFFFFFF; \
(filetime_ptr)->dwHighDateTime = bigtime >> 32; \
int64_t bigtime = ((time) * 10 * MILLION + 116444736 * BILLION); \
(filetime_ptr)->dwLowDateTime = (uint64_t) bigtime & 0xFFFFFFFF; \
(filetime_ptr)->dwHighDateTime = (uint64_t) bigtime >> 32; \
} while(0)
#define IS_SLASH(c) ((c) == L'\\' || (c) == L'/')
@ -1224,7 +1218,8 @@ void fs__mkdir(uv_fs_t* req) {
SET_REQ_RESULT(req, 0);
} else {
SET_REQ_WIN32_ERROR(req, GetLastError());
if (req->sys_errno_ == ERROR_INVALID_NAME)
if (req->sys_errno_ == ERROR_INVALID_NAME ||
req->sys_errno_ == ERROR_DIRECTORY)
req->result = UV_EINVAL;
}
}
@ -1243,7 +1238,7 @@ void fs__mktemp(uv_fs_t* req, uv__fs_mktemp_func func) {
uint64_t v;
char* path;
path = req->path;
path = (char*)req->path;
len = wcslen(req->file.pathw);
ep = req->file.pathw + len;
if (len < num_x || wcsncmp(ep - num_x, L"XXXXXX", num_x)) {
@ -1791,10 +1786,14 @@ INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf,
statbuf->st_mode |= (_S_IREAD | _S_IWRITE) | ((_S_IREAD | _S_IWRITE) >> 3) |
((_S_IREAD | _S_IWRITE) >> 6);
FILETIME_TO_TIMESPEC(statbuf->st_atim, file_info.BasicInformation.LastAccessTime);
FILETIME_TO_TIMESPEC(statbuf->st_ctim, file_info.BasicInformation.ChangeTime);
FILETIME_TO_TIMESPEC(statbuf->st_mtim, file_info.BasicInformation.LastWriteTime);
FILETIME_TO_TIMESPEC(statbuf->st_birthtim, file_info.BasicInformation.CreationTime);
uv__filetime_to_timespec(&statbuf->st_atim,
file_info.BasicInformation.LastAccessTime.QuadPart);
uv__filetime_to_timespec(&statbuf->st_ctim,
file_info.BasicInformation.ChangeTime.QuadPart);
uv__filetime_to_timespec(&statbuf->st_mtim,
file_info.BasicInformation.LastWriteTime.QuadPart);
uv__filetime_to_timespec(&statbuf->st_birthtim,
file_info.BasicInformation.CreationTime.QuadPart);
statbuf->st_ino = file_info.InternalInformation.IndexNumber.QuadPart;

View File

@ -115,8 +115,8 @@ void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle);
/*
* Pipes
*/
int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
char* name, size_t nameSize);
int uv__create_stdio_pipe_pair(uv_loop_t* loop,
uv_pipe_t* parent_pipe, HANDLE* child_pipe_ptr, unsigned int flags);
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client);

View File

@ -202,17 +202,17 @@ static void close_pipe(uv_pipe_t* pipe) {
}
int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
char* name, size_t nameSize) {
static int uv__pipe_server(
HANDLE* pipeHandle_ptr, DWORD access,
char* name, size_t nameSize, char* random) {
HANDLE pipeHandle;
int err;
char* ptr = (char*)handle;
for (;;) {
uv_unique_pipe_name(ptr, name, nameSize);
uv_unique_pipe_name(random, name, nameSize);
pipeHandle = CreateNamedPipeA(name,
access | FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE | WRITE_DAC,
access | FILE_FLAG_FIRST_PIPE_INSTANCE,
PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT, 1, 65536, 65536, 0,
NULL);
@ -226,20 +226,11 @@ int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
goto error;
}
/* Pipe name collision. Increment the pointer and try again. */
ptr++;
/* Pipe name collision. Increment the random number and try again. */
random++;
}
if (CreateIoCompletionPort(pipeHandle,
loop->iocp,
(ULONG_PTR)handle,
0) == NULL) {
err = GetLastError();
goto error;
}
uv_pipe_connection_init(handle);
handle->handle = pipeHandle;
*pipeHandle_ptr = pipeHandle;
return 0;
@ -251,6 +242,214 @@ int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
}
static int uv__create_pipe_pair(
HANDLE* server_pipe_ptr, HANDLE* client_pipe_ptr,
unsigned int server_flags, unsigned int client_flags,
int inherit_client, char* random) {
/* allowed flags are: UV_READABLE_PIPE | UV_WRITABLE_PIPE | UV_NONBLOCK_PIPE */
char pipe_name[64];
SECURITY_ATTRIBUTES sa;
DWORD server_access;
DWORD client_access;
HANDLE server_pipe;
HANDLE client_pipe;
int err;
server_pipe = INVALID_HANDLE_VALUE;
client_pipe = INVALID_HANDLE_VALUE;
server_access = 0;
if (server_flags & UV_READABLE_PIPE)
server_access |= PIPE_ACCESS_INBOUND;
if (server_flags & UV_WRITABLE_PIPE)
server_access |= PIPE_ACCESS_OUTBOUND;
if (server_flags & UV_NONBLOCK_PIPE)
server_access |= FILE_FLAG_OVERLAPPED;
server_access |= WRITE_DAC;
client_access = 0;
if (client_flags & UV_READABLE_PIPE)
client_access |= GENERIC_READ;
else
client_access |= FILE_READ_ATTRIBUTES;
if (client_flags & UV_WRITABLE_PIPE)
client_access |= GENERIC_WRITE;
else
client_access |= FILE_WRITE_ATTRIBUTES;
client_access |= WRITE_DAC;
/* Create server pipe handle. */
err = uv__pipe_server(&server_pipe,
server_access,
pipe_name,
sizeof(pipe_name),
random);
if (err)
goto error;
/* Create client pipe handle. */
sa.nLength = sizeof sa;
sa.lpSecurityDescriptor = NULL;
sa.bInheritHandle = inherit_client;
client_pipe = CreateFileA(pipe_name,
client_access,
0,
&sa,
OPEN_EXISTING,
(client_flags & UV_NONBLOCK_PIPE) ? FILE_FLAG_OVERLAPPED : 0,
NULL);
if (client_pipe == INVALID_HANDLE_VALUE) {
err = GetLastError();
goto error;
}
#ifndef NDEBUG
/* Validate that the pipe was opened in the right mode. */
{
DWORD mode;
BOOL r;
r = GetNamedPipeHandleState(client_pipe, &mode, NULL, NULL, NULL, NULL, 0);
if (r == TRUE) {
assert(mode == (PIPE_READMODE_BYTE | PIPE_WAIT));
} else {
fprintf(stderr, "libuv assertion failure: GetNamedPipeHandleState failed\n");
}
}
#endif
/* Do a blocking ConnectNamedPipe. This should not block because we have
* both ends of the pipe created. */
if (!ConnectNamedPipe(server_pipe, NULL)) {
if (GetLastError() != ERROR_PIPE_CONNECTED) {
err = GetLastError();
goto error;
}
}
*client_pipe_ptr = client_pipe;
*server_pipe_ptr = server_pipe;
return 0;
error:
if (server_pipe != INVALID_HANDLE_VALUE)
CloseHandle(server_pipe);
if (client_pipe != INVALID_HANDLE_VALUE)
CloseHandle(client_pipe);
return err;
}
int uv_pipe(uv_file fds[2], int read_flags, int write_flags) {
uv_file temp[2];
int err;
HANDLE readh;
HANDLE writeh;
/* Make the server side the inbound (read) end, so that both ends will
* have FILE_READ_ATTRIBUTES permission. */
/* TODO: better source of local randomness than &fds? */
read_flags |= UV_READABLE_PIPE;
write_flags |= UV_WRITABLE_PIPE;
err = uv__create_pipe_pair(&readh, &writeh, read_flags, write_flags, 0, (char*) &fds[0]);
if (err != 0)
return err;
temp[0] = _open_osfhandle((intptr_t) readh, 0);
if (temp[0] == -1) {
if (errno == EMFILE)
err = UV_EMFILE;
else
err = UV_UNKNOWN;
CloseHandle(readh);
CloseHandle(writeh);
return err;
}
temp[1] = _open_osfhandle((intptr_t) writeh, 0);
if (temp[1] == -1) {
if (errno == EMFILE)
err = UV_EMFILE;
else
err = UV_UNKNOWN;
_close(temp[0]);
CloseHandle(writeh);
return err;
}
fds[0] = temp[0];
fds[1] = temp[1];
return 0;
}
int uv__create_stdio_pipe_pair(uv_loop_t* loop,
uv_pipe_t* parent_pipe, HANDLE* child_pipe_ptr, unsigned int flags) {
/* The parent_pipe is always the server_pipe and kept by libuv.
* The child_pipe is always the client_pipe and is passed to the child.
* The flags are specified with respect to their usage in the child. */
HANDLE server_pipe;
HANDLE client_pipe;
unsigned int server_flags;
unsigned int client_flags;
int err;
server_pipe = INVALID_HANDLE_VALUE;
client_pipe = INVALID_HANDLE_VALUE;
server_flags = 0;
client_flags = 0;
if (flags & UV_READABLE_PIPE) {
/* The server needs inbound (read) access too, otherwise CreateNamedPipe()
* won't give us the FILE_READ_ATTRIBUTES permission. We need that to probe
* the state of the write buffer when we're trying to shutdown the pipe. */
server_flags |= UV_READABLE_PIPE | UV_WRITABLE_PIPE;
client_flags |= UV_READABLE_PIPE;
}
if (flags & UV_WRITABLE_PIPE) {
server_flags |= UV_READABLE_PIPE;
client_flags |= UV_WRITABLE_PIPE;
}
server_flags |= UV_NONBLOCK_PIPE;
if (flags & UV_NONBLOCK_PIPE || parent_pipe->ipc) {
client_flags |= UV_NONBLOCK_PIPE;
}
err = uv__create_pipe_pair(&server_pipe, &client_pipe,
server_flags, client_flags, 1, (char*) server_pipe);
if (err)
goto error;
if (CreateIoCompletionPort(server_pipe,
loop->iocp,
(ULONG_PTR) parent_pipe,
0) == NULL) {
err = GetLastError();
goto error;
}
uv_pipe_connection_init(parent_pipe);
parent_pipe->handle = server_pipe;
*child_pipe_ptr = client_pipe;
/* The server end is now readable and/or writable. */
if (flags & UV_READABLE_PIPE)
parent_pipe->flags |= UV_HANDLE_WRITABLE;
if (flags & UV_WRITABLE_PIPE)
parent_pipe->flags |= UV_HANDLE_READABLE;
return 0;
error:
if (server_pipe != INVALID_HANDLE_VALUE)
CloseHandle(server_pipe);
if (client_pipe != INVALID_HANDLE_VALUE)
CloseHandle(client_pipe);
return err;
}
static int uv_set_pipe_handle(uv_loop_t* loop,
uv_pipe_t* handle,
HANDLE pipeHandle,
@ -712,9 +911,8 @@ error:
handle->name = NULL;
}
if (pipeHandle != INVALID_HANDLE_VALUE) {
if (pipeHandle != INVALID_HANDLE_VALUE)
CloseHandle(pipeHandle);
}
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, err);

View File

@ -488,7 +488,8 @@ static int uv__poll_set(uv_poll_t* handle, int events, uv_poll_cb cb) {
assert(handle->type == UV_POLL);
assert(!(handle->flags & UV_HANDLE_CLOSING));
assert((events & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT)) == 0);
assert((events & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT |
UV_PRIORITIZED)) == 0);
handle->events = events;
handle->poll_cb = cb;

View File

@ -95,102 +95,6 @@ void uv_disable_stdio_inheritance(void) {
}
static int uv__create_stdio_pipe_pair(uv_loop_t* loop,
uv_pipe_t* server_pipe, HANDLE* child_pipe_ptr, unsigned int flags) {
char pipe_name[64];
SECURITY_ATTRIBUTES sa;
DWORD server_access = 0;
DWORD client_access = 0;
HANDLE child_pipe = INVALID_HANDLE_VALUE;
int err;
int overlap;
if (flags & UV_READABLE_PIPE) {
/* The server needs inbound access too, otherwise CreateNamedPipe() won't
* give us the FILE_READ_ATTRIBUTES permission. We need that to probe the
* state of the write buffer when we're trying to shutdown the pipe. */
server_access |= PIPE_ACCESS_OUTBOUND | PIPE_ACCESS_INBOUND;
client_access |= GENERIC_READ | FILE_WRITE_ATTRIBUTES;
}
if (flags & UV_WRITABLE_PIPE) {
server_access |= PIPE_ACCESS_INBOUND;
client_access |= GENERIC_WRITE | FILE_READ_ATTRIBUTES;
}
/* Create server pipe handle. */
err = uv_stdio_pipe_server(loop,
server_pipe,
server_access,
pipe_name,
sizeof(pipe_name));
if (err)
goto error;
/* Create child pipe handle. */
sa.nLength = sizeof sa;
sa.lpSecurityDescriptor = NULL;
sa.bInheritHandle = TRUE;
overlap = server_pipe->ipc || (flags & UV_OVERLAPPED_PIPE);
child_pipe = CreateFileA(pipe_name,
client_access,
0,
&sa,
OPEN_EXISTING,
overlap ? FILE_FLAG_OVERLAPPED : 0,
NULL);
if (child_pipe == INVALID_HANDLE_VALUE) {
err = GetLastError();
goto error;
}
#ifndef NDEBUG
/* Validate that the pipe was opened in the right mode. */
{
DWORD mode;
BOOL r = GetNamedPipeHandleState(child_pipe,
&mode,
NULL,
NULL,
NULL,
NULL,
0);
assert(r == TRUE);
assert(mode == (PIPE_READMODE_BYTE | PIPE_WAIT));
}
#endif
/* Do a blocking ConnectNamedPipe. This should not block because we have both
* ends of the pipe created. */
if (!ConnectNamedPipe(server_pipe->handle, NULL)) {
if (GetLastError() != ERROR_PIPE_CONNECTED) {
err = GetLastError();
goto error;
}
}
/* The server end is now readable and/or writable. */
if (flags & UV_READABLE_PIPE)
server_pipe->flags |= UV_HANDLE_WRITABLE;
if (flags & UV_WRITABLE_PIPE)
server_pipe->flags |= UV_HANDLE_READABLE;
*child_pipe_ptr = child_pipe;
return 0;
error:
if (server_pipe->handle != INVALID_HANDLE_VALUE) {
uv_pipe_cleanup(loop, server_pipe);
}
if (child_pipe != INVALID_HANDLE_VALUE) {
CloseHandle(child_pipe);
}
return err;
}
static int uv__duplicate_handle(uv_loop_t* loop, HANDLE handle, HANDLE* dup) {
HANDLE current_process;

View File

@ -642,7 +642,7 @@ int env_strncmp(const wchar_t* a, int na, const wchar_t* b) {
assert(r==nb);
B[nb] = L'\0';
while (1) {
for (;;) {
wchar_t AA = *A++;
wchar_t BB = *B++;
if (AA < BB) {

View File

@ -65,18 +65,11 @@ int uv_accept(uv_stream_t* server, uv_stream_t* client) {
}
int uv_read_start(uv_stream_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb) {
int uv__read_start(uv_stream_t* handle,
uv_alloc_cb alloc_cb,
uv_read_cb read_cb) {
int err;
if (handle->flags & UV_HANDLE_READING) {
return UV_EALREADY;
}
if (!(handle->flags & UV_HANDLE_READABLE)) {
return UV_ENOTCONN;
}
err = ERROR_INVALID_PARAMETER;
switch (handle->type) {
case UV_TCP:
@ -195,6 +188,16 @@ int uv_try_write(uv_stream_t* stream,
}
int uv_try_write2(uv_stream_t* stream,
const uv_buf_t bufs[],
unsigned int nbufs,
uv_stream_t* send_handle) {
if (send_handle != NULL)
return UV_EAGAIN;
return uv_try_write(stream, bufs, nbufs);
}
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
uv_loop_t* loop = handle->loop;

View File

@ -236,12 +236,7 @@ void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
if (handle->flags & UV_HANDLE_CLOSING &&
handle->reqs_pending == 0) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
if (!(handle->flags & UV_HANDLE_TCP_SOCKET_CLOSED)) {
closesocket(handle->socket);
handle->socket = INVALID_SOCKET;
handle->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
}
assert(handle->socket == INVALID_SOCKET);
if (!(handle->flags & UV_HANDLE_CONNECTION) && handle->tcp.serv.accept_reqs) {
if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
@ -599,6 +594,7 @@ int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
}
}
/* If this flag is set, we already made this listen call in xfer. */
if (!(handle->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
listen(handle->socket, backlog) == SOCKET_ERROR) {
return WSAGetLastError();
@ -769,7 +765,7 @@ static int uv__is_loopback(const struct sockaddr_storage* storage) {
}
// Check if Windows version is 10.0.16299 or later
static int uv__is_fast_loopback_fail_supported() {
static int uv__is_fast_loopback_fail_supported(void) {
OSVERSIONINFOW os_info;
if (!pRtlGetVersion)
return 0;
@ -800,9 +796,8 @@ static int uv_tcp_try_connect(uv_connect_t* req,
if (err)
return err;
if (handle->delayed_error) {
return handle->delayed_error;
}
if (handle->delayed_error != 0)
goto out;
if (!(handle->flags & UV_HANDLE_BOUND)) {
if (addrlen == sizeof(uv_addr_ip4_any_)) {
@ -815,8 +810,8 @@ static int uv_tcp_try_connect(uv_connect_t* req,
err = uv_tcp_try_bind(handle, bind_addr, addrlen, 0);
if (err)
return err;
if (handle->delayed_error)
return handle->delayed_error;
if (handle->delayed_error != 0)
goto out;
}
if (!handle->tcp.conn.func_connectex) {
@ -844,11 +839,21 @@ static int uv_tcp_try_connect(uv_connect_t* req,
NULL);
}
out:
UV_REQ_INIT(req, UV_CONNECT);
req->handle = (uv_stream_t*) handle;
req->cb = cb;
memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
if (handle->delayed_error != 0) {
/* Process the req without IOCP. */
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
uv_insert_pending_req(loop, (uv_req_t*)req);
return 0;
}
success = handle->tcp.conn.func_connectex(handle->socket,
(const struct sockaddr*) &converted,
addrlen,
@ -1015,6 +1020,7 @@ void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
*/
err = WSAECONNRESET;
}
handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
handle->read_cb((uv_stream_t*)handle,
uv_translate_sys_error(err),
@ -1096,6 +1102,7 @@ void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
* Unix. */
err = WSAECONNRESET;
}
handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
handle->read_cb((uv_stream_t*)handle,
uv_translate_sys_error(err),
@ -1149,9 +1156,14 @@ void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
}
handle->stream.conn.write_reqs_pending--;
if (handle->stream.conn.shutdown_req != NULL &&
handle->stream.conn.write_reqs_pending == 0) {
uv_want_endgame(loop, (uv_handle_t*)handle);
if (handle->stream.conn.write_reqs_pending == 0) {
if (handle->flags & UV_HANDLE_CLOSING) {
closesocket(handle->socket);
handle->socket = INVALID_SOCKET;
}
if (handle->stream.conn.shutdown_req != NULL) {
uv_want_endgame(loop, (uv_handle_t*)handle);
}
}
DECREASE_PENDING_REQ_COUNT(handle);
@ -1215,7 +1227,14 @@ void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
UNREGISTER_HANDLE_REQ(loop, handle, req);
err = 0;
if (REQ_SUCCESS(req)) {
if (handle->delayed_error) {
/* To smooth over the differences between unixes, errors that were
* reported synchronously on the first connect can be delayed until
* the next tick, which is now.
*/
err = handle->delayed_error;
handle->delayed_error = 0;
} else if (REQ_SUCCESS(req)) {
if (handle->flags & UV_HANDLE_CLOSING) {
/* use UV_ECANCELED for consistency with Unix */
err = ERROR_OPERATION_ABORTED;
@ -1320,7 +1339,7 @@ int uv_tcp_nodelay(uv_tcp_t* handle, int enable) {
if (handle->socket != INVALID_SOCKET) {
err = uv__tcp_nodelay(handle, handle->socket, enable);
if (err)
return err;
return uv_translate_sys_error(err);
}
if (enable) {
@ -1339,7 +1358,7 @@ int uv_tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay) {
if (handle->socket != INVALID_SOCKET) {
err = uv__tcp_keepalive(handle, handle->socket, enable, delay);
if (err)
return err;
return uv_translate_sys_error(err);
}
if (enable) {
@ -1386,9 +1405,24 @@ int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
}
static int uv_tcp_try_cancel_io(uv_tcp_t* tcp) {
SOCKET socket = tcp->socket;
static void uv_tcp_try_cancel_reqs(uv_tcp_t* tcp) {
SOCKET socket;
int non_ifs_lsp;
int reading;
int writing;
socket = tcp->socket;
reading = tcp->flags & UV_HANDLE_READING;
writing = tcp->stream.conn.write_reqs_pending > 0;
if (!reading && !writing)
return;
/* TODO: in libuv v2, keep explicit track of write_reqs, so we can cancel
* them each explicitly with CancelIoEx (like unix). */
if (reading)
CancelIoEx((HANDLE) socket, &tcp->read_req.u.io.overlapped);
if (writing)
CancelIo((HANDLE) socket);
/* Check if we have any non-IFS LSPs stacked on top of TCP */
non_ifs_lsp = (tcp->flags & UV_HANDLE_IPV6) ? uv_tcp_non_ifs_lsp_ipv6 :
@ -1408,71 +1442,41 @@ static int uv_tcp_try_cancel_io(uv_tcp_t* tcp) {
NULL,
NULL) != 0) {
/* Failed. We can't do CancelIo. */
return -1;
return;
}
}
assert(socket != 0 && socket != INVALID_SOCKET);
if (!CancelIo((HANDLE) socket)) {
return GetLastError();
if (socket != tcp->socket) {
if (reading)
CancelIoEx((HANDLE) socket, &tcp->read_req.u.io.overlapped);
if (writing)
CancelIo((HANDLE) socket);
}
/* It worked. */
return 0;
}
void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
int close_socket = 1;
if (tcp->flags & UV_HANDLE_READ_PENDING) {
/* In order for winsock to do a graceful close there must not be any
* pending reads, or the socket must be shut down for writing */
if (!(tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET)) {
/* Just do shutdown on non-shared sockets, which ensures graceful close. */
shutdown(tcp->socket, SD_SEND);
} else if (uv_tcp_try_cancel_io(tcp) == 0) {
/* In case of a shared socket, we try to cancel all outstanding I/O,. If
* that works, don't close the socket yet - wait for the read req to
* return and close the socket in uv_tcp_endgame. */
close_socket = 0;
} else {
/* When cancelling isn't possible - which could happen when an LSP is
* present on an old Windows version, we will have to close the socket
* with a read pending. That is not nice because trailing sent bytes may
* not make it to the other side. */
if (tcp->flags & UV_HANDLE_CONNECTION) {
uv_tcp_try_cancel_reqs(tcp);
if (tcp->flags & UV_HANDLE_READING) {
uv_read_stop((uv_stream_t*) tcp);
}
} else if ((tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
tcp->tcp.serv.accept_reqs != NULL) {
/* Under normal circumstances closesocket() will ensure that all pending
* accept reqs are canceled. However, when the socket is shared the
* presence of another reference to the socket in another process will keep
* the accept reqs going, so we have to ensure that these are canceled. */
if (uv_tcp_try_cancel_io(tcp) != 0) {
/* When cancellation is not possible, there is another option: we can
* close the incoming sockets, which will also cancel the accept
* operations. However this is not cool because we might inadvertently
* close a socket that just accepted a new connection, which will cause
* the connection to be aborted. */
} else {
if (tcp->tcp.serv.accept_reqs != NULL) {
/* First close the incoming sockets to cancel the accept operations before
* we free their resources. */
unsigned int i;
for (i = 0; i < uv_simultaneous_server_accepts; i++) {
uv_tcp_accept_t* req = &tcp->tcp.serv.accept_reqs[i];
if (req->accept_socket != INVALID_SOCKET &&
!HasOverlappedIoCompleted(&req->u.io.overlapped)) {
if (req->accept_socket != INVALID_SOCKET) {
closesocket(req->accept_socket);
req->accept_socket = INVALID_SOCKET;
}
}
}
}
if (tcp->flags & UV_HANDLE_READING) {
tcp->flags &= ~UV_HANDLE_READING;
DECREASE_ACTIVE_COUNT(loop, tcp);
assert(!(tcp->flags & UV_HANDLE_READING));
}
if (tcp->flags & UV_HANDLE_LISTENING) {
@ -1480,10 +1484,15 @@ void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
DECREASE_ACTIVE_COUNT(loop, tcp);
}
if (close_socket) {
/* If any overlapped req failed to cancel, calling `closesocket` now would
* cause Win32 to send an RST packet. Try to avoid that for writes, where
* applicable, by waiting to process the completion notifications first
* (which typically should be cancellations). There's not much we can do
* about canceled reads, which also will generate an RST packet. */
if (!(tcp->flags & UV_HANDLE_CONNECTION) ||
tcp->stream.conn.write_reqs_pending == 0) {
closesocket(tcp->socket);
tcp->socket = INVALID_SOCKET;
tcp->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
}
tcp->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
@ -1571,3 +1580,118 @@ int uv__tcp_connect(uv_connect_t* req,
return 0;
}
#ifndef WSA_FLAG_NO_HANDLE_INHERIT
/* Added in Windows 7 SP1. Specify this to avoid race conditions, but also
* manually clear the inherit flag in case this fails. */
#define WSA_FLAG_NO_HANDLE_INHERIT 0x80
#endif
int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int flags1) {
SOCKET server = INVALID_SOCKET;
SOCKET client0 = INVALID_SOCKET;
SOCKET client1 = INVALID_SOCKET;
SOCKADDR_IN name;
LPFN_ACCEPTEX func_acceptex;
WSAOVERLAPPED overlap;
char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32];
int namelen;
int err;
DWORD bytes;
DWORD flags;
DWORD client0_flags = WSA_FLAG_NO_HANDLE_INHERIT;
DWORD client1_flags = WSA_FLAG_NO_HANDLE_INHERIT;
if (flags0 & UV_NONBLOCK_PIPE)
client0_flags |= WSA_FLAG_OVERLAPPED;
if (flags1 & UV_NONBLOCK_PIPE)
client1_flags |= WSA_FLAG_OVERLAPPED;
server = WSASocketW(AF_INET, type, protocol, NULL, 0,
WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT);
if (server == INVALID_SOCKET)
goto wsaerror;
if (!SetHandleInformation((HANDLE) server, HANDLE_FLAG_INHERIT, 0))
goto error;
name.sin_family = AF_INET;
name.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
name.sin_port = 0;
if (bind(server, (SOCKADDR*) &name, sizeof(name)) != 0)
goto wsaerror;
if (listen(server, 1) != 0)
goto wsaerror;
namelen = sizeof(name);
if (getsockname(server, (SOCKADDR*) &name, &namelen) != 0)
goto wsaerror;
client0 = WSASocketW(AF_INET, type, protocol, NULL, 0, client0_flags);
if (client0 == INVALID_SOCKET)
goto wsaerror;
if (!SetHandleInformation((HANDLE) client0, HANDLE_FLAG_INHERIT, 0))
goto error;
if (connect(client0, (SOCKADDR*) &name, sizeof(name)) != 0)
goto wsaerror;
client1 = WSASocketW(AF_INET, type, protocol, NULL, 0, client1_flags);
if (client1 == INVALID_SOCKET)
goto wsaerror;
if (!SetHandleInformation((HANDLE) client1, HANDLE_FLAG_INHERIT, 0))
goto error;
if (!uv_get_acceptex_function(server, &func_acceptex)) {
err = WSAEAFNOSUPPORT;
goto cleanup;
}
memset(&overlap, 0, sizeof(overlap));
if (!func_acceptex(server,
client1,
accept_buffer,
0,
sizeof(struct sockaddr_storage),
sizeof(struct sockaddr_storage),
&bytes,
&overlap)) {
err = WSAGetLastError();
if (err == ERROR_IO_PENDING) {
/* The result should complete immediately, since we already called connect,
* but empirically we sometimes have to poll the kernel a couple of times
* until it notices that. */
while (!WSAGetOverlappedResult(client1, &overlap, &bytes, FALSE, &flags)) {
err = WSAGetLastError();
if (err != WSA_IO_INCOMPLETE)
goto cleanup;
SwitchToThread();
}
}
else {
goto cleanup;
}
}
if (setsockopt(client1, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
(char*) &server, sizeof(server)) != 0) {
goto wsaerror;
}
closesocket(server);
fds[0] = client0;
fds[1] = client1;
return 0;
wsaerror:
err = WSAGetLastError();
goto cleanup;
error:
err = GetLastError();
goto cleanup;
cleanup:
if (server != INVALID_SOCKET)
closesocket(server);
if (client0 != INVALID_SOCKET)
closesocket(client0);
if (client1 != INVALID_SOCKET)
closesocket(client1);
assert(err);
return uv_translate_sys_error(err);
}

View File

@ -284,7 +284,7 @@ static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
handle->flags &= ~UV_HANDLE_ZERO_READ;
handle->recv_buffer = uv_buf_init(NULL, 0);
handle->alloc_cb((uv_handle_t*) handle, 65536, &handle->recv_buffer);
handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &handle->recv_buffer);
if (handle->recv_buffer.base == NULL || handle->recv_buffer.len == 0) {
handle->recv_cb(handle, UV_ENOBUFS, &handle->recv_buffer, NULL, 0);
return;
@ -501,7 +501,7 @@ void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle,
/* Do a nonblocking receive.
* TODO: try to read multiple datagrams at once. FIONREAD maybe? */
buf = uv_buf_init(NULL, 0);
handle->alloc_cb((uv_handle_t*) handle, 65536, &buf);
handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
if (buf.base == NULL || buf.len == 0) {
handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
goto done;

View File

@ -1664,26 +1664,33 @@ int uv_os_unsetenv(const char* name) {
int uv_os_gethostname(char* buffer, size_t* size) {
char buf[UV_MAXHOSTNAMESIZE];
WCHAR buf[UV_MAXHOSTNAMESIZE];
size_t len;
char* utf8_str;
int convert_result;
if (buffer == NULL || size == NULL || *size == 0)
return UV_EINVAL;
uv__once_init(); /* Initialize winsock */
if (gethostname(buf, sizeof(buf)) != 0)
if (GetHostNameW(buf, UV_MAXHOSTNAMESIZE) != 0)
return uv_translate_sys_error(WSAGetLastError());
buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */
len = strlen(buf);
convert_result = uv__convert_utf16_to_utf8(buf, -1, &utf8_str);
if (convert_result != 0)
return convert_result;
len = strlen(utf8_str);
if (len >= *size) {
*size = len + 1;
uv__free(utf8_str);
return UV_ENOBUFS;
}
memcpy(buffer, buf, len + 1);
memcpy(buffer, utf8_str, len + 1);
uv__free(utf8_str);
*size = len;
return 0;
}
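
Editor's note, not part of this diff: a sketch of the caller-side contract, unchanged by this patch but worth restating since the UTF-8 result can be longer than the old ANSI one — size the buffer with UV_MAXHOSTNAMESIZE and treat UV_ENOBUFS as a resize hint.

#include <uv.h>
#include <stdio.h>

int hostname_demo(void) {
  char name[UV_MAXHOSTNAMESIZE];
  size_t size = sizeof(name);
  int err;

  err = uv_os_gethostname(name, &size);
  if (err != 0)
    return err;  /* on UV_ENOBUFS, `size` holds the required length */
  printf("hostname: %s\n", name);
  return 0;
}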