libuv 1.46.0.

git-svn-id: https://www.unprompted.com/svn/projects/tildefriends/trunk@4336 ed5197a5-7fde-0310-b194-c3ffbd925b24
Cory McWilliams 2023-07-04 00:24:48 +00:00
parent f0452704a1
commit ae0a8b0a33
65 changed files with 1519 additions and 755 deletions


@ -6,6 +6,7 @@ sphinx:
fail_on_warning: false
build:
os: "ubuntu-22.04"
tools:
python: "3.9"

deps/libuv/AUTHORS vendored

@ -542,3 +542,9 @@ Lewis Russell <me@lewisr.dev>
sivadeilra <arlie.davis@gmail.com>
cui fliter <imcusg@gmail.com>
Mohammed Keyvanzadeh <mohammadkeyvanzade94@gmail.com>
Niklas Mischkulnig <4586894+mischnic@users.noreply.github.com>
Stefan Karpinski <stefan@karpinski.org>
liuxiang88 <94350585+liuxiang88@users.noreply.github.com>
Jeffrey H. Johnson <trnsz@pobox.com>
Abdirahim Musse <33973272+abmusse@users.noreply.github.com>
小明 <7737673+caobug@users.noreply.github.com>

deps/libuv/ChangeLog vendored

@ -1,4 +1,63 @@
2023.05.19, Version 1.45.0 (Stable)
2023.06.30, Version 1.46.0 (Stable)
Changes since version 1.45.0:
* Add SHA to ChangeLog (Santiago Gimeno)
* misc: update readthedocs config (Jameson Nash)
* test: remove erroneous RETURN_SKIP (Ben Noordhuis)
* android: disable io_uring support (Ben Noordhuis)
* linux: add some more iouring backed fs ops (Santiago Gimeno)
* build: add autoconf option for disable-maintainer-mode (Jameson Nash)
* fs: use WTF-8 on Windows (Stefan Karpinski)
* unix,win: replace QUEUE with struct uv__queue (Ben Noordhuis)
* linux: fs_read to use io_uring if iovcnt > IOV_MAX (Santiago Gimeno)
* ios: fix uv_getrusage() ru_maxrss calculation (Ben Noordhuis)
* include: update outdated code comment (Ben Noordhuis)
* linux: support abstract unix sockets (Ben Noordhuis)
* unix,win: add UV_PIPE_NO_TRUNCATE flag (Ben Noordhuis)
* unix: add loongarch support (liuxiang88)
* doc: add DPS8M to LINKS.md (Jeffrey H. Johnson)
* include: add EUNATCH errno mapping (Abdirahim Musse)
* src: don't run timers if loop is stopped/unref'd (Trevor Norris)
* win: fix -Wpointer-to-int-cast warning (Ben Noordhuis)
* test,win: fix -Wunused-variable warning (Ben Noordhuis)
* test,win: fix -Wformat warning (Ben Noordhuis)
* linux: work around io_uring IORING_OP_CLOSE bug (Ben Noordhuis)
* win: remove unused functions (Ben Noordhuis)
* bench: add bench to check uv_loop_alive (Trevor Norris)
* test: add uv_cancel test for threadpool (Trevor Norris)
* unix: skip prohibited syscalls on tvOS and watchOS (小明)
* unix,fs: make no_pwritev access thread-safe (Santiago Gimeno)
* unix: fix build for lower versions of Android (小明)
2023.05.19, Version 1.45.0 (Stable), 96e05543f53b19d9642b4b0dd73b86ad3cea313e
Changes since version 1.44.2:

deps/libuv/LINKS.md vendored

@ -6,7 +6,8 @@
* [clearskies_core](https://github.com/larroy/clearskies_core): Clearskies file synchronization program. (C++11)
* [CMake](https://cmake.org) open-source, cross-platform family of tools designed to build, test and package software
* [Cocos-Engine](https://github.com/cocos/cocos-engine): The runtime framework for Cocos Creator editor.
* [Coherence](https://github.com/liesware/coherence/): Cryptographic server for modern web apps.
* [DPS8M](https://dps8m.gitlab.io): GE Honeywell Bull DPS8/M and 6180/L68 mainframe simulator.
* [DPS-For-IoT](https://github.com/intel/dps-for-iot/wiki): Fully distributed publish/subscribe protocol.
* [HashLink](https://github.com/HaxeFoundation/hashlink): Haxe run-time with libuv support included.
* [Haywire](https://github.com/kellabyte/Haywire): Asynchronous HTTP server.


@ -13,12 +13,13 @@
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
AC_PREREQ(2.57)
AC_INIT([libuv], [1.45.0], [https://github.com/libuv/libuv/issues])
AC_INIT([libuv], [1.46.0], [https://github.com/libuv/libuv/issues])
AC_CONFIG_MACRO_DIR([m4])
m4_include([m4/libuv-extra-automake-flags.m4])
m4_include([m4/as_case.m4])
m4_include([m4/libuv-check-flags.m4])
AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects] UV_EXTRA_AUTOMAKE_FLAGS)
AM_MAINTAINER_MODE([enable]) # pass --disable-maintainer-mode if autotools may be unavailable
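# Illustrative usage, not in the upstream file: maintainer mode is enabled by
# default, so builders without the autotools installed can run
#   ./configure --disable-maintainer-mode
# to skip the rules that would regenerate configure and the Makefiles.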
AC_CANONICAL_HOST
AC_ENABLE_SHARED
AC_ENABLE_STATIC


@ -339,6 +339,9 @@ Error constants
socket type not supported
.. c:macro:: UV_EUNATCH
protocol driver not attached
API
---


@ -55,17 +55,61 @@ API
Bind the pipe to a file path (Unix) or a name (Windows).
Does not support Linux abstract namespace sockets,
unlike :c:func:`uv_pipe_bind2`.
Alias for ``uv_pipe_bind2(handle, name, strlen(name), 0)``.
.. note::
Paths on Unix get truncated to ``sizeof(sockaddr_un.sun_path)`` bytes,
typically between 92 and 108 bytes.
.. c:function:: int uv_pipe_bind2(uv_pipe_t* handle, const char* name, size_t namelen, unsigned int flags)
Bind the pipe to a file path (Unix) or a name (Windows).
``flags`` must be zero or ``UV_PIPE_NO_TRUNCATE``. Returns ``UV_EINVAL``
for unsupported flags without performing the bind operation.
Supports Linux abstract namespace sockets. ``namelen`` must include
the leading nul byte but not the trailing nul byte.
.. versionadded:: 1.46.0
.. note::
Paths on Unix get truncated to ``sizeof(sockaddr_un.sun_path)`` bytes,
typically between 92 and 108 bytes, unless the ``UV_PIPE_NO_TRUNCATE``
flag is specified, in which case an over-long path makes the call fail
with ``UV_EINVAL`` instead of being truncated.
.. c:function:: void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle, const char* name, uv_connect_cb cb)
Connect to the Unix domain socket or the named pipe.
Connect to the Unix domain socket or the Windows named pipe.
Does not support Linux abstract namespace sockets,
unlike :c:func:`uv_pipe_connect2`.
Alias for ``uv_pipe_connect2(req, handle, name, strlen(name), 0, cb)``.
.. note::
Paths on Unix get truncated to ``sizeof(sockaddr_un.sun_path)`` bytes,
typically between 92 and 108 bytes.
.. c:function:: void uv_pipe_connect2(uv_connect_t* req, uv_pipe_t* handle, const char* name, size_t namelen, unsigned int flags, uv_connect_cb cb)
Connect to the Unix domain socket or the Windows named pipe.
``flags`` must be zero or ``UV_PIPE_NO_TRUNCATE``. Returns ``UV_EINVAL``
for unsupported flags without performing the connect operation.
Supports Linux abstract namespace sockets. ``namelen`` must include
the leading nul byte but not the trailing nul byte.
.. versionadded:: 1.46.0
.. note::
Paths on Unix get truncated to ``sizeof(sockaddr_un.sun_path)`` bytes,
typically between 92 and 108 bytes, unless the ``UV_PIPE_NO_TRUNCATE``
flag is specified, in which case an over-long path makes the call fail
with ``UV_EINVAL`` instead of being truncated.
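For illustration, a minimal sketch of the new API under the semantics
described above; the abstract-socket name "\0demo", the helper name, and the
error handling are assumptions for the example, not part of the upstream docs:

    #include <uv.h>

    static void on_connection(uv_stream_t* server, int status) {
      /* Accept and handle incoming clients here. */
    }

    int listen_abstract(uv_loop_t* loop, uv_pipe_t* server) {
      /* The leading nul selects the Linux abstract namespace; namelen
       * counts that leading nul but no trailing one, per the docs above. */
      static const char name[] = "\0demo";
      int err;

      err = uv_pipe_init(loop, server, 0);
      if (err != 0)
        return err;

      /* UV_PIPE_NO_TRUNCATE turns silent truncation into a UV_EINVAL error. */
      err = uv_pipe_bind2(server, name, sizeof(name) - 1, UV_PIPE_NO_TRUNCATE);
      if (err != 0)
        return err;

      return uv_listen((uv_stream_t*) server, 128, on_connection);
    }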
.. c:function:: int uv_pipe_getsockname(const uv_pipe_t* handle, char* buffer, size_t* size)


@ -59,6 +59,12 @@ extern "C" {
#include <stdio.h>
#include <stdint.h>
/* Internal type, do not use. */
struct uv__queue {
struct uv__queue* next;
struct uv__queue* prev;
};
#if defined(_WIN32)
# include "uv/win.h"
#else
@ -150,6 +156,7 @@ extern "C" {
XX(EILSEQ, "illegal byte sequence") \
XX(ESOCKTNOSUPPORT, "socket type not supported") \
XX(ENODATA, "no data available") \
XX(EUNATCH, "protocol driver not attached") \
#define UV_HANDLE_TYPE_MAP(XX) \
XX(ASYNC, async) \
@ -283,13 +290,13 @@ UV_EXTERN int uv_loop_init(uv_loop_t* loop);
UV_EXTERN int uv_loop_close(uv_loop_t* loop);
/*
* NOTE:
* This function is DEPRECATED (to be removed after 0.12), users should
* This function is DEPRECATED, users should
* allocate the loop manually and use uv_loop_init instead.
*/
UV_EXTERN uv_loop_t* uv_loop_new(void);
/*
* NOTE:
* This function is DEPRECATED (to be removed after 0.12). Users should use
* This function is DEPRECATED. Users should use
* uv_loop_close and free the memory manually instead.
*/
UV_EXTERN void uv_loop_delete(uv_loop_t*);
@ -459,7 +466,7 @@ struct uv_shutdown_s {
uv_handle_type type; \
/* private */ \
uv_close_cb close_cb; \
void* handle_queue[2]; \
struct uv__queue handle_queue; \
union { \
int fd; \
void* reserved[4]; \
@ -801,6 +808,10 @@ inline int uv_tty_set_mode(uv_tty_t* handle, int mode) {
UV_EXTERN uv_handle_type uv_guess_handle(uv_file file);
enum {
UV_PIPE_NO_TRUNCATE = 1u << 0
};
/*
* uv_pipe_t is a subclass of uv_stream_t.
*
@ -817,10 +828,20 @@ struct uv_pipe_s {
UV_EXTERN int uv_pipe_init(uv_loop_t*, uv_pipe_t* handle, int ipc);
UV_EXTERN int uv_pipe_open(uv_pipe_t*, uv_file file);
UV_EXTERN int uv_pipe_bind(uv_pipe_t* handle, const char* name);
UV_EXTERN int uv_pipe_bind2(uv_pipe_t* handle,
const char* name,
size_t namelen,
unsigned int flags);
UV_EXTERN void uv_pipe_connect(uv_connect_t* req,
uv_pipe_t* handle,
const char* name,
uv_connect_cb cb);
UV_EXTERN int uv_pipe_connect2(uv_connect_t* req,
uv_pipe_t* handle,
const char* name,
size_t namelen,
unsigned int flags,
uv_connect_cb cb);
UV_EXTERN int uv_pipe_getsockname(const uv_pipe_t* handle,
char* buffer,
size_t* size);
@ -1849,7 +1870,7 @@ struct uv_loop_s {
void* data;
/* Loop reference counting. */
unsigned int active_handles;
void* handle_queue[2];
struct uv__queue handle_queue;
union {
void* unused;
unsigned int count;


@ -40,7 +40,7 @@
void* cf_state; \
uv_mutex_t cf_mutex; \
uv_sem_t cf_sem; \
void* cf_signals[2]; \
struct uv__queue cf_signals; \
#define UV_PLATFORM_FS_EVENT_FIELDS \
uv__io_t event_watcher; \
@ -48,8 +48,8 @@
int realpath_len; \
int cf_flags; \
uv_async_t* cf_cb; \
void* cf_events[2]; \
void* cf_member[2]; \
struct uv__queue cf_events; \
struct uv__queue cf_member; \
int cf_error; \
uv_mutex_t cf_mutex; \


@ -468,4 +468,10 @@
# define UV__ENODATA (-4024)
#endif
#if defined(EUNATCH) && !defined(_WIN32)
# define UV__EUNATCH UV__ERR(EUNATCH)
#else
# define UV__EUNATCH (-4023)
#endif
#endif /* UV_ERRNO_H_ */


@ -28,7 +28,7 @@
int inotify_fd; \
#define UV_PLATFORM_FS_EVENT_FIELDS \
void* watchers[2]; \
struct uv__queue watchers; \
int wd; \
#endif /* UV_LINUX_H */


@ -31,7 +31,7 @@ struct uv__work {
void (*work)(struct uv__work *w);
void (*done)(struct uv__work *w, int status);
struct uv_loop_s* loop;
void* wq[2];
struct uv__queue wq;
};
#endif /* UV_THREADPOOL_H_ */


@ -92,8 +92,8 @@ typedef struct uv__io_s uv__io_t;
struct uv__io_s {
uv__io_cb cb;
void* pending_queue[2];
void* watcher_queue[2];
struct uv__queue pending_queue;
struct uv__queue watcher_queue;
unsigned int pevents; /* Pending event mask i.e. mask at next tick. */
unsigned int events; /* Current event mask. */
int fd;
@ -220,21 +220,21 @@ typedef struct {
#define UV_LOOP_PRIVATE_FIELDS \
unsigned long flags; \
int backend_fd; \
void* pending_queue[2]; \
void* watcher_queue[2]; \
struct uv__queue pending_queue; \
struct uv__queue watcher_queue; \
uv__io_t** watchers; \
unsigned int nwatchers; \
unsigned int nfds; \
void* wq[2]; \
struct uv__queue wq; \
uv_mutex_t wq_mutex; \
uv_async_t wq_async; \
uv_rwlock_t cloexec_lock; \
uv_handle_t* closing_handles; \
void* process_handles[2]; \
void* prepare_handles[2]; \
void* check_handles[2]; \
void* idle_handles[2]; \
void* async_handles[2]; \
struct uv__queue process_handles; \
struct uv__queue prepare_handles; \
struct uv__queue check_handles; \
struct uv__queue idle_handles; \
struct uv__queue async_handles; \
void (*async_unused)(void); /* TODO(bnoordhuis) Remove in libuv v2. */ \
uv__io_t async_io_watcher; \
int async_wfd; \
@ -257,7 +257,7 @@ typedef struct {
#define UV_PRIVATE_REQ_TYPES /* empty */
#define UV_WRITE_PRIVATE_FIELDS \
void* queue[2]; \
struct uv__queue queue; \
unsigned int write_index; \
uv_buf_t* bufs; \
unsigned int nbufs; \
@ -265,12 +265,12 @@ typedef struct {
uv_buf_t bufsml[4]; \
#define UV_CONNECT_PRIVATE_FIELDS \
void* queue[2]; \
struct uv__queue queue; \
#define UV_SHUTDOWN_PRIVATE_FIELDS /* empty */
#define UV_UDP_SEND_PRIVATE_FIELDS \
void* queue[2]; \
struct uv__queue queue; \
struct sockaddr_storage addr; \
unsigned int nbufs; \
uv_buf_t* bufs; \
@ -286,8 +286,8 @@ typedef struct {
uv_connect_t *connect_req; \
uv_shutdown_t *shutdown_req; \
uv__io_t io_watcher; \
void* write_queue[2]; \
void* write_completed_queue[2]; \
struct uv__queue write_queue; \
struct uv__queue write_completed_queue; \
uv_connection_cb connection_cb; \
int delayed_error; \
int accepted_fd; \
@ -300,30 +300,30 @@ typedef struct {
uv_alloc_cb alloc_cb; \
uv_udp_recv_cb recv_cb; \
uv__io_t io_watcher; \
void* write_queue[2]; \
void* write_completed_queue[2]; \
struct uv__queue write_queue; \
struct uv__queue write_completed_queue; \
#define UV_PIPE_PRIVATE_FIELDS \
const char* pipe_fname; /* strdup'ed */
const char* pipe_fname; /* NULL or strdup'ed */
#define UV_POLL_PRIVATE_FIELDS \
uv__io_t io_watcher;
#define UV_PREPARE_PRIVATE_FIELDS \
uv_prepare_cb prepare_cb; \
void* queue[2]; \
struct uv__queue queue; \
#define UV_CHECK_PRIVATE_FIELDS \
uv_check_cb check_cb; \
void* queue[2]; \
struct uv__queue queue; \
#define UV_IDLE_PRIVATE_FIELDS \
uv_idle_cb idle_cb; \
void* queue[2]; \
struct uv__queue queue; \
#define UV_ASYNC_PRIVATE_FIELDS \
uv_async_cb async_cb; \
void* queue[2]; \
struct uv__queue queue; \
int pending; \
#define UV_TIMER_PRIVATE_FIELDS \
@ -352,7 +352,7 @@ typedef struct {
int retcode;
#define UV_PROCESS_PRIVATE_FIELDS \
void* queue[2]; \
struct uv__queue queue; \
int status; \
#define UV_FS_PRIVATE_FIELDS \
@ -417,6 +417,8 @@ typedef struct {
# define UV_FS_O_DIRECT 0x04000
#elif defined(__linux__) && defined(__x86_64__)
# define UV_FS_O_DIRECT 0x04000
#elif defined(__linux__) && defined(__loongarch__)
# define UV_FS_O_DIRECT 0x04000
#elif defined(O_DIRECT)
# define UV_FS_O_DIRECT O_DIRECT
#else


@ -31,7 +31,7 @@
*/
#define UV_VERSION_MAJOR 1
#define UV_VERSION_MINOR 45
#define UV_VERSION_MINOR 46
#define UV_VERSION_PATCH 0
#define UV_VERSION_IS_RELEASE 1
#define UV_VERSION_SUFFIX ""


@ -357,7 +357,7 @@ typedef struct {
/* Counter to started timer */ \
uint64_t timer_counter; \
/* Threadpool */ \
void* wq[2]; \
struct uv__queue wq; \
uv_mutex_t wq_mutex; \
uv_async_t wq_async;
@ -486,7 +486,7 @@ typedef struct {
uint32_t payload_remaining; \
uint64_t dummy; /* TODO: retained for ABI compat; remove this in v2.x. */ \
} ipc_data_frame; \
void* ipc_xfer_queue[2]; \
struct uv__queue ipc_xfer_queue; \
int ipc_xfer_queue_length; \
uv_write_t* non_overlapped_writes_tail; \
CRITICAL_SECTION readfile_thread_lock; \

deps/libuv/src/queue.h vendored

@ -18,91 +18,73 @@
#include <stddef.h>
typedef void *QUEUE[2];
#define uv__queue_data(pointer, type, field) \
((type*) ((char*) (pointer) - offsetof(type, field)))
/* Private macros. */
#define QUEUE_NEXT(q) (*(QUEUE **) &((*(q))[0]))
#define QUEUE_PREV(q) (*(QUEUE **) &((*(q))[1]))
#define QUEUE_PREV_NEXT(q) (QUEUE_NEXT(QUEUE_PREV(q)))
#define QUEUE_NEXT_PREV(q) (QUEUE_PREV(QUEUE_NEXT(q)))
#define uv__queue_foreach(q, h) \
for ((q) = (h)->next; (q) != (h); (q) = (q)->next)
/* Public macros. */
#define QUEUE_DATA(ptr, type, field) \
((type *) ((char *) (ptr) - offsetof(type, field)))
static inline void uv__queue_init(struct uv__queue* q) {
q->next = q;
q->prev = q;
}
/* Important note: mutating the list while QUEUE_FOREACH is
* iterating over its elements results in undefined behavior.
*/
#define QUEUE_FOREACH(q, h) \
for ((q) = QUEUE_NEXT(h); (q) != (h); (q) = QUEUE_NEXT(q))
static inline int uv__queue_empty(const struct uv__queue* q) {
return q == q->next;
}
#define QUEUE_EMPTY(q) \
((const QUEUE *) (q) == (const QUEUE *) QUEUE_NEXT(q))
static inline struct uv__queue* uv__queue_head(const struct uv__queue* q) {
return q->next;
}
#define QUEUE_HEAD(q) \
(QUEUE_NEXT(q))
static inline struct uv__queue* uv__queue_next(const struct uv__queue* q) {
return q->next;
}
#define QUEUE_INIT(q) \
do { \
QUEUE_NEXT(q) = (q); \
QUEUE_PREV(q) = (q); \
} \
while (0)
static inline void uv__queue_add(struct uv__queue* h, struct uv__queue* n) {
h->prev->next = n->next;
n->next->prev = h->prev;
h->prev = n->prev;
h->prev->next = h;
}
#define QUEUE_ADD(h, n) \
do { \
QUEUE_PREV_NEXT(h) = QUEUE_NEXT(n); \
QUEUE_NEXT_PREV(n) = QUEUE_PREV(h); \
QUEUE_PREV(h) = QUEUE_PREV(n); \
QUEUE_PREV_NEXT(h) = (h); \
} \
while (0)
static inline void uv__queue_split(struct uv__queue* h,
struct uv__queue* q,
struct uv__queue* n) {
n->prev = h->prev;
n->prev->next = n;
n->next = q;
h->prev = q->prev;
h->prev->next = h;
q->prev = n;
}
#define QUEUE_SPLIT(h, q, n) \
do { \
QUEUE_PREV(n) = QUEUE_PREV(h); \
QUEUE_PREV_NEXT(n) = (n); \
QUEUE_NEXT(n) = (q); \
QUEUE_PREV(h) = QUEUE_PREV(q); \
QUEUE_PREV_NEXT(h) = (h); \
QUEUE_PREV(q) = (n); \
} \
while (0)
static inline void uv__queue_move(struct uv__queue* h, struct uv__queue* n) {
if (uv__queue_empty(h))
uv__queue_init(n);
else
uv__queue_split(h, h->next, n);
}
#define QUEUE_MOVE(h, n) \
do { \
if (QUEUE_EMPTY(h)) \
QUEUE_INIT(n); \
else { \
QUEUE* q = QUEUE_HEAD(h); \
QUEUE_SPLIT(h, q, n); \
} \
} \
while (0)
static inline void uv__queue_insert_head(struct uv__queue* h,
struct uv__queue* q) {
q->next = h->next;
q->prev = h;
q->next->prev = q;
h->next = q;
}
#define QUEUE_INSERT_HEAD(h, q) \
do { \
QUEUE_NEXT(q) = QUEUE_NEXT(h); \
QUEUE_PREV(q) = (h); \
QUEUE_NEXT_PREV(q) = (q); \
QUEUE_NEXT(h) = (q); \
} \
while (0)
static inline void uv__queue_insert_tail(struct uv__queue* h,
struct uv__queue* q) {
q->next = h;
q->prev = h->prev;
q->prev->next = q;
h->prev = q;
}
#define QUEUE_INSERT_TAIL(h, q) \
do { \
QUEUE_NEXT(q) = (h); \
QUEUE_PREV(q) = QUEUE_PREV(h); \
QUEUE_PREV_NEXT(q) = (q); \
QUEUE_PREV(h) = (q); \
} \
while (0)
#define QUEUE_REMOVE(q) \
do { \
QUEUE_PREV_NEXT(q) = QUEUE_NEXT(q); \
QUEUE_NEXT_PREV(q) = QUEUE_PREV(q); \
} \
while (0)
static inline void uv__queue_remove(struct uv__queue* q) {
q->prev->next = q->next;
q->next->prev = q->prev;
}
#endif /* QUEUE_H_ */
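By way of illustration (not part of this commit), the inline helpers above
implement the same intrusive doubly-linked list the old macros did. A small
usage sketch, in which the item type and its member field are hypothetical:

    #include <stdio.h>
    #include <uv.h>      /* struct uv__queue */
    #include "queue.h"   /* the internal helpers above */

    struct item {
      int value;
      struct uv__queue member;  /* Links this item into a queue. */
    };

    static void demo(void) {
      struct uv__queue head;
      struct uv__queue* q;
      struct item a = { 1, { NULL, NULL } };
      struct item b = { 2, { NULL, NULL } };

      uv__queue_init(&head);
      uv__queue_insert_tail(&head, &a.member);
      uv__queue_insert_tail(&head, &b.member);

      /* uv__queue_data() recovers the containing struct from its link
       * field, exactly as QUEUE_DATA did before this change. */
      uv__queue_foreach(q, &head)
        printf("%d\n", uv__queue_data(q, struct item, member)->value);
    }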


@ -37,10 +37,10 @@ static unsigned int slow_io_work_running;
static unsigned int nthreads;
static uv_thread_t* threads;
static uv_thread_t default_threads[4];
static QUEUE exit_message;
static QUEUE wq;
static QUEUE run_slow_work_message;
static QUEUE slow_io_pending_wq;
static struct uv__queue exit_message;
static struct uv__queue wq;
static struct uv__queue run_slow_work_message;
static struct uv__queue slow_io_pending_wq;
static unsigned int slow_work_thread_threshold(void) {
return (nthreads + 1) / 2;
@ -56,7 +56,7 @@ static void uv__cancelled(struct uv__work* w) {
*/
static void worker(void* arg) {
struct uv__work* w;
QUEUE* q;
struct uv__queue* q;
int is_slow_work;
uv_sem_post((uv_sem_t*) arg);
@ -68,49 +68,49 @@ static void worker(void* arg) {
/* Keep waiting while either no work is present or only slow I/O
and we're at the threshold for that. */
while (QUEUE_EMPTY(&wq) ||
(QUEUE_HEAD(&wq) == &run_slow_work_message &&
QUEUE_NEXT(&run_slow_work_message) == &wq &&
while (uv__queue_empty(&wq) ||
(uv__queue_head(&wq) == &run_slow_work_message &&
uv__queue_next(&run_slow_work_message) == &wq &&
slow_io_work_running >= slow_work_thread_threshold())) {
idle_threads += 1;
uv_cond_wait(&cond, &mutex);
idle_threads -= 1;
}
q = QUEUE_HEAD(&wq);
q = uv__queue_head(&wq);
if (q == &exit_message) {
uv_cond_signal(&cond);
uv_mutex_unlock(&mutex);
break;
}
QUEUE_REMOVE(q);
QUEUE_INIT(q); /* Signal uv_cancel() that the work req is executing. */
uv__queue_remove(q);
uv__queue_init(q); /* Signal uv_cancel() that the work req is executing. */
is_slow_work = 0;
if (q == &run_slow_work_message) {
/* If we're at the slow I/O threshold, re-schedule until after all
other work in the queue is done. */
if (slow_io_work_running >= slow_work_thread_threshold()) {
QUEUE_INSERT_TAIL(&wq, q);
uv__queue_insert_tail(&wq, q);
continue;
}
/* If we encountered a request to run slow I/O work but there is none
to run, that means it's cancelled => Start over. */
if (QUEUE_EMPTY(&slow_io_pending_wq))
if (uv__queue_empty(&slow_io_pending_wq))
continue;
is_slow_work = 1;
slow_io_work_running++;
q = QUEUE_HEAD(&slow_io_pending_wq);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
q = uv__queue_head(&slow_io_pending_wq);
uv__queue_remove(q);
uv__queue_init(q);
/* If there is more slow I/O work, schedule it to be run as well. */
if (!QUEUE_EMPTY(&slow_io_pending_wq)) {
QUEUE_INSERT_TAIL(&wq, &run_slow_work_message);
if (!uv__queue_empty(&slow_io_pending_wq)) {
uv__queue_insert_tail(&wq, &run_slow_work_message);
if (idle_threads > 0)
uv_cond_signal(&cond);
}
@ -118,13 +118,13 @@ static void worker(void* arg) {
uv_mutex_unlock(&mutex);
w = QUEUE_DATA(q, struct uv__work, wq);
w = uv__queue_data(q, struct uv__work, wq);
w->work(w);
uv_mutex_lock(&w->loop->wq_mutex);
w->work = NULL; /* Signal uv_cancel() that the work req is done
executing. */
QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
uv__queue_insert_tail(&w->loop->wq, &w->wq);
uv_async_send(&w->loop->wq_async);
uv_mutex_unlock(&w->loop->wq_mutex);
@ -139,12 +139,12 @@ static void worker(void* arg) {
}
static void post(QUEUE* q, enum uv__work_kind kind) {
static void post(struct uv__queue* q, enum uv__work_kind kind) {
uv_mutex_lock(&mutex);
if (kind == UV__WORK_SLOW_IO) {
/* Insert into a separate queue. */
QUEUE_INSERT_TAIL(&slow_io_pending_wq, q);
if (!QUEUE_EMPTY(&run_slow_work_message)) {
uv__queue_insert_tail(&slow_io_pending_wq, q);
if (!uv__queue_empty(&run_slow_work_message)) {
/* Running slow I/O tasks is already scheduled => Nothing to do here.
The worker that runs said other task will schedule this one as well. */
uv_mutex_unlock(&mutex);
@ -153,7 +153,7 @@ static void post(QUEUE* q, enum uv__work_kind kind) {
q = &run_slow_work_message;
}
QUEUE_INSERT_TAIL(&wq, q);
uv__queue_insert_tail(&wq, q);
if (idle_threads > 0)
uv_cond_signal(&cond);
uv_mutex_unlock(&mutex);
@ -220,9 +220,9 @@ static void init_threads(void) {
if (uv_mutex_init(&mutex))
abort();
QUEUE_INIT(&wq);
QUEUE_INIT(&slow_io_pending_wq);
QUEUE_INIT(&run_slow_work_message);
uv__queue_init(&wq);
uv__queue_init(&slow_io_pending_wq);
uv__queue_init(&run_slow_work_message);
if (uv_sem_init(&sem, 0))
abort();
@ -285,9 +285,9 @@ static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
uv_mutex_lock(&mutex);
uv_mutex_lock(&w->loop->wq_mutex);
cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL;
cancelled = !uv__queue_empty(&w->wq) && w->work != NULL;
if (cancelled)
QUEUE_REMOVE(&w->wq);
uv__queue_remove(&w->wq);
uv_mutex_unlock(&w->loop->wq_mutex);
uv_mutex_unlock(&mutex);
@ -297,7 +297,7 @@ static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
w->work = uv__cancelled;
uv_mutex_lock(&loop->wq_mutex);
QUEUE_INSERT_TAIL(&loop->wq, &w->wq);
uv__queue_insert_tail(&loop->wq, &w->wq);
uv_async_send(&loop->wq_async);
uv_mutex_unlock(&loop->wq_mutex);
@ -308,21 +308,21 @@ static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
void uv__work_done(uv_async_t* handle) {
struct uv__work* w;
uv_loop_t* loop;
QUEUE* q;
QUEUE wq;
struct uv__queue* q;
struct uv__queue wq;
int err;
int nevents;
loop = container_of(handle, uv_loop_t, wq_async);
uv_mutex_lock(&loop->wq_mutex);
QUEUE_MOVE(&loop->wq, &wq);
uv__queue_move(&loop->wq, &wq);
uv_mutex_unlock(&loop->wq_mutex);
nevents = 0;
while (!QUEUE_EMPTY(&wq)) {
q = QUEUE_HEAD(&wq);
QUEUE_REMOVE(q);
while (!uv__queue_empty(&wq)) {
q = uv__queue_head(&wq);
uv__queue_remove(q);
w = container_of(q, struct uv__work, wq);
err = (w->work == uv__cancelled) ? UV_ECANCELED : 0;


@ -136,7 +136,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
struct pollfd pqry;
struct pollfd* pe;
struct poll_ctl pc;
QUEUE* q;
struct uv__queue* q;
uv__io_t* w;
uint64_t base;
uint64_t diff;
@ -151,18 +151,18 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int reset_timeout;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
assert(uv__queue_empty(&loop->watcher_queue));
return;
}
lfields = uv__get_internal_fields(loop);
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
while (!uv__queue_empty(&loop->watcher_queue)) {
q = uv__queue_head(&loop->watcher_queue);
uv__queue_remove(q);
uv__queue_init(q);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
w = uv__queue_data(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);


@ -55,7 +55,7 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
handle->pending = 0;
handle->u.fd = 0; /* This will be used as a busy flag. */
QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue);
uv__queue_insert_tail(&loop->async_handles, &handle->queue);
uv__handle_start(handle);
return 0;
@ -124,7 +124,7 @@ static void uv__async_spin(uv_async_t* handle) {
void uv__async_close(uv_async_t* handle) {
uv__async_spin(handle);
QUEUE_REMOVE(&handle->queue);
uv__queue_remove(&handle->queue);
uv__handle_stop(handle);
}
@ -132,8 +132,8 @@ void uv__async_close(uv_async_t* handle) {
static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
char buf[1024];
ssize_t r;
QUEUE queue;
QUEUE* q;
struct uv__queue queue;
struct uv__queue* q;
uv_async_t* h;
_Atomic int *pending;
@ -157,13 +157,13 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
abort();
}
QUEUE_MOVE(&loop->async_handles, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
h = QUEUE_DATA(q, uv_async_t, queue);
uv__queue_move(&loop->async_handles, &queue);
while (!uv__queue_empty(&queue)) {
q = uv__queue_head(&queue);
h = uv__queue_data(q, uv_async_t, queue);
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&loop->async_handles, q);
uv__queue_remove(q);
uv__queue_insert_tail(&loop->async_handles, q);
/* Atomically fetch and clear pending flag */
pending = (_Atomic int*) &h->pending;
@ -241,8 +241,8 @@ static int uv__async_start(uv_loop_t* loop) {
void uv__async_stop(uv_loop_t* loop) {
QUEUE queue;
QUEUE* q;
struct uv__queue queue;
struct uv__queue* q;
uv_async_t* h;
if (loop->async_io_watcher.fd == -1)
@ -251,13 +251,13 @@ void uv__async_stop(uv_loop_t* loop) {
/* Make sure no other thread is accessing the async handle fd after the loop
* cleanup.
*/
QUEUE_MOVE(&loop->async_handles, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
h = QUEUE_DATA(q, uv_async_t, queue);
uv__queue_move(&loop->async_handles, &queue);
while (!uv__queue_empty(&queue)) {
q = uv__queue_head(&queue);
h = uv__queue_data(q, uv_async_t, queue);
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&loop->async_handles, q);
uv__queue_remove(q);
uv__queue_insert_tail(&loop->async_handles, q);
uv__async_spin(h);
}
@ -275,20 +275,20 @@ void uv__async_stop(uv_loop_t* loop) {
int uv__async_fork(uv_loop_t* loop) {
QUEUE queue;
QUEUE* q;
struct uv__queue queue;
struct uv__queue* q;
uv_async_t* h;
if (loop->async_io_watcher.fd == -1) /* never started */
return 0;
QUEUE_MOVE(&loop->async_handles, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
h = QUEUE_DATA(q, uv_async_t, queue);
uv__queue_move(&loop->async_handles, &queue);
while (!uv__queue_empty(&queue)) {
q = uv__queue_head(&queue);
h = uv__queue_data(q, uv_async_t, queue);
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&loop->async_handles, q);
uv__queue_remove(q);
uv__queue_insert_tail(&loop->async_handles, q);
/* The state of any thread that set pending is now likely corrupt in this
* child because the user called fork, so just clear these flags and move


@ -344,7 +344,7 @@ static void uv__finish_close(uv_handle_t* handle) {
}
uv__handle_unref(handle);
QUEUE_REMOVE(&handle->handle_queue);
uv__queue_remove(&handle->handle_queue);
if (handle->close_cb) {
handle->close_cb(handle);
@ -380,7 +380,7 @@ int uv_backend_fd(const uv_loop_t* loop) {
static int uv__loop_alive(const uv_loop_t* loop) {
return uv__has_active_handles(loop) ||
uv__has_active_reqs(loop) ||
!QUEUE_EMPTY(&loop->pending_queue) ||
!uv__queue_empty(&loop->pending_queue) ||
loop->closing_handles != NULL;
}
@ -389,8 +389,8 @@ static int uv__backend_timeout(const uv_loop_t* loop) {
if (loop->stop_flag == 0 &&
/* uv__loop_alive(loop) && */
(uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
QUEUE_EMPTY(&loop->pending_queue) &&
QUEUE_EMPTY(&loop->idle_handles) &&
uv__queue_empty(&loop->pending_queue) &&
uv__queue_empty(&loop->idle_handles) &&
(loop->flags & UV_LOOP_REAP_CHILDREN) == 0 &&
loop->closing_handles == NULL)
return uv__next_timeout(loop);
@ -399,7 +399,7 @@ static int uv__backend_timeout(const uv_loop_t* loop) {
int uv_backend_timeout(const uv_loop_t* loop) {
if (QUEUE_EMPTY(&loop->watcher_queue))
if (uv__queue_empty(&loop->watcher_queue))
return uv__backend_timeout(loop);
/* Need to call uv_run to update the backend fd state. */
return 0;
@ -424,15 +424,15 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
* while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed
* once, which should be done after polling in order to maintain proper
* execution order of the conceptual event loop. */
if (mode == UV_RUN_DEFAULT) {
if (r)
uv__update_time(loop);
if (mode == UV_RUN_DEFAULT && r != 0 && loop->stop_flag == 0) {
uv__update_time(loop);
uv__run_timers(loop);
}
while (r != 0 && loop->stop_flag == 0) {
can_sleep =
QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->idle_handles);
uv__queue_empty(&loop->pending_queue) &&
uv__queue_empty(&loop->idle_handles);
uv__run_pending(loop);
uv__run_idle(loop);
@ -448,7 +448,7 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
/* Process immediate callbacks (e.g. write_cb) a small fixed number of
* times to avoid loop starvation.*/
for (r = 0; r < 8 && !QUEUE_EMPTY(&loop->pending_queue); r++)
for (r = 0; r < 8 && !uv__queue_empty(&loop->pending_queue); r++)
uv__run_pending(loop);
/* Run one final update on the provider_idle_time in case uv__io_poll
@ -827,17 +827,17 @@ int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
static void uv__run_pending(uv_loop_t* loop) {
QUEUE* q;
QUEUE pq;
struct uv__queue* q;
struct uv__queue pq;
uv__io_t* w;
QUEUE_MOVE(&loop->pending_queue, &pq);
uv__queue_move(&loop->pending_queue, &pq);
while (!QUEUE_EMPTY(&pq)) {
q = QUEUE_HEAD(&pq);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
w = QUEUE_DATA(q, uv__io_t, pending_queue);
while (!uv__queue_empty(&pq)) {
q = uv__queue_head(&pq);
uv__queue_remove(q);
uv__queue_init(q);
w = uv__queue_data(q, uv__io_t, pending_queue);
w->cb(loop, w, POLLOUT);
}
}
@ -892,8 +892,8 @@ static void maybe_resize(uv_loop_t* loop, unsigned int len) {
void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
assert(cb != NULL);
assert(fd >= -1);
QUEUE_INIT(&w->pending_queue);
QUEUE_INIT(&w->watcher_queue);
uv__queue_init(&w->pending_queue);
uv__queue_init(&w->watcher_queue);
w->cb = cb;
w->fd = fd;
w->events = 0;
@ -919,8 +919,8 @@ void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
return;
#endif
if (QUEUE_EMPTY(&w->watcher_queue))
QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
if (uv__queue_empty(&w->watcher_queue))
uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
if (loop->watchers[w->fd] == NULL) {
loop->watchers[w->fd] = w;
@ -945,8 +945,8 @@ void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
w->pevents &= ~events;
if (w->pevents == 0) {
QUEUE_REMOVE(&w->watcher_queue);
QUEUE_INIT(&w->watcher_queue);
uv__queue_remove(&w->watcher_queue);
uv__queue_init(&w->watcher_queue);
w->events = 0;
if (w == loop->watchers[w->fd]) {
@ -955,14 +955,14 @@ void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
loop->nfds--;
}
}
else if (QUEUE_EMPTY(&w->watcher_queue))
QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
else if (uv__queue_empty(&w->watcher_queue))
uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
}
void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
QUEUE_REMOVE(&w->pending_queue);
uv__queue_remove(&w->pending_queue);
/* Remove stale events for this file descriptor */
if (w->fd != -1)
@ -971,8 +971,8 @@ void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
if (QUEUE_EMPTY(&w->pending_queue))
QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
if (uv__queue_empty(&w->pending_queue))
uv__queue_insert_tail(&loop->pending_queue, &w->pending_queue);
}
@ -1020,8 +1020,8 @@ int uv_getrusage(uv_rusage_t* rusage) {
/* Most platforms report ru_maxrss in kilobytes; macOS and Solaris are
* the outliers because of course they are.
*/
#if defined(__APPLE__) && !TARGET_OS_IPHONE
rusage->ru_maxrss /= 1024; /* macOS reports bytes. */
#if defined(__APPLE__)
rusage->ru_maxrss /= 1024; /* macOS and iOS report bytes. */
#elif defined(__sun)
rusage->ru_maxrss /= getpagesize() / 1024; /* Solaris reports pages. */
#endif
@ -1271,6 +1271,10 @@ static int uv__getpwuid_r(uv_passwd_t *pwd, uid_t uid) {
int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) {
#if defined(__ANDROID__) && __ANDROID_API__ < 24
/* getgrgid_r() was added in Android N (API level 24). */
return UV_ENOSYS;
#else
struct group gp;
struct group* result;
char* buf;
@ -1347,6 +1351,7 @@ int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) {
uv__free(buf);
return 0;
#endif
}


@ -55,9 +55,13 @@
# define HAVE_PREADV 0
#endif
/* preadv() and pwritev() were added in Android N (level 24) */
#if defined(__linux__) && !(defined(__ANDROID__) && __ANDROID_API__ < 24)
# define TRY_PREADV 1
#endif
#if defined(__linux__)
# include <sys/sendfile.h>
# include <sys/utsname.h>
#endif
#if defined(__sun)
@ -457,7 +461,7 @@ static ssize_t uv__fs_preadv(uv_file fd,
static ssize_t uv__fs_read(uv_fs_t* req) {
#if defined(__linux__)
#if TRY_PREADV
static _Atomic int no_preadv;
#endif
unsigned int iovmax;
@ -481,13 +485,13 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
#if HAVE_PREADV
result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
# if defined(__linux__)
# if TRY_PREADV
if (atomic_load_explicit(&no_preadv, memory_order_relaxed)) retry:
# endif
{
result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
}
# if defined(__linux__)
# if TRY_PREADV
else {
result = preadv(req->file,
(struct iovec*) req->bufs,
@ -899,31 +903,6 @@ out:
#ifdef __linux__
static unsigned uv__kernel_version(void) {
static _Atomic unsigned cached_version;
struct utsname u;
unsigned version;
unsigned major;
unsigned minor;
unsigned patch;
version = atomic_load_explicit(&cached_version, memory_order_relaxed);
if (version != 0)
return version;
if (-1 == uname(&u))
return 0;
if (3 != sscanf(u.release, "%u.%u.%u", &major, &minor, &patch))
return 0;
version = major * 65536 + minor * 256 + patch;
atomic_store_explicit(&cached_version, version, memory_order_relaxed);
return version;
}
/* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
* in copy_file_range() when it shouldn't. There is no workaround except to
* fall back to a regular copy.
@ -1182,8 +1161,8 @@ static ssize_t uv__fs_lutime(uv_fs_t* req) {
static ssize_t uv__fs_write(uv_fs_t* req) {
#if defined(__linux__)
static int no_pwritev;
#if TRY_PREADV
static _Atomic int no_pwritev;
#endif
ssize_t r;
@ -1211,20 +1190,20 @@ static ssize_t uv__fs_write(uv_fs_t* req) {
#if HAVE_PREADV
r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
# if defined(__linux__)
if (no_pwritev) retry:
# if TRY_PREADV
if (atomic_load_explicit(&no_pwritev, memory_order_relaxed)) retry:
# endif
{
r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
}
# if defined(__linux__)
# if TRY_PREADV
else {
r = pwritev(req->file,
(struct iovec*) req->bufs,
req->nbufs,
req->off);
if (r == -1 && errno == ENOSYS) {
no_pwritev = 1;
atomic_store_explicit(&no_pwritev, 1, memory_order_relaxed);
goto retry;
}
}
@ -1926,6 +1905,9 @@ int uv_fs_link(uv_loop_t* loop,
uv_fs_cb cb) {
INIT(LINK);
PATH2;
if (cb != NULL)
if (uv__iou_fs_link(loop, req))
return 0;
POST;
}
@ -1938,6 +1920,9 @@ int uv_fs_mkdir(uv_loop_t* loop,
INIT(MKDIR);
PATH;
req->mode = mode;
if (cb != NULL)
if (uv__iou_fs_mkdir(loop, req))
return 0;
POST;
}
@ -2089,6 +2074,9 @@ int uv_fs_rename(uv_loop_t* loop,
uv_fs_cb cb) {
INIT(RENAME);
PATH2;
if (cb != NULL)
if (uv__iou_fs_rename(loop, req))
return 0;
POST;
}
@ -2135,6 +2123,9 @@ int uv_fs_symlink(uv_loop_t* loop,
INIT(SYMLINK);
PATH2;
req->flags = flags;
if (cb != NULL)
if (uv__iou_fs_symlink(loop, req))
return 0;
POST;
}
@ -2142,6 +2133,9 @@ int uv_fs_symlink(uv_loop_t* loop,
int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
INIT(UNLINK);
PATH;
if (cb != NULL)
if (uv__iou_fs_unlink(loop, req))
return 0;
POST;
}


@ -80,13 +80,13 @@ enum uv__cf_loop_signal_type_e {
typedef enum uv__cf_loop_signal_type_e uv__cf_loop_signal_type_t;
struct uv__cf_loop_signal_s {
QUEUE member;
struct uv__queue member;
uv_fs_event_t* handle;
uv__cf_loop_signal_type_t type;
};
struct uv__fsevents_event_s {
QUEUE member;
struct uv__queue member;
int events;
char path[1];
};
@ -98,7 +98,7 @@ struct uv__cf_loop_state_s {
FSEventStreamRef fsevent_stream;
uv_sem_t fsevent_sem;
uv_mutex_t fsevent_mutex;
void* fsevent_handles[2];
struct uv__queue fsevent_handles;
unsigned int fsevent_handle_count;
};
@ -150,22 +150,22 @@ static void (*pFSEventStreamStop)(FSEventStreamRef);
#define UV__FSEVENTS_PROCESS(handle, block) \
do { \
QUEUE events; \
QUEUE* q; \
struct uv__queue events; \
struct uv__queue* q; \
uv__fsevents_event_t* event; \
int err; \
uv_mutex_lock(&(handle)->cf_mutex); \
/* Split-off all events and empty original queue */ \
QUEUE_MOVE(&(handle)->cf_events, &events); \
uv__queue_move(&(handle)->cf_events, &events); \
/* Get error (if any) and zero original one */ \
err = (handle)->cf_error; \
(handle)->cf_error = 0; \
uv_mutex_unlock(&(handle)->cf_mutex); \
/* Loop through events, deallocating each after processing */ \
while (!QUEUE_EMPTY(&events)) { \
q = QUEUE_HEAD(&events); \
event = QUEUE_DATA(q, uv__fsevents_event_t, member); \
QUEUE_REMOVE(q); \
while (!uv__queue_empty(&events)) { \
q = uv__queue_head(&events); \
event = uv__queue_data(q, uv__fsevents_event_t, member); \
uv__queue_remove(q); \
/* NOTE: Checking uv__is_active() is required here, because handle \
* callback may close handle and invoking it after it will lead to \
* incorrect behaviour */ \
@ -193,14 +193,14 @@ static void uv__fsevents_cb(uv_async_t* cb) {
/* Runs in CF thread, pushed event into handle's event list */
static void uv__fsevents_push_event(uv_fs_event_t* handle,
QUEUE* events,
struct uv__queue* events,
int err) {
assert(events != NULL || err != 0);
uv_mutex_lock(&handle->cf_mutex);
/* Concatenate two queues */
if (events != NULL)
QUEUE_ADD(&handle->cf_events, events);
uv__queue_add(&handle->cf_events, events);
/* Propagate error */
if (err != 0)
@ -224,12 +224,12 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef,
char* path;
char* pos;
uv_fs_event_t* handle;
QUEUE* q;
struct uv__queue* q;
uv_loop_t* loop;
uv__cf_loop_state_t* state;
uv__fsevents_event_t* event;
FSEventStreamEventFlags flags;
QUEUE head;
struct uv__queue head;
loop = info;
state = loop->cf_state;
@ -238,9 +238,9 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef,
/* For each handle */
uv_mutex_lock(&state->fsevent_mutex);
QUEUE_FOREACH(q, &state->fsevent_handles) {
handle = QUEUE_DATA(q, uv_fs_event_t, cf_member);
QUEUE_INIT(&head);
uv__queue_foreach(q, &state->fsevent_handles) {
handle = uv__queue_data(q, uv_fs_event_t, cf_member);
uv__queue_init(&head);
/* Process and filter out events */
for (i = 0; i < numEvents; i++) {
@ -318,10 +318,10 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef,
event->events = UV_CHANGE;
}
QUEUE_INSERT_TAIL(&head, &event->member);
uv__queue_insert_tail(&head, &event->member);
}
if (!QUEUE_EMPTY(&head))
if (!uv__queue_empty(&head))
uv__fsevents_push_event(handle, &head, 0);
}
uv_mutex_unlock(&state->fsevent_mutex);
@ -403,7 +403,7 @@ static void uv__fsevents_destroy_stream(uv__cf_loop_state_t* state) {
static void uv__fsevents_reschedule(uv__cf_loop_state_t* state,
uv_loop_t* loop,
uv__cf_loop_signal_type_t type) {
QUEUE* q;
struct uv__queue* q;
uv_fs_event_t* curr;
CFArrayRef cf_paths;
CFStringRef* paths;
@ -446,9 +446,9 @@ static void uv__fsevents_reschedule(uv__cf_loop_state_t* state,
q = &state->fsevent_handles;
for (; i < path_count; i++) {
q = QUEUE_NEXT(q);
q = uv__queue_next(q);
assert(q != &state->fsevent_handles);
curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
curr = uv__queue_data(q, uv_fs_event_t, cf_member);
assert(curr->realpath != NULL);
paths[i] =
@ -486,8 +486,8 @@ final:
/* Broadcast error to all handles */
uv_mutex_lock(&state->fsevent_mutex);
QUEUE_FOREACH(q, &state->fsevent_handles) {
curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
uv__queue_foreach(q, &state->fsevent_handles) {
curr = uv__queue_data(q, uv_fs_event_t, cf_member);
uv__fsevents_push_event(curr, NULL, err);
}
uv_mutex_unlock(&state->fsevent_mutex);
@ -606,7 +606,7 @@ static int uv__fsevents_loop_init(uv_loop_t* loop) {
if (err)
goto fail_sem_init;
QUEUE_INIT(&loop->cf_signals);
uv__queue_init(&loop->cf_signals);
err = uv_sem_init(&state->fsevent_sem, 0);
if (err)
@ -616,7 +616,7 @@ static int uv__fsevents_loop_init(uv_loop_t* loop) {
if (err)
goto fail_fsevent_mutex_init;
QUEUE_INIT(&state->fsevent_handles);
uv__queue_init(&state->fsevent_handles);
state->fsevent_need_reschedule = 0;
state->fsevent_handle_count = 0;
@ -675,7 +675,7 @@ fail_mutex_init:
void uv__fsevents_loop_delete(uv_loop_t* loop) {
uv__cf_loop_signal_t* s;
uv__cf_loop_state_t* state;
QUEUE* q;
struct uv__queue* q;
if (loop->cf_state == NULL)
return;
@ -688,10 +688,10 @@ void uv__fsevents_loop_delete(uv_loop_t* loop) {
uv_mutex_destroy(&loop->cf_mutex);
/* Free any remaining data */
while (!QUEUE_EMPTY(&loop->cf_signals)) {
q = QUEUE_HEAD(&loop->cf_signals);
s = QUEUE_DATA(q, uv__cf_loop_signal_t, member);
QUEUE_REMOVE(q);
while (!uv__queue_empty(&loop->cf_signals)) {
q = uv__queue_head(&loop->cf_signals);
s = uv__queue_data(q, uv__cf_loop_signal_t, member);
uv__queue_remove(q);
uv__free(s);
}
@ -735,22 +735,22 @@ static void* uv__cf_loop_runner(void* arg) {
static void uv__cf_loop_cb(void* arg) {
uv_loop_t* loop;
uv__cf_loop_state_t* state;
QUEUE* item;
QUEUE split_head;
struct uv__queue* item;
struct uv__queue split_head;
uv__cf_loop_signal_t* s;
loop = arg;
state = loop->cf_state;
uv_mutex_lock(&loop->cf_mutex);
QUEUE_MOVE(&loop->cf_signals, &split_head);
uv__queue_move(&loop->cf_signals, &split_head);
uv_mutex_unlock(&loop->cf_mutex);
while (!QUEUE_EMPTY(&split_head)) {
item = QUEUE_HEAD(&split_head);
QUEUE_REMOVE(item);
while (!uv__queue_empty(&split_head)) {
item = uv__queue_head(&split_head);
uv__queue_remove(item);
s = QUEUE_DATA(item, uv__cf_loop_signal_t, member);
s = uv__queue_data(item, uv__cf_loop_signal_t, member);
/* This was a termination signal */
if (s->handle == NULL)
@ -778,7 +778,7 @@ int uv__cf_loop_signal(uv_loop_t* loop,
item->type = type;
uv_mutex_lock(&loop->cf_mutex);
QUEUE_INSERT_TAIL(&loop->cf_signals, &item->member);
uv__queue_insert_tail(&loop->cf_signals, &item->member);
state = loop->cf_state;
assert(state != NULL);
@ -807,7 +807,7 @@ int uv__fsevents_init(uv_fs_event_t* handle) {
handle->realpath_len = strlen(handle->realpath);
/* Initialize event queue */
QUEUE_INIT(&handle->cf_events);
uv__queue_init(&handle->cf_events);
handle->cf_error = 0;
/*
@ -832,7 +832,7 @@ int uv__fsevents_init(uv_fs_event_t* handle) {
/* Insert handle into the list */
state = handle->loop->cf_state;
uv_mutex_lock(&state->fsevent_mutex);
QUEUE_INSERT_TAIL(&state->fsevent_handles, &handle->cf_member);
uv__queue_insert_tail(&state->fsevent_handles, &handle->cf_member);
state->fsevent_handle_count++;
state->fsevent_need_reschedule = 1;
uv_mutex_unlock(&state->fsevent_mutex);
@ -872,7 +872,7 @@ int uv__fsevents_close(uv_fs_event_t* handle) {
/* Remove handle from the list */
state = handle->loop->cf_state;
uv_mutex_lock(&state->fsevent_mutex);
QUEUE_REMOVE(&handle->cf_member);
uv__queue_remove(&handle->cf_member);
state->fsevent_handle_count--;
state->fsevent_need_reschedule = 1;
uv_mutex_unlock(&state->fsevent_mutex);


@ -335,20 +335,30 @@ int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop,
uv_fs_t* req,
uint32_t fsync_flags);
int uv__iou_fs_link(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_mkdir(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_read_or_write(uv_loop_t* loop,
uv_fs_t* req,
int is_read);
int uv__iou_fs_rename(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_statx(uv_loop_t* loop,
uv_fs_t* req,
int is_fstat,
int is_lstat);
int uv__iou_fs_symlink(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_unlink(uv_loop_t* loop, uv_fs_t* req);
#else
#define uv__iou_fs_close(loop, req) 0
#define uv__iou_fs_fsync_or_fdatasync(loop, req, fsync_flags) 0
#define uv__iou_fs_link(loop, req) 0
#define uv__iou_fs_mkdir(loop, req) 0
#define uv__iou_fs_open(loop, req) 0
#define uv__iou_fs_read_or_write(loop, req, is_read) 0
#define uv__iou_fs_rename(loop, req) 0
#define uv__iou_fs_statx(loop, req, is_fstat, is_lstat) 0
#define uv__iou_fs_symlink(loop, req) 0
#define uv__iou_fs_unlink(loop, req) 0
#endif
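/* Illustrative note, not in the upstream header: each uv__iou_fs_*() helper
 * returns 1 when it queued the request on io_uring and 0 when the caller
 * should fall back to the threadpool, which is why the no-io_uring stubs
 * above expand to a constant 0. */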
#if defined(__APPLE__)
@ -429,6 +439,7 @@ int uv__statx(int dirfd,
struct uv__statx* statxbuf);
void uv__statx_to_stat(const struct uv__statx* statxbuf, uv_stat_t* buf);
ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags);
unsigned uv__kernel_version(void);
#endif
typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*);


@ -133,7 +133,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
struct timespec spec;
unsigned int nevents;
unsigned int revents;
QUEUE* q;
struct uv__queue* q;
uv__io_t* w;
uv_process_t* process;
sigset_t* pset;
@ -152,19 +152,19 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int reset_timeout;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
assert(uv__queue_empty(&loop->watcher_queue));
return;
}
lfields = uv__get_internal_fields(loop);
nevents = 0;
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
while (!uv__queue_empty(&loop->watcher_queue)) {
q = uv__queue_head(&loop->watcher_queue);
uv__queue_remove(q);
uv__queue_init(q);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
w = uv__queue_data(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);
@ -307,8 +307,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
/* Handle kevent NOTE_EXIT results */
if (ev->filter == EVFILT_PROC) {
QUEUE_FOREACH(q, &loop->process_handles) {
process = QUEUE_DATA(q, uv_process_t, queue);
uv__queue_foreach(q, &loop->process_handles) {
process = uv__queue_data(q, uv_process_t, queue);
if (process->pid == fd) {
process->flags |= UV_HANDLE_REAP;
loop->flags |= UV_LOOP_REAP_CHILDREN;


@ -48,6 +48,7 @@
#include <sys/sysinfo.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <time.h>
#include <unistd.h>
@ -150,6 +151,11 @@ enum {
UV__IORING_OP_CLOSE = 19,
UV__IORING_OP_STATX = 21,
UV__IORING_OP_EPOLL_CTL = 29,
UV__IORING_OP_RENAMEAT = 35,
UV__IORING_OP_UNLINKAT = 36,
UV__IORING_OP_MKDIRAT = 37,
UV__IORING_OP_SYMLINKAT = 38,
UV__IORING_OP_LINKAT = 39,
};
enum {
@ -162,6 +168,10 @@ enum {
UV__IORING_SQ_CQ_OVERFLOW = 2u,
};
enum {
UV__MKDIRAT_SYMLINKAT_LINKAT = 1u,
};
struct uv__io_cqring_offsets {
uint32_t head;
uint32_t tail;
@ -257,7 +267,7 @@ STATIC_ASSERT(EPOLL_CTL_MOD < 4);
struct watcher_list {
RB_ENTRY(watcher_list) entry;
QUEUE watchers;
struct uv__queue watchers;
int iterating;
char* path;
int wd;
@ -300,6 +310,31 @@ static struct watcher_root* uv__inotify_watchers(uv_loop_t* loop) {
}
unsigned uv__kernel_version(void) {
static _Atomic unsigned cached_version;
struct utsname u;
unsigned version;
unsigned major;
unsigned minor;
unsigned patch;
version = atomic_load_explicit(&cached_version, memory_order_relaxed);
if (version != 0)
return version;
if (-1 == uname(&u))
return 0;
if (3 != sscanf(u.release, "%u.%u.%u", &major, &minor, &patch))
return 0;
version = major * 65536 + minor * 256 + patch;
atomic_store_explicit(&cached_version, version, memory_order_relaxed);
return version;
}
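/* Illustrative aside, not in the upstream source: the encoding above packs
 * one byte per component, so kernel 5.15.90 yields
 * 5*65536 + 15*256 + 90 == 0x050F5A -- the same form as the io_uring
 * feature gates below (0x050F00 for 5.15.0, 0x050F5A for 5.15.90). */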
ssize_t
uv__fs_copy_file_range(int fd_in,
off_t* off_in,
@ -385,6 +420,9 @@ int uv__io_uring_register(int fd, unsigned opcode, void* arg, unsigned nargs) {
static int uv__use_io_uring(void) {
#if defined(__ANDROID_API__)
return 0; /* Possibly available but blocked by seccomp. */
#else
/* Ternary: unknown=0, yes=1, no=-1 */
static _Atomic int use_io_uring;
char* val;
@ -399,6 +437,7 @@ static int uv__use_io_uring(void) {
}
return use > 0;
#endif
}
@ -503,6 +542,10 @@ static void uv__iou_init(int epollfd,
iou->sqelen = sqelen;
iou->ringfd = ringfd;
iou->in_flight = 0;
iou->flags = 0;
if (uv__kernel_version() >= /* 5.15.0 */ 0x050F00)
iou->flags |= UV__MKDIRAT_SYMLINKAT_LINKAT;
for (i = 0; i <= iou->sqmask; i++)
iou->sqarray[i] = i; /* Slot -> sqe identity mapping. */
@ -684,7 +727,7 @@ static struct uv__io_uring_sqe* uv__iou_get_sqe(struct uv__iou* iou,
req->work_req.loop = loop;
req->work_req.work = NULL;
req->work_req.done = NULL;
QUEUE_INIT(&req->work_req.wq);
uv__queue_init(&req->work_req.wq);
uv__req_register(loop, req);
iou->in_flight++;
@ -714,6 +757,17 @@ int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req) {
struct uv__io_uring_sqe* sqe;
struct uv__iou* iou;
/* Work around a poorly understood bug in older kernels where closing a file
* descriptor pointing to /foo/bar results in ETXTBSY errors when trying to
* execve("/foo/bar") later on. The bug seems to have been fixed somewhere
* between 5.15.85 and 5.15.90. I couldn't pinpoint the responsible commit
* but good candidates are the several data race fixes. Interestingly, it
* seems to manifest only when running under Docker so the possibility of
* a Docker bug can't be completely ruled out either. Yay, computers.
*/
if (uv__kernel_version() < /* 5.15.90 */ 0x050F5A)
return 0;
iou = &uv__get_internal_fields(loop)->iou;
sqe = uv__iou_get_sqe(iou, loop, req);
@ -754,6 +808,55 @@ int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop,
}
int uv__iou_fs_link(uv_loop_t* loop, uv_fs_t* req) {
struct uv__io_uring_sqe* sqe;
struct uv__iou* iou;
iou = &uv__get_internal_fields(loop)->iou;
if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT))
return 0;
sqe = uv__iou_get_sqe(iou, loop, req);
if (sqe == NULL)
return 0;
sqe->addr = (uintptr_t) req->path;
sqe->fd = AT_FDCWD;
sqe->addr2 = (uintptr_t) req->new_path;
sqe->len = AT_FDCWD;
sqe->opcode = UV__IORING_OP_LINKAT;
uv__iou_submit(iou);
return 1;
}
int uv__iou_fs_mkdir(uv_loop_t* loop, uv_fs_t* req) {
struct uv__io_uring_sqe* sqe;
struct uv__iou* iou;
iou = &uv__get_internal_fields(loop)->iou;
if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT))
return 0;
sqe = uv__iou_get_sqe(iou, loop, req);
if (sqe == NULL)
return 0;
sqe->addr = (uintptr_t) req->path;
sqe->fd = AT_FDCWD;
sqe->len = req->mode;
sqe->opcode = UV__IORING_OP_MKDIRAT;
uv__iou_submit(iou);
return 1;
}
int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req) {
struct uv__io_uring_sqe* sqe;
struct uv__iou* iou;
@ -776,16 +879,86 @@ int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req) {
}
int uv__iou_fs_rename(uv_loop_t* loop, uv_fs_t* req) {
struct uv__io_uring_sqe* sqe;
struct uv__iou* iou;
iou = &uv__get_internal_fields(loop)->iou;
sqe = uv__iou_get_sqe(iou, loop, req);
if (sqe == NULL)
return 0;
sqe->addr = (uintptr_t) req->path;
sqe->fd = AT_FDCWD;
sqe->addr2 = (uintptr_t) req->new_path;
sqe->len = AT_FDCWD;
sqe->opcode = UV__IORING_OP_RENAMEAT;
uv__iou_submit(iou);
return 1;
}
int uv__iou_fs_symlink(uv_loop_t* loop, uv_fs_t* req) {
struct uv__io_uring_sqe* sqe;
struct uv__iou* iou;
iou = &uv__get_internal_fields(loop)->iou;
if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT))
return 0;
sqe = uv__iou_get_sqe(iou, loop, req);
if (sqe == NULL)
return 0;
sqe->addr = (uintptr_t) req->path;
sqe->fd = AT_FDCWD;
sqe->addr2 = (uintptr_t) req->new_path;
sqe->opcode = UV__IORING_OP_SYMLINKAT;
uv__iou_submit(iou);
return 1;
}
int uv__iou_fs_unlink(uv_loop_t* loop, uv_fs_t* req) {
struct uv__io_uring_sqe* sqe;
struct uv__iou* iou;
iou = &uv__get_internal_fields(loop)->iou;
sqe = uv__iou_get_sqe(iou, loop, req);
if (sqe == NULL)
return 0;
sqe->addr = (uintptr_t) req->path;
sqe->fd = AT_FDCWD;
sqe->opcode = UV__IORING_OP_UNLINKAT;
uv__iou_submit(iou);
return 1;
}
int uv__iou_fs_read_or_write(uv_loop_t* loop,
uv_fs_t* req,
int is_read) {
struct uv__io_uring_sqe* sqe;
struct uv__iou* iou;
/* For the moment, if iovcnt is greater than IOV_MAX, fall back to the
* threadpool. In the future we might take advantage of IOSQE_IO_LINK. */
if (req->nbufs > IOV_MAX)
return 0;
/* If iovcnt is greater than IOV_MAX, cap it to IOV_MAX on reads and fall
* back to the threadpool on writes. */
if (req->nbufs > IOV_MAX) {
if (is_read)
req->nbufs = IOV_MAX;
else
return 0;
}
iou = &uv__get_internal_fields(loop)->iou;
@ -1092,7 +1265,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
struct uv__iou* ctl;
struct uv__iou* iou;
int real_timeout;
QUEUE* q;
struct uv__queue* q;
uv__io_t* w;
sigset_t* sigmask;
sigset_t sigset;
@ -1138,11 +1311,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
memset(&e, 0, sizeof(e));
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
while (!uv__queue_empty(&loop->watcher_queue)) {
q = uv__queue_head(&loop->watcher_queue);
w = uv__queue_data(q, uv__io_t, watcher_queue);
uv__queue_remove(q);
uv__queue_init(q);
op = EPOLL_CTL_MOD;
if (w->events == 0)
@ -1479,6 +1652,8 @@ int uv_cpu_info(uv_cpu_info_t** ci, int* count) {
static const char model_marker[] = "CPU part\t: ";
#elif defined(__mips__)
static const char model_marker[] = "cpu model\t\t: ";
#elif defined(__loongarch__)
static const char model_marker[] = "cpu family\t\t: ";
#else
static const char model_marker[] = "model name\t: ";
#endif
@ -2097,8 +2272,8 @@ static int uv__inotify_fork(uv_loop_t* loop, struct watcher_list* root) {
struct watcher_list* tmp_watcher_list_iter;
struct watcher_list* watcher_list;
struct watcher_list tmp_watcher_list;
QUEUE queue;
QUEUE* q;
struct uv__queue queue;
struct uv__queue* q;
uv_fs_event_t* handle;
char* tmp_path;
@ -2110,41 +2285,41 @@ static int uv__inotify_fork(uv_loop_t* loop, struct watcher_list* root) {
*/
loop->inotify_watchers = root;
QUEUE_INIT(&tmp_watcher_list.watchers);
uv__queue_init(&tmp_watcher_list.watchers);
/* Note that the queue we use is shared with the start and stop()
* functions, making QUEUE_FOREACH unsafe to use. So we use the
* QUEUE_MOVE trick to safely iterate. Also don't free the watcher
* functions, making uv__queue_foreach unsafe to use. So we use the
* uv__queue_move trick to safely iterate. Also don't free the watcher
* list until we're done iterating. c.f. uv__inotify_read.
*/
RB_FOREACH_SAFE(watcher_list, watcher_root,
uv__inotify_watchers(loop), tmp_watcher_list_iter) {
watcher_list->iterating = 1;
QUEUE_MOVE(&watcher_list->watchers, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
uv__queue_move(&watcher_list->watchers, &queue);
while (!uv__queue_empty(&queue)) {
q = uv__queue_head(&queue);
handle = uv__queue_data(q, uv_fs_event_t, watchers);
/* It's critical to keep a copy of path here, because it
* will be set to NULL by stop() and then deallocated by
* maybe_free_watcher_list
*/
tmp_path = uv__strdup(handle->path);
assert(tmp_path != NULL);
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&watcher_list->watchers, q);
uv__queue_remove(q);
uv__queue_insert_tail(&watcher_list->watchers, q);
uv_fs_event_stop(handle);
QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers);
uv__queue_insert_tail(&tmp_watcher_list.watchers, &handle->watchers);
handle->path = tmp_path;
}
watcher_list->iterating = 0;
maybe_free_watcher_list(watcher_list, loop);
}
QUEUE_MOVE(&tmp_watcher_list.watchers, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
QUEUE_REMOVE(q);
handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
uv__queue_move(&tmp_watcher_list.watchers, &queue);
while (!uv__queue_empty(&queue)) {
q = uv__queue_head(&queue);
uv__queue_remove(q);
handle = uv__queue_data(q, uv_fs_event_t, watchers);
tmp_path = handle->path;
handle->path = NULL;
err = uv_fs_event_start(handle, handle->cb, tmp_path, 0);
@ -2166,7 +2341,7 @@ static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
/* if the watcher_list->watchers is being iterated over, we can't free it. */
if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) {
if ((!w->iterating) && uv__queue_empty(&w->watchers)) {
/* No watchers left for this path. Clean up. */
RB_REMOVE(watcher_root, uv__inotify_watchers(loop), w);
inotify_rm_watch(loop->inotify_fd, w->wd);
@ -2181,8 +2356,8 @@ static void uv__inotify_read(uv_loop_t* loop,
const struct inotify_event* e;
struct watcher_list* w;
uv_fs_event_t* h;
QUEUE queue;
QUEUE* q;
struct uv__queue queue;
struct uv__queue* q;
const char* path;
ssize_t size;
const char *p;
@ -2225,7 +2400,7 @@ static void uv__inotify_read(uv_loop_t* loop,
* What can go wrong?
* A callback could call uv_fs_event_stop()
* and the queue can change under our feet.
* So, we use QUEUE_MOVE() trick to safely iterate over the queue.
* So, we use uv__queue_move() trick to safely iterate over the queue.
* And we don't free the watcher_list until we're done iterating.
*
* First,
@ -2233,13 +2408,13 @@ static void uv__inotify_read(uv_loop_t* loop,
* not to free watcher_list.
*/
w->iterating = 1;
QUEUE_MOVE(&w->watchers, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
h = QUEUE_DATA(q, uv_fs_event_t, watchers);
uv__queue_move(&w->watchers, &queue);
while (!uv__queue_empty(&queue)) {
q = uv__queue_head(&queue);
h = uv__queue_data(q, uv_fs_event_t, watchers);
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&w->watchers, q);
uv__queue_remove(q);
uv__queue_insert_tail(&w->watchers, q);
h->cb(h, path, events, 0);
}
@ -2301,13 +2476,13 @@ int uv_fs_event_start(uv_fs_event_t* handle,
w->wd = wd;
w->path = memcpy(w + 1, path, len);
QUEUE_INIT(&w->watchers);
uv__queue_init(&w->watchers);
w->iterating = 0;
RB_INSERT(watcher_root, uv__inotify_watchers(loop), w);
no_insert:
uv__handle_start(handle);
QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
uv__queue_insert_tail(&w->watchers, &handle->watchers);
handle->path = w->path;
handle->cb = cb;
handle->wd = wd;
@ -2328,7 +2503,7 @@ int uv_fs_event_stop(uv_fs_event_t* handle) {
handle->wd = -1;
handle->path = NULL;
uv__handle_stop(handle);
QUEUE_REMOVE(&handle->watchers);
uv__queue_remove(&handle->watchers);
maybe_free_watcher_list(w, handle->loop);
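
For readers tracking the QUEUE → uv__queue migration in this file: a minimal standalone sketch of the move-then-drain pattern the comments above describe. The struct layout and helpers below are local re-implementations written for illustration (libuv's own definitions live in its private headers), assuming the usual circular doubly-linked sentinel design.

#include <stddef.h>
#include <stdio.h>

struct uv__queue {
  struct uv__queue* next;
  struct uv__queue* prev;
};

static void queue_init(struct uv__queue* q) { q->next = q; q->prev = q; }
static int queue_empty(const struct uv__queue* q) { return q->next == q; }

static void queue_insert_tail(struct uv__queue* h, struct uv__queue* q) {
  q->next = h;
  q->prev = h->prev;
  h->prev->next = q;
  h->prev = q;
}

static void queue_remove(struct uv__queue* q) {
  q->prev->next = q->next;
  q->next->prev = q->prev;
}

/* Detach every node from h onto n, leaving h empty. Callbacks that run while
 * draining n may then add to or remove from h without corrupting the walk. */
static void queue_move(struct uv__queue* h, struct uv__queue* n) {
  if (queue_empty(h)) {
    queue_init(n);
  } else {
    n->next = h->next;
    n->prev = h->prev;
    n->next->prev = n;
    n->prev->next = n;
    queue_init(h);
  }
}

#define queue_data(ptr, type, field) \
  ((type*) ((char*) (ptr) - offsetof(type, field)))

struct watcher { int id; struct uv__queue node; };

int main(void) {
  struct uv__queue handles, tmp;
  struct uv__queue* q;
  struct watcher w1 = {1, {0, 0}};
  struct watcher w2 = {2, {0, 0}};

  queue_init(&handles);
  queue_insert_tail(&handles, &w1.node);
  queue_insert_tail(&handles, &w2.node);

  queue_move(&handles, &tmp);          /* same trick as uv__queue_move above */
  while (!queue_empty(&tmp)) {
    q = tmp.next;
    queue_remove(q);
    queue_insert_tail(&handles, q);    /* re-home before invoking callbacks */
    printf("watcher %d\n", queue_data(q, struct watcher, node)->id);
  }
  return 0;
}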

View File

@ -32,7 +32,7 @@
int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) { \
if (uv__is_active(handle)) return 0; \
if (cb == NULL) return UV_EINVAL; \
QUEUE_INSERT_HEAD(&handle->loop->name##_handles, &handle->queue); \
uv__queue_insert_head(&handle->loop->name##_handles, &handle->queue); \
handle->name##_cb = cb; \
uv__handle_start(handle); \
return 0; \
@ -40,21 +40,21 @@
\
int uv_##name##_stop(uv_##name##_t* handle) { \
if (!uv__is_active(handle)) return 0; \
QUEUE_REMOVE(&handle->queue); \
uv__queue_remove(&handle->queue); \
uv__handle_stop(handle); \
return 0; \
} \
\
void uv__run_##name(uv_loop_t* loop) { \
uv_##name##_t* h; \
QUEUE queue; \
QUEUE* q; \
QUEUE_MOVE(&loop->name##_handles, &queue); \
while (!QUEUE_EMPTY(&queue)) { \
q = QUEUE_HEAD(&queue); \
h = QUEUE_DATA(q, uv_##name##_t, queue); \
QUEUE_REMOVE(q); \
QUEUE_INSERT_TAIL(&loop->name##_handles, q); \
struct uv__queue queue; \
struct uv__queue* q; \
uv__queue_move(&loop->name##_handles, &queue); \
while (!uv__queue_empty(&queue)) { \
q = uv__queue_head(&queue); \
h = uv__queue_data(q, uv_##name##_t, queue); \
uv__queue_remove(q); \
uv__queue_insert_tail(&loop->name##_handles, q); \
h->name##_cb(h); \
} \
} \
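
The template above stamps out uv_idle_*, uv_prepare_*, and uv_check_* in one go. A short caller, using only the documented public API those macros generate (a hedged sketch, error checks omitted):

#include <stdio.h>
#include <uv.h>

static void on_idle(uv_idle_t* handle) {
  static int count;
  printf("idle tick %d\n", ++count);
  if (count == 3)
    uv_idle_stop(handle);  /* the generated stop() unlinks the queue node */
}

int main(void) {
  uv_idle_t idler;
  uv_idle_init(uv_default_loop(), &idler);
  uv_idle_start(&idler, on_idle);  /* inserts into loop->idle_handles */
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}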

View File

@ -50,20 +50,20 @@ int uv_loop_init(uv_loop_t* loop) {
sizeof(lfields->loop_metrics.metrics));
heap_init((struct heap*) &loop->timer_heap);
QUEUE_INIT(&loop->wq);
QUEUE_INIT(&loop->idle_handles);
QUEUE_INIT(&loop->async_handles);
QUEUE_INIT(&loop->check_handles);
QUEUE_INIT(&loop->prepare_handles);
QUEUE_INIT(&loop->handle_queue);
uv__queue_init(&loop->wq);
uv__queue_init(&loop->idle_handles);
uv__queue_init(&loop->async_handles);
uv__queue_init(&loop->check_handles);
uv__queue_init(&loop->prepare_handles);
uv__queue_init(&loop->handle_queue);
loop->active_handles = 0;
loop->active_reqs.count = 0;
loop->nfds = 0;
loop->watchers = NULL;
loop->nwatchers = 0;
QUEUE_INIT(&loop->pending_queue);
QUEUE_INIT(&loop->watcher_queue);
uv__queue_init(&loop->pending_queue);
uv__queue_init(&loop->watcher_queue);
loop->closing_handles = NULL;
uv__update_time(loop);
@ -85,7 +85,7 @@ int uv_loop_init(uv_loop_t* loop) {
err = uv__process_init(loop);
if (err)
goto fail_signal_init;
QUEUE_INIT(&loop->process_handles);
uv__queue_init(&loop->process_handles);
err = uv_rwlock_init(&loop->cloexec_lock);
if (err)
@ -152,9 +152,9 @@ int uv_loop_fork(uv_loop_t* loop) {
if (w == NULL)
continue;
if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue)) {
if (w->pevents != 0 && uv__queue_empty(&w->watcher_queue)) {
w->events = 0; /* Force re-registration in uv__io_poll. */
QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
}
}
@ -180,7 +180,7 @@ void uv__loop_close(uv_loop_t* loop) {
}
uv_mutex_lock(&loop->wq_mutex);
assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!");
assert(uv__queue_empty(&loop->wq) && "thread pool work queue not empty!");
assert(!uv__has_active_reqs(loop));
uv_mutex_unlock(&loop->wq_mutex);
uv_mutex_destroy(&loop->wq_mutex);
@ -192,8 +192,8 @@ void uv__loop_close(uv_loop_t* loop) {
uv_rwlock_destroy(&loop->cloexec_lock);
#if 0
assert(QUEUE_EMPTY(&loop->pending_queue));
assert(QUEUE_EMPTY(&loop->watcher_queue));
assert(uv__queue_empty(&loop->pending_queue));
assert(uv__queue_empty(&loop->watcher_queue));
assert(loop->nfds == 0);
#endif

View File

@ -27,7 +27,7 @@
#include <termios.h>
#include <sys/msg.h>
static QUEUE global_epoll_queue;
static struct uv__queue global_epoll_queue;
static uv_mutex_t global_epoll_lock;
static uv_once_t once = UV_ONCE_INIT;
@ -178,18 +178,18 @@ static void after_fork(void) {
static void child_fork(void) {
QUEUE* q;
struct uv__queue* q;
uv_once_t child_once = UV_ONCE_INIT;
/* reset once */
memcpy(&once, &child_once, sizeof(child_once));
/* reset epoll list */
while (!QUEUE_EMPTY(&global_epoll_queue)) {
while (!uv__queue_empty(&global_epoll_queue)) {
uv__os390_epoll* lst;
q = QUEUE_HEAD(&global_epoll_queue);
QUEUE_REMOVE(q);
lst = QUEUE_DATA(q, uv__os390_epoll, member);
q = uv__queue_head(&global_epoll_queue);
uv__queue_remove(q);
lst = uv__queue_data(q, uv__os390_epoll, member);
uv__free(lst->items);
lst->items = NULL;
lst->size = 0;
@ -201,7 +201,7 @@ static void child_fork(void) {
static void epoll_init(void) {
QUEUE_INIT(&global_epoll_queue);
uv__queue_init(&global_epoll_queue);
if (uv_mutex_init(&global_epoll_lock))
abort();
@ -225,7 +225,7 @@ uv__os390_epoll* epoll_create1(int flags) {
lst->items[lst->size - 1].revents = 0;
uv_once(&once, epoll_init);
uv_mutex_lock(&global_epoll_lock);
QUEUE_INSERT_TAIL(&global_epoll_queue, &lst->member);
uv__queue_insert_tail(&global_epoll_queue, &lst->member);
uv_mutex_unlock(&global_epoll_lock);
}
@ -352,14 +352,14 @@ int epoll_wait(uv__os390_epoll* lst, struct epoll_event* events,
int epoll_file_close(int fd) {
QUEUE* q;
struct uv__queue* q;
uv_once(&once, epoll_init);
uv_mutex_lock(&global_epoll_lock);
QUEUE_FOREACH(q, &global_epoll_queue) {
uv__queue_foreach(q, &global_epoll_queue) {
uv__os390_epoll* lst;
lst = QUEUE_DATA(q, uv__os390_epoll, member);
lst = uv__queue_data(q, uv__os390_epoll, member);
if (fd < lst->size && lst->items != NULL && lst->items[fd].fd != -1)
lst->items[fd].fd = -1;
}
@ -371,7 +371,7 @@ int epoll_file_close(int fd) {
void epoll_queue_close(uv__os390_epoll* lst) {
/* Remove epoll instance from global queue */
uv_mutex_lock(&global_epoll_lock);
QUEUE_REMOVE(&lst->member);
uv__queue_remove(&lst->member);
uv_mutex_unlock(&global_epoll_lock);
/* Free resources */

View File

@ -45,7 +45,7 @@ struct epoll_event {
};
typedef struct {
QUEUE member;
struct uv__queue member;
struct pollfd* items;
unsigned long size;
int msg_queue;

View File

@ -815,7 +815,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
uv__os390_epoll* ep;
int have_signals;
int real_timeout;
QUEUE* q;
struct uv__queue* q;
uv__io_t* w;
uint64_t base;
int count;
@ -827,19 +827,19 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int reset_timeout;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
assert(uv__queue_empty(&loop->watcher_queue));
return;
}
lfields = uv__get_internal_fields(loop);
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
while (!uv__queue_empty(&loop->watcher_queue)) {
uv_stream_t* stream;
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
q = uv__queue_head(&loop->watcher_queue);
uv__queue_remove(q);
uv__queue_init(q);
w = uv__queue_data(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);

View File

@ -41,26 +41,60 @@ int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
return uv_pipe_bind2(handle, name, strlen(name), 0);
}
int uv_pipe_bind2(uv_pipe_t* handle,
const char* name,
size_t namelen,
unsigned int flags) {
struct sockaddr_un saddr;
const char* pipe_fname;
char* pipe_fname;
int sockfd;
int err;
pipe_fname = NULL;
if (flags & ~UV_PIPE_NO_TRUNCATE)
return UV_EINVAL;
if (name == NULL)
return UV_EINVAL;
if (namelen == 0)
return UV_EINVAL;
#ifndef __linux__
/* Abstract socket namespace only works on Linux. */
if (*name == '\0')
return UV_EINVAL;
#endif
if (flags & UV_PIPE_NO_TRUNCATE)
if (namelen > sizeof(saddr.sun_path))
return UV_EINVAL;
/* Truncate long paths. Documented behavior. */
if (namelen > sizeof(saddr.sun_path))
namelen = sizeof(saddr.sun_path);
/* Already bound? */
if (uv__stream_fd(handle) >= 0)
return UV_EINVAL;
if (uv__is_closing(handle)) {
return UV_EINVAL;
}
/* Make a copy of the file name, it outlives this function's scope. */
pipe_fname = uv__strdup(name);
if (pipe_fname == NULL)
return UV_ENOMEM;
/* We've got a copy, don't touch the original any more. */
name = NULL;
if (uv__is_closing(handle))
return UV_EINVAL;
/* Make a copy of the file path unless it is an abstract socket.
* We unlink the file later but abstract sockets disappear
* automatically since they're not real file system entities.
*/
if (*name != '\0') {
pipe_fname = uv__strdup(name);
if (pipe_fname == NULL)
return UV_ENOMEM;
}
err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
if (err < 0)
@ -68,7 +102,7 @@ int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
sockfd = err;
memset(&saddr, 0, sizeof saddr);
uv__strscpy(saddr.sun_path, pipe_fname, sizeof(saddr.sun_path));
memcpy(&saddr.sun_path, name, namelen);
saddr.sun_family = AF_UNIX;
if (bind(sockfd, (struct sockaddr*)&saddr, sizeof saddr)) {
@ -83,12 +117,12 @@ int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
/* Success. */
handle->flags |= UV_HANDLE_BOUND;
handle->pipe_fname = pipe_fname; /* Is a strdup'ed copy. */
handle->pipe_fname = pipe_fname; /* NULL or a strdup'ed copy. */
handle->io_watcher.fd = sockfd;
return 0;
err_socket:
uv__free((void*)pipe_fname);
uv__free(pipe_fname);
return err;
}
@ -176,11 +210,44 @@ void uv_pipe_connect(uv_connect_t* req,
uv_pipe_t* handle,
const char* name,
uv_connect_cb cb) {
uv_pipe_connect2(req, handle, name, strlen(name), 0, cb);
}
int uv_pipe_connect2(uv_connect_t* req,
uv_pipe_t* handle,
const char* name,
size_t namelen,
unsigned int flags,
uv_connect_cb cb) {
struct sockaddr_un saddr;
int new_sock;
int err;
int r;
if (flags & ~UV_PIPE_NO_TRUNCATE)
return UV_EINVAL;
if (name == NULL)
return UV_EINVAL;
if (namelen == 0)
return UV_EINVAL;
#ifndef __linux__
/* Abstract socket namespace only works on Linux. */
if (*name == '\0')
return UV_EINVAL;
#endif
if (flags & UV_PIPE_NO_TRUNCATE)
if (namelen > sizeof(saddr.sun_path))
return UV_EINVAL;
/* Truncate long paths. Documented behavior. */
if (namelen > sizeof(saddr.sun_path))
namelen = sizeof(saddr.sun_path);
new_sock = (uv__stream_fd(handle) == -1);
if (new_sock) {
@ -191,7 +258,7 @@ void uv_pipe_connect(uv_connect_t* req,
}
memset(&saddr, 0, sizeof saddr);
uv__strscpy(saddr.sun_path, name, sizeof(saddr.sun_path));
memcpy(&saddr.sun_path, name, namelen);
saddr.sun_family = AF_UNIX;
do {
@ -230,12 +297,13 @@ out:
uv__req_init(handle->loop, req, UV_CONNECT);
req->handle = (uv_stream_t*)handle;
req->cb = cb;
QUEUE_INIT(&req->queue);
uv__queue_init(&req->queue);
/* Force callback to run on next tick in case of error. */
if (err)
uv__io_feed(handle->loop, &handle->io_watcher);
return 0;
}
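
uv_pipe_bind2() and uv_pipe_connect2() are new entry points in this release. Below is a hedged sketch of how a caller might exercise them, using a Linux-only abstract-socket name (the leading NUL byte is why namelen must be explicit) together with the new UV_PIPE_NO_TRUNCATE flag; the socket name is made up for illustration and return codes are unchecked for brevity.

#include <uv.h>

static void on_connect(uv_connect_t* req, int status) {
  (void) req;
  (void) status;  /* 0 on success, a UV_* error code otherwise */
}

int main(void) {
  /* The leading '\0' selects the Linux abstract socket namespace, which is
   * why namelen must be passed explicitly: strlen() would stop at the NUL. */
  static const char name[] = "\0demo-abstract-socket";
  uv_loop_t* loop = uv_default_loop();
  uv_pipe_t server;
  uv_pipe_t client;
  uv_connect_t req;

  uv_pipe_init(loop, &server, 0);
  uv_pipe_bind2(&server, name, sizeof(name) - 1, UV_PIPE_NO_TRUNCATE);

  uv_pipe_init(loop, &client, 0);
  uv_pipe_connect2(&req, &client, name, sizeof(name) - 1,
                   UV_PIPE_NO_TRUNCATE, on_connect);

  return uv_run(loop, UV_RUN_DEFAULT);
}

A real server would also call uv_listen() on the bound pipe before a client connects; with UV_PIPE_NO_TRUNCATE, over-long names fail with UV_EINVAL instead of being silently truncated.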

View File

@ -137,7 +137,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
sigset_t set;
uint64_t time_base;
uint64_t time_diff;
QUEUE* q;
struct uv__queue* q;
uv__io_t* w;
size_t i;
unsigned int nevents;
@ -149,19 +149,19 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int reset_timeout;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
assert(uv__queue_empty(&loop->watcher_queue));
return;
}
lfields = uv__get_internal_fields(loop);
/* Take queued watchers and add their fds to our poll fds array. */
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
while (!uv__queue_empty(&loop->watcher_queue)) {
q = uv__queue_head(&loop->watcher_queue);
uv__queue_remove(q);
uv__queue_init(q);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
w = uv__queue_data(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);

View File

@ -108,17 +108,17 @@ void uv__wait_children(uv_loop_t* loop) {
int status;
int options;
pid_t pid;
QUEUE pending;
QUEUE* q;
QUEUE* h;
struct uv__queue pending;
struct uv__queue* q;
struct uv__queue* h;
QUEUE_INIT(&pending);
uv__queue_init(&pending);
h = &loop->process_handles;
q = QUEUE_HEAD(h);
q = uv__queue_head(h);
while (q != h) {
process = QUEUE_DATA(q, uv_process_t, queue);
q = QUEUE_NEXT(q);
process = uv__queue_data(q, uv_process_t, queue);
q = uv__queue_next(q);
#ifndef UV_USE_SIGCHLD
if ((process->flags & UV_HANDLE_REAP) == 0)
@ -149,18 +149,18 @@ void uv__wait_children(uv_loop_t* loop) {
assert(pid == process->pid);
process->status = status;
QUEUE_REMOVE(&process->queue);
QUEUE_INSERT_TAIL(&pending, &process->queue);
uv__queue_remove(&process->queue);
uv__queue_insert_tail(&pending, &process->queue);
}
h = &pending;
q = QUEUE_HEAD(h);
q = uv__queue_head(h);
while (q != h) {
process = QUEUE_DATA(q, uv_process_t, queue);
q = QUEUE_NEXT(q);
process = uv__queue_data(q, uv_process_t, queue);
q = uv__queue_next(q);
QUEUE_REMOVE(&process->queue);
QUEUE_INIT(&process->queue);
uv__queue_remove(&process->queue);
uv__queue_init(&process->queue);
uv__handle_stop(process);
if (process->exit_cb == NULL)
@ -176,13 +176,18 @@ void uv__wait_children(uv_loop_t* loop) {
process->exit_cb(process, exit_status, term_signal);
}
assert(QUEUE_EMPTY(&pending));
assert(uv__queue_empty(&pending));
}
/*
* Used for initializing stdio streams like options.stdin_stream. Returns
* zero on success. See also the cleanup section in uv_spawn().
*/
#if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH))
/* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be
* avoided. Since this isn't called on those targets, the function
* doesn't even need to be defined for them.
*/
static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) {
int mask;
int fd;
@ -269,11 +274,6 @@ static void uv__write_errno(int error_fd) {
}
#if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH))
/* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be
* avoided. Since this isn't called on those targets, the function
* doesn't even need to be defined for them.
*/
static void uv__process_child_init(const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2],
@ -405,7 +405,6 @@ static void uv__process_child_init(const uv_process_options_t* options,
uv__write_errno(error_fd);
}
#endif
#if defined(__APPLE__)
@ -952,6 +951,7 @@ static int uv__spawn_and_init_child(
return err;
}
#endif /* ISN'T TARGET_OS_TV || TARGET_OS_WATCH */
int uv_spawn(uv_loop_t* loop,
uv_process_t* process,
@ -978,7 +978,7 @@ int uv_spawn(uv_loop_t* loop,
UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS)));
uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS);
QUEUE_INIT(&process->queue);
uv__queue_init(&process->queue);
process->status = 0;
stdio_count = options->stdio_count;
@ -1041,7 +1041,7 @@ int uv_spawn(uv_loop_t* loop,
process->pid = pid;
process->exit_cb = options->exit_cb;
QUEUE_INSERT_TAIL(&loop->process_handles, &process->queue);
uv__queue_insert_tail(&loop->process_handles, &process->queue);
uv__handle_start(process);
}
@ -1103,10 +1103,10 @@ int uv_kill(int pid, int signum) {
void uv__process_close(uv_process_t* handle) {
QUEUE_REMOVE(&handle->queue);
uv__queue_remove(&handle->queue);
uv__handle_stop(handle);
#ifdef UV_USE_SIGCHLD
if (QUEUE_EMPTY(&handle->loop->process_handles))
if (uv__queue_empty(&handle->loop->process_handles))
uv_signal_stop(&handle->loop->child_watcher);
#endif
}
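
The queue bookkeeping above is driven by uv_spawn() and uv__process_close(). A minimal, hedged spawn-and-reap example against the documented public API (the /bin/true path is an illustrative assumption):

#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <uv.h>

static void on_exit_cb(uv_process_t* proc, int64_t status, int signum) {
  printf("child exited: status %" PRId64 ", signal %d\n", status, signum);
  uv_close((uv_handle_t*) proc, NULL);  /* ends in uv__process_close() above */
}

int main(void) {
  uv_process_t child;
  uv_process_options_t options;
  char* args[] = { "/bin/true", NULL };  /* illustrative target binary */

  memset(&options, 0, sizeof(options));
  options.file = args[0];
  options.args = args;
  options.exit_cb = on_exit_cb;

  if (uv_spawn(uv_default_loop(), &child, &options))
    return 1;
  return uv_run(uv_default_loop(), UV_RUN_DEFAULT);
}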

View File

@ -291,16 +291,16 @@ int uv__signal_loop_fork(uv_loop_t* loop) {
void uv__signal_loop_cleanup(uv_loop_t* loop) {
QUEUE* q;
struct uv__queue* q;
/* Stop all the signal watchers that are still attached to this loop. This
* ensures that the (shared) signal tree doesn't contain any invalid
* entries, and that signal handlers are removed when appropriate.
* It's safe to use QUEUE_FOREACH here because the handles and the handle
* It's safe to use uv__queue_foreach here because the handles and the handle
* queue are not modified by uv__signal_stop().
*/
QUEUE_FOREACH(q, &loop->handle_queue) {
uv_handle_t* handle = QUEUE_DATA(q, uv_handle_t, handle_queue);
uv__queue_foreach(q, &loop->handle_queue) {
uv_handle_t* handle = uv__queue_data(q, uv_handle_t, handle_queue);
if (handle->type == UV_SIGNAL)
uv__signal_stop((uv_signal_t*) handle);

View File

@ -94,8 +94,8 @@ void uv__stream_init(uv_loop_t* loop,
stream->accepted_fd = -1;
stream->queued_fds = NULL;
stream->delayed_error = 0;
QUEUE_INIT(&stream->write_queue);
QUEUE_INIT(&stream->write_completed_queue);
uv__queue_init(&stream->write_queue);
uv__queue_init(&stream->write_completed_queue);
stream->write_queue_size = 0;
if (loop->emfile_fd == -1) {
@ -439,15 +439,15 @@ int uv__stream_open(uv_stream_t* stream, int fd, int flags) {
void uv__stream_flush_write_queue(uv_stream_t* stream, int error) {
uv_write_t* req;
QUEUE* q;
while (!QUEUE_EMPTY(&stream->write_queue)) {
q = QUEUE_HEAD(&stream->write_queue);
QUEUE_REMOVE(q);
struct uv__queue* q;
while (!uv__queue_empty(&stream->write_queue)) {
q = uv__queue_head(&stream->write_queue);
uv__queue_remove(q);
req = QUEUE_DATA(q, uv_write_t, queue);
req = uv__queue_data(q, uv_write_t, queue);
req->error = error;
QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
uv__queue_insert_tail(&stream->write_completed_queue, &req->queue);
}
}
@ -627,7 +627,7 @@ static void uv__drain(uv_stream_t* stream) {
uv_shutdown_t* req;
int err;
assert(QUEUE_EMPTY(&stream->write_queue));
assert(uv__queue_empty(&stream->write_queue));
if (!(stream->flags & UV_HANDLE_CLOSING)) {
uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
uv__stream_osx_interrupt_select(stream);
@ -714,7 +714,7 @@ static void uv__write_req_finish(uv_write_t* req) {
uv_stream_t* stream = req->handle;
/* Pop the req off tcp->write_queue. */
QUEUE_REMOVE(&req->queue);
uv__queue_remove(&req->queue);
/* Only free when there was no error. On error, we touch up write_queue_size
* right before making the callback. The reason we don't do that right away
@ -731,7 +731,7 @@ static void uv__write_req_finish(uv_write_t* req) {
/* Add it to the write_completed_queue where it will have its
* callback called in the near future.
*/
QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
uv__queue_insert_tail(&stream->write_completed_queue, &req->queue);
uv__io_feed(stream->loop, &stream->io_watcher);
}
@ -837,7 +837,7 @@ static int uv__try_write(uv_stream_t* stream,
}
static void uv__write(uv_stream_t* stream) {
QUEUE* q;
struct uv__queue* q;
uv_write_t* req;
ssize_t n;
int count;
@ -851,11 +851,11 @@ static void uv__write(uv_stream_t* stream) {
count = 32;
for (;;) {
if (QUEUE_EMPTY(&stream->write_queue))
if (uv__queue_empty(&stream->write_queue))
return;
q = QUEUE_HEAD(&stream->write_queue);
req = QUEUE_DATA(q, uv_write_t, queue);
q = uv__queue_head(&stream->write_queue);
req = uv__queue_data(q, uv_write_t, queue);
assert(req->handle == stream);
n = uv__try_write(stream,
@ -899,19 +899,19 @@ error:
static void uv__write_callbacks(uv_stream_t* stream) {
uv_write_t* req;
QUEUE* q;
QUEUE pq;
struct uv__queue* q;
struct uv__queue pq;
if (QUEUE_EMPTY(&stream->write_completed_queue))
if (uv__queue_empty(&stream->write_completed_queue))
return;
QUEUE_MOVE(&stream->write_completed_queue, &pq);
uv__queue_move(&stream->write_completed_queue, &pq);
while (!QUEUE_EMPTY(&pq)) {
while (!uv__queue_empty(&pq)) {
/* Pop a req off write_completed_queue. */
q = QUEUE_HEAD(&pq);
req = QUEUE_DATA(q, uv_write_t, queue);
QUEUE_REMOVE(q);
q = uv__queue_head(&pq);
req = uv__queue_data(q, uv_write_t, queue);
uv__queue_remove(q);
uv__req_unregister(stream->loop, req);
if (req->bufs != NULL) {
@ -1174,7 +1174,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
stream->shutdown_req = req;
stream->flags &= ~UV_HANDLE_WRITABLE;
if (QUEUE_EMPTY(&stream->write_queue))
if (uv__queue_empty(&stream->write_queue))
uv__io_feed(stream->loop, &stream->io_watcher);
return 0;
@ -1227,7 +1227,7 @@ static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
uv__write_callbacks(stream);
/* Write queue drained. */
if (QUEUE_EMPTY(&stream->write_queue))
if (uv__queue_empty(&stream->write_queue))
uv__drain(stream);
}
}
@ -1270,7 +1270,7 @@ static void uv__stream_connect(uv_stream_t* stream) {
stream->connect_req = NULL;
uv__req_unregister(stream->loop, req);
if (error < 0 || QUEUE_EMPTY(&stream->write_queue)) {
if (error < 0 || uv__queue_empty(&stream->write_queue)) {
uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
}
@ -1352,7 +1352,7 @@ int uv_write2(uv_write_t* req,
req->handle = stream;
req->error = 0;
req->send_handle = send_handle;
QUEUE_INIT(&req->queue);
uv__queue_init(&req->queue);
req->bufs = req->bufsml;
if (nbufs > ARRAY_SIZE(req->bufsml))
@ -1367,7 +1367,7 @@ int uv_write2(uv_write_t* req,
stream->write_queue_size += uv__count_bufs(bufs, nbufs);
/* Append the request to write_queue. */
QUEUE_INSERT_TAIL(&stream->write_queue, &req->queue);
uv__queue_insert_tail(&stream->write_queue, &req->queue);
/* If the queue was empty when this function began, we should attempt to
* do the write immediately. Otherwise start the write_watcher and wait

View File

@ -148,7 +148,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
struct port_event events[1024];
struct port_event* pe;
struct timespec spec;
QUEUE* q;
struct uv__queue* q;
uv__io_t* w;
sigset_t* pset;
sigset_t set;
@ -166,16 +166,16 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int reset_timeout;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
assert(uv__queue_empty(&loop->watcher_queue));
return;
}
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
while (!uv__queue_empty(&loop->watcher_queue)) {
q = uv__queue_head(&loop->watcher_queue);
uv__queue_remove(q);
uv__queue_init(q);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
w = uv__queue_data(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
if (port_associate(loop->backend_fd,
@ -316,8 +316,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
continue; /* Disabled by callback. */
/* Events Ports operates in oneshot mode, rearm timer on next run. */
if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue))
QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
if (w->pevents != 0 && uv__queue_empty(&w->watcher_queue))
uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue);
}
uv__metrics_inc_events(loop, nevents);

View File

@ -124,7 +124,7 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) {
if (domain != AF_UNSPEC) {
err = new_socket(tcp, domain, 0);
if (err) {
QUEUE_REMOVE(&tcp->handle_queue);
uv__queue_remove(&tcp->handle_queue);
if (tcp->io_watcher.fd != -1)
uv__close(tcp->io_watcher.fd);
tcp->io_watcher.fd = -1;
@ -252,7 +252,7 @@ out:
uv__req_init(handle->loop, req, UV_CONNECT);
req->cb = cb;
req->handle = (uv_stream_t*) handle;
QUEUE_INIT(&req->queue);
uv__queue_init(&req->queue);
handle->connect_req = req;
uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);

View File

@ -222,7 +222,7 @@ skip:
int rc = r;
if (newfd != -1)
uv__close(newfd);
QUEUE_REMOVE(&tty->handle_queue);
uv__queue_remove(&tty->handle_queue);
do
r = fcntl(fd, F_SETFL, saved_flags);
while (r == -1 && errno == EINTR);

View File

@ -62,18 +62,18 @@ void uv__udp_close(uv_udp_t* handle) {
void uv__udp_finish_close(uv_udp_t* handle) {
uv_udp_send_t* req;
QUEUE* q;
struct uv__queue* q;
assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
assert(handle->io_watcher.fd == -1);
while (!QUEUE_EMPTY(&handle->write_queue)) {
q = QUEUE_HEAD(&handle->write_queue);
QUEUE_REMOVE(q);
while (!uv__queue_empty(&handle->write_queue)) {
q = uv__queue_head(&handle->write_queue);
uv__queue_remove(q);
req = QUEUE_DATA(q, uv_udp_send_t, queue);
req = uv__queue_data(q, uv_udp_send_t, queue);
req->status = UV_ECANCELED;
QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
}
uv__udp_run_completed(handle);
@ -90,16 +90,16 @@ void uv__udp_finish_close(uv_udp_t* handle) {
static void uv__udp_run_completed(uv_udp_t* handle) {
uv_udp_send_t* req;
QUEUE* q;
struct uv__queue* q;
assert(!(handle->flags & UV_HANDLE_UDP_PROCESSING));
handle->flags |= UV_HANDLE_UDP_PROCESSING;
while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
q = QUEUE_HEAD(&handle->write_completed_queue);
QUEUE_REMOVE(q);
while (!uv__queue_empty(&handle->write_completed_queue)) {
q = uv__queue_head(&handle->write_completed_queue);
uv__queue_remove(q);
req = QUEUE_DATA(q, uv_udp_send_t, queue);
req = uv__queue_data(q, uv_udp_send_t, queue);
uv__req_unregister(handle->loop, req);
handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
@ -121,7 +121,7 @@ static void uv__udp_run_completed(uv_udp_t* handle) {
req->send_cb(req, req->status);
}
if (QUEUE_EMPTY(&handle->write_queue)) {
if (uv__queue_empty(&handle->write_queue)) {
/* Pending queue and completion queue empty, stop watcher. */
uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT);
if (!uv__io_active(&handle->io_watcher, POLLIN))
@ -280,20 +280,20 @@ static void uv__udp_sendmsg(uv_udp_t* handle) {
uv_udp_send_t* req;
struct mmsghdr h[20];
struct mmsghdr* p;
QUEUE* q;
struct uv__queue* q;
ssize_t npkts;
size_t pkts;
size_t i;
if (QUEUE_EMPTY(&handle->write_queue))
if (uv__queue_empty(&handle->write_queue))
return;
write_queue_drain:
for (pkts = 0, q = QUEUE_HEAD(&handle->write_queue);
for (pkts = 0, q = uv__queue_head(&handle->write_queue);
pkts < ARRAY_SIZE(h) && q != &handle->write_queue;
++pkts, q = QUEUE_HEAD(q)) {
++pkts, q = uv__queue_head(q)) {
assert(q != NULL);
req = QUEUE_DATA(q, uv_udp_send_t, queue);
req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);
p = &h[pkts];
@ -325,16 +325,16 @@ write_queue_drain:
if (npkts < 1) {
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
return;
for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
for (i = 0, q = uv__queue_head(&handle->write_queue);
i < pkts && q != &handle->write_queue;
++i, q = QUEUE_HEAD(&handle->write_queue)) {
++i, q = uv__queue_head(&handle->write_queue)) {
assert(q != NULL);
req = QUEUE_DATA(q, uv_udp_send_t, queue);
req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);
req->status = UV__ERR(errno);
QUEUE_REMOVE(&req->queue);
QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
uv__queue_remove(&req->queue);
uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
}
uv__io_feed(handle->loop, &handle->io_watcher);
return;
@ -343,11 +343,11 @@ write_queue_drain:
/* Safety: npkts known to be >0 below. Hence cast from ssize_t
* to size_t safe.
*/
for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
for (i = 0, q = uv__queue_head(&handle->write_queue);
i < (size_t)npkts && q != &handle->write_queue;
++i, q = QUEUE_HEAD(&handle->write_queue)) {
++i, q = uv__queue_head(&handle->write_queue)) {
assert(q != NULL);
req = QUEUE_DATA(q, uv_udp_send_t, queue);
req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);
req->status = req->bufs[0].len;
@ -357,25 +357,25 @@ write_queue_drain:
* why we don't handle partial writes. Just pop the request
* off the write queue and onto the completed queue, done.
*/
QUEUE_REMOVE(&req->queue);
QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
uv__queue_remove(&req->queue);
uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
}
/* couldn't batch everything, continue sending (jump to avoid stack growth) */
if (!QUEUE_EMPTY(&handle->write_queue))
if (!uv__queue_empty(&handle->write_queue))
goto write_queue_drain;
uv__io_feed(handle->loop, &handle->io_watcher);
#else /* __linux__ || __FreeBSD__ */
uv_udp_send_t* req;
struct msghdr h;
QUEUE* q;
struct uv__queue* q;
ssize_t size;
while (!QUEUE_EMPTY(&handle->write_queue)) {
q = QUEUE_HEAD(&handle->write_queue);
while (!uv__queue_empty(&handle->write_queue)) {
q = uv__queue_head(&handle->write_queue);
assert(q != NULL);
req = QUEUE_DATA(q, uv_udp_send_t, queue);
req = uv__queue_data(q, uv_udp_send_t, queue);
assert(req != NULL);
memset(&h, 0, sizeof h);
@ -414,8 +414,8 @@ write_queue_drain:
* why we don't handle partial writes. Just pop the request
* off the write queue and onto the completed queue, done.
*/
QUEUE_REMOVE(&req->queue);
QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
uv__queue_remove(&req->queue);
uv__queue_insert_tail(&handle->write_completed_queue, &req->queue);
uv__io_feed(handle->loop, &handle->io_watcher);
}
#endif /* __linux__ || __FreeBSD__ */
@ -729,7 +729,7 @@ int uv__udp_send(uv_udp_send_t* req,
memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
handle->send_queue_count++;
QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
uv__queue_insert_tail(&handle->write_queue, &req->queue);
uv__handle_start(handle);
if (empty_queue && !(handle->flags & UV_HANDLE_UDP_PROCESSING)) {
@ -739,7 +739,7 @@ int uv__udp_send(uv_udp_send_t* req,
* away. In such cases the `io_watcher` has to be queued for asynchronous
* write.
*/
if (!QUEUE_EMPTY(&handle->write_queue))
if (!uv__queue_empty(&handle->write_queue))
uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
} else {
uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
@ -1007,8 +1007,8 @@ int uv__udp_init_ex(uv_loop_t* loop,
handle->send_queue_size = 0;
handle->send_queue_count = 0;
uv__io_init(&handle->io_watcher, uv__udp_io, fd);
QUEUE_INIT(&handle->write_queue);
QUEUE_INIT(&handle->write_completed_queue);
uv__queue_init(&handle->write_queue);
uv__queue_init(&handle->write_completed_queue);
return 0;
}

View File

@ -533,17 +533,17 @@ int uv_udp_recv_stop(uv_udp_t* handle) {
void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
QUEUE queue;
QUEUE* q;
struct uv__queue queue;
struct uv__queue* q;
uv_handle_t* h;
QUEUE_MOVE(&loop->handle_queue, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
h = QUEUE_DATA(q, uv_handle_t, handle_queue);
uv__queue_move(&loop->handle_queue, &queue);
while (!uv__queue_empty(&queue)) {
q = uv__queue_head(&queue);
h = uv__queue_data(q, uv_handle_t, handle_queue);
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&loop->handle_queue, q);
uv__queue_remove(q);
uv__queue_insert_tail(&loop->handle_queue, q);
if (h->flags & UV_HANDLE_INTERNAL) continue;
walk_cb(h, arg);
@ -553,14 +553,14 @@ void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
static void uv__print_handles(uv_loop_t* loop, int only_active, FILE* stream) {
const char* type;
QUEUE* q;
struct uv__queue* q;
uv_handle_t* h;
if (loop == NULL)
loop = uv_default_loop();
QUEUE_FOREACH(q, &loop->handle_queue) {
h = QUEUE_DATA(q, uv_handle_t, handle_queue);
uv__queue_foreach(q, &loop->handle_queue) {
h = uv__queue_data(q, uv_handle_t, handle_queue);
if (only_active && !uv__is_active(h))
continue;
@ -846,7 +846,7 @@ uv_loop_t* uv_loop_new(void) {
int uv_loop_close(uv_loop_t* loop) {
QUEUE* q;
struct uv__queue* q;
uv_handle_t* h;
#ifndef NDEBUG
void* saved_data;
@ -855,8 +855,8 @@ int uv_loop_close(uv_loop_t* loop) {
if (uv__has_active_reqs(loop))
return UV_EBUSY;
QUEUE_FOREACH(q, &loop->handle_queue) {
h = QUEUE_DATA(q, uv_handle_t, handle_queue);
uv__queue_foreach(q, &loop->handle_queue) {
h = uv__queue_data(q, uv_handle_t, handle_queue);
if (!(h->flags & UV_HANDLE_INTERNAL))
return UV_EBUSY;
}

View File

@ -323,7 +323,7 @@ void uv__threadpool_cleanup(void);
(h)->loop = (loop_); \
(h)->type = (type_); \
(h)->flags = UV_HANDLE_REF; /* Ref the loop when active. */ \
QUEUE_INSERT_TAIL(&(loop_)->handle_queue, &(h)->handle_queue); \
uv__queue_insert_tail(&(loop_)->handle_queue, &(h)->handle_queue); \
uv__handle_platform_init(h); \
} \
while (0)
@ -415,6 +415,7 @@ struct uv__iou {
size_t sqelen;
int ringfd;
uint32_t in_flight;
uint32_t flags;
};
#endif /* __linux__ */

View File

@ -255,8 +255,8 @@ int uv_loop_init(uv_loop_t* loop) {
loop->time = 0;
uv_update_time(loop);
QUEUE_INIT(&loop->wq);
QUEUE_INIT(&loop->handle_queue);
uv__queue_init(&loop->wq);
uv__queue_init(&loop->handle_queue);
loop->active_reqs.count = 0;
loop->active_handles = 0;
@ -358,7 +358,7 @@ void uv__loop_close(uv_loop_t* loop) {
}
uv_mutex_lock(&loop->wq_mutex);
assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!");
assert(uv__queue_empty(&loop->wq) && "thread pool work queue not empty!");
assert(!uv__has_active_reqs(loop));
uv_mutex_unlock(&loop->wq_mutex);
uv_mutex_destroy(&loop->wq_mutex);
@ -629,9 +629,8 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
* while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed
* once, which should be done after polling in order to maintain proper
* execution order of the conceptual event loop. */
if (mode == UV_RUN_DEFAULT) {
if (r)
uv_update_time(loop);
if (mode == UV_RUN_DEFAULT && r != 0 && loop->stop_flag == 0) {
uv_update_time(loop);
uv__run_timers(loop);
}
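
The rewritten condition makes uv_run() skip the final timer pass when the loop was stopped or nothing keeps it referenced. A hedged sketch of the behavior the new loop_stop_before_run test (declared near the end of this diff) exercises:

#include <uv.h>

static void never_fires(uv_timer_t* handle) {
  (void) handle;  /* with the fix, a stopped loop skips the final timer pass */
}

int main(void) {
  uv_loop_t loop;
  uv_timer_t timer;

  uv_loop_init(&loop);
  uv_timer_init(&loop, &timer);
  uv_timer_start(&timer, never_fires, 0, 0);  /* due immediately, no repeat */
  uv_stop(&loop);                 /* sets stop_flag before uv_run */
  uv_run(&loop, UV_RUN_DEFAULT);  /* returns without running the timer */
  return 0;
}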

View File

@ -144,26 +144,97 @@ void uv__fs_init(void) {
}
static int32_t fs__decode_wtf8_char(const char** input) {
uint32_t code_point;
uint8_t b1;
uint8_t b2;
uint8_t b3;
uint8_t b4;
b1 = **input;
if (b1 <= 0x7F)
return b1; /* ASCII code point */
if (b1 < 0xC2)
return -1; /* invalid: continuation byte */
code_point = b1;
b2 = *++*input;
if ((b2 & 0xC0) != 0x80)
return -1; /* invalid: not a continuation byte */
code_point = (code_point << 6) | (b2 & 0x3F);
if (b1 <= 0xDF)
return 0x7FF & code_point; /* two-byte character */
b3 = *++*input;
if ((b3 & 0xC0) != 0x80)
return -1; /* invalid: not a continuation byte */
code_point = (code_point << 6) | (b3 & 0x3F);
if (b1 <= 0xEF)
return 0xFFFF & code_point; /* three-byte character */
b4 = *++*input;
if ((b4 & 0xC0) != 0x80)
return -1; /* invalid: not a continuation byte */
code_point = (code_point << 6) | (b4 & 0x3F);
if (b1 <= 0xF4) {
  code_point &= 0x1FFFFF;
  if (code_point <= 0x10FFFF)
    return code_point; /* four-byte character */
}
/* invalid: code point too large */
return -1;
}
static ssize_t fs__get_length_wtf8(const char* source_ptr) {
size_t w_target_len = 0;
int32_t code_point;
do {
code_point = fs__decode_wtf8_char(&source_ptr);
if (code_point < 0)
return -1;
if (code_point > 0xFFFF)
w_target_len++;
w_target_len++;
} while (*source_ptr++);
return w_target_len;
}
static void fs__wtf8_to_wide(const char* source_ptr, WCHAR* w_target) {
int32_t code_point;
do {
code_point = fs__decode_wtf8_char(&source_ptr);
/* fs__get_length_wtf8 should have been called and checked first. */
assert(code_point >= 0);
if (code_point >= 0x10000) {
  assert(code_point <= 0x10FFFF);
*w_target++ = (((code_point - 0x10000) >> 10) + 0xD800);
*w_target++ = ((code_point - 0x10000) & 0x3FF) + 0xDC00;
} else {
*w_target++ = code_point;
}
} while (*source_ptr++);
}
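
As a worked example of the encoding these helpers implement: WTF-8 is UTF-8 extended so unpaired UTF-16 surrogates survive a round trip. The lone surrogate U+D801 (the same one the fs_wtf test further below puts in a filename) encodes as the three bytes ED A0 81, while the valid pair D801 DC37 combines to U+10437 and encodes as the four bytes F0 90 90 B7. A self-contained check, mirroring the branch structure of fs__wide_to_wtf8 (illustrative code, not libuv's):

#include <assert.h>
#include <stdio.h>

/* Encode one code point (possibly a lone surrogate) as WTF-8 bytes. */
static int wtf8_encode(unsigned code_point, unsigned char out[4]) {
  if (code_point < 0x80) {
    out[0] = code_point;
    return 1;
  }
  if (code_point < 0x800) {
    out[0] = 0xC0 | (code_point >> 6);
    out[1] = 0x80 | (code_point & 0x3F);
    return 2;
  }
  if (code_point < 0x10000) {  /* includes surrogates D800-DFFF */
    out[0] = 0xE0 | (code_point >> 12);
    out[1] = 0x80 | ((code_point >> 6) & 0x3F);
    out[2] = 0x80 | (code_point & 0x3F);
    return 3;
  }
  out[0] = 0xF0 | (code_point >> 18);
  out[1] = 0x80 | ((code_point >> 12) & 0x3F);
  out[2] = 0x80 | ((code_point >> 6) & 0x3F);
  out[3] = 0x80 | (code_point & 0x3F);
  return 4;
}

int main(void) {
  unsigned char buf[4];

  assert(wtf8_encode(0xD801, buf) == 3);   /* lone surrogate, as in fs_wtf */
  assert(buf[0] == 0xED && buf[1] == 0xA0 && buf[2] == 0x81);

  assert(wtf8_encode(0x10437, buf) == 4);  /* = surrogate pair D801 DC37 */
  assert(buf[0] == 0xF0 && buf[1] == 0x90 && buf[2] == 0x90 && buf[3] == 0xB7);

  puts("wtf-8 examples ok");
  return 0;
}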
INLINE static int fs__capture_path(uv_fs_t* req, const char* path,
const char* new_path, const int copy_path) {
char* buf;
char* pos;
ssize_t buf_sz = 0, path_len = 0, pathw_len = 0, new_pathw_len = 0;
WCHAR* buf;
WCHAR* pos;
size_t buf_sz = 0;
size_t path_len = 0;
ssize_t pathw_len = 0;
ssize_t new_pathw_len = 0;
/* new_path can only be set if path is also set. */
assert(new_path == NULL || path != NULL);
if (path != NULL) {
pathw_len = MultiByteToWideChar(CP_UTF8,
0,
path,
-1,
NULL,
0);
if (pathw_len == 0) {
return GetLastError();
}
pathw_len = fs__get_length_wtf8(path);
if (pathw_len < 0)
return ERROR_INVALID_NAME;
buf_sz += pathw_len * sizeof(WCHAR);
}
@ -173,16 +244,9 @@ INLINE static int fs__capture_path(uv_fs_t* req, const char* path,
}
if (new_path != NULL) {
new_pathw_len = MultiByteToWideChar(CP_UTF8,
0,
new_path,
-1,
NULL,
0);
if (new_pathw_len == 0) {
return GetLastError();
}
new_pathw_len = fs__get_length_wtf8(new_path);
if (new_pathw_len < 0)
return ERROR_INVALID_NAME;
buf_sz += new_pathw_len * sizeof(WCHAR);
}
@ -194,7 +258,7 @@ INLINE static int fs__capture_path(uv_fs_t* req, const char* path,
return 0;
}
buf = (char*) uv__malloc(buf_sz);
buf = uv__malloc(buf_sz);
if (buf == NULL) {
return ERROR_OUTOFMEMORY;
}
@ -202,29 +266,17 @@ INLINE static int fs__capture_path(uv_fs_t* req, const char* path,
pos = buf;
if (path != NULL) {
DWORD r = MultiByteToWideChar(CP_UTF8,
0,
path,
-1,
(WCHAR*) pos,
pathw_len);
assert(r == (DWORD) pathw_len);
req->file.pathw = (WCHAR*) pos;
pos += r * sizeof(WCHAR);
fs__wtf8_to_wide(path, pos);
req->file.pathw = pos;
pos += pathw_len;
} else {
req->file.pathw = NULL;
}
if (new_path != NULL) {
DWORD r = MultiByteToWideChar(CP_UTF8,
0,
new_path,
-1,
(WCHAR*) pos,
new_pathw_len);
assert(r == (DWORD) new_pathw_len);
req->fs.info.new_pathw = (WCHAR*) pos;
pos += r * sizeof(WCHAR);
fs__wtf8_to_wide(new_path, pos);
req->fs.info.new_pathw = pos;
pos += new_pathw_len;
} else {
req->fs.info.new_pathw = NULL;
}
@ -232,8 +284,8 @@ INLINE static int fs__capture_path(uv_fs_t* req, const char* path,
req->path = path;
if (path != NULL && copy_path) {
memcpy(pos, path, path_len);
assert(path_len == buf_sz - (pos - buf));
req->path = pos;
assert(path_len == buf_sz - (pos - buf) * sizeof(WCHAR));
req->path = (char*) pos;
}
req->flags |= UV_FS_FREE_PATHS;
@ -259,57 +311,115 @@ INLINE static void uv__fs_req_init(uv_loop_t* loop, uv_fs_t* req,
}
static int fs__wide_to_utf8(WCHAR* w_source_ptr,
DWORD w_source_len,
char** target_ptr,
uint64_t* target_len_ptr) {
int r;
int target_len;
static int32_t fs__get_surrogate_value(const WCHAR* w_source_ptr,
size_t w_source_len) {
WCHAR u;
WCHAR next;
u = w_source_ptr[0];
if (u >= 0xD800 && u <= 0xDBFF && w_source_len > 1) {
next = w_source_ptr[1];
if (next >= 0xDC00 && next <= 0xDFFF)
return 0x10000 + ((u - 0xD800) << 10) + (next - 0xDC00);
}
return u;
}
static size_t fs__get_length_wide(const WCHAR* w_source_ptr,
size_t w_source_len) {
size_t target_len;
int32_t code_point;
target_len = 0;
for (; w_source_len; w_source_len--, w_source_ptr++) {
code_point = fs__get_surrogate_value(w_source_ptr, w_source_len);
/* Can be invalid UTF-8 but must be valid WTF-8. */
assert(code_point >= 0);
if (code_point < 0x80)
target_len += 1;
else if (code_point < 0x800)
target_len += 2;
else if (code_point < 0x10000)
target_len += 3;
else {
target_len += 4;
w_source_ptr++;
w_source_len--;
}
}
return target_len;
}
static int fs__wide_to_wtf8(WCHAR* w_source_ptr,
size_t w_source_len,
char** target_ptr,
size_t* target_len_ptr) {
size_t target_len;
char* target;
target_len = WideCharToMultiByte(CP_UTF8,
0,
w_source_ptr,
w_source_len,
NULL,
0,
NULL,
NULL);
int32_t code_point;
if (target_len == 0) {
return -1;
/* If *target_ptr is provided, then *target_len_ptr must be its length
 * (excluding space for the null terminator); otherwise we compute the
 * required length, store it in *target_len_ptr, and, when target_ptr is
 * non-NULL, return a new allocation in *target_ptr. */
if (target_ptr == NULL || *target_ptr == NULL) {
target_len = fs__get_length_wide(w_source_ptr, w_source_len);
if (target_len_ptr != NULL)
*target_len_ptr = target_len;
} else {
target_len = *target_len_ptr;
}
if (target_len_ptr != NULL) {
*target_len_ptr = target_len;
}
if (target_ptr == NULL) {
if (target_ptr == NULL)
return 0;
if (*target_ptr == NULL) {
target = uv__malloc(target_len + 1);
if (target == NULL) {
SetLastError(ERROR_OUTOFMEMORY);
return -1;
}
*target_ptr = target;
} else {
target = *target_ptr;
}
target = uv__malloc(target_len + 1);
if (target == NULL) {
SetLastError(ERROR_OUTOFMEMORY);
return -1;
}
for (; w_source_len; w_source_len--, w_source_ptr++) {
code_point = fs__get_surrogate_value(w_source_ptr, w_source_len);
/* Can be invalid UTF-8 but must be valid WTF-8. */
assert(code_point >= 0);
if (code_point < 0x80) {
*target++ = code_point;
} else if (code_point < 0x800) {
*target++ = 0xC0 | (code_point >> 6);
*target++ = 0x80 | (code_point & 0x3F);
} else if (code_point < 0x10000) {
*target++ = 0xE0 | (code_point >> 12);
*target++ = 0x80 | ((code_point >> 6) & 0x3F);
*target++ = 0x80 | (code_point & 0x3F);
} else {
*target++ = 0xF0 | (code_point >> 18);
*target++ = 0x80 | ((code_point >> 12) & 0x3F);
*target++ = 0x80 | ((code_point >> 6) & 0x3F);
*target++ = 0x80 | (code_point & 0x3F);
w_source_ptr++;
w_source_len--;
}
}
assert((size_t) (target - *target_ptr) == target_len);
*target++ = '\0';
r = WideCharToMultiByte(CP_UTF8,
0,
w_source_ptr,
w_source_len,
target,
target_len,
NULL,
NULL);
assert(r == target_len);
target[target_len] = '\0';
*target_ptr = target;
return 0;
}
INLINE static int fs__readlink_handle(HANDLE handle, char** target_ptr,
uint64_t* target_len_ptr) {
INLINE static int fs__readlink_handle(HANDLE handle,
char** target_ptr,
size_t* target_len_ptr) {
char buffer[MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
REPARSE_DATA_BUFFER* reparse_data = (REPARSE_DATA_BUFFER*) buffer;
WCHAR* w_target;
@ -439,7 +549,8 @@ INLINE static int fs__readlink_handle(HANDLE handle, char** target_ptr,
return -1;
}
return fs__wide_to_utf8(w_target, w_target_len, target_ptr, target_len_ptr);
assert(target_ptr == NULL || *target_ptr == NULL);
return fs__wide_to_wtf8(w_target, w_target_len, target_ptr, target_len_ptr);
}
@ -1429,7 +1540,8 @@ void fs__scandir(uv_fs_t* req) {
uv__dirent_t* dirent;
size_t wchar_len;
size_t utf8_len;
size_t wtf8_len;
char* wtf8;
/* Obtain a pointer to the current directory entry. */
position += next_entry_offset;
@ -1456,11 +1568,8 @@ void fs__scandir(uv_fs_t* req) {
info->FileName[1] == L'.')
continue;
/* Compute the space required to store the filename as UTF-8. */
utf8_len = WideCharToMultiByte(
CP_UTF8, 0, &info->FileName[0], wchar_len, NULL, 0, NULL, NULL);
if (utf8_len == 0)
goto win32_error;
/* Compute the space required to store the filename as WTF-8. */
wtf8_len = fs__get_length_wide(&info->FileName[0], wchar_len);
/* Resize the dirent array if needed. */
if (dirents_used >= dirents_size) {
@ -1480,26 +1589,17 @@ void fs__scandir(uv_fs_t* req) {
* includes room for the first character of the filename, but `utf8_len`
* doesn't count the NULL terminator at this point.
*/
dirent = uv__malloc(sizeof *dirent + utf8_len);
dirent = uv__malloc(sizeof *dirent + wtf8_len);
if (dirent == NULL)
goto out_of_memory_error;
dirents[dirents_used++] = dirent;
/* Convert file name to UTF-8. */
if (WideCharToMultiByte(CP_UTF8,
0,
&info->FileName[0],
wchar_len,
&dirent->d_name[0],
utf8_len,
NULL,
NULL) == 0)
wtf8 = &dirent->d_name[0];
if (fs__wide_to_wtf8(&info->FileName[0], wchar_len, &wtf8, &wtf8_len) == -1)
goto win32_error;
/* Add a null terminator to the filename. */
dirent->d_name[utf8_len] = '\0';
/* Fill out the type field. */
if (info->FileAttributes & FILE_ATTRIBUTE_DEVICE)
dirent->d_type = UV__DT_CHAR;
@ -1708,6 +1808,7 @@ void fs__closedir(uv_fs_t* req) {
INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf,
int do_lstat) {
size_t target_length = 0;
FILE_FS_DEVICE_INFORMATION device_info;
FILE_ALL_INFORMATION file_info;
FILE_FS_VOLUME_INFORMATION volume_info;
@ -1803,9 +1904,10 @@ INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf,
* to be treated as a regular file. The higher level lstat function will
* detect this failure and retry without do_lstat if appropriate.
*/
if (fs__readlink_handle(handle, NULL, &statbuf->st_size) != 0)
if (fs__readlink_handle(handle, NULL, &target_length) != 0)
return -1;
statbuf->st_mode |= S_IFLNK;
statbuf->st_size = target_length;
}
if (statbuf->st_mode == 0) {
@ -1961,7 +2063,7 @@ INLINE static int fs__fstat_handle(int fd, HANDLE handle, uv_stat_t* statbuf) {
statbuf->st_mode = file_type == UV_TTY ? _S_IFCHR : _S_IFIFO;
statbuf->st_nlink = 1;
statbuf->st_rdev = (file_type == UV_TTY ? FILE_DEVICE_CONSOLE : FILE_DEVICE_NAMED_PIPE) << 16;
statbuf->st_ino = (uint64_t) handle;
statbuf->st_ino = (uintptr_t) handle;
return 0;
/* If file type is unknown it is an error. */
@ -2661,6 +2763,7 @@ static void fs__readlink(uv_fs_t* req) {
return;
}
assert(req->ptr == NULL);
if (fs__readlink_handle(handle, (char**) &req->ptr, NULL) != 0) {
DWORD error = GetLastError();
SET_REQ_WIN32_ERROR(req, error);
@ -2720,7 +2823,8 @@ static ssize_t fs__realpath_handle(HANDLE handle, char** realpath_ptr) {
return -1;
}
r = fs__wide_to_utf8(w_realpath_ptr, w_realpath_len, realpath_ptr, NULL);
assert(*realpath_ptr == NULL);
r = fs__wide_to_wtf8(w_realpath_ptr, w_realpath_len, realpath_ptr, NULL);
uv__free(w_realpath_buf);
return r;
}
@ -2740,6 +2844,7 @@ static void fs__realpath(uv_fs_t* req) {
return;
}
assert(req->ptr == NULL);
if (fs__realpath_handle(handle, (char**) &req->ptr) == -1) {
CloseHandle(handle);
SET_REQ_WIN32_ERROR(req, GetLastError());

View File

@ -75,7 +75,7 @@
#define uv__handle_close(handle) \
do { \
QUEUE_REMOVE(&(handle)->handle_queue); \
uv__queue_remove(&(handle)->handle_queue); \
uv__active_handle_rm((uv_handle_t*) (handle)); \
\
(handle)->flags |= UV_HANDLE_CLOSED; \

View File

@ -168,18 +168,8 @@ void uv__process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
uv_req_t* req);
void uv__process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
uv_write_t* req);
/*
* uv__process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working
* TODO: find a way to remove it
*/
void uv__process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
uv_req_t* raw_req);
/*
* uv__process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working
* TODO: find a way to remove it
*/
void uv__process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
uv_connect_t* req);
#define uv__process_tty_accept_req(loop, handle, req) abort()
#define uv__process_tty_connect_req(loop, handle, req) abort()
void uv__process_tty_shutdown_req(uv_loop_t* loop,
uv_tty_t* stream,
uv_shutdown_t* req);

View File

@ -55,7 +55,7 @@ static const int pipe_prefix_len = sizeof(pipe_prefix) - 1;
typedef struct {
uv__ipc_socket_xfer_type_t xfer_type;
uv__ipc_socket_xfer_info_t xfer_info;
QUEUE member;
struct uv__queue member;
} uv__ipc_xfer_queue_item_t;
/* IPC frame header flags. */
@ -111,7 +111,7 @@ int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
handle->name = NULL;
handle->pipe.conn.ipc_remote_pid = 0;
handle->pipe.conn.ipc_data_frame.payload_remaining = 0;
QUEUE_INIT(&handle->pipe.conn.ipc_xfer_queue);
uv__queue_init(&handle->pipe.conn.ipc_xfer_queue);
handle->pipe.conn.ipc_xfer_queue_length = 0;
handle->ipc = ipc;
handle->pipe.conn.non_overlapped_writes_tail = NULL;
@ -637,13 +637,13 @@ void uv__pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
if (handle->flags & UV_HANDLE_CONNECTION) {
/* Free pending sockets */
while (!QUEUE_EMPTY(&handle->pipe.conn.ipc_xfer_queue)) {
QUEUE* q;
while (!uv__queue_empty(&handle->pipe.conn.ipc_xfer_queue)) {
struct uv__queue* q;
SOCKET socket;
q = QUEUE_HEAD(&handle->pipe.conn.ipc_xfer_queue);
QUEUE_REMOVE(q);
xfer_queue_item = QUEUE_DATA(q, uv__ipc_xfer_queue_item_t, member);
q = uv__queue_head(&handle->pipe.conn.ipc_xfer_queue);
uv__queue_remove(q);
xfer_queue_item = uv__queue_data(q, uv__ipc_xfer_queue_item_t, member);
/* Materialize socket and close it */
socket = WSASocketW(FROM_PROTOCOL_INFO,
@ -694,20 +694,48 @@ void uv_pipe_pending_instances(uv_pipe_t* handle, int count) {
/* Creates a pipe server. */
int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
return uv_pipe_bind2(handle, name, strlen(name), 0);
}
int uv_pipe_bind2(uv_pipe_t* handle,
const char* name,
size_t namelen,
unsigned int flags) {
uv_loop_t* loop = handle->loop;
int i, err, nameSize;
uv_pipe_accept_t* req;
if (flags & ~UV_PIPE_NO_TRUNCATE) {
return UV_EINVAL;
}
if (name == NULL) {
return UV_EINVAL;
}
if (namelen == 0) {
return UV_EINVAL;
}
if (*name == '\0') {
return UV_EINVAL;
}
if (flags & UV_PIPE_NO_TRUNCATE) {
if (namelen > 256) {
return UV_EINVAL;
}
}
if (handle->flags & UV_HANDLE_BOUND) {
return UV_EINVAL;
}
if (!name) {
return UV_EINVAL;
}
if (uv__is_closing(handle)) {
return UV_EINVAL;
}
if (!(handle->flags & UV_HANDLE_PIPESERVER)) {
handle->pipe.serv.pending_instances = default_pending_pipe_instances;
}
@ -818,13 +846,47 @@ static DWORD WINAPI pipe_connect_thread_proc(void* parameter) {
}
void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
const char* name, uv_connect_cb cb) {
void uv_pipe_connect(uv_connect_t* req,
uv_pipe_t* handle,
const char* name,
uv_connect_cb cb) {
uv_pipe_connect2(req, handle, name, strlen(name), 0, cb);
}
int uv_pipe_connect2(uv_connect_t* req,
uv_pipe_t* handle,
const char* name,
size_t namelen,
unsigned int flags,
uv_connect_cb cb) {
uv_loop_t* loop = handle->loop;
int err, nameSize;
HANDLE pipeHandle = INVALID_HANDLE_VALUE;
DWORD duplex_flags;
if (flags & ~UV_PIPE_NO_TRUNCATE) {
return UV_EINVAL;
}
if (name == NULL) {
return UV_EINVAL;
}
if (namelen == 0) {
return UV_EINVAL;
}
if (*name == '\0') {
return UV_EINVAL;
}
if (flags & UV_PIPE_NO_TRUNCATE) {
if (namelen > 256) {
return UV_EINVAL;
}
}
UV_REQ_INIT(req, UV_CONNECT);
req->handle = (uv_stream_t*) handle;
req->cb = cb;
@ -882,7 +944,7 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
REGISTER_HANDLE_REQ(loop, handle, req);
handle->reqs_pending++;
return;
return 0;
}
err = GetLastError();
@ -895,7 +957,7 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
uv__insert_pending_req(loop, (uv_req_t*) req);
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
return;
return 0;
error:
if (handle->name) {
@ -911,7 +973,7 @@ error:
uv__insert_pending_req(loop, (uv_req_t*) req);
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
return;
return 0;
}
@ -1062,20 +1124,20 @@ int uv__pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
uv_loop_t* loop = server->loop;
uv_pipe_t* pipe_client;
uv_pipe_accept_t* req;
QUEUE* q;
struct uv__queue* q;
uv__ipc_xfer_queue_item_t* item;
int err;
if (server->ipc) {
if (QUEUE_EMPTY(&server->pipe.conn.ipc_xfer_queue)) {
if (uv__queue_empty(&server->pipe.conn.ipc_xfer_queue)) {
/* No valid pending sockets. */
return WSAEWOULDBLOCK;
}
q = QUEUE_HEAD(&server->pipe.conn.ipc_xfer_queue);
QUEUE_REMOVE(q);
q = uv__queue_head(&server->pipe.conn.ipc_xfer_queue);
uv__queue_remove(q);
server->pipe.conn.ipc_xfer_queue_length--;
item = QUEUE_DATA(q, uv__ipc_xfer_queue_item_t, member);
item = uv__queue_data(q, uv__ipc_xfer_queue_item_t, member);
err = uv__tcp_xfer_import(
(uv_tcp_t*) client, item->xfer_type, &item->xfer_info);
@ -1829,7 +1891,7 @@ static void uv__pipe_queue_ipc_xfer_info(
item->xfer_type = xfer_type;
item->xfer_info = *xfer_info;
QUEUE_INSERT_TAIL(&handle->pipe.conn.ipc_xfer_queue, &item->member);
uv__queue_insert_tail(&handle->pipe.conn.ipc_xfer_queue, &item->member);
handle->pipe.conn.ipc_xfer_queue_length++;
}

View File

@ -175,14 +175,14 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* handle, unsigned int flags) {
sock = socket(domain, SOCK_STREAM, 0);
if (sock == INVALID_SOCKET) {
err = WSAGetLastError();
QUEUE_REMOVE(&handle->handle_queue);
uv__queue_remove(&handle->handle_queue);
return uv_translate_sys_error(err);
}
err = uv__tcp_set_socket(handle->loop, handle, sock, domain, 0);
if (err) {
closesocket(sock);
QUEUE_REMOVE(&handle->handle_queue);
uv__queue_remove(&handle->handle_queue);
return uv_translate_sys_error(err);
}

View File

@ -2298,26 +2298,6 @@ void uv__tty_endgame(uv_loop_t* loop, uv_tty_t* handle) {
}
/*
* uv__process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working
* TODO: find a way to remove it
*/
void uv__process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
uv_req_t* raw_req) {
abort();
}
/*
* uv__process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working
* TODO: find a way to remove it
*/
void uv__process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
uv_connect_t* req) {
abort();
}
int uv_tty_reset_mode(void) {
/* Not necessary to do anything. */
return 0;

View File

@ -146,14 +146,14 @@ int uv__udp_init_ex(uv_loop_t* loop,
sock = socket(domain, SOCK_DGRAM, 0);
if (sock == INVALID_SOCKET) {
err = WSAGetLastError();
QUEUE_REMOVE(&handle->handle_queue);
uv__queue_remove(&handle->handle_queue);
return uv_translate_sys_error(err);
}
err = uv__udp_set_socket(handle->loop, handle, sock, domain);
if (err) {
closesocket(sock);
QUEUE_REMOVE(&handle->handle_queue);
uv__queue_remove(&handle->handle_queue);
return uv_translate_sys_error(err);
}
}

View File

@ -22,6 +22,7 @@
BENCHMARK_DECLARE (sizes)
BENCHMARK_DECLARE (loop_count)
BENCHMARK_DECLARE (loop_count_timed)
BENCHMARK_DECLARE (loop_alive)
BENCHMARK_DECLARE (ping_pongs)
BENCHMARK_DECLARE (ping_udp1)
BENCHMARK_DECLARE (ping_udp10)
@ -89,6 +90,7 @@ TASK_LIST_START
BENCHMARK_ENTRY (sizes)
BENCHMARK_ENTRY (loop_count)
BENCHMARK_ENTRY (loop_count_timed)
BENCHMARK_ENTRY (loop_alive)
BENCHMARK_ENTRY (ping_pongs)
BENCHMARK_HELPER (ping_pongs, tcp4_echo_server)

View File

@ -26,6 +26,7 @@
#include <stdlib.h>
#define NUM_TICKS (2 * 1000 * 1000)
#define NUM_TICKS2 (2 * 1000 * 1000 * 100)
static unsigned long ticks;
static uv_idle_t idle_handle;
@ -37,6 +38,19 @@ static void idle_cb(uv_idle_t* handle) {
uv_idle_stop(handle);
}
static void idle_alive_cb(uv_idle_t* handle) {
int ticks = 0;
while (++ticks < NUM_TICKS2) {
int r = uv_loop_alive(handle->loop);
if (r == 0)
abort();
}
*(int*)handle->data = ticks;
uv_idle_stop(handle);
}
static void idle2_cb(uv_idle_t* handle) {
ticks++;
@ -90,3 +104,31 @@ BENCHMARK_IMPL(loop_count_timed) {
MAKE_VALGRIND_HAPPY(loop);
return 0;
}
/* Measure the performance of running uv_loop_alive(). Adding this so we can get
* some sort of metric for the impact of switching active_reqs.count to use
* atomics. No other code sits in a hot path. */
BENCHMARK_IMPL(loop_alive) {
uv_loop_t* loop = uv_default_loop();
int ticks = 0;
uint64_t ns;
uv_idle_init(loop, &idle_handle);
idle_handle.data = &ticks;
uv_idle_start(&idle_handle, idle_alive_cb);
ns = uv_hrtime();
uv_run(loop, UV_RUN_DEFAULT);
ns = uv_hrtime() - ns;
ASSERT_EQ(ticks, NUM_TICKS2);
fprintf(stderr, "loop_alive: %d ticks in %.2fs (%.0f/s)\n",
NUM_TICKS2,
ns / 1e9,
NUM_TICKS2 / (ns / 1e9));
fflush(stderr);
MAKE_VALGRIND_HAPPY(loop);
return 0;
}
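
For context, uv_loop_alive() simply reports whether any active handles or requests remain; a hedged sketch of the manual-drive pattern where that call sits on a hot path:

#include <uv.h>

static void on_timer(uv_timer_t* t) {
  uv_close((uv_handle_t*) t, NULL);  /* dropping the handle ends the loop */
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_timer_t timer;

  uv_timer_init(loop, &timer);
  uv_timer_start(&timer, on_timer, 10, 0);

  /* Drive the loop manually; uv_loop_alive() is the call the benchmark
   * above measures. */
  while (uv_loop_alive(loop))
    uv_run(loop, UV_RUN_NOWAIT);
  return 0;
}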

View File

@ -40,6 +40,10 @@
#include <sys/time.h>
#include <pthread.h>
#ifdef __APPLE__
#include <TargetConditionals.h>
#endif
extern char** environ;
static void closefd(int fd) {
@ -131,7 +135,11 @@ int process_start(char* name, char* part, process_info_t* p, int is_helper) {
p->terminated = 0;
p->status = 0;
#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)
pid = -1;
#else
pid = fork();
#endif
if (pid < 0) {
perror("fork");
@ -144,7 +152,9 @@ int process_start(char* name, char* part, process_info_t* p, int is_helper) {
closefd(pipefd[0]);
dup2(stdout_fd, STDOUT_FILENO);
dup2(stdout_fd, STDERR_FILENO);
#if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH))
execve(args[0], args, environ);
#endif
perror("execve()");
_exit(127);
}

View File

@ -27,6 +27,10 @@
#include <sys/socket.h>
#include <string.h>
#ifdef __APPLE__
#include <TargetConditionals.h>
#endif
#include "uv.h"
#include "task.h"
@ -100,7 +104,11 @@ TEST_IMPL(fork_timer) {
pid_t child_pid;
run_timer_loop_once();
#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)
child_pid = -1;
#else
child_pid = fork();
#endif
ASSERT(child_pid != -1);
if (child_pid != 0) {
@ -132,7 +140,11 @@ TEST_IMPL(fork_socketpair) {
/* Create the server watcher in the parent, use it in the child. */
ASSERT(0 == uv_poll_init(uv_default_loop(), &poll_handle, socket_fds[0]));
#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)
child_pid = -1;
#else
child_pid = fork();
#endif
ASSERT(child_pid != -1);
if (child_pid != 0) {
@ -181,7 +193,11 @@ TEST_IMPL(fork_socketpair_started) {
*/
ASSERT(1 == uv_run(uv_default_loop(), UV_RUN_NOWAIT));
#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)
child_pid = -1;
#else
child_pid = fork();
#endif
ASSERT(child_pid != -1);
if (child_pid != 0) {
@ -245,7 +261,11 @@ TEST_IMPL(fork_signal_to_child) {
ASSERT(0 == uv_signal_init(uv_default_loop(), &signal_handle));
ASSERT(0 == uv_signal_start(&signal_handle, fork_signal_to_child_cb, SIGUSR1));
#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)
child_pid = -1;
#else
child_pid = fork();
#endif
ASSERT(child_pid != -1);
if (child_pid != 0) {
@ -297,7 +317,11 @@ TEST_IMPL(fork_signal_to_child_closed) {
ASSERT(0 == uv_signal_init(uv_default_loop(), &signal_handle));
ASSERT(0 == uv_signal_start(&signal_handle, fork_signal_to_child_cb, SIGUSR1));
#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)
child_pid = -1;
#else
child_pid = fork();
#endif
ASSERT(child_pid != -1);
if (child_pid != 0) {
@ -463,7 +487,11 @@ static int _do_fork_fs_events_child(int file_or_dir) {
/* Watch in the parent, prime the loop and/or threads. */
assert_watch_file_current_dir(uv_default_loop(), file_or_dir);
#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)
child_pid = -1;
#else
child_pid = fork();
#endif
ASSERT(child_pid != -1);
if (child_pid != 0) {
@ -569,7 +597,11 @@ TEST_IMPL(fork_fs_events_file_parent_child) {
r = uv_timer_init(loop, &timer);
ASSERT(r == 0);
#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)
child_pid = -1;
#else
child_pid = fork();
#endif
ASSERT(child_pid != -1);
if (child_pid != 0) {
/* parent */
@ -654,7 +686,11 @@ TEST_IMPL(fork_threadpool_queue_work_simple) {
/* Prime the pool and default loop. */
assert_run_work(uv_default_loop());
#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)
child_pid = -1;
#else
child_pid = fork();
#endif
ASSERT(child_pid != -1);
if (child_pid != 0) {

View File

@ -4550,6 +4550,7 @@ TEST_IMPL(fs_get_system_error) {
return 0;
}
TEST_IMPL(fs_stat_batch_multiple) {
uv_fs_t req[300];
int r;
@ -4573,3 +4574,55 @@ TEST_IMPL(fs_stat_batch_multiple) {
MAKE_VALGRIND_HAPPY(loop);
return 0;
}
#ifdef _WIN32
TEST_IMPL(fs_wtf) {
int r;
HANDLE file_handle;
uv_dirent_t dent;
static char test_file_buf[PATHMAX];
/* set-up */
_wunlink(L"test_dir/hi\xD801\x0037");
rmdir("test_dir");
loop = uv_default_loop();
r = uv_fs_mkdir(NULL, &mkdir_req, "test_dir", 0777, NULL);
ASSERT_EQ(r, 0);
uv_fs_req_cleanup(&mkdir_req);
file_handle = CreateFileW(L"test_dir/hi\xD801\x0037",
GENERIC_WRITE | FILE_WRITE_ATTRIBUTES,
0,
NULL,
CREATE_ALWAYS,
FILE_FLAG_OPEN_REPARSE_POINT |
FILE_FLAG_BACKUP_SEMANTICS,
NULL);
ASSERT(file_handle != INVALID_HANDLE_VALUE);
CloseHandle(file_handle);
r = uv_fs_scandir(NULL, &scandir_req, "test_dir", 0, NULL);
ASSERT_EQ(r, 1);
ASSERT_EQ(scandir_req.result, 1);
ASSERT_NOT_NULL(scandir_req.ptr);
while (UV_EOF != uv_fs_scandir_next(&scandir_req, &dent)) {
snprintf(test_file_buf, sizeof(test_file_buf), "test_dir\\%s", dent.name);
printf("stat %s\n", test_file_buf);
r = uv_fs_stat(NULL, &stat_req, test_file_buf, NULL);
ASSERT_EQ(r, 0);
}
uv_fs_req_cleanup(&scandir_req);
ASSERT_NULL(scandir_req.ptr);
/* clean-up */
_wunlink(L"test_dir/hi\xD801\x0037");
rmdir("test_dir");
MAKE_VALGRIND_HAPPY(loop);
return 0;
}
#endif
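
The file name above contains \xD801, an unpaired high surrogate that well-formed UTF-8 must reject; WTF-8 (the "fs: use WTF-8 on Windows" change this test exercises) instead encodes lone surrogates with the ordinary three-byte pattern so such Windows file names round-trip. A small illustrative sketch of that encoding (not libuv's internal routine):

#include <stdio.h>

/* Generalized UTF-8 ("WTF-8") encoder for BMP code points: unlike strict
 * UTF-8 it also accepts lone surrogates U+D800..U+DFFF. For the U+D801
 * used by the test above it produces ED A0 81. */
static int wtf8_encode_bmp(unsigned code_point, unsigned char out[3]) {
  if (code_point < 0x80) {
    out[0] = (unsigned char) code_point;
    return 1;
  }
  if (code_point < 0x800) {
    out[0] = (unsigned char) (0xC0 | (code_point >> 6));
    out[1] = (unsigned char) (0x80 | (code_point & 0x3F));
    return 2;
  }
  out[0] = (unsigned char) (0xE0 | (code_point >> 12));         /* 1110xxxx */
  out[1] = (unsigned char) (0x80 | ((code_point >> 6) & 0x3F)); /* 10xxxxxx */
  out[2] = (unsigned char) (0x80 | (code_point & 0x3F));        /* 10xxxxxx */
  return 3;
}

int main(void) {
  unsigned char buf[3];
  int i;
  int n = wtf8_encode_bmp(0xD801, buf);
  for (i = 0; i < n; i++)
    printf("%02X ", buf[i]);  /* ED A0 81 */
  printf("\n");
  return 0;
}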

View File

@ -120,6 +120,7 @@ TEST_IMPL(get_passwd2) {
#ifdef _WIN32
ASSERT_EQ(r, UV_ENOTSUP);
(void) &len;
#else
ASSERT_EQ(r, 0);
@ -179,6 +180,7 @@ TEST_IMPL(get_group) {
#ifdef _WIN32
ASSERT_EQ(r, UV_ENOTSUP);
(void) &len;
#else
ASSERT_EQ(r, 0);

View File

@ -29,6 +29,7 @@ TEST_DECLARE (loop_alive)
TEST_DECLARE (loop_close)
TEST_DECLARE (loop_instant_close)
TEST_DECLARE (loop_stop)
TEST_DECLARE (loop_stop_before_run)
TEST_DECLARE (loop_update_time)
TEST_DECLARE (loop_backend_timeout)
TEST_DECLARE (loop_configure)
@ -198,6 +199,7 @@ TEST_DECLARE (pipe_connect_close_multiple)
TEST_DECLARE (pipe_connect_multiple)
TEST_DECLARE (pipe_listen_without_bind)
TEST_DECLARE (pipe_bind_or_listen_error_after_close)
TEST_DECLARE (pipe_overlong_path)
TEST_DECLARE (pipe_connect_bad_name)
TEST_DECLARE (pipe_connect_to_file)
TEST_DECLARE (pipe_connect_on_prepare)
@ -232,6 +234,7 @@ TEST_DECLARE (timer_null_callback)
TEST_DECLARE (timer_early_check)
TEST_DECLARE (timer_no_double_call_once)
TEST_DECLARE (timer_no_double_call_nowait)
TEST_DECLARE (timer_no_run_on_unref)
TEST_DECLARE (idle_starvation)
TEST_DECLARE (idle_check)
TEST_DECLARE (loop_handles)
@ -437,6 +440,7 @@ TEST_DECLARE (fs_file_flag_no_buffering)
TEST_DECLARE (fs_open_readonly_acl)
TEST_DECLARE (fs_fchmod_archive_readonly)
TEST_DECLARE (fs_invalid_mkdir_name)
TEST_DECLARE (fs_wtf)
#endif
TEST_DECLARE (fs_get_system_error)
TEST_DECLARE (strscpy)
@ -450,6 +454,7 @@ TEST_DECLARE (threadpool_cancel_random)
TEST_DECLARE (threadpool_cancel_work)
TEST_DECLARE (threadpool_cancel_fs)
TEST_DECLARE (threadpool_cancel_single)
TEST_DECLARE (threadpool_cancel_when_busy)
TEST_DECLARE (thread_local_storage)
TEST_DECLARE (thread_stack_size)
TEST_DECLARE (thread_stack_size_explicit)
@ -571,6 +576,7 @@ TASK_LIST_START
TEST_ENTRY (loop_close)
TEST_ENTRY (loop_instant_close)
TEST_ENTRY (loop_stop)
TEST_ENTRY (loop_stop_before_run)
TEST_ENTRY (loop_update_time)
TEST_ENTRY (loop_backend_timeout)
TEST_ENTRY (loop_configure)
@ -798,6 +804,7 @@ TASK_LIST_START
TEST_ENTRY (pipe_connect_multiple)
TEST_ENTRY (pipe_listen_without_bind)
TEST_ENTRY (pipe_bind_or_listen_error_after_close)
TEST_ENTRY (pipe_overlong_path)
TEST_ENTRY (pipe_getsockname)
TEST_ENTRY (pipe_getsockname_abstract)
TEST_ENTRY (pipe_getsockname_blocking)
@ -843,6 +850,7 @@ TASK_LIST_START
TEST_ENTRY (timer_early_check)
TEST_ENTRY (timer_no_double_call_once)
TEST_ENTRY (timer_no_double_call_nowait)
TEST_ENTRY (timer_no_run_on_unref)
TEST_ENTRY (idle_starvation)
TEST_ENTRY (idle_check)
@ -1120,6 +1128,7 @@ TASK_LIST_START
TEST_ENTRY (fs_open_readonly_acl)
TEST_ENTRY (fs_fchmod_archive_readonly)
TEST_ENTRY (fs_invalid_mkdir_name)
TEST_ENTRY (fs_wtf)
#endif
TEST_ENTRY (fs_get_system_error)
TEST_ENTRY (get_osfhandle_valid_handle)
@ -1135,6 +1144,7 @@ TASK_LIST_START
TEST_ENTRY (threadpool_cancel_work)
TEST_ENTRY (threadpool_cancel_fs)
TEST_ENTRY (threadpool_cancel_single)
TEST_ENTRY (threadpool_cancel_when_busy)
TEST_ENTRY (thread_local_storage)
TEST_ENTRY (thread_stack_size)
TEST_ENTRY (thread_stack_size_explicit)

View File

@ -70,3 +70,14 @@ TEST_IMPL(loop_stop) {
MAKE_VALGRIND_HAPPY(uv_default_loop());
return 0;
}
TEST_IMPL(loop_stop_before_run) {
ASSERT_OK(uv_timer_init(uv_default_loop(), &timer_handle));
ASSERT_OK(uv_timer_start(&timer_handle, (uv_timer_cb) abort, 0, 0));
uv_stop(uv_default_loop());
ASSERT_NE(uv_run(uv_default_loop(), UV_RUN_DEFAULT), 0);
MAKE_VALGRIND_HAPPY(uv_default_loop());
return 0;
}

View File

@ -153,3 +153,27 @@ TEST_IMPL(pipe_bind_or_listen_error_after_close) {
MAKE_VALGRIND_HAPPY(uv_default_loop());
return 0;
}
TEST_IMPL(pipe_overlong_path) {
char path[512];
uv_pipe_t pipe;
uv_connect_t req;
memset(path, '@', sizeof(path));
ASSERT_OK(uv_pipe_init(uv_default_loop(), &pipe, 0));
ASSERT_EQ(UV_EINVAL,
uv_pipe_bind2(&pipe, path, sizeof(path), UV_PIPE_NO_TRUNCATE));
ASSERT_EQ(UV_EINVAL,
uv_pipe_connect2(&req,
&pipe,
path,
sizeof(path),
UV_PIPE_NO_TRUNCATE,
(uv_connect_cb) abort));
uv_close((uv_handle_t*) &pipe, NULL);
ASSERT_OK(uv_run(uv_default_loop(), UV_RUN_DEFAULT));
MAKE_VALGRIND_HAPPY(uv_default_loop());
return 0;
}
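
uv_pipe_bind2() and uv_pipe_connect2() are the length-taking variants added in this release; with UV_PIPE_NO_TRUNCATE they fail with UV_EINVAL, as tested above, rather than silently truncating a name that cannot fit the transport's address. A hedged sketch of ordinary use (the socket path is made up for illustration):

#include <string.h>
#include <uv.h>

/* Sketch: bind a pipe by explicit (name, length) pair. The path below is
 * hypothetical; per the tests above, namelen is a plain byte count, so
 * strlen() is the natural length for a filesystem path. */
static int bind_pipe_checked(uv_loop_t* loop, uv_pipe_t* handle) {
  const char name[] = "/tmp/example.sock";
  int r;

  r = uv_pipe_init(loop, handle, 0);
  if (r != 0)
    return r;

  /* With UV_PIPE_NO_TRUNCATE an over-long name is an error (UV_EINVAL)
   * instead of being cut down to whatever sockaddr_un can hold. */
  return uv_pipe_bind2(handle, name, strlen(name), UV_PIPE_NO_TRUNCATE);
}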

View File

@ -26,6 +26,10 @@
#include <sys/wait.h>
#include <sys/types.h>
#ifdef __APPLE__
#include <TargetConditionals.h>
#endif
#include "uv.h"
#include "task.h"
@ -58,8 +62,14 @@ TEST_IMPL(pipe_close_stdout_read_stdin) {
r = pipe(fd);
ASSERT(r == 0);
#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)
pid = -1;
#else
pid = fork();
#endif
if ((pid = fork()) == 0) {
if (pid == 0) {
/*
* Make the read side of the pipe our stdin.
* The write side will be closed by the parent process.

View File

@ -25,11 +25,6 @@
#include <stdlib.h>
#include <string.h>
#if defined(__linux__)
#include <sys/socket.h>
#include <sys/un.h>
#endif
#ifndef _WIN32
# include <unistd.h> /* close */
#else
@ -63,8 +58,14 @@ static void pipe_client_connect_cb(uv_connect_t* req, int status) {
r = uv_pipe_getpeername(&pipe_client, buf, &len);
ASSERT(r == 0);
ASSERT(buf[len - 1] != 0);
ASSERT(memcmp(buf, TEST_PIPENAME, len) == 0);
if (*buf == '\0') { /* Linux abstract socket. */
const char expected[] = "\0" TEST_PIPENAME;
ASSERT_GE(len, sizeof(expected));
ASSERT_MEM_EQ(buf, expected, sizeof(expected));
} else {
ASSERT_NE(0, buf[len - 1]);
ASSERT_MEM_EQ(buf, TEST_PIPENAME, len);
}
len = sizeof buf;
r = uv_pipe_getsockname(&pipe_client, buf, &len);
@ -72,7 +73,6 @@ static void pipe_client_connect_cb(uv_connect_t* req, int status) {
pipe_client_connect_cb_called++;
uv_close((uv_handle_t*) &pipe_client, pipe_close_cb);
uv_close((uv_handle_t*) &pipe_server, pipe_close_cb);
}
@ -162,47 +162,48 @@ TEST_IMPL(pipe_getsockname) {
TEST_IMPL(pipe_getsockname_abstract) {
/* TODO(bnoordhuis) Use unique name, susceptible to concurrent test runs. */
static const char name[] = "\0" TEST_PIPENAME;
#if defined(__linux__)
char buf[1024];
size_t len;
int r;
int sock;
struct sockaddr_un sun;
socklen_t sun_len;
char abstract_pipe[] = "\0test-pipe";
char buf[256];
size_t buflen;
sock = socket(AF_UNIX, SOCK_STREAM, 0);
ASSERT(sock != -1);
sun_len = sizeof sun;
memset(&sun, 0, sun_len);
sun.sun_family = AF_UNIX;
memcpy(sun.sun_path, abstract_pipe, sizeof abstract_pipe);
r = bind(sock, (struct sockaddr*)&sun, sun_len);
ASSERT(r == 0);
r = uv_pipe_init(uv_default_loop(), &pipe_server, 0);
ASSERT(r == 0);
r = uv_pipe_open(&pipe_server, sock);
ASSERT(r == 0);
len = sizeof buf;
r = uv_pipe_getsockname(&pipe_server, buf, &len);
ASSERT(r == 0);
ASSERT(memcmp(buf, abstract_pipe, sizeof abstract_pipe) == 0);
uv_close((uv_handle_t*)&pipe_server, pipe_close_cb);
uv_run(uv_default_loop(), UV_RUN_DEFAULT);
close(sock);
ASSERT(pipe_close_cb_called == 1);
buflen = sizeof(buf);
memset(buf, 0, sizeof(buf));
ASSERT_OK(uv_pipe_init(uv_default_loop(), &pipe_server, 0));
ASSERT_OK(uv_pipe_bind2(&pipe_server, name, sizeof(name), 0));
ASSERT_OK(uv_pipe_getsockname(&pipe_server, buf, &buflen));
ASSERT_MEM_EQ(name, buf, sizeof(name));
ASSERT_OK(uv_listen((uv_stream_t*) &pipe_server,
0,
pipe_server_connection_cb));
ASSERT_OK(uv_pipe_init(uv_default_loop(), &pipe_client, 0));
ASSERT_OK(uv_pipe_connect2(&connect_req,
&pipe_client,
name,
sizeof(name),
0,
pipe_client_connect_cb));
ASSERT_OK(uv_run(uv_default_loop(), UV_RUN_DEFAULT));
ASSERT_EQ(1, pipe_client_connect_cb_called);
ASSERT_EQ(2, pipe_close_cb_called);
MAKE_VALGRIND_HAPPY(uv_default_loop());
return 0;
#else
/* On other platforms it should simply fail with UV_EINVAL. */
ASSERT_OK(uv_pipe_init(uv_default_loop(), &pipe_server, 0));
ASSERT_EQ(UV_EINVAL, uv_pipe_bind2(&pipe_server, name, sizeof(name), 0));
ASSERT_OK(uv_pipe_init(uv_default_loop(), &pipe_client, 0));
uv_close((uv_handle_t*) &pipe_server, pipe_close_cb);
ASSERT_EQ(UV_EINVAL, uv_pipe_connect2(&connect_req,
&pipe_client,
name,
sizeof(name),
0,
(uv_connect_cb) abort));
uv_close((uv_handle_t*) &pipe_client, pipe_close_cb);
ASSERT_OK(uv_run(uv_default_loop(), UV_RUN_DEFAULT));
ASSERT_EQ(2, pipe_close_cb_called);
MAKE_VALGRIND_HAPPY(uv_default_loop());
return 0;
#endif
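
For readers unfamiliar with the Linux-only feature under test: an abstract AF_UNIX address is marked by a leading NUL byte in sun_path, never appears on the filesystem, and is delimited by the address length rather than a terminator — which is why the test passes sizeof(name) and compares memory, not strings. A plain-sockets sketch of the same idea (helper name is illustrative):

#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

/* Sketch (Linux only): an abstract AF_UNIX address starts with a NUL in
 * sun_path; the name may itself contain NULs, so the address length passed
 * to bind() is what delimits it, and no filesystem entry is created. */
static int bind_abstract(const char* name, size_t namelen) {
  struct sockaddr_un sun;
  socklen_t addrlen;
  int fd;

  if (namelen > sizeof(sun.sun_path) - 1)
    return -1;

  fd = socket(AF_UNIX, SOCK_STREAM, 0);
  if (fd == -1)
    return -1;

  memset(&sun, 0, sizeof(sun));
  sun.sun_family = AF_UNIX;
  sun.sun_path[0] = '\0';                 /* abstract-namespace marker */
  memcpy(sun.sun_path + 1, name, namelen);
  addrlen = (socklen_t) (offsetof(struct sockaddr_un, sun_path) + 1 + namelen);

  if (bind(fd, (struct sockaddr*) &sun, addrlen) != 0) {
    close(fd);
    return -1;
  }
  return fd;
}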

View File

@ -29,7 +29,7 @@
* The idea behind the test is as follows.
* Certain handle types are stored in a queue internally.
* Extra care should be taken for removal of a handle from the queue while iterating over the queue.
* (i.e., QUEUE_REMOVE() called within QUEUE_FOREACH())
* (i.e., uv__queue_remove() called within uv__queue_foreach())
* This usually happens when someone closes or stops a handle from within its callback.
* So we need to check that we haven't corrupted the queue on close/stop.
* To do so we do the following (for each handle type):
@ -54,7 +54,8 @@
* wrong foreach "next" |
*
* 4. The callback for handle #1 shouldn't be called because the handle #1 is stopped in the previous step.
* However, if QUEUE_REMOVE() is not handled properly within QUEUE_FOREACH(), the callback _will_ be called.
* However, if uv__queue_remove() is not handled properly within uv__queue_foreach(), the callback _will_
* be called.
*/
static const unsigned first_handle_number_idle = 2;
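
The hazard the comment above describes is generic to intrusive lists: if the iterator reads the current node's next pointer only after the callback has run, a callback that unlinks the node derails the walk. A minimal sketch of one safe pattern — caching next before the callback — using stand-in types (the real uv__queue internals differ):

#include <stdio.h>

/* Stand-in intrusive queue. The point: queue_walk() caches "next" before
 * invoking the callback, so the callback may unlink the current node
 * without breaking the iteration. */
struct node {
  struct node* next;
  struct node* prev;
};

static void queue_remove(struct node* q) {
  q->prev->next = q->next;
  q->next->prev = q->prev;
}

static void queue_walk(struct node* head, void (*cb)(struct node*)) {
  struct node* q;
  struct node* next;

  for (q = head->next; q != head; q = next) {
    next = q->next;  /* cached before cb(), which may remove q */
    cb(q);
  }
}

static void remove_cb(struct node* q) {
  queue_remove(q);  /* removing the current node mid-walk is safe here */
}

int main(void) {
  struct node head, a, b;

  head.next = &a; a.prev = &head;
  a.next = &b;    b.prev = &a;
  b.next = &head; head.prev = &b;

  queue_walk(&head, remove_cb);
  printf("queue empty: %d\n", head.next == &head && head.prev == &head);
  return 0;
}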

View File

@ -1108,7 +1108,7 @@ TEST_IMPL(spawn_detect_pipe_name_collisions_on_windows) {
/* Create a pipe that'll cause a collision. */
snprintf(name,
sizeof(name),
"\\\\.\\pipe\\uv\\%p-%d",
"\\\\.\\pipe\\uv\\%p-%lu",
&out,
GetCurrentProcessId());
pipe_handle = CreateNamedPipeA(name,

View File

@ -78,10 +78,6 @@ static void getaddrinfo_do(struct getaddrinfo_req* req) {
static void getaddrinfo_cb(uv_getaddrinfo_t* handle,
int status,
struct addrinfo* res) {
/* TODO(gengjiawen): Fix test on QEMU. */
#if defined(__QEMU__)
RETURN_SKIP("Test does not currently work in QEMU");
#endif
struct getaddrinfo_req* req;
ASSERT(status == 0);

View File

@ -98,11 +98,16 @@ static int known_broken(uv_req_t* req) {
case UV_FS_FDATASYNC:
case UV_FS_FSTAT:
case UV_FS_FSYNC:
case UV_FS_LINK:
case UV_FS_LSTAT:
case UV_FS_MKDIR:
case UV_FS_OPEN:
case UV_FS_READ:
case UV_FS_RENAME:
case UV_FS_STAT:
case UV_FS_SYMLINK:
case UV_FS_WRITE:
case UV_FS_UNLINK:
return 1;
default: /* Squelch -Wswitch warnings. */
break;
@ -373,3 +378,36 @@ TEST_IMPL(threadpool_cancel_single) {
MAKE_VALGRIND_HAPPY(loop);
return 0;
}
static void after_busy_cb(uv_work_t* req, int status) {
ASSERT_OK(status);
done_cb_called++;
}
static void busy_cb(uv_work_t* req) {
uv_sem_post((uv_sem_t*) req->data);
/* Assume that calling uv_cancel() takes less than 10ms. */
uv_sleep(10);
}
TEST_IMPL(threadpool_cancel_when_busy) {
uv_sem_t sem_lock;
uv_work_t req;
req.data = &sem_lock;
ASSERT_OK(uv_sem_init(&sem_lock, 0));
ASSERT_OK(uv_queue_work(uv_default_loop(), &req, busy_cb, after_busy_cb));
uv_sem_wait(&sem_lock);
ASSERT_EQ(uv_cancel((uv_req_t*) &req), UV_EBUSY);
ASSERT_OK(uv_run(uv_default_loop(), UV_RUN_DEFAULT));
ASSERT_EQ(done_cb_called, 1);
uv_sem_destroy(&sem_lock);
MAKE_VALGRIND_HAPPY(uv_default_loop());
return 0;
}
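
The test above pins down the busy case: once a worker has picked the request up, uv_cancel() reports UV_EBUSY and the after-work callback later fires with status 0. For contrast, a sketch of the case where cancellation does succeed — the request is still queued behind workers parked on purpose (assumes UV_THREADPOOL_SIZE is unset, i.e. the default pool of 4 threads):

#include <assert.h>
#include <uv.h>

static uv_sem_t pause_sem;
static int canceled_cb_called;

static void blocking_work(uv_work_t* req) {
  (void) req;
  uv_sem_wait(&pause_sem);  /* park one worker thread */
}

static void blocking_done(uv_work_t* req, int status) {
  (void) req;
  assert(status == 0);
}

static void noop_work(uv_work_t* req) { (void) req; }

static void canceled_done(uv_work_t* req, int status) {
  (void) req;
  assert(status == UV_ECANCELED);  /* cancelled while still queued */
  canceled_cb_called++;
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_work_t blockers[4];  /* one per default threadpool thread */
  uv_work_t victim;
  int i;

  assert(0 == uv_sem_init(&pause_sem, 0));
  for (i = 0; i < 4; i++)
    assert(0 == uv_queue_work(loop, &blockers[i], blocking_work, blocking_done));
  assert(0 == uv_queue_work(loop, &victim, noop_work, canceled_done));

  assert(0 == uv_cancel((uv_req_t*) &victim));  /* still queued: succeeds */

  for (i = 0; i < 4; i++)
    uv_sem_post(&pause_sem);  /* release the parked workers */
  uv_run(loop, UV_RUN_DEFAULT);
  assert(1 == canceled_cb_called);
  uv_sem_destroy(&pause_sem);
  return 0;
}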

View File

@ -407,3 +407,15 @@ TEST_IMPL(timer_no_double_call_nowait) {
MAKE_VALGRIND_HAPPY(uv_default_loop());
return 0;
}
TEST_IMPL(timer_no_run_on_unref) {
uv_timer_t timer_handle;
ASSERT_OK(uv_timer_init(uv_default_loop(), &timer_handle));
ASSERT_OK(uv_timer_start(&timer_handle, (uv_timer_cb) abort, 0, 0));
uv_unref((uv_handle_t*) &timer_handle);
ASSERT_EQ(uv_run(uv_default_loop(), UV_RUN_DEFAULT), 0);
MAKE_VALGRIND_HAPPY(uv_default_loop());
return 0;
}
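
This test covers the "don't run timers if loop is stopped/unref'd" change from this release: with the only handle unreferenced, uv_run() returns without entering the timer phase, so the (uv_timer_cb) abort can never fire. For contrast, a sketch of the referenced case, where the same zero-timeout timer does run:

#include <assert.h>
#include <uv.h>

static int fired;

static void timer_cb(uv_timer_t* handle) {
  fired++;
  uv_timer_stop(handle);  /* let the loop exit after one firing */
}

int main(void) {
  uv_timer_t timer;

  assert(0 == uv_timer_init(uv_default_loop(), &timer));
  assert(0 == uv_timer_start(&timer, timer_cb, 0, 0));
  /* Referenced handle: uv_run() enters the timer phase and fires it. */
  assert(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT));
  assert(1 == fired);
  return 0;
}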