libuv 1.44.0

git-svn-id: https://www.unprompted.com/svn/projects/tildefriends/trunk@3856 ed5197a5-7fde-0310-b194-c3ffbd925b24
Cory McWilliams 2022-03-07 21:34:07 +00:00
parent b488db9137
commit 41cabad264
81 changed files with 2216 additions and 2026 deletions


@ -44,7 +44,7 @@ jobs:
# see build options you can use in https://developer.android.com/ndk/guides/cmake
run: |
mkdir build && cd build
$ANDROID_HOME/cmake/3.10.2.4988404/bin/cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_HOME/ndk/20.0.5594570/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-21 ..
$ANDROID_HOME/cmake/3.10.2.4988404/bin/cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_HOME/ndk/20.0.5594570/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-24 ..
$ANDROID_HOME/cmake/3.10.2.4988404/bin/cmake --build .
build-macos:

deps/libuv/AUTHORS vendored

@ -496,3 +496,15 @@ Jesper Storm Bache <jsbache@users.noreply.github.com>
Campbell He <duskmoon314@users.noreply.github.com>
Andrey Hohutkin <andrey.hohutkin@gmail.com>
deal <halx99@live.com>
David Machaj <46852402+dmachaj@users.noreply.github.com>
Jessica Clarke <jrtc27@jrtc27.com>
Jeremy Rose <nornagon@nornagon.net>
woclass <git@wo-class.cn>
Luca Adrian L <info@lucalindhorst.de>
WenTao Ou <owt5008137@live.com>
jonilaitinen <joni.laitinen@iki.fi>
UMU <UMU618@users.noreply.github.com>
Paul Evans <leonerd@leonerd.org.uk>
wyckster <wyckster@hotmail.com>
Vittore F. Scolari <vittore.scolari@gmail.com>
roflcopter4 <15476346+roflcopter4@users.noreply.github.com>


@ -215,7 +215,6 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Android")
list(APPEND uv_defines _GNU_SOURCE)
list(APPEND uv_libraries dl)
list(APPEND uv_sources
src/unix/android-ifaddrs.c
src/unix/linux-core.c
src/unix/linux-inotify.c
src/unix/linux-syscalls.c
@ -259,6 +258,17 @@ if(APPLE)
src/unix/fsevents.c)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "GNU")
list(APPEND uv_libraries dl)
list(APPEND uv_sources
src/unix/bsd-ifaddrs.c
src/unix/no-fsevents.c
src/unix/no-proctitle.c
src/unix/posix-hrtime.c
src/unix/posix-poll.c
src/unix/hurd.c)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112)
list(APPEND uv_libraries dl rt)
@ -418,6 +428,7 @@ if(LIBUV_BUILD_TESTS)
test/benchmark-fs-stat.c
test/benchmark-getaddrinfo.c
test/benchmark-loop-count.c
test/benchmark-queue-work.c
test/benchmark-million-async.c
test/benchmark-million-timers.c
test/benchmark-multi-accept.c
@ -663,9 +674,11 @@ install(FILES LICENSE DESTINATION ${CMAKE_INSTALL_DOCDIR})
install(FILES ${PROJECT_BINARY_DIR}/libuv.pc ${PROJECT_BINARY_DIR}/libuv-static.pc
DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
install(TARGETS uv EXPORT libuvConfig
RUNTIME DESTINATION ${CMAKE_INSTALL_LIBDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(TARGETS uv_a ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(TARGETS uv_a EXPORT libuvConfig
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(EXPORT libuvConfig DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libuv)
if(MSVC)

deps/libuv/ChangeLog vendored

@ -1,4 +1,115 @@
2022.01.05, Version 1.43.0 (Stable)
2022.03.07, Version 1.44.0 (Stable)
Changes since version 1.43.0:
* darwin: remove EPROTOTYPE error workaround (Ben Noordhuis)
* doc: fix v1.43.0 changelog entries (cjihrig)
* win: replace CRITICAL_SECTION+Semaphore with SRWLock (David Machaj)
* darwin: translate EPROTOTYPE to ECONNRESET (Ben Noordhuis)
* android: use libc getifaddrs() (Ben Noordhuis)
* unix: fix STATIC_ASSERT to check what it means to check (Jessica Clarke)
* unix: ensure struct msghdr is zeroed in recvmmsg (Ondřej Surý)
* test: test with maximum recvmmsg buffer (Ondřej Surý)
* unix: don't allow too small thread stack size (Ben Noordhuis)
* bsd: ensure mutex is initialized (Ben Noordhuis)
* doc: add gengjiawen as maintainer (gengjiawen)
* process: monitor for exit with kqueue on BSDs (Jeremy Rose)
* test: fix flaky uv_fs_lutime test (Momtchil Momtchev)
* build: fix cmake install locations (Jameson Nash)
* thread,win: fix C90 style nit (ssrlive)
* build: rename CFLAGS to AM_CFLAGS (Jameson Nash)
* doc/guide: update content and sample code (woclass)
* process,bsd: handle kevent NOTE_EXIT failure (Jameson Nash)
* test: remove flaky test ipc_closed_handle (Ben Noordhuis)
* darwin: bump minimum supported version to 10.15 (Ben Noordhuis)
* win: return fractional seconds in uv_uptime() (Luca Adrian L)
* build: export uv_a for cmake (WenTao Ou)
* loop: add pending work to loop-alive check (Jameson Nash)
* win: use GetTickCount64 for uptime again (Jameson Nash)
* win: restrict system DLL load paths (jonilaitinen)
* win,errors: remap ERROR_ACCESS_DENIED to UV_EACCES (Darshan Sen)
* bench: add `uv_queue_work` ping-pong measurement (Momtchil Momtchev)
* build: fix error C4146 on MSVC (UMU)
* test: fix benchmark-ping-udp (Ryan Liptak)
* win,fs: consider broken pipe error a normal EOF (Momtchil Momtchev)
* document the values of enum uv_stdio_flags (Paul Evans)
* win,loop: add missing uv_update_time (twosee)
* win,fs: avoid closing an invalid handle (Jameson Nash)
* fix oopsie from
* doc: clarify android api level (Ben Noordhuis)
* win: fix style nits [NFC] (Jameson Nash)
* test: fix flaky udp_mmsg test (Santiago Gimeno)
* test: fix ipc_send_recv_pipe flakiness (Ben Noordhuis)
* doc: checkout -> check out (wyckster)
* core: change uv_get_password uid/gid to unsigned (Jameson Nash)
* hurd: unbreak build on GNU/Hurd (Vittore F. Scolari)
* freebsd: use copy_file_range() in uv_fs_sendfile() (David Carlier)
* test: use closefd in runner-unix.c (Guilherme Íscaro)
* Reland "macos: use posix_spawn instead of fork" (Jameson Nash)
* android: fix build error when no ifaddrs.h (ssrlive)
* unix,win: add uv_available_parallelism() (Ben Noordhuis)
* process: remove OpenBSD from kevent list (Jameson Nash)
* zos: fix build breakage (Ben Noordhuis)
* process: only use F_DUPFD_CLOEXEC if it is defined (Jameson Nash)
* win,poll: add the MSAFD GUID for AF_UNIX (roflcopter4)
* unix: simplify uv__cloexec_fcntl() (Ben Noordhuis)
* doc: add secondary GPG ID for vtjnash (Jameson Nash)
* unix: remove uv__cloexec_ioctl() (Jameson Nash)
2022.01.05, Version 1.43.0 (Stable), 988f2bfc4defb9a85a536a3e645834c161143ee0
Changes since version 1.42.0:
@ -18,73 +129,73 @@ Changes since version 1.42.0:
* win,fsevent: fix uv_fs_event_stop() assert (Ben Noordhuis)
* unix: remove redundant include in unix.h (
* unix: remove redundant include in unix.h (Juan José Arboleda)
* doc: mark SmartOS as Tier 3 support (
* doc: mark SmartOS as Tier 3 support (Ben Noordhuis)
* doc: fix broken links for netbsd's sysctl manpage (
* doc: fix broken links for netbsd's sysctl manpage (YAKSH BARIYA)
* misc: adjust stalebot deadline (
* misc: adjust stalebot deadline (Ben Noordhuis)
* test: remove `dns-server.c` as it is not used anywhere (
* test: remove `dns-server.c` as it is not used anywhere (Darshan Sen)
* build: fix non-cmake android builds (
* build: fix non-cmake android builds (YAKSH BARIYA)
* doc: replace pyuv with uvloop (
* doc: replace pyuv with uvloop (Ofek Lev)
* asan: fix some tests (
* asan: fix some tests (Jameson Nash)
* build: add experimental TSAN configuration (
* build: add experimental TSAN configuration (Jameson Nash)
* pipe: remove useless assertion (
* pipe: remove useless assertion (~locpyl-tidnyd)
* bsd: destroy mutex in uv__process_title_cleanup() (
* bsd: destroy mutex in uv__process_title_cleanup() (Darshan Sen)
* build: add windows build to CI (
* build: add windows build to CI (Darshan Sen)
* win,fs: fix error code in uv_fs_read() and uv_fs_write() ( Sen)
* win,fs: fix error code in uv_fs_read() and uv_fs_write() (Darshan Sen)
* build: add macos-latest to ci matrix (
* build: add macos-latest to ci matrix (Ben Noordhuis)
* udp: fix &/&& typo in macro condition (
* udp: fix &/&& typo in macro condition (Evan Miller)
* build: install cmake package module (Petr Menšík)
* win: fix build for mingw32 (
* win: fix build for mingw32 (Nicolas Noble)
* build: fix build failures with MinGW new headers (erw7)
* build: fix win build with cmake versions before v3.14 (
* build: fix win build with cmake versions before v3.14 (AJ Heller)
* unix: support aarch64 in uv_cpu_info() (
* unix: support aarch64 in uv_cpu_info() (Juan José Arboleda)
* linux: work around CIFS EPERM bug (
* linux: work around CIFS EPERM bug (Ben Noordhuis)
* sunos: Oracle Developer Studio support (
* sunos: Oracle Developer Studio support (Stacey Marshall)
* Revert "sunos: Oracle Developer Studio support (
* Revert "sunos: Oracle Developer Studio support (cjihrig)
* sunos: Oracle Developer Studio support (
* sunos: Oracle Developer Studio support (Stacey Marshall)
* stream: permit read after seeing EOF (
* stream: permit read after seeing EOF (Jameson Nash)
* thread: initialize uv_thread_self for all threads (
* thread: initialize uv_thread_self for all threads (Jameson Nash)
* kqueue: ignore write-end closed notifications (
* kqueue: ignore write-end closed notifications (Jameson Nash)
* macos: fix the cfdata length in uv__get_cpu_speed ( Bache)
* macos: fix the cfdata length in uv__get_cpu_speed (Jesper Storm Bache)
* unix,win: add uv_ip_name to get name from sockaddr (
* unix,win: add uv_ip_name to get name from sockaddr (Campbell He)
* win,test: fix a few typos (AJ Heller)
* zos: use destructor for uv__threadpool_cleanup() ( Zhang)
* zos: use destructor for uv__threadpool_cleanup() (Wayne Zhang)
* linux: use MemAvailable instead of MemFree (
* linux: use MemAvailable instead of MemFree (Andrey Hohutkin)
* freebsd: call dlerror() only if necessary (
* freebsd: call dlerror() only if necessary (Jameson Nash)
* bsd,windows,zos: fix udp disconnect EINVAL (
* bsd,windows,zos: fix udp disconnect EINVAL (deal)
2021.07.21, Version 1.42.0 (Stable), 6ce14710da7079eb248868171f6343bc409ea3a4

deps/libuv/LICENSE vendored

@ -64,7 +64,3 @@ The externally maintained libraries used by libuv are:
- pthread-fixes.c, copyright Google Inc. and Sony Mobile Communications AB.
Three clause BSD license.
- android-ifaddrs.h, android-ifaddrs.c, copyright Berkeley Software Design
Inc, Kenneth MacKay and Emergya (Cloud4all, FP7/2007-2013, grant agreement
n° 289016). Three clause BSD license.


@ -17,6 +17,8 @@ libuv is currently managed by the following individuals:
- GPG key: 9DFE AA5F 481B BF77 2D90 03CE D592 4925 2F8E C41A (pubkey-iwuzhere)
* **Jameson Nash** ([@vtjnash](https://github.com/vtjnash))
- GPG key: AEAD 0A4B 6867 6775 1A0E 4AEF 34A2 5FB1 2824 6514 (pubkey-vtjnash)
- GPG key: CFBB 9CA9 A5BE AFD7 0E2B 3C5A 79A6 7C55 A367 9C8B (pubkey2022-vtjnash)
* **Jiawen Geng** ([@gengjiawen](https://github.com/gengjiawen))
* **John Barboza** ([@jbarz](https://github.com/jbarz))
* **Kaoru Takanashi** ([@erw7](https://github.com/erw7))
- GPG Key: 5804 F999 8A92 2AFB A398 47A0 7183 5090 6134 887F (pubkey-erw7)


@ -27,8 +27,8 @@ uvinclude_HEADERS = include/uv/errno.h \
CLEANFILES =
lib_LTLIBRARIES = libuv.la
libuv_la_CFLAGS = @CFLAGS@
libuv_la_LDFLAGS = -no-undefined -version-info 1:0:0
libuv_la_CFLAGS = $(AM_CFLAGS)
libuv_la_LDFLAGS = $(AM_LDFLAGS) -no-undefined -version-info 1:0:0
libuv_la_SOURCES = src/fs-poll.c \
src/heap-inl.h \
src/idna.c \
@ -131,7 +131,7 @@ EXTRA_DIST = test/fixtures/empty_file \
TESTS = test/run-tests
check_PROGRAMS = test/run-tests
test_run_tests_CFLAGS =
test_run_tests_CFLAGS = $(AM_CFLAGS)
if SUNOS
# Can't be turned into a CC_CHECK_CFLAGS in configure.ac, it makes compilers
@ -139,7 +139,7 @@ if SUNOS
test_run_tests_CFLAGS += -pthreads
endif
test_run_tests_LDFLAGS =
test_run_tests_LDFLAGS = $(AM_LDFLAGS)
test_run_tests_SOURCES = test/blackhole-server.c \
test/echo-server.c \
test/run-tests.c \
@ -388,10 +388,8 @@ libuv_la_SOURCES += src/unix/aix-common.c \
endif
if ANDROID
uvinclude_HEADERS += include/uv/android-ifaddrs.h
libuv_la_CFLAGS += -D_GNU_SOURCE
libuv_la_SOURCES += src/unix/android-ifaddrs.c \
src/unix/pthread-fixes.c
libuv_la_SOURCES += src/unix/pthread-fixes.c
endif
if CYGWIN
@ -457,9 +455,12 @@ endif
if HURD
uvinclude_HEADERS += include/uv/posix.h
libuv_la_SOURCES += src/unix/no-fsevents.c \
libuv_la_SOURCES += src/unix/bsd-ifaddrs.c \
src/unix/no-fsevents.c \
src/unix/no-proctitle.c \
src/unix/posix-hrtime.c \
src/unix/posix-poll.c
src/unix/posix-poll.c \
src/unix/hurd.c
endif
if LINUX


@ -3,7 +3,7 @@
| System | Support type | Supported versions | Notes |
|---|---|---|---|
| GNU/Linux | Tier 1 | Linux >= 2.6.32 with glibc >= 2.12 | |
| macOS | Tier 1 | macOS >= 10.7 | |
| macOS | Tier 1 | macOS >= 10.15 | Current and previous macOS release |
| Windows | Tier 1 | >= Windows 8 | VS 2015 and later are supported |
| FreeBSD | Tier 1 | >= 10 | |
| AIX | Tier 2 | >= 6 | Maintainers: @libuv/aix |
@ -11,7 +11,7 @@
| z/OS | Tier 2 | >= V2R2 | Maintainers: @libuv/zos |
| Linux with musl | Tier 2 | musl >= 1.0 | |
| SmartOS | Tier 3 | >= 14.4 | |
| Android | Tier 3 | NDK >= r15b | |
| Android | Tier 3 | NDK >= r15b | Android 7.0, `-DANDROID_PLATFORM=android-24` |
| MinGW | Tier 3 | MinGW32 and MinGW-w64 | |
| SunOS | Tier 3 | Solaris 121 and later | |
| Other | Tier 3 | N/A | |


@ -13,7 +13,7 @@
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
AC_PREREQ(2.57)
AC_INIT([libuv], [1.43.0], [https://github.com/libuv/libuv/issues])
AC_INIT([libuv], [1.44.0], [https://github.com/libuv/libuv/issues])
AC_CONFIG_MACRO_DIR([m4])
m4_include([m4/libuv-extra-automake-flags.m4])
m4_include([m4/as_case.m4])

deps/libuv/docs/code/.gitignore vendored Normal file

@ -0,0 +1,3 @@
*/*
!*.c
!*.h

deps/libuv/docs/code/Makefile vendored Normal file

@ -0,0 +1,82 @@
examples=\
helloworld\
default-loop\
idle-basic\
uvcat\
uvtee\
onchange\
thread-create\
queue-work\
progress\
tcp-echo-server\
dns\
udp-dhcp\
idle-compute\
ref-timer\
spawn\
detach\
proc-streams\
cgi\
pipe-echo-server\
multi-echo-server\
tty\
tty-gravity\
interfaces\
locks \
signal \
uvstop \
queue-cancel
UV_PATH=$(shell pwd)/../..
UV_LIB=$(UV_PATH)/.libs/libuv.a
CFLAGS=-g -Wall -I$(UV_PATH)/include
LIBS=
uname_S=$(shell uname -s)
ifeq (Darwin, $(uname_S))
CFLAGS+=-framework CoreServices
SHARED_LIB_FLAGS=-bundle -undefined dynamic_lookup -o plugin/libhello.dylib
endif
ifeq (Linux, $(uname_S))
LIBS=-lrt -ldl -lm -pthread -lcurl
SHARED_LIB_FLAGS=-shared -Wl,-soname,libhello.so -o plugin/libhello.so
PLUGIN_EXE_FLAGS=-Wl,-export-dynamic
endif
all: $(examples) plugin/plugin proc-streams/test cgi/tick multi-echo-server/worker uvwget/uvwget
$(examples): % : %/main.c
gcc $(CFLAGS) -o $@/$@ $< $(UV_LIB) $(LIBS)
plugin: plugin/plugin
plugin/plugin: plugin/*.c
gcc $(CFLAGS) $(PLUGIN_EXE_FLAGS) -o plugin/plugin plugin/main.c $(UV_LIB) $(LIBS)
gcc -g -Wall -c -fPIC -o plugin/hello.o plugin/hello.c
gcc $(SHARED_LIB_FLAGS) plugin/hello.o
proc-streams/test: proc-streams/test.c
gcc -g -Wall -o proc-streams/test proc-streams/test.c
cgi/tick: cgi/tick.c
gcc -g -Wall -o cgi/tick cgi/tick.c
multi-echo-server/worker: multi-echo-server/worker.c
gcc $(CFLAGS) -o multi-echo-server/worker multi-echo-server/worker.c $(UV_LIB) $(LIBS)
uvwget: uvwget/uvwget
uvwget/uvwget: uvwget/main.c
gcc $(CFLAGS) `curl-config --cflags --libs` -o uvwget/uvwget uvwget/main.c $(UV_LIB) $(LIBS)
clean:
for dir in $(examples); do cd $$dir; rm -f $$dir; rm -rf $$dir.dSYM; cd ..; done
rm -rf plugin/*.o plugin/libhello.*
rm -rf plugin/plugin plugin/plugin.dSYM
rm -rf proc-streams/test proc-streams/test.dSYM
rm -rf cgi/tick cgi/tick.dSYM
rm -rf multi-echo-server/worker multi-echo-server/worker.dSYM
rm -rf uvwget/uvwget uvwget/uvwget.dSYM
.PHONY: clean all $(examples) plugin uvwget


@ -0,0 +1,12 @@
#include <stdio.h>
#include <uv.h>
int main() {
uv_loop_t *loop = uv_default_loop();
printf("Default loop.\n");
uv_run(loop, UV_RUN_DEFAULT);
uv_loop_close(loop);
return 0;
}


@ -125,7 +125,7 @@ File I/O
Unlike network I/O, there are no platform-specific file I/O primitives libuv could rely on,
so the current approach is to run blocking file I/O operations in a thread pool.
For a thorough explanation of the cross-platform file I/O landscape, checkout
For a thorough explanation of the cross-platform file I/O landscape, check out
`this post <https://blog.libtorrent.org/2012/10/asynchronous-disk-io/>`_.
libuv currently uses a global thread pool on which all loops can queue work. 3 types of


@ -71,7 +71,7 @@ architecture of libuv and its background. If you have no prior experience with
either libuv or libev, it is a quick, useful watch.
libuv's event loop is explained in more detail in the `documentation
<http://docs.libuv.org/en/v1.x/design.html#the-i-o-loop>`_.
<https://docs.libuv.org/en/v1.x/design.html#the-i-o-loop>`_.
.. raw:: html
@ -109,6 +109,11 @@ A default loop is provided by libuv and can be accessed using
``uv_default_loop()``. You should use this loop if you only want a single
loop.
.. rubric:: default-loop/main.c
.. literalinclude:: ../../code/default-loop/main.c
:language: c
:linenos:
.. note::
node.js uses the default loop as its main loop. If you are writing bindings
@ -119,9 +124,9 @@ loop.
Error handling
--------------
Initialization functions or synchronous functions which may fail return a negative number on error. Async functions that may fail will pass a status parameter to their callbacks. The error messages are defined as ``UV_E*`` `constants`_.
.. _constants: http://docs.libuv.org/en/v1.x/errors.html#error-constants
.. _constants: https://docs.libuv.org/en/v1.x/errors.html#error-constants
You can use the ``uv_strerror(int)`` and ``uv_err_name(int)`` functions
to get a ``const char *`` describing the error or the error name respectively.
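For example, a minimal sketch of this pattern (``uv_ip4_addr()`` is used here only as a convenient way to produce an error code):

.. code-block:: c

    #include <stdio.h>
    #include <uv.h>

    int main() {
        struct sockaddr_in addr;
        /* A malformed address string makes uv_ip4_addr() fail with UV_EINVAL. */
        int r = uv_ip4_addr("not-an-ip", 7000, &addr);
        if (r < 0)
            fprintf(stderr, "%s: %s\n", uv_err_name(r), uv_strerror(r));
        return 0;
    }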
@ -134,7 +139,7 @@ Handles and Requests
libuv works by the user expressing interest in particular events. This is
usually done by creating a **handle** to an I/O device, timer or process.
Handles are opaque structs named as ``uv_TYPE_t`` where type signifies what the
handle is used for.
.. rubric:: libuv watchers
.. code-block:: c
@ -169,6 +174,16 @@ handle is used for.
typedef struct uv_udp_send_s uv_udp_send_t;
typedef struct uv_fs_s uv_fs_t;
typedef struct uv_work_s uv_work_t;
typedef struct uv_random_s uv_random_t;
/* None of the above. */
typedef struct uv_env_item_s uv_env_item_t;
typedef struct uv_cpu_info_s uv_cpu_info_t;
typedef struct uv_interface_address_s uv_interface_address_t;
typedef struct uv_dirent_s uv_dirent_t;
typedef struct uv_passwd_s uv_passwd_t;
typedef struct uv_utsname_s uv_utsname_t;
typedef struct uv_statfs_s uv_statfs_t;
Handles represent long-lived objects. Async operations on such handles are


@ -13,7 +13,7 @@ Simple filesystem read/write is achieved using the ``uv_fs_*`` functions and the
watchers registered with the event loop when application interaction is
required.
.. _thread pool: http://docs.libuv.org/en/v1.x/threadpool.html#thread-pool-work-scheduling
.. _thread pool: https://docs.libuv.org/en/v1.x/threadpool.html#thread-pool-work-scheduling
All filesystem functions have two forms - *synchronous* and *asynchronous*.
@ -66,7 +66,7 @@ The ``result`` field of a ``uv_fs_t`` is the file descriptor in case of the
.. literalinclude:: ../../code/uvcat/main.c
:language: c
:linenos:
:lines: 26-40
:lines: 26-39
:emphasize-lines: 2,8,12
In the case of a read call, you should pass an *initialized* buffer which will
@ -91,7 +91,7 @@ callbacks.
.. literalinclude:: ../../code/uvcat/main.c
:language: c
:linenos:
:lines: 16-24
:lines: 17-24
:emphasize-lines: 6
.. warning::
@ -132,6 +132,7 @@ same patterns as the read/write/open calls, returning the result in the
int uv_fs_copyfile(uv_loop_t* loop, uv_fs_t* req, const char* path, const char* new_path, int flags, uv_fs_cb cb);
int uv_fs_mkdir(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode, uv_fs_cb cb);
int uv_fs_mkdtemp(uv_loop_t* loop, uv_fs_t* req, const char* tpl, uv_fs_cb cb);
int uv_fs_mkstemp(uv_loop_t* loop, uv_fs_t* req, const char* tpl, uv_fs_cb cb);
int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb);
int uv_fs_scandir(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags, uv_fs_cb cb);
int uv_fs_scandir_next(uv_fs_t* req, uv_dirent_t* ent);
@ -149,6 +150,7 @@ same patterns as the read/write/open calls, returning the result in the
int uv_fs_chmod(uv_loop_t* loop, uv_fs_t* req, const char* path, int mode, uv_fs_cb cb);
int uv_fs_utime(uv_loop_t* loop, uv_fs_t* req, const char* path, double atime, double mtime, uv_fs_cb cb);
int uv_fs_futime(uv_loop_t* loop, uv_fs_t* req, uv_file file, double atime, double mtime, uv_fs_cb cb);
int uv_fs_lutime(uv_loop_t* loop, uv_fs_t* req, const char* path, double atime, double mtime, uv_fs_cb cb);
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb);
int uv_fs_link(uv_loop_t* loop, uv_fs_t* req, const char* path, const char* new_path, uv_fs_cb cb);
int uv_fs_symlink(uv_loop_t* loop, uv_fs_t* req, const char* path, const char* new_path, int flags, uv_fs_cb cb);
@ -158,6 +160,7 @@ same patterns as the read/write/open calls, returning the result in the
int uv_fs_chown(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_uid_t uid, uv_gid_t gid, uv_fs_cb cb);
int uv_fs_fchown(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_uid_t uid, uv_gid_t gid, uv_fs_cb cb);
int uv_fs_lchown(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_uid_t uid, uv_gid_t gid, uv_fs_cb cb);
int uv_fs_statfs(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb);
.. _buffers-and-streams:
@ -190,7 +193,7 @@ and freed by the application.
.. ERROR::
THIS PROGRAM DOES NOT ALWAYS WORK, NEED SOMETHING BETTER**
**THIS PROGRAM DOES NOT ALWAYS WORK, NEED SOMETHING BETTER**
To demonstrate streams we will need to use ``uv_pipe_t``. This allows streaming
local files [#]_. Here is a simple tee utility using libuv. Doing all operations
@ -209,7 +212,7 @@ opened as bidirectional by default.
.. literalinclude:: ../../code/uvtee/main.c
:language: c
:linenos:
:lines: 61-80
:lines: 62-80
:emphasize-lines: 4,5,15
The third argument of ``uv_pipe_init()`` should be set to 1 for IPC using named
@ -285,6 +288,13 @@ a command whenever any of the watched files change::
./onchange <command> <file1> [file2] ...
.. note::
Currently this example only works on OSX and Windows.
Refer to the `notes of uv_fs_event_start`_ function.
.. _notes of uv_fs_event_start: https://docs.libuv.org/en/v1.x/fs_event.html#c.uv_fs_event_start
The file change notification is started using ``uv_fs_event_init()``:
.. rubric:: onchange/main.c - The setup
@ -300,8 +310,8 @@ argument, ``flags``, can be:
.. code-block:: c
/*
* Flags to be passed to uv_fs_event_start().
*/
enum uv_fs_event_flags {
UV_FS_EVENT_WATCH_ENTRY = 1,
UV_FS_EVENT_STAT = 2,
@ -319,9 +329,9 @@ The callback will receive the following arguments:
#. ``const char *filename`` - If a directory is being monitored, this is the
file which was changed. Only non-``null`` on Linux and Windows. May be ``null``
even on those platforms.
#. ``int flags`` - one of ``UV_RENAME`` or ``UV_CHANGE``, or a bitwise OR of
both.
#. ``int status`` - Currently 0.
#. ``int events`` - one of ``UV_RENAME`` or ``UV_CHANGE``, or a bitwise OR of
both.
#. ``int status`` - If ``status < 0``, there is an :ref:`libuv error<libuv-error-handling>`.
In our example we simply print the arguments and run the command using
``system()``.
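A callback with that signature might look like the following sketch (simplified; ``command`` stands in for a global that ``main()`` would fill from ``argv``):

.. code-block:: c

    #include <stdio.h>
    #include <stdlib.h>
    #include <uv.h>

    const char *command; /* hypothetical global, set from argv in main() */

    void run_command(uv_fs_event_t *handle, const char *filename,
                     int events, int status) {
        if (status < 0) {
            fprintf(stderr, "fs event error: %s\n", uv_strerror(status));
            return;
        }
        fprintf(stderr, "Change detected in %s: ", filename ? filename : "(unknown)");
        if (events & UV_RENAME) fprintf(stderr, "renamed");
        if (events & UV_CHANGE) fprintf(stderr, "changed");
        fprintf(stderr, "\n");
        system(command);
    }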


@ -8,7 +8,7 @@ It is meant to cover the main areas of libuv, but is not a comprehensive
reference discussing every function and data structure. The `official libuv
documentation`_ may be consulted for full details.
.. _official libuv documentation: http://docs.libuv.org/en/v1.x/
.. _official libuv documentation: https://docs.libuv.org/en/v1.x/
This book is still a work in progress, so sections may be incomplete, but
I hope you will enjoy it as it grows.
@ -47,25 +47,23 @@ Since then libuv has continued to mature and become a high quality standalone
library for system programming. Users outside of node.js include Mozilla's
Rust_ programming language, and a variety_ of language bindings.
This book and the code is based on libuv version `v1.3.0`_.
This book and the code is based on libuv version `v1.42.0`_.
Code
----
All the code from this book is included as part of the source of the book on
Github. `Clone`_/`Download`_ the book, then build libuv::
All the example code and the source of the book is included as part of
the libuv_ project on Github.
Clone or Download libuv_, then build it::
cd libuv
./autogen.sh
sh autogen.sh
./configure
make
There is no need to ``make install``. To build the examples run ``make`` in the
``code/`` directory.
``docs/code/`` directory.
.. _Clone: https://github.com/nikhilm/uvbook
.. _Download: https://github.com/nikhilm/uvbook/downloads
.. _v1.3.0: https://github.com/libuv/libuv/tags
.. _v1.42.0: https://github.com/libuv/libuv/releases/tag/v1.42.0
.. _V8: https://v8.dev
.. _libev: http://software.schmorp.de/pkg/libev.html
.. _libuv: https://github.com/libuv/libuv


@ -312,7 +312,7 @@ API
.. c:function:: int uv_uptime(double* uptime)
Gets the current system uptime.
Gets the current system uptime. Depending on the system full or fractional seconds are returned.
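For example (illustrative only):

.. code-block:: c

    #include <stdio.h>
    #include <uv.h>

    int main() {
        double uptime;
        if (uv_uptime(&uptime) == 0)
            printf("up %.2f seconds\n", uptime); /* fractional part only on some systems */
        return 0;
    }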
.. c:function:: int uv_getrusage(uv_rusage_t* rusage)
@ -334,11 +334,30 @@ API
.. versionadded:: 1.16.0
.. c:function:: unsigned int uv_available_parallelism(void)
Returns an estimate of the default amount of parallelism a program should
use. Always returns a non-zero value.
On Linux, inspects the calling thread's CPU affinity mask to determine if
it has been pinned to specific CPUs.
On Windows, the available parallelism may be underreported on systems with
more than 64 logical CPUs.
On other platforms, reports the number of CPUs that the operating system
considers to be online.
.. versionadded:: 1.44.0
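A minimal usage sketch, e.g. for sizing a worker pool:

.. code-block:: c

    #include <stdio.h>
    #include <uv.h>

    int main() {
        /* Never returns zero, so a pool can be sized from it directly. */
        unsigned int workers = uv_available_parallelism();
        printf("starting %u workers\n", workers);
        return 0;
    }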
.. c:function:: int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count)
Gets information about the CPUs on the system. The `cpu_infos` array will
have `count` elements and needs to be freed with :c:func:`uv_free_cpu_info`.
Use :c:func:`uv_available_parallelism` if you need to know how many CPUs
are available for threads or child processes.
.. c:function:: void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count)
Frees the `cpu_infos` array previously allocated with :c:func:`uv_cpu_info`.
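The allocate/free pairing looks like this (a minimal sketch):

.. code-block:: c

    #include <stdio.h>
    #include <uv.h>

    int main() {
        uv_cpu_info_t* cpus;
        int count;
        int i;
        int r;

        r = uv_cpu_info(&cpus, &count);
        if (r < 0) {
            fprintf(stderr, "uv_cpu_info: %s\n", uv_strerror(r));
            return 1;
        }
        for (i = 0; i < count; i++)
            printf("cpu %d: %s (%d MHz)\n", i, cpus[i].model, cpus[i].speed);
        uv_free_cpu_info(cpus, count); /* always pair with a successful uv_cpu_info() */
        return 0;
    }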


@ -109,10 +109,39 @@ Data types
::
typedef enum {
/*
* The following four options are mutually-exclusive, and define
* the operation to perform for the corresponding file descriptor
* in the child process:
*/
/*
* No file descriptor will be provided (or redirected to
* `/dev/null` if it is fd 0, 1 or 2).
*/
UV_IGNORE = 0x00,
/*
* Open a new pipe into `data.stream`, per the flags below. The
* `data.stream` field must point to a uv_pipe_t object that has
* been initialized with `uv_pipe_init(loop, data.stream, ipc);`,
* but not yet opened or connected.
*/
UV_CREATE_PIPE = 0x01,
/*
* The child process will be given a duplicate of the parent's
* file descriptor given by `data.fd`.
*/
UV_INHERIT_FD = 0x02,
/*
* The child process will be given a duplicate of the parent's
* file descriptor being used by the stream handle given by
* `data.stream`.
*/
UV_INHERIT_STREAM = 0x04,
/*
* When UV_CREATE_PIPE is specified, UV_READABLE_PIPE and UV_WRITABLE_PIPE
* determine the direction of flow, from the child process' perspective. Both
@ -120,6 +149,7 @@ Data types
*/
UV_READABLE_PIPE = 0x10,
UV_WRITABLE_PIPE = 0x20,
/*
* When UV_CREATE_PIPE is specified, specifying UV_NONBLOCK_PIPE opens the
* handle in non-blocking mode in the child. This may cause loss of data,


@ -1133,8 +1133,8 @@ struct uv_interface_address_s {
struct uv_passwd_s {
char* username;
long uid;
long gid;
unsigned long uid;
unsigned long gid;
char* shell;
char* homedir;
};
@ -1242,6 +1242,7 @@ UV_EXTERN uv_pid_t uv_os_getppid(void);
UV_EXTERN int uv_os_getpriority(uv_pid_t pid, int* priority);
UV_EXTERN int uv_os_setpriority(uv_pid_t pid, int priority);
UV_EXTERN unsigned int uv_available_parallelism(void);
UV_EXTERN int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count);
UV_EXTERN void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count);


@ -1,54 +0,0 @@
/*
* Copyright (c) 1995, 1999
* Berkeley Software Design, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* THIS SOFTWARE IS PROVIDED BY Berkeley Software Design, Inc. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design, Inc. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* BSDI ifaddrs.h,v 2.5 2000/02/23 14:51:59 dab Exp
*/
#ifndef _IFADDRS_H_
#define _IFADDRS_H_
struct ifaddrs {
struct ifaddrs *ifa_next;
char *ifa_name;
unsigned int ifa_flags;
struct sockaddr *ifa_addr;
struct sockaddr *ifa_netmask;
struct sockaddr *ifa_dstaddr;
void *ifa_data;
};
/*
* This may have been defined in <net/if.h>. Note that if <net/if.h> is
* to be included it must be included before this header file.
*/
#ifndef ifa_broadaddr
#define ifa_broadaddr ifa_dstaddr /* broadcast address interface */
#endif
#include <sys/cdefs.h>
__BEGIN_DECLS
extern int getifaddrs(struct ifaddrs **ifap);
extern void freeifaddrs(struct ifaddrs *ifa);
__END_DECLS
#endif


@ -31,7 +31,7 @@
*/
#define UV_VERSION_MAJOR 1
#define UV_VERSION_MINOR 43
#define UV_VERSION_MINOR 44
#define UV_VERSION_PATCH 0
#define UV_VERSION_IS_RELEASE 1
#define UV_VERSION_SUFFIX ""


@ -223,7 +223,7 @@ typedef struct _AFD_POLL_INFO {
AFD_POLL_HANDLE_INFO Handles[1];
} AFD_POLL_INFO, *PAFD_POLL_INFO;
#define UV_MSAFD_PROVIDER_COUNT 3
#define UV_MSAFD_PROVIDER_COUNT 4
/**
@ -263,21 +263,14 @@ typedef union {
} unused_; /* TODO: retained for ABI compatibility; remove me in v2.x. */
} uv_cond_t;
typedef union {
struct {
unsigned int num_readers_;
CRITICAL_SECTION num_readers_lock_;
HANDLE write_semaphore_;
} state_;
/* TODO: remove me in v2.x. */
struct {
SRWLOCK unused_;
} unused1_;
/* TODO: remove me in v2.x. */
struct {
uv_mutex_t unused1_;
uv_mutex_t unused2_;
} unused2_;
typedef struct {
SRWLOCK read_write_lock_;
/* TODO: retained for ABI compatibility; remove me in v2.x */
#ifdef _WIN64
unsigned char padding_[72];
#else
unsigned char padding_[44];
#endif
} uv_rwlock_t;
typedef struct {


@ -1,6 +1,7 @@
dnl Macros to check the presence of generic (non-typed) symbols.
dnl Copyright (c) 2006-2008 Diego Pettenò <flameeyes gmail com>
dnl Copyright (c) 2006-2008 xine project
dnl Copyright (c) 2021 libuv project
dnl
dnl This program is free software; you can redistribute it and/or modify
dnl it under the terms of the GNU General Public License as published by
@ -63,7 +64,7 @@ AC_DEFUN([CC_CHECK_CFLAGS], [
])
dnl CC_CHECK_CFLAG_APPEND(FLAG, [action-if-found], [action-if-not-found])
dnl Check for CFLAG and appends them to CFLAGS if supported
dnl Check for CFLAG and appends them to AM_CFLAGS if supported
AC_DEFUN([CC_CHECK_CFLAG_APPEND], [
AC_CACHE_CHECK([if $CC supports $1 flag],
AS_TR_SH([cc_cv_cflags_$1]),
@ -71,7 +72,7 @@ AC_DEFUN([CC_CHECK_CFLAG_APPEND], [
)
AS_IF([eval test x$]AS_TR_SH([cc_cv_cflags_$1])[ = xyes],
[CFLAGS="$CFLAGS $1"; DEBUG_CFLAGS="$DEBUG_CFLAGS $1"; $2], [$3])
[AM_CFLAGS="$AM_CFLAGS $1"; DEBUG_CFLAGS="$DEBUG_CFLAGS $1"; $2], [$3])
])
dnl CC_CHECK_CFLAGS_APPEND([FLAG1 FLAG2], [action-if-found], [action-if-not])


@ -25,7 +25,7 @@
#ifdef _WIN32
#include "win/internal.h"
#include "win/handle-inl.h"
#define uv__make_close_pending(h) uv_want_endgame((h)->loop, (h))
#define uv__make_close_pending(h) uv__want_endgame((h)->loop, (h))
#else
#include "unix/internal.h"
#endif

deps/libuv/src/idna.c vendored

@ -21,6 +21,7 @@
#include "idna.h"
#include <assert.h>
#include <string.h>
#include <limits.h> /* UINT_MAX */
static unsigned uv__utf8_decode1_slow(const char** p,
const char* pe,
@ -129,7 +130,7 @@ static int uv__idna_toascii_label(const char* s, const char* se,
while (s < se) {
c = uv__utf8_decode1(&s, se);
if (c == -1u)
if (c == UINT_MAX)
return UV_EINVAL;
if (c < 128)
@ -151,7 +152,7 @@ static int uv__idna_toascii_label(const char* s, const char* se,
s = ss;
while (s < se) {
c = uv__utf8_decode1(&s, se);
assert(c != -1u);
assert(c != UINT_MAX);
if (c > 127)
continue;
@ -182,7 +183,7 @@ static int uv__idna_toascii_label(const char* s, const char* se,
while (s < se) {
c = uv__utf8_decode1(&s, se);
assert(c != -1u);
assert(c != UINT_MAX);
if (c >= n)
if (c < m)
@ -201,7 +202,7 @@ static int uv__idna_toascii_label(const char* s, const char* se,
s = ss;
while (s < se) {
c = uv__utf8_decode1(&s, se);
assert(c != -1u);
assert(c != UINT_MAX);
if (c < n)
if (++delta == 0)
@ -280,7 +281,7 @@ long uv__idna_toascii(const char* s, const char* se, char* d, char* de) {
st = si;
c = uv__utf8_decode1(&si, se);
if (c == -1u)
if (c == UINT_MAX)
return UV_EINVAL;
if (c != '.')


@ -28,7 +28,7 @@
*/
#include "uv.h"
/* Copies up to |n-1| bytes from |d| to |s| and always zero-terminates
/* Copies up to |n-1| bytes from |s| to |d| and always zero-terminates
* the result, except when |n==0|. Returns the number of bytes copied
* or UV_E2BIG if |d| is too small.
*


@ -1,713 +0,0 @@
/*
Copyright (c) 2013, Kenneth MacKay
Copyright (c) 2014, Emergya (Cloud4all, FP7/2007-2013 grant agreement #289016)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "uv/android-ifaddrs.h"
#include "uv-common.h"
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>
#include <net/if_arp.h>
#include <netinet/in.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_packet.h>
typedef struct NetlinkList
{
struct NetlinkList *m_next;
struct nlmsghdr *m_data;
unsigned int m_size;
} NetlinkList;
static int netlink_socket(pid_t *p_pid)
{
struct sockaddr_nl l_addr;
socklen_t l_len;
int l_socket = socket(PF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
if(l_socket < 0)
{
return -1;
}
memset(&l_addr, 0, sizeof(l_addr));
l_addr.nl_family = AF_NETLINK;
if(bind(l_socket, (struct sockaddr *)&l_addr, sizeof(l_addr)) < 0)
{
close(l_socket);
return -1;
}
l_len = sizeof(l_addr);
if(getsockname(l_socket, (struct sockaddr *)&l_addr, &l_len) < 0)
{
close(l_socket);
return -1;
}
*p_pid = l_addr.nl_pid;
return l_socket;
}
static int netlink_send(int p_socket, int p_request)
{
char l_buffer[NLMSG_ALIGN(sizeof(struct nlmsghdr)) + NLMSG_ALIGN(sizeof(struct rtgenmsg))];
struct nlmsghdr *l_hdr;
struct rtgenmsg *l_msg;
struct sockaddr_nl l_addr;
memset(l_buffer, 0, sizeof(l_buffer));
l_hdr = (struct nlmsghdr *)l_buffer;
l_msg = (struct rtgenmsg *)NLMSG_DATA(l_hdr);
l_hdr->nlmsg_len = NLMSG_LENGTH(sizeof(*l_msg));
l_hdr->nlmsg_type = p_request;
l_hdr->nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST;
l_hdr->nlmsg_pid = 0;
l_hdr->nlmsg_seq = p_socket;
l_msg->rtgen_family = AF_UNSPEC;
memset(&l_addr, 0, sizeof(l_addr));
l_addr.nl_family = AF_NETLINK;
return (sendto(p_socket, l_hdr, l_hdr->nlmsg_len, 0, (struct sockaddr *)&l_addr, sizeof(l_addr)));
}
static int netlink_recv(int p_socket, void *p_buffer, size_t p_len)
{
struct sockaddr_nl l_addr;
struct msghdr l_msg;
struct iovec l_iov;
l_iov.iov_base = p_buffer;
l_iov.iov_len = p_len;
for(;;)
{
int l_result;
l_msg.msg_name = (void *)&l_addr;
l_msg.msg_namelen = sizeof(l_addr);
l_msg.msg_iov = &l_iov;
l_msg.msg_iovlen = 1;
l_msg.msg_control = NULL;
l_msg.msg_controllen = 0;
l_msg.msg_flags = 0;
l_result = recvmsg(p_socket, &l_msg, 0);
if(l_result < 0)
{
if(errno == EINTR)
{
continue;
}
return -2;
}
/* Buffer was too small */
if(l_msg.msg_flags & MSG_TRUNC)
{
return -1;
}
return l_result;
}
}
static struct nlmsghdr *getNetlinkResponse(int p_socket, pid_t p_pid, int *p_size, int *p_done)
{
size_t l_size = 4096;
void *l_buffer = NULL;
for(;;)
{
int l_read;
uv__free(l_buffer);
l_buffer = uv__malloc(l_size);
if (l_buffer == NULL)
{
return NULL;
}
l_read = netlink_recv(p_socket, l_buffer, l_size);
*p_size = l_read;
if(l_read == -2)
{
uv__free(l_buffer);
return NULL;
}
if(l_read >= 0)
{
struct nlmsghdr *l_hdr;
for(l_hdr = (struct nlmsghdr *)l_buffer; NLMSG_OK(l_hdr, (unsigned int)l_read); l_hdr = (struct nlmsghdr *)NLMSG_NEXT(l_hdr, l_read))
{
if((pid_t)l_hdr->nlmsg_pid != p_pid || (int)l_hdr->nlmsg_seq != p_socket)
{
continue;
}
if(l_hdr->nlmsg_type == NLMSG_DONE)
{
*p_done = 1;
break;
}
if(l_hdr->nlmsg_type == NLMSG_ERROR)
{
uv__free(l_buffer);
return NULL;
}
}
return l_buffer;
}
l_size *= 2;
}
}
static NetlinkList *newListItem(struct nlmsghdr *p_data, unsigned int p_size)
{
NetlinkList *l_item = uv__malloc(sizeof(NetlinkList));
if (l_item == NULL)
{
return NULL;
}
l_item->m_next = NULL;
l_item->m_data = p_data;
l_item->m_size = p_size;
return l_item;
}
static void freeResultList(NetlinkList *p_list)
{
NetlinkList *l_cur;
while(p_list)
{
l_cur = p_list;
p_list = p_list->m_next;
uv__free(l_cur->m_data);
uv__free(l_cur);
}
}
static NetlinkList *getResultList(int p_socket, int p_request, pid_t p_pid)
{
int l_size;
int l_done;
NetlinkList *l_list;
NetlinkList *l_end;
if(netlink_send(p_socket, p_request) < 0)
{
return NULL;
}
l_list = NULL;
l_end = NULL;
l_done = 0;
while(!l_done)
{
NetlinkList *l_item;
struct nlmsghdr *l_hdr = getNetlinkResponse(p_socket, p_pid, &l_size, &l_done);
/* Error */
if(!l_hdr)
{
freeResultList(l_list);
return NULL;
}
l_item = newListItem(l_hdr, l_size);
if (!l_item)
{
freeResultList(l_list);
return NULL;
}
if(!l_list)
{
l_list = l_item;
}
else
{
l_end->m_next = l_item;
}
l_end = l_item;
}
return l_list;
}
static size_t maxSize(size_t a, size_t b)
{
return (a > b ? a : b);
}
static size_t calcAddrLen(sa_family_t p_family, int p_dataSize)
{
switch(p_family)
{
case AF_INET:
return sizeof(struct sockaddr_in);
case AF_INET6:
return sizeof(struct sockaddr_in6);
case AF_PACKET:
return maxSize(sizeof(struct sockaddr_ll), offsetof(struct sockaddr_ll, sll_addr) + p_dataSize);
default:
return maxSize(sizeof(struct sockaddr), offsetof(struct sockaddr, sa_data) + p_dataSize);
}
}
static void makeSockaddr(sa_family_t p_family, struct sockaddr *p_dest, void *p_data, size_t p_size)
{
switch(p_family)
{
case AF_INET:
memcpy(&((struct sockaddr_in*)p_dest)->sin_addr, p_data, p_size);
break;
case AF_INET6:
memcpy(&((struct sockaddr_in6*)p_dest)->sin6_addr, p_data, p_size);
break;
case AF_PACKET:
memcpy(((struct sockaddr_ll*)p_dest)->sll_addr, p_data, p_size);
((struct sockaddr_ll*)p_dest)->sll_halen = p_size;
break;
default:
memcpy(p_dest->sa_data, p_data, p_size);
break;
}
p_dest->sa_family = p_family;
}
static void addToEnd(struct ifaddrs **p_resultList, struct ifaddrs *p_entry)
{
if(!*p_resultList)
{
*p_resultList = p_entry;
}
else
{
struct ifaddrs *l_cur = *p_resultList;
while(l_cur->ifa_next)
{
l_cur = l_cur->ifa_next;
}
l_cur->ifa_next = p_entry;
}
}
static int interpretLink(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList)
{
struct ifaddrs *l_entry;
char *l_index;
char *l_name;
char *l_addr;
char *l_data;
struct ifinfomsg *l_info = (struct ifinfomsg *)NLMSG_DATA(p_hdr);
size_t l_nameSize = 0;
size_t l_addrSize = 0;
size_t l_dataSize = 0;
size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg));
struct rtattr *l_rta;
for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
switch(l_rta->rta_type)
{
case IFLA_ADDRESS:
case IFLA_BROADCAST:
l_addrSize += NLMSG_ALIGN(calcAddrLen(AF_PACKET, l_rtaDataSize));
break;
case IFLA_IFNAME:
l_nameSize += NLMSG_ALIGN(l_rtaSize + 1);
break;
case IFLA_STATS:
l_dataSize += NLMSG_ALIGN(l_rtaSize);
break;
default:
break;
}
}
l_entry = uv__malloc(sizeof(struct ifaddrs) + sizeof(int) + l_nameSize + l_addrSize + l_dataSize);
if (l_entry == NULL)
{
return -1;
}
memset(l_entry, 0, sizeof(struct ifaddrs));
l_entry->ifa_name = "";
l_index = ((char *)l_entry) + sizeof(struct ifaddrs);
l_name = l_index + sizeof(int);
l_addr = l_name + l_nameSize;
l_data = l_addr + l_addrSize;
/* Save the interface index so we can look it up when handling the
* addresses.
*/
memcpy(l_index, &l_info->ifi_index, sizeof(int));
l_entry->ifa_flags = l_info->ifi_flags;
l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifinfomsg));
for(l_rta = IFLA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
void *l_rtaData = RTA_DATA(l_rta);
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
switch(l_rta->rta_type)
{
case IFLA_ADDRESS:
case IFLA_BROADCAST:
{
size_t l_addrLen = calcAddrLen(AF_PACKET, l_rtaDataSize);
makeSockaddr(AF_PACKET, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize);
((struct sockaddr_ll *)l_addr)->sll_ifindex = l_info->ifi_index;
((struct sockaddr_ll *)l_addr)->sll_hatype = l_info->ifi_type;
if(l_rta->rta_type == IFLA_ADDRESS)
{
l_entry->ifa_addr = (struct sockaddr *)l_addr;
}
else
{
l_entry->ifa_broadaddr = (struct sockaddr *)l_addr;
}
l_addr += NLMSG_ALIGN(l_addrLen);
break;
}
case IFLA_IFNAME:
strncpy(l_name, l_rtaData, l_rtaDataSize);
l_name[l_rtaDataSize] = '\0';
l_entry->ifa_name = l_name;
break;
case IFLA_STATS:
memcpy(l_data, l_rtaData, l_rtaDataSize);
l_entry->ifa_data = l_data;
break;
default:
break;
}
}
addToEnd(p_resultList, l_entry);
return 0;
}
static struct ifaddrs *findInterface(int p_index, struct ifaddrs **p_links, int p_numLinks)
{
int l_num = 0;
struct ifaddrs *l_cur = *p_links;
while(l_cur && l_num < p_numLinks)
{
char *l_indexPtr = ((char *)l_cur) + sizeof(struct ifaddrs);
int l_index;
memcpy(&l_index, l_indexPtr, sizeof(int));
if(l_index == p_index)
{
return l_cur;
}
l_cur = l_cur->ifa_next;
++l_num;
}
return NULL;
}
static int interpretAddr(struct nlmsghdr *p_hdr, struct ifaddrs **p_resultList, int p_numLinks)
{
struct ifaddrmsg *l_info = (struct ifaddrmsg *)NLMSG_DATA(p_hdr);
struct ifaddrs *l_interface = findInterface(l_info->ifa_index, p_resultList, p_numLinks);
size_t l_nameSize = 0;
size_t l_addrSize = 0;
int l_addedNetmask = 0;
size_t l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg));
struct rtattr *l_rta;
struct ifaddrs *l_entry;
char *l_name;
char *l_addr;
for(l_rta = IFA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
if(l_info->ifa_family == AF_PACKET)
{
continue;
}
switch(l_rta->rta_type)
{
case IFA_ADDRESS:
case IFA_LOCAL:
l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
if((l_info->ifa_family == AF_INET || l_info->ifa_family == AF_INET6) && !l_addedNetmask)
{
/* Make room for netmask */
l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
l_addedNetmask = 1;
}
break;
case IFA_BROADCAST:
l_addrSize += NLMSG_ALIGN(calcAddrLen(l_info->ifa_family, l_rtaDataSize));
break;
case IFA_LABEL:
l_nameSize += NLMSG_ALIGN(l_rtaDataSize + 1);
break;
default:
break;
}
}
l_entry = uv__malloc(sizeof(struct ifaddrs) + l_nameSize + l_addrSize);
if (l_entry == NULL)
{
return -1;
}
memset(l_entry, 0, sizeof(struct ifaddrs));
l_entry->ifa_name = (l_interface ? l_interface->ifa_name : "");
l_name = ((char *)l_entry) + sizeof(struct ifaddrs);
l_addr = l_name + l_nameSize;
l_entry->ifa_flags = l_info->ifa_flags;
if(l_interface)
{
l_entry->ifa_flags |= l_interface->ifa_flags;
}
l_rtaSize = NLMSG_PAYLOAD(p_hdr, sizeof(struct ifaddrmsg));
for(l_rta = IFA_RTA(l_info); RTA_OK(l_rta, l_rtaSize); l_rta = RTA_NEXT(l_rta, l_rtaSize))
{
void *l_rtaData = RTA_DATA(l_rta);
size_t l_rtaDataSize = RTA_PAYLOAD(l_rta);
switch(l_rta->rta_type)
{
case IFA_ADDRESS:
case IFA_BROADCAST:
case IFA_LOCAL:
{
size_t l_addrLen = calcAddrLen(l_info->ifa_family, l_rtaDataSize);
makeSockaddr(l_info->ifa_family, (struct sockaddr *)l_addr, l_rtaData, l_rtaDataSize);
if(l_info->ifa_family == AF_INET6)
{
if(IN6_IS_ADDR_LINKLOCAL((struct in6_addr *)l_rtaData) || IN6_IS_ADDR_MC_LINKLOCAL((struct in6_addr *)l_rtaData))
{
((struct sockaddr_in6 *)l_addr)->sin6_scope_id = l_info->ifa_index;
}
}
/* Apparently in a point-to-point network IFA_ADDRESS contains
* the dest address and IFA_LOCAL contains the local address
*/
if(l_rta->rta_type == IFA_ADDRESS)
{
if(l_entry->ifa_addr)
{
l_entry->ifa_dstaddr = (struct sockaddr *)l_addr;
}
else
{
l_entry->ifa_addr = (struct sockaddr *)l_addr;
}
}
else if(l_rta->rta_type == IFA_LOCAL)
{
if(l_entry->ifa_addr)
{
l_entry->ifa_dstaddr = l_entry->ifa_addr;
}
l_entry->ifa_addr = (struct sockaddr *)l_addr;
}
else
{
l_entry->ifa_broadaddr = (struct sockaddr *)l_addr;
}
l_addr += NLMSG_ALIGN(l_addrLen);
break;
}
case IFA_LABEL:
strncpy(l_name, l_rtaData, l_rtaDataSize);
l_name[l_rtaDataSize] = '\0';
l_entry->ifa_name = l_name;
break;
default:
break;
}
}
if(l_entry->ifa_addr && (l_entry->ifa_addr->sa_family == AF_INET || l_entry->ifa_addr->sa_family == AF_INET6))
{
unsigned l_maxPrefix = (l_entry->ifa_addr->sa_family == AF_INET ? 32 : 128);
unsigned l_prefix = (l_info->ifa_prefixlen > l_maxPrefix ? l_maxPrefix : l_info->ifa_prefixlen);
unsigned char l_mask[16] = {0};
unsigned i;
for(i=0; i<(l_prefix/8); ++i)
{
l_mask[i] = 0xff;
}
if(l_prefix % 8)
{
l_mask[i] = 0xff << (8 - (l_prefix % 8));
}
makeSockaddr(l_entry->ifa_addr->sa_family, (struct sockaddr *)l_addr, l_mask, l_maxPrefix / 8);
l_entry->ifa_netmask = (struct sockaddr *)l_addr;
}
addToEnd(p_resultList, l_entry);
return 0;
}
static int interpretLinks(int p_socket, pid_t p_pid, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList)
{
int l_numLinks = 0;
for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
{
unsigned int l_nlsize = p_netlinkList->m_size;
struct nlmsghdr *l_hdr;
for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
{
if((pid_t)l_hdr->nlmsg_pid != p_pid || (int)l_hdr->nlmsg_seq != p_socket)
{
continue;
}
if(l_hdr->nlmsg_type == NLMSG_DONE)
{
break;
}
if(l_hdr->nlmsg_type == RTM_NEWLINK)
{
if(interpretLink(l_hdr, p_resultList) == -1)
{
return -1;
}
++l_numLinks;
}
}
}
return l_numLinks;
}
static int interpretAddrs(int p_socket, pid_t p_pid, NetlinkList *p_netlinkList, struct ifaddrs **p_resultList, int p_numLinks)
{
for(; p_netlinkList; p_netlinkList = p_netlinkList->m_next)
{
unsigned int l_nlsize = p_netlinkList->m_size;
struct nlmsghdr *l_hdr;
for(l_hdr = p_netlinkList->m_data; NLMSG_OK(l_hdr, l_nlsize); l_hdr = NLMSG_NEXT(l_hdr, l_nlsize))
{
if((pid_t)l_hdr->nlmsg_pid != p_pid || (int)l_hdr->nlmsg_seq != p_socket)
{
continue;
}
if(l_hdr->nlmsg_type == NLMSG_DONE)
{
break;
}
if(l_hdr->nlmsg_type == RTM_NEWADDR)
{
if (interpretAddr(l_hdr, p_resultList, p_numLinks) == -1)
{
return -1;
}
}
}
}
return 0;
}
int getifaddrs(struct ifaddrs **ifap)
{
int l_socket;
int l_result;
int l_numLinks;
pid_t l_pid;
NetlinkList *l_linkResults;
NetlinkList *l_addrResults;
if(!ifap)
{
return -1;
}
*ifap = NULL;
l_socket = netlink_socket(&l_pid);
if(l_socket < 0)
{
return -1;
}
l_linkResults = getResultList(l_socket, RTM_GETLINK, l_pid);
if(!l_linkResults)
{
close(l_socket);
return -1;
}
l_addrResults = getResultList(l_socket, RTM_GETADDR, l_pid);
if(!l_addrResults)
{
close(l_socket);
freeResultList(l_linkResults);
return -1;
}
l_result = 0;
l_numLinks = interpretLinks(l_socket, l_pid, l_linkResults, ifap);
if(l_numLinks == -1 || interpretAddrs(l_socket, l_pid, l_addrResults, ifap, l_numLinks) == -1)
{
l_result = -1;
}
freeResultList(l_linkResults);
freeResultList(l_addrResults);
close(l_socket);
return l_result;
}
void freeifaddrs(struct ifaddrs *ifa)
{
struct ifaddrs *l_cur;
while(ifa)
{
l_cur = ifa;
ifa = ifa->ifa_next;
uv__free(l_cur);
}
}


@ -27,7 +27,7 @@
#include <ifaddrs.h>
#include <net/if.h>
#if !defined(__CYGWIN__) && !defined(__MSYS__)
#if !defined(__CYGWIN__) && !defined(__MSYS__) && !defined(__GNU__)
#include <net/if_dl.h>
#endif
@ -40,7 +40,7 @@ static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
return 1;
if (ent->ifa_addr == NULL)
return 1;
#if !defined(__CYGWIN__) && !defined(__MSYS__)
#if !defined(__CYGWIN__) && !defined(__MSYS__) && !defined(__GNU__)
/*
* If `exclude_type` is `UV__EXCLUDE_IFPHYS`, return whether `sa_family`
* equals `AF_LINK`. Otherwise, the result depends on the operating
@ -69,7 +69,7 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
struct ifaddrs* addrs;
struct ifaddrs* ent;
uv_interface_address_t* address;
#if !(defined(__CYGWIN__) || defined(__MSYS__))
#if !(defined(__CYGWIN__) || defined(__MSYS__)) && !defined(__GNU__)
int i;
#endif
@ -126,7 +126,7 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
address++;
}
#if !(defined(__CYGWIN__) || defined(__MSYS__))
#if !(defined(__CYGWIN__) || defined(__MSYS__)) && !defined(__GNU__)
/* Fill in physical addresses for each interface */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))


@ -38,6 +38,7 @@ static void init_process_title_mutex_once(void) {
void uv__process_title_cleanup(void) {
uv_once(&process_title_mutex_once, init_process_title_mutex_once);
uv_mutex_destroy(&process_title_mutex);
}


@ -84,6 +84,7 @@ extern char** environ;
#endif
#if defined(__linux__)
# include <sched.h>
# include <sys/syscall.h>
# define uv__accept4 accept4
#endif
@ -96,9 +97,9 @@ static int uv__run_pending(uv_loop_t* loop);
/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->base) ==
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->base) ==
sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->len) ==
STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->len) ==
sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
@ -334,35 +335,36 @@ int uv_backend_fd(const uv_loop_t* loop) {
}
int uv_backend_timeout(const uv_loop_t* loop) {
if (loop->stop_flag != 0)
return 0;
if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
return 0;
if (!QUEUE_EMPTY(&loop->idle_handles))
return 0;
if (!QUEUE_EMPTY(&loop->pending_queue))
return 0;
if (loop->closing_handles)
return 0;
return uv__next_timeout(loop);
}
static int uv__loop_alive(const uv_loop_t* loop) {
return uv__has_active_handles(loop) ||
uv__has_active_reqs(loop) ||
!QUEUE_EMPTY(&loop->pending_queue) ||
loop->closing_handles != NULL;
}
static int uv__backend_timeout(const uv_loop_t* loop) {
if (loop->stop_flag == 0 &&
/* uv__loop_alive(loop) && */
(uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
QUEUE_EMPTY(&loop->pending_queue) &&
QUEUE_EMPTY(&loop->idle_handles) &&
loop->closing_handles == NULL)
return uv__next_timeout(loop);
return 0;
}
int uv_backend_timeout(const uv_loop_t* loop) {
if (QUEUE_EMPTY(&loop->watcher_queue))
return uv__backend_timeout(loop);
/* Need to call uv_run to update the backend fd state. */
return 0;
}
int uv_loop_alive(const uv_loop_t* loop) {
return uv__loop_alive(loop);
return uv__loop_alive(loop);
@ -384,7 +386,7 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
timeout = 0;
if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
timeout = uv_backend_timeout(loop);
timeout = uv__backend_timeout(loop);
uv__io_poll(loop, timeout);
@ -597,20 +599,6 @@ int uv__nonblock_ioctl(int fd, int set) {
return 0;
}
int uv__cloexec_ioctl(int fd, int set) {
int r;
do
r = ioctl(fd, set ? FIOCLEX : FIONCLEX);
while (r == -1 && errno == EINTR);
if (r)
return UV__ERR(errno);
return 0;
}
#endif
@ -645,25 +633,13 @@ int uv__nonblock_fcntl(int fd, int set) {
}
int uv__cloexec_fcntl(int fd, int set) {
int uv__cloexec(int fd, int set) {
int flags;
int r;
do
r = fcntl(fd, F_GETFD);
while (r == -1 && errno == EINTR);
if (r == -1)
return UV__ERR(errno);
/* Bail out now if already set/clear. */
if (!!(r & FD_CLOEXEC) == !!set)
return 0;
flags = 0;
if (set)
flags = r | FD_CLOEXEC;
else
flags = r & ~FD_CLOEXEC;
flags = FD_CLOEXEC;
do
r = fcntl(fd, F_SETFD, flags);
@ -1036,6 +1012,32 @@ int uv__open_cloexec(const char* path, int flags) {
}
int uv__slurp(const char* filename, char* buf, size_t len) {
ssize_t n;
int fd;
assert(len > 0);
fd = uv__open_cloexec(filename, O_RDONLY);
if (fd < 0)
return fd;
do
n = read(fd, buf, len - 1);
while (n == -1 && errno == EINTR);
if (uv__close_nocheckstdio(fd))
abort();
if (n < 0)
return UV__ERR(errno);
buf[n] = '\0';
return 0;
}
int uv__dup2_cloexec(int oldfd, int newfd) {
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__linux__)
int r;
@ -1621,3 +1623,37 @@ int uv__search_path(const char* prog, char* buf, size_t* buflen) {
/* Out of tokens (path entries), and no match found */
return UV_EINVAL;
}
unsigned int uv_available_parallelism(void) {
#ifdef __linux__
cpu_set_t set;
long rc;
memset(&set, 0, sizeof(set));
/* sysconf(_SC_NPROCESSORS_ONLN) in musl calls sched_getaffinity() but in
* glibc it's... complicated... so for consistency try sched_getaffinity()
* before falling back to sysconf(_SC_NPROCESSORS_ONLN).
*/
if (0 == sched_getaffinity(0, sizeof(set), &set))
rc = CPU_COUNT(&set);
else
rc = sysconf(_SC_NPROCESSORS_ONLN);
if (rc < 1)
rc = 1;
return (unsigned) rc;
#elif defined(__MVS__)
return 1; /* TODO(bnoordhuis) Read from CSD_NUMBER_ONLINE_CPUS? */
#else /* __linux__ */
long rc;
rc = sysconf(_SC_NPROCESSORS_ONLN);
if (rc < 1)
rc = 1;
return (unsigned) rc;
#endif /* __linux__ */
}


@ -287,3 +287,18 @@ int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
return errno = ENOSYS, -1;
#endif
}
ssize_t
uv__fs_copy_file_range(int fd_in,
off_t* off_in,
int fd_out,
off_t* off_out,
size_t len,
unsigned int flags)
{
#if __FreeBSD__ >= 13 && !defined(__DragonFly__)
return copy_file_range(fd_in, off_in, fd_out, off_out, len, flags);
#else
return errno = ENOSYS, -1;
#endif
}


@ -247,7 +247,8 @@ UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__) \
|| defined(_AIX71) \
|| defined(__HAIKU__)
|| defined(__HAIKU__) \
|| defined(__GNU__)
struct timespec ts[2];
ts[0] = uv__fs_to_timespec(req->atime);
ts[1] = uv__fs_to_timespec(req->mtime);
@ -1074,6 +1075,17 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
*/
#if defined(__FreeBSD__) || defined(__DragonFly__)
#if defined(__FreeBSD__)
off_t off;
off = req->off;
r = uv__fs_copy_file_range(in_fd, &off, out_fd, NULL, req->bufsml[0].len, 0);
if (r >= 0) {
r = off - req->off;
req->off = off;
return r;
}
#endif
len = 0;
r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
#elif defined(__FreeBSD_kernel__)
@ -1168,7 +1180,8 @@ static ssize_t uv__fs_lutime(uv_fs_t* req) {
#if defined(__linux__) || \
defined(_AIX71) || \
defined(__sun) || \
defined(__HAIKU__)
defined(__HAIKU__) || \
defined(__GNU__)
struct timespec ts[2];
ts[0] = uv__fs_to_timespec(req->atime);
ts[1] = uv__fs_to_timespec(req->mtime);

167
deps/libuv/src/unix/hurd.c vendored Normal file
View File

@ -0,0 +1,167 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#define _GNU_SOURCE 1
#include "uv.h"
#include "internal.h"
#include <hurd.h>
#include <hurd/process.h>
#include <mach/task_info.h>
#include <mach/vm_statistics.h>
#include <mach/vm_param.h>
#include <inttypes.h>
#include <stddef.h>
#include <unistd.h>
#include <string.h>
#include <limits.h>
int uv_exepath(char* buffer, size_t* size) {
kern_return_t err;
/* XXX in current Hurd, strings are char arrays of 1024 elements */
string_t exepath;
ssize_t copied;
if (buffer == NULL || size == NULL || *size == 0)
return UV_EINVAL;
if (*size - 1 > 0) {
/* XXX limited length of buffer in current Hurd, this API will probably
* evolve in the future */
err = proc_get_exe(getproc(), getpid(), exepath);
if (err)
return UV__ERR(err);
}
copied = uv__strscpy(buffer, exepath, *size);
/* do not return error on UV_E2BIG failure */
*size = copied < 0 ? strlen(buffer) : (size_t) copied;
return 0;
}
int uv_resident_set_memory(size_t* rss) {
kern_return_t err;
struct task_basic_info bi;
mach_msg_type_number_t count;
count = TASK_BASIC_INFO_COUNT;
err = task_info(mach_task_self(), TASK_BASIC_INFO,
(task_info_t) &bi, &count);
if (err)
return UV__ERR(err);
*rss = bi.resident_size;
return 0;
}
uint64_t uv_get_free_memory(void) {
kern_return_t err;
struct vm_statistics vmstats;
err = vm_statistics(mach_task_self(), &vmstats);
if (err)
return 0;
return vmstats.free_count * vm_page_size;
}
uint64_t uv_get_total_memory(void) {
kern_return_t err;
host_basic_info_data_t hbi;
mach_msg_type_number_t cnt;
cnt = HOST_BASIC_INFO_COUNT;
err = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t) &hbi, &cnt);
if (err)
return 0;
return hbi.memory_size;
}
int uv_uptime(double* uptime) {
char buf[128];
/* Try /proc/uptime first */
if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
if (1 == sscanf(buf, "%lf", uptime))
return 0;
/* Reimplement here code from procfs to calculate uptime if not mounted? */
return UV__ERR(EIO);
}
void uv_loadavg(double avg[3]) {
char buf[128]; /* Large enough to hold all of /proc/loadavg. */
if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf)))
if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]))
return;
/* Reimplement here code from procfs to calculate loadavg if not mounted? */
}
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
kern_return_t err;
host_basic_info_data_t hbi;
mach_msg_type_number_t cnt;
/* Get count of cpus */
cnt = HOST_BASIC_INFO_COUNT;
err = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t) &hbi, &cnt);
if (err) {
err = UV__ERR(err);
goto abort;
}
/* XXX not implemented on the Hurd */
*cpu_infos = uv__calloc(hbi.avail_cpus, sizeof(**cpu_infos));
if (*cpu_infos == NULL) {
err = UV_ENOMEM;
goto abort;
}
*count = hbi.avail_cpus;
return 0;
abort:
*cpu_infos = NULL;
*count = 0;
return err;
}
uint64_t uv_get_constrained_memory(void) {
return 0; /* Memory constraints are unknown. */
}

View File

@ -145,7 +145,8 @@ typedef struct uv__stream_queued_fds_s uv__stream_queued_fds_t;
/* loop flags */
enum {
UV_LOOP_BLOCK_SIGPROF = 1
UV_LOOP_BLOCK_SIGPROF = 0x1,
UV_LOOP_REAP_CHILDREN = 0x2
};
/* flags of excluding ifaddr */
@ -174,11 +175,9 @@ struct uv__stream_queued_fds_s {
defined(__linux__) || \
defined(__OpenBSD__) || \
defined(__NetBSD__)
#define uv__cloexec uv__cloexec_ioctl
#define uv__nonblock uv__nonblock_ioctl
#define UV__NONBLOCK_IS_IOCTL 1
#else
#define uv__cloexec uv__cloexec_fcntl
#define uv__nonblock uv__nonblock_fcntl
#define UV__NONBLOCK_IS_IOCTL 0
#endif
@ -196,8 +195,7 @@ struct uv__stream_queued_fds_s {
#endif
/* core */
int uv__cloexec_ioctl(int fd, int set);
int uv__cloexec_fcntl(int fd, int set);
int uv__cloexec(int fd, int set);
int uv__nonblock_ioctl(int fd, int set);
int uv__nonblock_fcntl(int fd, int set);
int uv__close(int fd); /* preserves errno */
@ -241,14 +239,15 @@ void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events);
int uv__accept(int sockfd);
int uv__dup2_cloexec(int oldfd, int newfd);
int uv__open_cloexec(const char* path, int flags);
int uv__slurp(const char* filename, char* buf, size_t len);
/* tcp */
int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb);
int uv__tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb);
int uv__tcp_nodelay(int fd, int on);
int uv__tcp_keepalive(int fd, int on, unsigned int delay);
/* pipe */
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
int uv__pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
/* signal */
void uv__signal_close(uv_signal_t* handle);
@ -282,6 +281,7 @@ uv_handle_type uv__handle_type(int fd);
FILE* uv__open_file(const char* path);
int uv__getpwuid_r(uv_passwd_t* pwd);
int uv__search_path(const char* prog, char* buf, size_t* buflen);
void uv__wait_children(uv_loop_t* loop);
/* random */
int uv__random_devurandom(void* buf, size_t buflen);
@ -356,5 +356,15 @@ size_t strnlen(const char* s, size_t maxlen);
#endif
#endif
#if defined(__FreeBSD__)
ssize_t
uv__fs_copy_file_range(int fd_in,
off_t* off_in,
int fd_out,
off_t* off_out,
size_t len,
unsigned int flags);
#endif
#endif /* UV_UNIX_INTERNAL_H_ */

View File

@ -284,6 +284,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
for (i = 0; i < nfds; i++) {
ev = events + i;
if (ev->filter == EVFILT_PROC) {
loop->flags |= UV_LOOP_REAP_CHILDREN;
nevents++;
continue;
}
fd = ev->ident;
/* Skip invalidated events, see uv__platform_invalidate_fd */
if (fd == -1)
@ -377,6 +382,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
nevents++;
}
if (loop->flags & UV_LOOP_REAP_CHILDREN) {
loop->flags &= ~UV_LOOP_REAP_CHILDREN;
uv__wait_children(loop);
}
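Aside, not part of the diff: on the kqueue platforms, 1.44.0 reaps children via EVFILT_PROC/NOTE_EXIT events instead of a SIGCHLD handler; the hunk above only sets UV_LOOP_REAP_CHILDREN and defers the waitpid() calls to uv__wait_children(). A self-contained sketch of the underlying kqueue mechanism, assuming a BSD or macOS host (the sleeping child and the printed message are illustration only):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void) {
  int kq = kqueue();
  if (kq == -1) abort();

  pid_t pid = fork();
  if (pid == 0) {
    sleep(1);                            /* child: linger briefly, then exit */
    _exit(42);
  }

  struct kevent ev;
  EV_SET(&ev, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT, 0, 0);
  if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1) {
    /* Like the diff's ESRCH path: the child is already gone, just reap it. */
  } else {
    struct kevent out;
    kevent(kq, NULL, 0, &out, 1, NULL);  /* block until NOTE_EXIT fires */
  }

  int status;
  waitpid(pid, &status, 0);              /* reap, as uv__wait_children() does */
  printf("child %d exited with %d\n", (int) pid, WEXITSTATUS(status));
  return 0;
}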
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;

View File

@ -45,6 +45,10 @@
#define HAVE_IFADDRS_H 1
# if defined(__ANDROID_API__) && __ANDROID_API__ < 24
# undef HAVE_IFADDRS_H
#endif
#ifdef __UCLIBC__
# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32
# undef HAVE_IFADDRS_H
@ -52,11 +56,7 @@
#endif
#ifdef HAVE_IFADDRS_H
# if defined(__ANDROID__)
# include "uv/android-ifaddrs.h"
# else
# include <ifaddrs.h>
# endif
# include <ifaddrs.h>
# include <sys/socket.h>
# include <net/ethernet.h>
# include <netpacket/packet.h>
@ -211,31 +211,6 @@ err:
return UV_EINVAL;
}
static int uv__slurp(const char* filename, char* buf, size_t len) {
ssize_t n;
int fd;
assert(len > 0);
fd = uv__open_cloexec(filename, O_RDONLY);
if (fd < 0)
return fd;
do
n = read(fd, buf, len - 1);
while (n == -1 && errno == EINTR);
if (uv__close_nocheckstdio(fd))
abort();
if (n < 0)
return UV__ERR(errno);
buf[n] = '\0';
return 0;
}
int uv_uptime(double* uptime) {
static volatile int no_clock_boottime;
char buf[128];
@ -243,7 +218,7 @@ int uv_uptime(double* uptime) {
int r;
/* Try /proc/uptime first, then fallback to clock_gettime(). */
if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
if (1 == sscanf(buf, "%lf", uptime))
return 0;
@ -641,6 +616,7 @@ static uint64_t read_cpufreq(unsigned int cpunum) {
}
#ifdef HAVE_IFADDRS_H
static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
return 1;
@ -654,6 +630,7 @@ static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
return exclude_type;
return !exclude_type;
}
#endif
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
#ifndef HAVE_IFADDRS_H

View File

@ -91,7 +91,7 @@ err_socket:
}
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
int uv__pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
if (uv__stream_fd(handle) == -1)
return UV_EINVAL;

View File

@ -27,6 +27,7 @@
#include <assert.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
@ -35,8 +36,21 @@
#include <poll.h>
#if defined(__APPLE__) && !TARGET_OS_IPHONE
# include <spawn.h>
# include <paths.h>
# include <sys/kauth.h>
# include <sys/types.h>
# include <sys/sysctl.h>
# include <dlfcn.h>
# include <crt_externs.h>
# include <xlocale.h>
# define environ (*_NSGetEnviron())
/* macOS 10.14 and earlier do not define this constant */
# ifndef POSIX_SPAWN_SETSID
# define POSIX_SPAWN_SETSID 1024
# endif
#else
extern char **environ;
#endif
@ -49,10 +63,20 @@ extern char **environ;
# include "zos-base.h"
#endif
#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__)
#include <sys/event.h>
#endif
#if !(defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__))
static void uv__chld(uv_signal_t* handle, int signum) {
assert(signum == SIGCHLD);
uv__wait_children(handle->loop);
}
#endif
void uv__wait_children(uv_loop_t* loop) {
uv_process_t* process;
uv_loop_t* loop;
int exit_status;
int term_signal;
int status;
@ -61,10 +85,7 @@ static void uv__chld(uv_signal_t* handle, int signum) {
QUEUE* q;
QUEUE* h;
assert(signum == SIGCHLD);
QUEUE_INIT(&pending);
loop = handle->loop;
h = &loop->process_handles;
q = QUEUE_HEAD(h);
@ -254,22 +275,33 @@ static void uv__process_child_init(const uv_process_options_t* options,
use_fd = pipes[fd][1];
if (use_fd < 0 || use_fd >= fd)
continue;
#ifdef F_DUPFD_CLOEXEC /* POSIX 2008 */
pipes[fd][1] = fcntl(use_fd, F_DUPFD_CLOEXEC, stdio_count);
#else
pipes[fd][1] = fcntl(use_fd, F_DUPFD, stdio_count);
#endif
if (pipes[fd][1] == -1)
uv__write_errno(error_fd);
#ifndef F_DUPFD_CLOEXEC /* POSIX 2008 */
n = uv__cloexec(pipes[fd][1], 1);
if (n) {
uv__write_int(error_fd, n);
_exit(127);
}
#endif
}
for (fd = 0; fd < stdio_count; fd++) {
close_fd = pipes[fd][0];
close_fd = -1;
use_fd = pipes[fd][1];
if (use_fd < 0) {
if (fd >= 3)
continue;
else {
/* redirect stdin, stdout and stderr to /dev/null even if UV_IGNORE is
* set
*/
/* Redirect stdin, stdout and stderr to /dev/null even if UV_IGNORE is
* set. */
uv__close_nocheckstdio(fd); /* Free up fd, if it happens to be open. */
use_fd = open("/dev/null", fd == 0 ? O_RDONLY : O_RDWR);
close_fd = use_fd;
@ -278,28 +310,29 @@ static void uv__process_child_init(const uv_process_options_t* options,
}
}
if (fd == use_fd)
uv__cloexec_fcntl(use_fd, 0);
else
if (fd == use_fd) {
if (close_fd == -1) {
n = uv__cloexec(use_fd, 0);
if (n) {
uv__write_int(error_fd, n);
_exit(127);
}
}
}
else {
fd = dup2(use_fd, fd);
}
if (fd == -1)
uv__write_errno(error_fd);
if (fd <= 2)
if (fd <= 2 && close_fd == -1)
uv__nonblock_fcntl(fd, 0);
if (close_fd >= stdio_count)
uv__close(close_fd);
}
for (fd = 0; fd < stdio_count; fd++) {
use_fd = pipes[fd][1];
if (use_fd >= stdio_count)
uv__close(use_fd);
}
if (options->cwd != NULL && chdir(options->cwd))
uv__write_errno(error_fd);
@ -320,9 +353,8 @@ static void uv__process_child_init(const uv_process_options_t* options,
if ((options->flags & UV_PROCESS_SETUID) && setuid(options->uid))
uv__write_errno(error_fd);
if (options->env != NULL) {
if (options->env != NULL)
environ = options->env;
}
/* Reset signal mask just before exec. */
sigemptyset(&signewset);
@ -341,6 +373,562 @@ static void uv__process_child_init(const uv_process_options_t* options,
#endif
#if defined(__APPLE__)
typedef struct uv__posix_spawn_fncs_tag {
struct {
int (*addchdir_np)(const posix_spawn_file_actions_t *, const char *);
} file_actions;
} uv__posix_spawn_fncs_t;
static uv_once_t posix_spawn_init_once = UV_ONCE_INIT;
static uv__posix_spawn_fncs_t posix_spawn_fncs;
static int posix_spawn_can_use_setsid;
static void uv__spawn_init_posix_spawn_fncs(void) {
/* Try to locate all non-portable functions at runtime */
posix_spawn_fncs.file_actions.addchdir_np =
dlsym(RTLD_DEFAULT, "posix_spawn_file_actions_addchdir_np");
}
static void uv__spawn_init_can_use_setsid(void) {
static const int MACOS_CATALINA_VERSION_MAJOR = 19;
char version_str[256];
char* version_major_str;
size_t version_str_size = 256;
int r;
int version_major;
/* Get a version string */
r = sysctlbyname("kern.osrelease", version_str, &version_str_size, NULL, 0);
if (r != 0)
return;
/* Try to get the major version number. If not found
* fall back to the fork/exec flow */
version_major_str = strtok(version_str, ".");
if (version_major_str == NULL)
return;
/* Parse the version major as a number. If it is greater than
* the major version for macOS Catalina (aka macOS 10.15), then
* the POSIX_SPAWN_SETSID flag is available */
version_major = atoi_l(version_major_str, NULL); /* Use LC_C_LOCALE */
if (version_major >= MACOS_CATALINA_VERSION_MAJOR)
posix_spawn_can_use_setsid = 1;
}
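Aside, not part of the diff: the feature check above keys off the Darwin kernel release string, where major version 19 corresponds to macOS 10.15 (Catalina). A standalone query of the same sysctl, assuming a macOS host; the plain atoi() here is a simplification of the locale-pinned parse used above:

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int main(void) {
  char release[256];
  size_t len = sizeof(release);

  if (sysctlbyname("kern.osrelease", release, &len, NULL, 0) != 0)
    return 1;

  printf("kern.osrelease = %s\n", release);    /* e.g. "19.6.0" on Catalina */
  printf("POSIX_SPAWN_SETSID usable: %s\n",
         atoi(release) >= 19 ? "yes" : "no");  /* same threshold as above */
  return 0;
}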
static void uv__spawn_init_posix_spawn(void) {
/* Init handles to all potentially non-defined functions */
uv__spawn_init_posix_spawn_fncs();
/* Init feature detection for POSIX_SPAWN_SETSID flag */
uv__spawn_init_can_use_setsid();
}
static int uv__spawn_set_posix_spawn_attrs(
posix_spawnattr_t* attrs,
const uv__posix_spawn_fncs_t* posix_spawn_fncs,
const uv_process_options_t* options) {
int err;
unsigned int flags;
sigset_t signal_set;
err = posix_spawnattr_init(attrs);
if (err != 0) {
/* If initialization fails, no need to de-init, just return */
return err;
}
if (options->flags & (UV_PROCESS_SETUID | UV_PROCESS_SETGID)) {
/* kauth_cred_issuser currently requires exactly uid == 0 for these
* posixspawn_attrs (set_groups_np, setuid_np, setgid_np), which deviates
* from the normal specification of setuid (which also uses euid), and they
* are also undocumented syscalls, so we do not use them. */
err = ENOSYS;
goto error;
}
/* Set flags for spawn behavior
* 1) POSIX_SPAWN_CLOEXEC_DEFAULT: (Apple Extension) All descriptors in the
* parent will be treated as if they had been created with O_CLOEXEC. The
* only fds that will be passed on to the child are those manipulated by
* the file actions
* 2) POSIX_SPAWN_SETSIGDEF: Signals mentioned in spawn-sigdefault in the
* spawn attributes will be reset to behave as their default
* 3) POSIX_SPAWN_SETSIGMASK: Signal mask will be set to the value of
* spawn-sigmask in attributes
* 4) POSIX_SPAWN_SETSID: Make the process a new session leader if a detached
* session was requested. */
flags = POSIX_SPAWN_CLOEXEC_DEFAULT |
POSIX_SPAWN_SETSIGDEF |
POSIX_SPAWN_SETSIGMASK;
if (options->flags & UV_PROCESS_DETACHED) {
/* If running on a version of macOS where this flag is not supported,
* revert back to the fork/exec flow. Otherwise posix_spawn will
* silently ignore the flag. */
if (!posix_spawn_can_use_setsid) {
err = ENOSYS;
goto error;
}
flags |= POSIX_SPAWN_SETSID;
}
err = posix_spawnattr_setflags(attrs, flags);
if (err != 0)
goto error;
/* Reset all signals in the child to their default behavior */
sigfillset(&signal_set);
err = posix_spawnattr_setsigdefault(attrs, &signal_set);
if (err != 0)
goto error;
/* Reset the signal mask for all signals */
sigemptyset(&signal_set);
err = posix_spawnattr_setsigmask(attrs, &signal_set);
if (err != 0)
goto error;
return err;
error:
(void) posix_spawnattr_destroy(attrs);
return err;
}
static int uv__spawn_set_posix_spawn_file_actions(
posix_spawn_file_actions_t* actions,
const uv__posix_spawn_fncs_t* posix_spawn_fncs,
const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2]) {
int fd;
int fd2;
int use_fd;
int err;
err = posix_spawn_file_actions_init(actions);
if (err != 0) {
/* If initialization fails, no need to de-init, just return */
return err;
}
/* Set the current working directory if requested */
if (options->cwd != NULL) {
if (posix_spawn_fncs->file_actions.addchdir_np == NULL) {
err = ENOSYS;
goto error;
}
err = posix_spawn_fncs->file_actions.addchdir_np(actions, options->cwd);
if (err != 0)
goto error;
}
/* Do not return ENOSYS after this point, as we may mutate pipes. */
/* First duplicate low numbered fds, since it's not safe to duplicate them,
* they could get replaced. Example: swapping stdout and stderr; without
* this fd 2 (stderr) would be duplicated into fd 1, thus making both
* stdout and stderr go to the same fd, which was not the intention. */
for (fd = 0; fd < stdio_count; fd++) {
use_fd = pipes[fd][1];
if (use_fd < 0 || use_fd >= fd)
continue;
use_fd = stdio_count;
for (fd2 = 0; fd2 < stdio_count; fd2++) {
/* If we were not setting POSIX_SPAWN_CLOEXEC_DEFAULT, we would need to
* also consider whether fcntl(fd, F_GETFD) returned without the
* FD_CLOEXEC flag set. */
if (pipes[fd2][1] == use_fd) {
use_fd++;
fd2 = 0;
}
}
err = posix_spawn_file_actions_adddup2(
actions,
pipes[fd][1],
use_fd);
assert(err != ENOSYS);
if (err != 0)
goto error;
pipes[fd][1] = use_fd;
}
/* Second, move the descriptors into their respective places */
for (fd = 0; fd < stdio_count; fd++) {
use_fd = pipes[fd][1];
if (use_fd < 0) {
if (fd >= 3)
continue;
else {
/* If ignored, redirect to (or from) /dev/null, */
err = posix_spawn_file_actions_addopen(
actions,
fd,
"/dev/null",
fd == 0 ? O_RDONLY : O_RDWR,
0);
assert(err != ENOSYS);
if (err != 0)
goto error;
continue;
}
}
if (fd == use_fd)
err = posix_spawn_file_actions_addinherit_np(actions, fd);
else
err = posix_spawn_file_actions_adddup2(actions, use_fd, fd);
assert(err != ENOSYS);
if (err != 0)
goto error;
/* Make sure the fd is marked as non-blocking (state shared between child
* and parent). */
uv__nonblock_fcntl(use_fd, 0);
}
/* Finally, close all the superfluous descriptors */
for (fd = 0; fd < stdio_count; fd++) {
use_fd = pipes[fd][1];
if (use_fd < stdio_count)
continue;
/* Check if we already closed this. */
for (fd2 = 0; fd2 < fd; fd2++) {
if (pipes[fd2][1] == use_fd)
break;
}
if (fd2 < fd)
continue;
err = posix_spawn_file_actions_addclose(actions, use_fd);
assert(err != ENOSYS);
if (err != 0)
goto error;
}
return 0;
error:
(void) posix_spawn_file_actions_destroy(actions);
return err;
}
char* uv__spawn_find_path_in_env(char** env) {
char** env_iterator;
const char path_var[] = "PATH=";
/* Look for an environment variable called PATH in the
* provided env array, and return its value if found */
for (env_iterator = env; *env_iterator != NULL; env_iterator++) {
if (strncmp(*env_iterator, path_var, sizeof(path_var) - 1) == 0) {
/* Found "PATH=" at the beginning of the string */
return *env_iterator + sizeof(path_var) - 1;
}
}
return NULL;
}
static int uv__spawn_resolve_and_spawn(const uv_process_options_t* options,
posix_spawnattr_t* attrs,
posix_spawn_file_actions_t* actions,
pid_t* pid) {
const char *p;
const char *z;
const char *path;
size_t l;
size_t k;
int err;
int seen_eacces;
path = NULL;
err = -1;
seen_eacces = 0;
/* Short circuit for erroneous case */
if (options->file == NULL)
return ENOENT;
/* The environment for the child process is that of the parent unless overridden
* by options->env */
char** env = environ;
if (options->env != NULL)
env = options->env;
/* If options->file contains a slash, posix_spawn/posix_spawnp behave
* the same, and don't involve PATH resolution at all. Otherwise, if
* options->file does not include a slash, but no custom environment is
* to be used, the environment used for path resolution as well as for the
* child process is that of the parent process, so posix_spawnp is the
* way to go. */
if (strchr(options->file, '/') != NULL || options->env == NULL) {
do
err = posix_spawnp(pid, options->file, actions, attrs, options->args, env);
while (err == EINTR);
return err;
}
/* Look for the definition of PATH in the provided env */
path = uv__spawn_find_path_in_env(options->env);
/* The following resolution logic (execvpe emulation) is copied from
* https://git.musl-libc.org/cgit/musl/tree/src/process/execvp.c
* and adapted to work for our specific usage */
/* If no path was provided in options->env, use the default value
* to look for the executable */
if (path == NULL)
path = _PATH_DEFPATH;
k = strnlen(options->file, NAME_MAX + 1);
if (k > NAME_MAX)
return ENAMETOOLONG;
l = strnlen(path, PATH_MAX - 1) + 1;
for (p = path;; p = z) {
/* Compose the new process file from the entry in the PATH
* environment variable and the actual file name */
char b[PATH_MAX + NAME_MAX];
z = strchr(p, ':');
if (!z)
z = p + strlen(p);
if ((size_t)(z - p) >= l) {
if (!*z++)
break;
continue;
}
memcpy(b, p, z - p);
b[z - p] = '/';
memcpy(b + (z - p) + (z > p), options->file, k + 1);
/* Try to spawn the new process file. If it fails with ENOENT, the
* new process file is not in this PATH entry, continue with the next
* PATH entry. */
do
err = posix_spawn(pid, b, actions, attrs, options->args, env);
while (err == EINTR);
switch (err) {
case EACCES:
seen_eacces = 1;
break; /* continue search */
case ENOENT:
case ENOTDIR:
break; /* continue search */
default:
return err;
}
if (!*z++)
break;
}
if (seen_eacces)
return EACCES;
return err;
}
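Aside, not part of the diff: the loop above emulates execvpe() by composing one candidate per PATH entry and handing each to posix_spawn() until one succeeds. A small illustration of just the candidate composition, with an assumed PATH value and file name (the real code also handles length limits and empty entries):

#include <stdio.h>
#include <string.h>

int main(void) {
  const char* path = "/usr/local/bin:/usr/bin:/bin";  /* assumed PATH value */
  const char* file = "cat";                           /* stands in for options->file */
  const char* p = path;

  for (;;) {
    const char* z = strchr(p, ':');
    size_t n = z != NULL ? (size_t) (z - p) : strlen(p);
    printf("candidate: %.*s/%s\n", (int) n, p, file); /* what posix_spawn would try */
    if (z == NULL)
      break;
    p = z + 1;
  }
  return 0;
}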
static int uv__spawn_and_init_child_posix_spawn(
const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2],
pid_t* pid,
const uv__posix_spawn_fncs_t* posix_spawn_fncs) {
int err;
posix_spawnattr_t attrs;
posix_spawn_file_actions_t actions;
err = uv__spawn_set_posix_spawn_attrs(&attrs, posix_spawn_fncs, options);
if (err != 0)
goto error;
/* This may mutate pipes. */
err = uv__spawn_set_posix_spawn_file_actions(&actions,
posix_spawn_fncs,
options,
stdio_count,
pipes);
if (err != 0) {
(void) posix_spawnattr_destroy(&attrs);
goto error;
}
/* Try to spawn options->file resolving in the provided environment
* if any */
err = uv__spawn_resolve_and_spawn(options, &attrs, &actions, pid);
assert(err != ENOSYS);
/* Destroy the actions/attributes */
(void) posix_spawn_file_actions_destroy(&actions);
(void) posix_spawnattr_destroy(&attrs);
error:
/* In an error situation, the attributes and file actions are
* already destroyed, only the happy path requires cleanup */
return UV__ERR(err);
}
#endif
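Aside, not part of the diff: for readers unfamiliar with the posix_spawn API used throughout this block, the general attrs + file-actions pattern looks roughly like the sketch below; the libuv code above layers the Apple-specific flags, feature detection, and fd juggling on top of it. The "echo" command and the /dev/null redirection are illustrative choices only.

#include <fcntl.h>
#include <spawn.h>
#include <stdio.h>
#include <sys/wait.h>

extern char** environ;

int main(void) {
  posix_spawnattr_t attrs;
  posix_spawn_file_actions_t actions;
  pid_t pid;
  int status;
  char* argv[] = { "echo", "hello", NULL };

  posix_spawnattr_init(&attrs);
  posix_spawn_file_actions_init(&actions);

  /* Roughly what UV_IGNORE on stdout turns into: open /dev/null as fd 1. */
  posix_spawn_file_actions_addopen(&actions, 1, "/dev/null", O_WRONLY, 0);

  if (posix_spawnp(&pid, "echo", &actions, &attrs, argv, environ) == 0)
    waitpid(pid, &status, 0);

  posix_spawn_file_actions_destroy(&actions);
  posix_spawnattr_destroy(&attrs);
  return 0;
}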
static int uv__spawn_and_init_child_fork(const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2],
int error_fd,
pid_t* pid) {
sigset_t signewset;
sigset_t sigoldset;
/* Start the child with most signals blocked, to avoid any issues before we
* can reset them, but allow program failures to exit (and not hang). */
sigfillset(&signewset);
sigdelset(&signewset, SIGKILL);
sigdelset(&signewset, SIGSTOP);
sigdelset(&signewset, SIGTRAP);
sigdelset(&signewset, SIGSEGV);
sigdelset(&signewset, SIGBUS);
sigdelset(&signewset, SIGILL);
sigdelset(&signewset, SIGSYS);
sigdelset(&signewset, SIGABRT);
if (pthread_sigmask(SIG_BLOCK, &signewset, &sigoldset) != 0)
abort();
*pid = fork();
if (*pid == -1) {
/* Failed to fork */
return UV__ERR(errno);
}
if (*pid == 0) {
/* Fork succeeded, in the child process */
uv__process_child_init(options, stdio_count, pipes, error_fd);
abort();
}
if (pthread_sigmask(SIG_SETMASK, &sigoldset, NULL) != 0)
abort();
/* Fork succeeded, in the parent process */
return 0;
}
static int uv__spawn_and_init_child(
uv_loop_t* loop,
const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2],
pid_t* pid) {
int signal_pipe[2] = { -1, -1 };
int status;
int err;
int exec_errorno;
ssize_t r;
#if defined(__APPLE__)
uv_once(&posix_spawn_init_once, uv__spawn_init_posix_spawn);
/* Special child process spawn case for macOS Big Sur (11.0) onwards
*
* Big Sur introduced a significant performance degradation on a call to
* fork/exec when the process has many pages mmaped in with MAP_JIT, like, say
* a javascript interpreter. Electron-based applications, for example,
* are impacted; though the magnitude of the impact depends on how much the
* app relies on subprocesses.
*
* On macOS, though, posix_spawn is implemented in a way that does not
* exhibit the problem. This block implements the forking and preparation
* logic with posix_spawn and its related primitives. It also takes advantage of
* the macOS extension POSIX_SPAWN_CLOEXEC_DEFAULT that makes it impossible to
* leak descriptors to the child process. */
err = uv__spawn_and_init_child_posix_spawn(options,
stdio_count,
pipes,
pid,
&posix_spawn_fncs);
/* The posix_spawn flow will return UV_ENOSYS if any of the posix_spawn_x_np
* non-standard functions is both _needed_ and _undefined_. In those cases,
* default back to the fork/execve strategy. For all other errors, just fail. */
if (err != UV_ENOSYS)
return err;
#endif
/* This pipe is used by the parent to wait until
* the child has called `execve()`. We need this
* to avoid the following race condition:
*
* if ((pid = fork()) > 0) {
* kill(pid, SIGTERM);
* }
* else if (pid == 0) {
* execve("/bin/cat", argp, envp);
* }
*
* The parent sends a signal immediately after forking.
* Since the child may not have called `execve()` yet,
* there is no telling what process receives the signal,
* our fork or /bin/cat.
*
* To avoid ambiguity, we create a pipe with both ends
* marked close-on-exec. Then, after the call to `fork()`,
* the parent polls the read end until it EOFs or errors with EPIPE.
*/
err = uv__make_pipe(signal_pipe, 0);
if (err)
return err;
/* Acquire write lock to prevent opening new fds in worker threads */
uv_rwlock_wrlock(&loop->cloexec_lock);
err = uv__spawn_and_init_child_fork(options, stdio_count, pipes, signal_pipe[1], pid);
/* Release lock in parent process */
uv_rwlock_wrunlock(&loop->cloexec_lock);
uv__close(signal_pipe[1]);
if (err == 0) {
do
r = read(signal_pipe[0], &exec_errorno, sizeof(exec_errorno));
while (r == -1 && errno == EINTR);
if (r == 0)
; /* okay, EOF */
else if (r == sizeof(exec_errorno)) {
do
err = waitpid(*pid, &status, 0); /* okay, read errorno */
while (err == -1 && errno == EINTR);
assert(err == *pid);
err = exec_errorno;
} else if (r == -1 && errno == EPIPE) {
/* Something unknown happened to our child before spawn */
do
err = waitpid(*pid, &status, 0); /* okay, got EPIPE */
while (err == -1 && errno == EINTR);
assert(err == *pid);
err = UV_EPIPE;
} else
abort();
}
uv__close_nocheckstdio(signal_pipe[0]);
return err;
}
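Aside, not part of the diff: the close-on-exec "exec status pipe" described in the comment above boils down to the pattern below. EOF on the read end means execve() succeeded (the kernel closed the CLOEXEC write end), while a readable errno means it failed. The "true" program is just a stand-in for options->file.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void) {
  int fds[2];
  pid_t pid;
  int err = 0;
  ssize_t r;

  if (pipe(fds))
    return 1;
  fcntl(fds[0], F_SETFD, FD_CLOEXEC);
  fcntl(fds[1], F_SETFD, FD_CLOEXEC);

  pid = fork();
  if (pid == 0) {                         /* child */
    execlp("true", "true", (char*) NULL);
    err = errno;                          /* only reached if exec failed */
    while (write(fds[1], &err, sizeof(err)) == -1 && errno == EINTR)
      ;
    _exit(127);
  }

  close(fds[1]);
  do
    r = read(fds[0], &err, sizeof(err));
  while (r == -1 && errno == EINTR);
  close(fds[0]);
  waitpid(pid, NULL, 0);

  if (r == 0)
    printf("exec succeeded\n");           /* EOF: write end vanished on exec */
  else
    printf("exec failed: %d\n", err);
  return 0;
}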
int uv_spawn(uv_loop_t* loop,
uv_process_t* process,
const uv_process_options_t* options) {
@ -348,18 +936,13 @@ int uv_spawn(uv_loop_t* loop,
/* fork is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED. */
return UV_ENOSYS;
#else
sigset_t signewset;
sigset_t sigoldset;
int signal_pipe[2] = { -1, -1 };
int pipes_storage[8][2];
int (*pipes)[2];
int stdio_count;
ssize_t r;
pid_t pid;
int err;
int exec_errorno;
int i;
int status;
assert(options->file != NULL);
assert(!(options->flags & ~(UV_PROCESS_DETACHED |
@ -372,6 +955,7 @@ int uv_spawn(uv_loop_t* loop,
uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS);
QUEUE_INIT(&process->queue);
process->status = 0;
stdio_count = options->stdio_count;
if (stdio_count < 3)
@ -396,92 +980,42 @@ int uv_spawn(uv_loop_t* loop,
goto error;
}
/* This pipe is used by the parent to wait until
* the child has called `execve()`. We need this
* to avoid the following race condition:
*
* if ((pid = fork()) > 0) {
* kill(pid, SIGTERM);
* }
* else if (pid == 0) {
* execve("/bin/cat", argp, envp);
* }
*
* The parent sends a signal immediately after forking.
* Since the child may not have called `execve()` yet,
* there is no telling what process receives the signal,
* our fork or /bin/cat.
*
* To avoid ambiguity, we create a pipe with both ends
* marked close-on-exec. Then, after the call to `fork()`,
* the parent polls the read end until it EOFs or errors with EPIPE.
*/
err = uv__make_pipe(signal_pipe, 0);
if (err)
goto error;
#if !(defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__))
uv_signal_start(&loop->child_watcher, uv__chld, SIGCHLD);
#endif
/* Acquire write lock to prevent opening new fds in worker threads */
uv_rwlock_wrlock(&loop->cloexec_lock);
/* Spawn the child */
exec_errorno = uv__spawn_and_init_child(loop, options, stdio_count, pipes, &pid);
/* Start the child with most signals blocked, to avoid any issues before we
* can reset them, but allow program failures to exit (and not hang). */
sigfillset(&signewset);
sigdelset(&signewset, SIGKILL);
sigdelset(&signewset, SIGSTOP);
sigdelset(&signewset, SIGTRAP);
sigdelset(&signewset, SIGSEGV);
sigdelset(&signewset, SIGBUS);
sigdelset(&signewset, SIGILL);
sigdelset(&signewset, SIGSYS);
sigdelset(&signewset, SIGABRT);
if (pthread_sigmask(SIG_BLOCK, &signewset, &sigoldset) != 0)
abort();
#if 0
/* This runs into a nodejs issue (it expects initialized streams, even if the
* exec failed).
* See https://github.com/libuv/libuv/pull/3107#issuecomment-782482608 */
if (exec_errorno != 0)
goto error;
#endif
pid = fork();
if (pid == -1)
err = UV__ERR(errno);
/* Activate this handle if exec() happened successfully, even if we later
* fail to open a stdio handle. This ensures we can eventually reap the child
* with waitpid. */
if (exec_errorno == 0) {
#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || defined(__NetBSD__)
struct kevent event;
EV_SET(&event, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT, 0, 0);
if (kevent(loop->backend_fd, &event, 1, NULL, 0, NULL)) {
if (errno != ESRCH)
abort();
/* Process already exited. Call waitpid on the next loop iteration. */
loop->flags |= UV_LOOP_REAP_CHILDREN;
}
#endif
if (pid == 0)
uv__process_child_init(options, stdio_count, pipes, signal_pipe[1]);
if (pthread_sigmask(SIG_SETMASK, &sigoldset, NULL) != 0)
abort();
/* Release lock in parent process */
uv_rwlock_wrunlock(&loop->cloexec_lock);
uv__close(signal_pipe[1]);
if (pid == -1) {
uv__close(signal_pipe[0]);
goto error;
process->pid = pid;
process->exit_cb = options->exit_cb;
QUEUE_INSERT_TAIL(&loop->process_handles, &process->queue);
uv__handle_start(process);
}
process->status = 0;
exec_errorno = 0;
do
r = read(signal_pipe[0], &exec_errorno, sizeof(exec_errorno));
while (r == -1 && errno == EINTR);
if (r == 0)
; /* okay, EOF */
else if (r == sizeof(exec_errorno)) {
do
err = waitpid(pid, &status, 0); /* okay, read errorno */
while (err == -1 && errno == EINTR);
assert(err == pid);
} else if (r == -1 && errno == EPIPE) {
do
err = waitpid(pid, &status, 0); /* okay, got EPIPE */
while (err == -1 && errno == EINTR);
assert(err == pid);
} else
abort();
uv__close_nocheckstdio(signal_pipe[0]);
for (i = 0; i < options->stdio_count; i++) {
err = uv__process_open_stream(options->stdio + i, pipes[i]);
if (err == 0)
@ -493,15 +1027,6 @@ int uv_spawn(uv_loop_t* loop,
goto error;
}
/* Only activate this handle if exec() happened successfully */
if (exec_errorno == 0) {
QUEUE_INSERT_TAIL(&loop->process_handles, &process->queue);
uv__handle_start(process);
}
process->pid = pid;
process->exit_cb = options->exit_cb;
if (pipes != pipes_storage)
uv__free(pipes);

View File

@ -58,20 +58,6 @@ struct uv__stream_select_s {
fd_set* swrite;
size_t swrite_sz;
};
/* Due to a possible kernel bug at least in OS X 10.10 "Yosemite",
* EPROTOTYPE can be returned while trying to write to a socket that is
* shutting down. If we retry the write, we should get the expected EPIPE
* instead.
*/
# define RETRY_ON_WRITE_ERROR(errno) (errno == EINTR || errno == EPROTOTYPE)
# define IS_TRANSIENT_WRITE_ERROR(errno, send_handle) \
(errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS || \
(errno == EMSGSIZE && send_handle != NULL))
#else
# define RETRY_ON_WRITE_ERROR(errno) (errno == EINTR)
# define IS_TRANSIENT_WRITE_ERROR(errno, send_handle) \
(errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
#endif /* defined(__APPLE__) */
static void uv__stream_connect(uv_stream_t*);
@ -658,11 +644,11 @@ int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
switch (stream->type) {
case UV_TCP:
err = uv_tcp_listen((uv_tcp_t*)stream, backlog, cb);
err = uv__tcp_listen((uv_tcp_t*)stream, backlog, cb);
break;
case UV_NAMED_PIPE:
err = uv_pipe_listen((uv_pipe_t*)stream, backlog, cb);
err = uv__pipe_listen((uv_pipe_t*)stream, backlog, cb);
break;
default:
@ -866,19 +852,33 @@ static int uv__try_write(uv_stream_t* stream,
do
n = sendmsg(uv__stream_fd(stream), &msg, 0);
while (n == -1 && RETRY_ON_WRITE_ERROR(errno));
while (n == -1 && errno == EINTR);
} else {
do
n = uv__writev(uv__stream_fd(stream), iov, iovcnt);
while (n == -1 && RETRY_ON_WRITE_ERROR(errno));
while (n == -1 && errno == EINTR);
}
if (n >= 0)
return n;
if (IS_TRANSIENT_WRITE_ERROR(errno, send_handle))
if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
return UV_EAGAIN;
#ifdef __APPLE__
/* macOS versions 10.10 and 10.15 - and presumably 10.11 to 10.14, too -
* have a bug where a race condition causes the kernel to return EPROTOTYPE
* because the socket isn't fully constructed. It's probably the result of
* the peer closing the connection and that is why libuv translates it to
* ECONNRESET. Previously, libuv retried until the EPROTOTYPE error went
* away but some VPN software causes the same behavior except the error is
* permanent, not transient, turning the retry mechanism into an infinite
* loop. See https://github.com/libuv/libuv/pull/482.
*/
if (errno == EPROTOTYPE)
return UV_ECONNRESET;
#endif /* __APPLE__ */
return UV__ERR(errno);
}

View File

@ -328,7 +328,7 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
}
int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
int uv__tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
static int single_accept_cached = -1;
unsigned long flags;
int single_accept;

View File

@ -162,11 +162,45 @@ void uv_barrier_destroy(uv_barrier_t* barrier) {
#endif
/* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
* too small to safely receive signals on.
*
* Musl's PTHREAD_STACK_MIN + MINSIGSTKSZ == 8192 on arm64 (which has
* the largest MINSIGSTKSZ of the architectures that musl supports) so
* let's use that as a lower bound.
*
* We use a hardcoded value because PTHREAD_STACK_MIN + MINSIGSTKSZ
* is between 28 and 133 KB when compiling against glibc, depending
* on the architecture.
*/
static size_t uv__min_stack_size(void) {
static const size_t min = 8192;
#ifdef PTHREAD_STACK_MIN /* Not defined on NetBSD. */
if (min < (size_t) PTHREAD_STACK_MIN)
return PTHREAD_STACK_MIN;
#endif /* PTHREAD_STACK_MIN */
return min;
}
/* On Linux, threads created by musl have a much smaller stack than threads
* created by glibc (80 vs. 2048 or 4096 kB.) Follow glibc for consistency.
*/
static size_t uv__default_stack_size(void) {
#if !defined(__linux__)
return 0;
#elif defined(__PPC__) || defined(__ppc__) || defined(__powerpc__)
return 4 << 20; /* glibc default. */
#else
return 2 << 20; /* glibc default. */
#endif
}
/* On MacOS, threads other than the main thread are created with a reduced
* stack size by default. Adjust to RLIMIT_STACK aligned to the page size.
*
* On Linux, threads created by musl have a much smaller stack than threads
* created by glibc (80 vs. 2048 or 4096 kB.) Follow glibc for consistency.
*/
size_t uv__thread_stack_size(void) {
#if defined(__APPLE__) || defined(__linux__)
@ -176,34 +210,20 @@ size_t uv__thread_stack_size(void) {
* the system call wrapper invokes the wrong system call. Don't treat
* that as fatal, just use the default stack size instead.
*/
if (0 == getrlimit(RLIMIT_STACK, &lim) && lim.rlim_cur != RLIM_INFINITY) {
/* pthread_attr_setstacksize() expects page-aligned values. */
lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();
if (getrlimit(RLIMIT_STACK, &lim))
return uv__default_stack_size();
/* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
* too small to safely receive signals on.
*
* Musl's PTHREAD_STACK_MIN + MINSIGSTKSZ == 8192 on arm64 (which has
* the largest MINSIGSTKSZ of the architectures that musl supports) so
* let's use that as a lower bound.
*
* We use a hardcoded value because PTHREAD_STACK_MIN + MINSIGSTKSZ
* is between 28 and 133 KB when compiling against glibc, depending
* on the architecture.
*/
if (lim.rlim_cur >= 8192)
if (lim.rlim_cur >= PTHREAD_STACK_MIN)
return lim.rlim_cur;
}
if (lim.rlim_cur == RLIM_INFINITY)
return uv__default_stack_size();
/* pthread_attr_setstacksize() expects page-aligned values. */
lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();
if (lim.rlim_cur >= (rlim_t) uv__min_stack_size())
return lim.rlim_cur;
#endif
#if !defined(__linux__)
return 0;
#elif defined(__PPC__) || defined(__ppc__) || defined(__powerpc__)
return 4 << 20; /* glibc default. */
#else
return 2 << 20; /* glibc default. */
#endif
return uv__default_stack_size();
}
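Aside, not part of the diff: the uv__min_stack_size()/uv__default_stack_size() helpers above enforce the same floor that applies when a caller requests an explicit stack via the public uv_thread_create_ex() API, so a deliberately small request is rounded up rather than failing. A minimal usage sketch; the 16 KB figure is an arbitrary too-small value chosen for illustration:

#include <stdio.h>
#include <uv.h>

static void worker(void* arg) {
  (void) arg;
  printf("hello from the worker thread\n");
}

int main(void) {
  uv_thread_t tid;
  uv_thread_options_t opts;

  opts.flags = UV_THREAD_HAS_STACK_SIZE;
  opts.stack_size = 16 * 1024;  /* below the floor; libuv bumps it up */

  if (uv_thread_create_ex(&tid, &opts, worker, NULL) == 0)
    uv_thread_join(&tid);
  return 0;
}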
@ -222,6 +242,7 @@ int uv_thread_create_ex(uv_thread_t* tid,
pthread_attr_t attr_storage;
size_t pagesize;
size_t stack_size;
size_t min_stack_size;
/* Used to squelch a -Wcast-function-type warning. */
union {
@ -239,10 +260,9 @@ int uv_thread_create_ex(uv_thread_t* tid,
pagesize = (size_t)getpagesize();
/* Round up to the nearest page boundary. */
stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);
#ifdef PTHREAD_STACK_MIN
if (stack_size < PTHREAD_STACK_MIN)
stack_size = PTHREAD_STACK_MIN;
#endif
min_stack_size = uv__min_stack_size();
if (stack_size < min_stack_size)
stack_size = min_stack_size;
}
if (stack_size > 0) {

View File

@ -201,6 +201,7 @@ static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
for (k = 0; k < chunks; ++k) {
iov[k].iov_base = buf->base + k * UV__UDP_DGRAM_MAXSIZE;
iov[k].iov_len = UV__UDP_DGRAM_MAXSIZE;
memset(&msgs[k].msg_hdr, 0, sizeof(msgs[k].msg_hdr));
msgs[k].msg_hdr.msg_iov = iov + k;
msgs[k].msg_hdr.msg_iovlen = 1;
msgs[k].msg_hdr.msg_name = peers + k;
@ -494,7 +495,7 @@ static int uv__set_reuse(int fd) {
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
return UV__ERR(errno);
}
#elif defined(SO_REUSEPORT) && !defined(__linux__)
#elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__)
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
return UV__ERR(errno);
#else
@ -655,16 +656,16 @@ int uv__udp_connect(uv_udp_t* handle,
}
/* From https://pubs.opengroup.org/onlinepubs/9699919799/functions/connect.html
* Connect behavior should be standardized across the UNIXes libuv supports,
* but the kernel implementation logic is not the same, so let's use pseudocode
* to explain the udp disconnect behaviors:
*
* Predefined stubs for pseudocode:
* 1. sodisconnect: The function that performs the real udp disconnect
* 2. pru_connect: The function that performs the real udp connect
* 3. so: The kernel socket object matching the socket fd
* 4. addr: The sockaddr parameter from user space
*
* BSDs:
* if (sodisconnect(so) == 0) { // udp disconnect succeeded
* if (addr->sa_len != so->addr->sa_len) return EINVAL;
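Aside, not part of the diff: on the public API side, the AF_UNSPEC connect() trick used below is what backs uv_udp_connect() with a NULL address. A minimal connect-then-disconnect sequence; the loopback address and port are arbitrary illustration values:

#include <uv.h>

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_udp_t udp;
  struct sockaddr_in addr;

  uv_udp_init(loop, &udp);
  uv_ip4_addr("127.0.0.1", 9999, &addr);

  uv_udp_connect(&udp, (const struct sockaddr*) &addr);  /* associate peer */
  uv_udp_connect(&udp, NULL);                            /* disconnect again */

  uv_close((uv_handle_t*) &udp, NULL);
  return uv_run(loop, UV_RUN_DEFAULT);
}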
@ -694,13 +695,13 @@ int uv__udp_disconnect(uv_udp_t* handle) {
#endif
memset(&addr, 0, sizeof(addr));
#if defined(__MVS__)
addr.ss_family = AF_UNSPEC;
#else
addr.sa_family = AF_UNSPEC;
#endif
do {
errno = 0;
r = connect(handle->io_watcher.fd, (struct sockaddr*) &addr, sizeof(addr));
@ -927,7 +928,8 @@ static int uv__udp_set_membership6(uv_udp_t* handle,
!defined(__NetBSD__) && \
!defined(__ANDROID__) && \
!defined(__DragonFly__) && \
!defined(__QNX__)
!defined(__QNX__) && \
!defined(__GNU__)
static int uv__udp_set_source_membership4(uv_udp_t* handle,
const struct sockaddr_in* multicast_addr,
const char* interface_addr,
@ -1119,7 +1121,8 @@ int uv_udp_set_source_membership(uv_udp_t* handle,
!defined(__NetBSD__) && \
!defined(__ANDROID__) && \
!defined(__DragonFly__) && \
!defined(__QNX__)
!defined(__QNX__) && \
!defined(__GNU__)
int err;
union uv__sockaddr mcast_addr;
union uv__sockaddr src_addr;

View File

@ -28,7 +28,7 @@
#include "req-inl.h"
void uv_async_endgame(uv_loop_t* loop, uv_async_t* handle) {
void uv__async_endgame(uv_loop_t* loop, uv_async_t* handle) {
if (handle->flags & UV_HANDLE_CLOSING &&
!handle->async_sent) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
@ -54,9 +54,9 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
}
void uv_async_close(uv_loop_t* loop, uv_async_t* handle) {
void uv__async_close(uv_loop_t* loop, uv_async_t* handle) {
if (!((uv_async_t*)handle)->async_sent) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
}
uv__handle_closing(handle);
@ -83,7 +83,7 @@ int uv_async_send(uv_async_t* handle) {
}
void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
void uv__process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
uv_req_t* req) {
assert(handle->type == UV_ASYNC);
assert(req->type == UV_WAKEUP);
@ -91,7 +91,7 @@ void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
handle->async_sent = 0;
if (handle->flags & UV_HANDLE_CLOSING) {
uv_want_endgame(loop, (uv_handle_t*)handle);
uv__want_endgame(loop, (uv_handle_t*)handle);
} else if (handle->async_cb != NULL) {
handle->async_cb(handle);
}

View File

@ -84,10 +84,12 @@ static int uv__loops_capacity;
#define UV__LOOPS_CHUNK_SIZE 8
static uv_mutex_t uv__loops_lock;
static void uv__loops_init(void) {
uv_mutex_init(&uv__loops_lock);
}
static int uv__loops_add(uv_loop_t* loop) {
uv_loop_t** new_loops;
int new_capacity, i;
@ -115,6 +117,7 @@ failed_loops_realloc:
return ERROR_OUTOFMEMORY;
}
static void uv__loops_remove(uv_loop_t* loop) {
int loop_index;
int smaller_capacity;
@ -173,7 +176,7 @@ void uv__wake_all_loops(void) {
uv_mutex_unlock(&uv__loops_lock);
}
static void uv_init(void) {
static void uv__init(void) {
/* Tell Windows that we will handle critical errors. */
SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX |
SEM_NOOPENFILEERRORBOX);
@ -199,19 +202,19 @@ static void uv_init(void) {
/* Fetch winapi function pointers. This must be done first because other
* initialization code might need these function pointers to be loaded.
*/
uv_winapi_init();
uv__winapi_init();
/* Initialize winsock */
uv_winsock_init();
uv__winsock_init();
/* Initialize FS */
uv_fs_init();
uv__fs_init();
/* Initialize signal stuff */
uv_signals_init();
uv__signals_init();
/* Initialize console */
uv_console_init();
uv__console_init();
/* Initialize utilities */
uv__util_init();
@ -327,7 +330,7 @@ void uv_update_time(uv_loop_t* loop) {
void uv__once_init(void) {
uv_once(&uv_init_guard_, uv_init);
uv_once(&uv_init_guard_, uv__init);
}
@ -395,23 +398,28 @@ int uv_loop_fork(uv_loop_t* loop) {
}
static int uv__loop_alive(const uv_loop_t* loop) {
return uv__has_active_handles(loop) ||
uv__has_active_reqs(loop) ||
loop->pending_reqs_tail != NULL ||
loop->endgame_handles != NULL;
}
int uv_loop_alive(const uv_loop_t* loop) {
return uv__loop_alive(loop);
}
int uv_backend_timeout(const uv_loop_t* loop) {
if (loop->stop_flag != 0)
return 0;
if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
return 0;
if (loop->pending_reqs_tail)
return 0;
if (loop->endgame_handles)
return 0;
if (loop->idle_handles)
return 0;
return uv__next_timeout(loop);
if (loop->stop_flag == 0 &&
/* uv__loop_alive(loop) && */
(uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
loop->pending_reqs_tail == NULL &&
loop->idle_handles == NULL &&
loop->endgame_handles == NULL)
return uv__next_timeout(loop);
return 0;
}
@ -462,8 +470,8 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
if (overlapped) {
/* Package was dequeued */
req = uv_overlapped_to_req(overlapped);
uv_insert_pending_req(loop, req);
req = uv__overlapped_to_req(overlapped);
uv__insert_pending_req(loop, req);
/* Some time might have passed waiting for I/O,
* so update the loop time here.
@ -547,8 +555,8 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
* meant only to wake us up.
*/
if (overlappeds[i].lpOverlapped) {
req = uv_overlapped_to_req(overlappeds[i].lpOverlapped);
uv_insert_pending_req(loop, req);
req = uv__overlapped_to_req(overlappeds[i].lpOverlapped);
uv__insert_pending_req(loop, req);
}
}
@ -581,18 +589,6 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
}
static int uv__loop_alive(const uv_loop_t* loop) {
return uv__has_active_handles(loop) ||
uv__has_active_reqs(loop) ||
loop->endgame_handles != NULL;
}
int uv_loop_alive(const uv_loop_t* loop) {
return uv__loop_alive(loop);
}
int uv_run(uv_loop_t *loop, uv_run_mode mode) {
DWORD timeout;
int r;
@ -606,9 +602,9 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
uv_update_time(loop);
uv__run_timers(loop);
ran_pending = uv_process_reqs(loop);
uv_idle_invoke(loop);
uv_prepare_invoke(loop);
ran_pending = uv__process_reqs(loop);
uv__idle_invoke(loop);
uv__prepare_invoke(loop);
timeout = 0;
if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
@ -626,8 +622,8 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
*/
uv__metrics_update_idle_time(loop);
uv_check_invoke(loop);
uv_process_endgames(loop);
uv__check_invoke(loop);
uv__process_endgames(loop);
if (mode == UV_RUN_ONCE) {
/* UV_RUN_ONCE implies forward progress: at least one callback must have
@ -638,6 +634,7 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
* UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
* the check.
*/
uv_update_time(loop);
uv__run_timers(loop);
}

View File

@ -73,6 +73,7 @@ int uv_translate_sys_error(int sys_errno) {
case WSAEACCES: return UV_EACCES;
case ERROR_ELEVATION_REQUIRED: return UV_EACCES;
case ERROR_CANT_ACCESS_FILE: return UV_EACCES;
case ERROR_ACCESS_DENIED: return UV_EACCES;
case ERROR_ADDRESS_ALREADY_ASSOCIATED: return UV_EADDRINUSE;
case WSAEADDRINUSE: return UV_EADDRINUSE;
case WSAEADDRNOTAVAIL: return UV_EADDRNOTAVAIL;
@ -154,7 +155,6 @@ int uv_translate_sys_error(int sys_errno) {
case WSAENOTSOCK: return UV_ENOTSOCK;
case ERROR_NOT_SUPPORTED: return UV_ENOTSUP;
case ERROR_BROKEN_PIPE: return UV_EOF;
case ERROR_ACCESS_DENIED: return UV_EPERM;
case ERROR_PRIVILEGE_NOT_HELD: return UV_EPERM;
case ERROR_BAD_PIPE: return UV_EPIPE;
case ERROR_NO_DATA: return UV_EPIPE;

View File

@ -33,7 +33,7 @@
const unsigned int uv_directory_watcher_buffer_size = 4096;
static void uv_fs_event_queue_readdirchanges(uv_loop_t* loop,
static void uv__fs_event_queue_readdirchanges(uv_loop_t* loop,
uv_fs_event_t* handle) {
assert(handle->dir_handle != INVALID_HANDLE_VALUE);
assert(!handle->req_pending);
@ -57,15 +57,15 @@ static void uv_fs_event_queue_readdirchanges(uv_loop_t* loop,
NULL)) {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(&handle->req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)&handle->req);
uv__insert_pending_req(loop, (uv_req_t*)&handle->req);
}
handle->req_pending = 1;
}
static void uv_relative_path(const WCHAR* filename,
const WCHAR* dir,
WCHAR** relpath) {
static void uv__relative_path(const WCHAR* filename,
const WCHAR* dir,
WCHAR** relpath) {
size_t relpathlen;
size_t filenamelen = wcslen(filename);
size_t dirlen = wcslen(dir);
@ -80,7 +80,7 @@ static void uv_relative_path(const WCHAR* filename,
(*relpath)[relpathlen] = L'\0';
}
static int uv_split_path(const WCHAR* filename, WCHAR** dir,
static int uv__split_path(const WCHAR* filename, WCHAR** dir,
WCHAR** file) {
size_t len, i;
DWORD dir_len;
@ -255,12 +255,12 @@ int uv_fs_event_start(uv_fs_event_t* handle,
short_path_done:
short_path = short_path_buffer;
if (uv_split_path(pathw, &dir, &handle->filew) != 0) {
if (uv__split_path(pathw, &dir, &handle->filew) != 0) {
last_error = GetLastError();
goto error;
}
if (uv_split_path(short_path, NULL, &handle->short_filew) != 0) {
if (uv__split_path(short_path, NULL, &handle->short_filew) != 0) {
last_error = GetLastError();
goto error;
}
@ -423,7 +423,7 @@ static int file_info_cmp(WCHAR* str, WCHAR* file_name, size_t file_name_len) {
}
void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
void uv__process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
uv_fs_event_t* handle) {
FILE_NOTIFY_INFORMATION* file_info;
int err, sizew, size;
@ -442,7 +442,7 @@ void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
*/
if (!uv__is_active(handle)) {
if (handle->flags & UV_HANDLE_CLOSING) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
}
return;
}
@ -515,9 +515,9 @@ void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
if (long_filenamew) {
/* Get the file name out of the long path. */
uv_relative_path(long_filenamew,
handle->dirw,
&filenamew);
uv__relative_path(long_filenamew,
handle->dirw,
&filenamew);
uv__free(long_filenamew);
long_filenamew = filenamew;
sizew = -1;
@ -575,26 +575,26 @@ void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
}
if (handle->flags & UV_HANDLE_CLOSING) {
uv_want_endgame(loop, (uv_handle_t*)handle);
uv__want_endgame(loop, (uv_handle_t*)handle);
} else if (uv__is_active(handle)) {
uv_fs_event_queue_readdirchanges(loop, handle);
uv__fs_event_queue_readdirchanges(loop, handle);
}
}
void uv_fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle) {
void uv__fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle) {
uv_fs_event_stop(handle);
uv__handle_closing(handle);
if (!handle->req_pending) {
uv_want_endgame(loop, (uv_handle_t*)handle);
uv__want_endgame(loop, (uv_handle_t*)handle);
}
}
void uv_fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle) {
void uv__fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle) {
if ((handle->flags & UV_HANDLE_CLOSING) && !handle->req_pending) {
assert(!(handle->flags & UV_HANDLE_CLOSED));

View File

@ -46,7 +46,7 @@
do { \
if (req == NULL) \
return UV_EINVAL; \
uv_fs_req_init(loop, req, subtype, cb); \
uv__fs_req_init(loop, req, subtype, cb); \
} \
while (0)
@ -132,7 +132,7 @@ static int uv__file_symlink_usermode_flag = SYMBOLIC_LINK_FLAG_ALLOW_UNPRIVILEGE
static DWORD uv__allocation_granularity;
void uv_fs_init(void) {
void uv__fs_init(void) {
SYSTEM_INFO system_info;
GetSystemInfo(&system_info);
@ -241,7 +241,7 @@ INLINE static int fs__capture_path(uv_fs_t* req, const char* path,
INLINE static void uv_fs_req_init(uv_loop_t* loop, uv_fs_t* req,
INLINE static void uv__fs_req_init(uv_loop_t* loop, uv_fs_t* req,
uv_fs_type fs_type, const uv_fs_cb cb) {
uv__once_init();
UV_REQ_INIT(req, UV_FS);
@ -912,12 +912,11 @@ void fs__read(uv_fs_t* req) {
SET_REQ_RESULT(req, bytes);
} else {
error = GetLastError();
if (error == ERROR_ACCESS_DENIED) {
error = ERROR_INVALID_FLAGS;
}
if (error == ERROR_HANDLE_EOF) {
if (error == ERROR_HANDLE_EOF || error == ERROR_BROKEN_PIPE) {
SET_REQ_RESULT(req, bytes);
} else {
SET_REQ_WIN32_ERROR(req, error);
@ -1881,8 +1880,9 @@ INLINE static DWORD fs__stat_impl_from_path(WCHAR* path,
NULL);
if (handle == INVALID_HANDLE_VALUE)
ret = GetLastError();
else if (fs__stat_handle(handle, statbuf, do_lstat) != 0)
return GetLastError();
if (fs__stat_handle(handle, statbuf, do_lstat) != 0)
ret = GetLastError();
else
ret = 0;
@ -2300,13 +2300,13 @@ INLINE static DWORD fs__utime_impl_from_path(WCHAR* path,
flags,
NULL);
if (handle == INVALID_HANDLE_VALUE) {
if (handle == INVALID_HANDLE_VALUE)
return GetLastError();
if (fs__utime_handle(handle, atime, mtime) != 0)
ret = GetLastError();
} else if (fs__utime_handle(handle, atime, mtime) != 0) {
ret = GetLastError();
} else {
else
ret = 0;
}
CloseHandle(handle);
return ret;

View File

@ -55,7 +55,7 @@
\
if (handle->flags & UV_HANDLE_CLOSING && \
handle->reqs_pending == 0) { \
uv_want_endgame(loop, (uv_handle_t*)handle); \
uv__want_endgame(loop, (uv_handle_t*)handle); \
} \
} while (0)
@ -85,7 +85,7 @@
} while (0)
INLINE static void uv_want_endgame(uv_loop_t* loop, uv_handle_t* handle) {
INLINE static void uv__want_endgame(uv_loop_t* loop, uv_handle_t* handle) {
if (!(handle->flags & UV_HANDLE_ENDGAME_QUEUED)) {
handle->flags |= UV_HANDLE_ENDGAME_QUEUED;
@ -95,7 +95,7 @@ INLINE static void uv_want_endgame(uv_loop_t* loop, uv_handle_t* handle) {
}
INLINE static void uv_process_endgames(uv_loop_t* loop) {
INLINE static void uv__process_endgames(uv_loop_t* loop) {
uv_handle_t* handle;
while (loop->endgame_handles) {
@ -106,23 +106,23 @@ INLINE static void uv_process_endgames(uv_loop_t* loop) {
switch (handle->type) {
case UV_TCP:
uv_tcp_endgame(loop, (uv_tcp_t*) handle);
uv__tcp_endgame(loop, (uv_tcp_t*) handle);
break;
case UV_NAMED_PIPE:
uv_pipe_endgame(loop, (uv_pipe_t*) handle);
uv__pipe_endgame(loop, (uv_pipe_t*) handle);
break;
case UV_TTY:
uv_tty_endgame(loop, (uv_tty_t*) handle);
uv__tty_endgame(loop, (uv_tty_t*) handle);
break;
case UV_UDP:
uv_udp_endgame(loop, (uv_udp_t*) handle);
uv__udp_endgame(loop, (uv_udp_t*) handle);
break;
case UV_POLL:
uv_poll_endgame(loop, (uv_poll_t*) handle);
uv__poll_endgame(loop, (uv_poll_t*) handle);
break;
case UV_TIMER:
@ -133,23 +133,23 @@ INLINE static void uv_process_endgames(uv_loop_t* loop) {
case UV_PREPARE:
case UV_CHECK:
case UV_IDLE:
uv_loop_watcher_endgame(loop, handle);
uv__loop_watcher_endgame(loop, handle);
break;
case UV_ASYNC:
uv_async_endgame(loop, (uv_async_t*) handle);
uv__async_endgame(loop, (uv_async_t*) handle);
break;
case UV_SIGNAL:
uv_signal_endgame(loop, (uv_signal_t*) handle);
uv__signal_endgame(loop, (uv_signal_t*) handle);
break;
case UV_PROCESS:
uv_process_endgame(loop, (uv_process_t*) handle);
uv__process_endgame(loop, (uv_process_t*) handle);
break;
case UV_FS_EVENT:
uv_fs_event_endgame(loop, (uv_fs_event_t*) handle);
uv__fs_event_endgame(loop, (uv_fs_event_t*) handle);
break;
case UV_FS_POLL:

View File

@ -77,63 +77,63 @@ void uv_close(uv_handle_t* handle, uv_close_cb cb) {
/* Handle-specific close actions */
switch (handle->type) {
case UV_TCP:
uv_tcp_close(loop, (uv_tcp_t*)handle);
uv__tcp_close(loop, (uv_tcp_t*)handle);
return;
case UV_NAMED_PIPE:
uv_pipe_close(loop, (uv_pipe_t*) handle);
uv__pipe_close(loop, (uv_pipe_t*) handle);
return;
case UV_TTY:
uv_tty_close((uv_tty_t*) handle);
uv__tty_close((uv_tty_t*) handle);
return;
case UV_UDP:
uv_udp_close(loop, (uv_udp_t*) handle);
uv__udp_close(loop, (uv_udp_t*) handle);
return;
case UV_POLL:
uv_poll_close(loop, (uv_poll_t*) handle);
uv__poll_close(loop, (uv_poll_t*) handle);
return;
case UV_TIMER:
uv_timer_stop((uv_timer_t*)handle);
uv__handle_closing(handle);
uv_want_endgame(loop, handle);
uv__want_endgame(loop, handle);
return;
case UV_PREPARE:
uv_prepare_stop((uv_prepare_t*)handle);
uv__handle_closing(handle);
uv_want_endgame(loop, handle);
uv__want_endgame(loop, handle);
return;
case UV_CHECK:
uv_check_stop((uv_check_t*)handle);
uv__handle_closing(handle);
uv_want_endgame(loop, handle);
uv__want_endgame(loop, handle);
return;
case UV_IDLE:
uv_idle_stop((uv_idle_t*)handle);
uv__handle_closing(handle);
uv_want_endgame(loop, handle);
uv__want_endgame(loop, handle);
return;
case UV_ASYNC:
uv_async_close(loop, (uv_async_t*) handle);
uv__async_close(loop, (uv_async_t*) handle);
return;
case UV_SIGNAL:
uv_signal_close(loop, (uv_signal_t*) handle);
uv__signal_close(loop, (uv_signal_t*) handle);
return;
case UV_PROCESS:
uv_process_close(loop, (uv_process_t*) handle);
uv__process_close(loop, (uv_process_t*) handle);
return;
case UV_FS_EVENT:
uv_fs_event_close(loop, (uv_fs_event_t*) handle);
uv__fs_event_close(loop, (uv_fs_event_t*) handle);
return;
case UV_FS_POLL:

View File

@ -72,25 +72,25 @@ typedef struct {
uint32_t delayed_error;
} uv__ipc_socket_xfer_info_t;
int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb);
int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client);
int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
int uv__tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb);
int uv__tcp_accept(uv_tcp_t* server, uv_tcp_t* client);
int uv__tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
int uv_tcp_write(uv_loop_t* loop, uv_write_t* req, uv_tcp_t* handle,
int uv__tcp_write(uv_loop_t* loop, uv_write_t* req, uv_tcp_t* handle,
const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb);
int uv__tcp_try_write(uv_tcp_t* handle, const uv_buf_t bufs[],
unsigned int nbufs);
void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle, uv_req_t* req);
void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
void uv__process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle, uv_req_t* req);
void uv__process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_write_t* req);
void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
void uv__process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_req_t* req);
void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
void uv__process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_connect_t* req);
void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp);
void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle);
void uv__tcp_close(uv_loop_t* loop, uv_tcp_t* tcp);
void uv__tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle);
int uv__tcp_xfer_export(uv_tcp_t* handle,
int pid,
@ -104,12 +104,12 @@ int uv__tcp_xfer_import(uv_tcp_t* tcp,
/*
* UDP
*/
void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle, uv_req_t* req);
void uv_process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
void uv__process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle, uv_req_t* req);
void uv__process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
uv_udp_send_t* req);
void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle);
void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle);
void uv__udp_close(uv_loop_t* loop, uv_udp_t* handle);
void uv__udp_endgame(uv_loop_t* loop, uv_udp_t* handle);
/*
@ -118,9 +118,9 @@ void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle);
int uv__create_stdio_pipe_pair(uv_loop_t* loop,
uv_pipe_t* parent_pipe, HANDLE* child_pipe_ptr, unsigned int flags);
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client);
int uv_pipe_read_start(uv_pipe_t* handle, uv_alloc_cb alloc_cb,
int uv__pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
int uv__pipe_accept(uv_pipe_t* server, uv_stream_t* client);
int uv__pipe_read_start(uv_pipe_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
void uv__pipe_read_stop(uv_pipe_t* handle);
int uv__pipe_write(uv_loop_t* loop,
@ -131,74 +131,74 @@ int uv__pipe_write(uv_loop_t* loop,
uv_stream_t* send_handle,
uv_write_cb cb);
void uv_process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_req_t* req);
void uv_process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_write_t* req);
void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_req_t* raw_req);
void uv_process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_connect_t* req);
void uv_process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_shutdown_t* req);
void uv_pipe_close(uv_loop_t* loop, uv_pipe_t* handle);
void uv_pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle);
void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle);
void uv__pipe_close(uv_loop_t* loop, uv_pipe_t* handle);
void uv__pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle);
void uv__pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle);
/*
* TTY
*/
void uv_console_init(void);
void uv__console_init(void);
int uv_tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb,
int uv__tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
int uv_tty_read_stop(uv_tty_t* handle);
int uv_tty_write(uv_loop_t* loop, uv_write_t* req, uv_tty_t* handle,
int uv__tty_read_stop(uv_tty_t* handle);
int uv__tty_write(uv_loop_t* loop, uv_write_t* req, uv_tty_t* handle,
const uv_buf_t bufs[], unsigned int nbufs, uv_write_cb cb);
int uv__tty_try_write(uv_tty_t* handle, const uv_buf_t bufs[],
unsigned int nbufs);
void uv_tty_close(uv_tty_t* handle);
void uv__tty_close(uv_tty_t* handle);
void uv_process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
uv_req_t* req);
void uv_process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
uv_write_t* req);
/*
* uv_process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working
* uv__process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working
* TODO: find a way to remove it
*/
void uv_process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
uv_req_t* raw_req);
/*
* uv_process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working
* uv__process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working
* TODO: find a way to remove it
*/
void uv_process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
uv_connect_t* req);
void uv_tty_endgame(uv_loop_t* loop, uv_tty_t* handle);
void uv__tty_endgame(uv_loop_t* loop, uv_tty_t* handle);
/*
* Poll watchers
*/
void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
void uv__process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
uv_req_t* req);
int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle);
void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle);
int uv__poll_close(uv_loop_t* loop, uv_poll_t* handle);
void uv__poll_endgame(uv_loop_t* loop, uv_poll_t* handle);
/*
* Loop watchers
*/
void uv_loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle);
void uv__loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle);
void uv_prepare_invoke(uv_loop_t* loop);
void uv_check_invoke(uv_loop_t* loop);
void uv_idle_invoke(uv_loop_t* loop);
void uv__prepare_invoke(uv_loop_t* loop);
void uv__check_invoke(uv_loop_t* loop);
void uv__idle_invoke(uv_loop_t* loop);
void uv__once_init(void);
@ -206,53 +206,47 @@ void uv__once_init(void);
/*
* Async watcher
*/
void uv_async_close(uv_loop_t* loop, uv_async_t* handle);
void uv_async_endgame(uv_loop_t* loop, uv_async_t* handle);
void uv__async_close(uv_loop_t* loop, uv_async_t* handle);
void uv__async_endgame(uv_loop_t* loop, uv_async_t* handle);
void uv_process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
void uv__process_async_wakeup_req(uv_loop_t* loop, uv_async_t* handle,
uv_req_t* req);
/*
* Signal watcher
*/
void uv_signals_init(void);
void uv__signals_init(void);
int uv__signal_dispatch(int signum);
void uv_signal_close(uv_loop_t* loop, uv_signal_t* handle);
void uv_signal_endgame(uv_loop_t* loop, uv_signal_t* handle);
void uv__signal_close(uv_loop_t* loop, uv_signal_t* handle);
void uv__signal_endgame(uv_loop_t* loop, uv_signal_t* handle);
void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
void uv__process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
uv_req_t* req);
/*
* Spawn
*/
void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle);
void uv_process_close(uv_loop_t* loop, uv_process_t* handle);
void uv_process_endgame(uv_loop_t* loop, uv_process_t* handle);
/*
* Error
*/
int uv_translate_sys_error(int sys_errno);
void uv__process_proc_exit(uv_loop_t* loop, uv_process_t* handle);
void uv__process_close(uv_loop_t* loop, uv_process_t* handle);
void uv__process_endgame(uv_loop_t* loop, uv_process_t* handle);
/*
* FS
*/
void uv_fs_init(void);
void uv__fs_init(void);
/*
* FS Event
*/
void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
void uv__process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
uv_fs_event_t* handle);
void uv_fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle);
void uv_fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle);
void uv__fs_event_close(uv_loop_t* loop, uv_fs_event_t* handle);
void uv__fs_event_endgame(uv_loop_t* loop, uv_fs_event_t* handle);
/*
@ -299,28 +293,28 @@ HANDLE uv__stdio_handle(BYTE* buffer, int fd);
/*
* Winapi and ntapi utility functions
*/
void uv_winapi_init(void);
void uv__winapi_init(void);
/*
* Winsock utility functions
*/
void uv_winsock_init(void);
void uv__winsock_init(void);
int uv_ntstatus_to_winsock_error(NTSTATUS status);
int uv__ntstatus_to_winsock_error(NTSTATUS status);
BOOL uv_get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target);
BOOL uv_get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target);
BOOL uv__get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target);
BOOL uv__get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target);
int WSAAPI uv_wsarecv_workaround(SOCKET socket, WSABUF* buffers,
int WSAAPI uv__wsarecv_workaround(SOCKET socket, WSABUF* buffers,
DWORD buffer_count, DWORD* bytes, DWORD* flags, WSAOVERLAPPED *overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
int WSAAPI uv_wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
int WSAAPI uv__wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
DWORD buffer_count, DWORD* bytes, DWORD* flags, struct sockaddr* addr,
int* addr_len, WSAOVERLAPPED *overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine);
int WSAAPI uv_msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in,
int WSAAPI uv__msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in,
AFD_POLL_INFO* info_out, OVERLAPPED* overlapped);
/* Whether there are any non-IFS LSPs stacked on TCP */

View File

@ -26,7 +26,7 @@
#include "handle-inl.h"
void uv_loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle) {
void uv__loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle) {
if (handle->flags & UV_HANDLE_CLOSING) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
handle->flags |= UV_HANDLE_CLOSED;
@ -104,7 +104,7 @@ void uv_loop_watcher_endgame(uv_loop_t* loop, uv_handle_t* handle) {
} \
\
\
void uv_##name##_invoke(uv_loop_t* loop) { \
void uv__##name##_invoke(uv_loop_t* loop) { \
uv_##name##_t* handle; \
\
(loop)->next_##name##_handle = (loop)->name##_handles; \

View File

@ -98,13 +98,13 @@ static void eof_timer_destroy(uv_pipe_t* pipe);
static void eof_timer_close_cb(uv_handle_t* handle);
static void uv_unique_pipe_name(char* ptr, char* name, size_t size) {
static void uv__unique_pipe_name(char* ptr, char* name, size_t size) {
snprintf(name, size, "\\\\?\\pipe\\uv\\%p-%lu", ptr, GetCurrentProcessId());
}
int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
uv_stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE);
uv__stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE);
handle->reqs_pending = 0;
handle->handle = INVALID_HANDLE_VALUE;
@ -120,8 +120,8 @@ int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
}
static void uv_pipe_connection_init(uv_pipe_t* handle) {
uv_connection_init((uv_stream_t*) handle);
static void uv__pipe_connection_init(uv_pipe_t* handle) {
uv__connection_init((uv_stream_t*) handle);
handle->read_req.data = handle;
handle->pipe.conn.eof_timer = NULL;
assert(!(handle->flags & UV_HANDLE_PIPESERVER));
@ -209,7 +209,7 @@ static int uv__pipe_server(
int err;
for (;;) {
uv_unique_pipe_name(random, name, nameSize);
uv__unique_pipe_name(random, name, nameSize);
pipeHandle = CreateNamedPipeA(name,
access | FILE_FLAG_FIRST_PIPE_INSTANCE,
@ -427,7 +427,7 @@ int uv__create_stdio_pipe_pair(uv_loop_t* loop,
goto error;
}
uv_pipe_connection_init(parent_pipe);
uv__pipe_connection_init(parent_pipe);
parent_pipe->handle = server_pipe;
*child_pipe_ptr = client_pipe;
@ -450,11 +450,11 @@ int uv__create_stdio_pipe_pair(uv_loop_t* loop,
}
static int uv_set_pipe_handle(uv_loop_t* loop,
uv_pipe_t* handle,
HANDLE pipeHandle,
int fd,
DWORD duplex_flags) {
static int uv__set_pipe_handle(uv_loop_t* loop,
uv_pipe_t* handle,
HANDLE pipeHandle,
int fd,
DWORD duplex_flags) {
NTSTATUS nt_status;
IO_STATUS_BLOCK io_status;
FILE_MODE_INFORMATION mode_info;
@ -578,7 +578,7 @@ static DWORD WINAPI pipe_shutdown_thread_proc(void* parameter) {
}
void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
void uv__pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
int err;
DWORD result;
uv_shutdown_t* req;
@ -630,7 +630,7 @@ void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
if (pipe_info.OutboundQuota == pipe_info.WriteQuotaAvailable) {
/* Short-circuit, no need to call FlushFileBuffers. */
uv_insert_pending_req(loop, (uv_req_t*) req);
uv__insert_pending_req(loop, (uv_req_t*) req);
return;
}
@ -826,7 +826,7 @@ static DWORD WINAPI pipe_connect_thread_proc(void* parameter) {
}
if (pipeHandle != INVALID_HANDLE_VALUE &&
!uv_set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags)) {
!uv__set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags)) {
SET_REQ_SUCCESS(req);
} else {
SET_REQ_ERROR(req, GetLastError());
@ -890,17 +890,17 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
assert(pipeHandle != INVALID_HANDLE_VALUE);
if (uv_set_pipe_handle(loop,
(uv_pipe_t*) req->handle,
pipeHandle,
-1,
duplex_flags)) {
if (uv__set_pipe_handle(loop,
(uv_pipe_t*) req->handle,
pipeHandle,
-1,
duplex_flags)) {
err = GetLastError();
goto error;
}
SET_REQ_SUCCESS(req);
uv_insert_pending_req(loop, (uv_req_t*) req);
uv__insert_pending_req(loop, (uv_req_t*) req);
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
return;
@ -916,7 +916,7 @@ error:
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, err);
uv_insert_pending_req(loop, (uv_req_t*) req);
uv__insert_pending_req(loop, (uv_req_t*) req);
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
return;
@ -980,7 +980,7 @@ void uv__pipe_read_stop(uv_pipe_t* handle) {
/* Cleans up uv_pipe_t (server or connection) and all resources associated with
* it. */
void uv_pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle) {
void uv__pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle) {
int i;
HANDLE pipeHandle;
@ -1013,7 +1013,7 @@ void uv_pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle) {
}
void uv_pipe_close(uv_loop_t* loop, uv_pipe_t* handle) {
void uv__pipe_close(uv_loop_t* loop, uv_pipe_t* handle) {
if (handle->flags & UV_HANDLE_READING) {
handle->flags &= ~UV_HANDLE_READING;
DECREASE_ACTIVE_COUNT(loop, handle);
@ -1024,10 +1024,10 @@ void uv_pipe_close(uv_loop_t* loop, uv_pipe_t* handle) {
DECREASE_ACTIVE_COUNT(loop, handle);
}
uv_pipe_cleanup(loop, handle);
uv__pipe_cleanup(loop, handle);
if (handle->reqs_pending == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
}
handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
@ -1035,13 +1035,13 @@ void uv_pipe_close(uv_loop_t* loop, uv_pipe_t* handle) {
}
static void uv_pipe_queue_accept(uv_loop_t* loop, uv_pipe_t* handle,
static void uv__pipe_queue_accept(uv_loop_t* loop, uv_pipe_t* handle,
uv_pipe_accept_t* req, BOOL firstInstance) {
assert(handle->flags & UV_HANDLE_LISTENING);
if (!firstInstance && !pipe_alloc_accept(loop, handle, req, FALSE)) {
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*) req);
uv__insert_pending_req(loop, (uv_req_t*) req);
handle->reqs_pending++;
return;
}
@ -1061,7 +1061,7 @@ static void uv_pipe_queue_accept(uv_loop_t* loop, uv_pipe_t* handle,
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, GetLastError());
}
uv_insert_pending_req(loop, (uv_req_t*) req);
uv__insert_pending_req(loop, (uv_req_t*) req);
handle->reqs_pending++;
return;
}
@ -1071,7 +1071,7 @@ static void uv_pipe_queue_accept(uv_loop_t* loop, uv_pipe_t* handle,
}
int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
int uv__pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
uv_loop_t* loop = server->loop;
uv_pipe_t* pipe_client;
uv_pipe_accept_t* req;
@ -1110,7 +1110,7 @@ int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
}
/* Initialize the client handle and copy the pipeHandle to the client */
uv_pipe_connection_init(pipe_client);
uv__pipe_connection_init(pipe_client);
pipe_client->handle = req->pipeHandle;
pipe_client->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
@ -1121,7 +1121,7 @@ int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
server->handle = INVALID_HANDLE_VALUE;
if (!(server->flags & UV_HANDLE_CLOSING)) {
uv_pipe_queue_accept(loop, server, req, FALSE);
uv__pipe_queue_accept(loop, server, req, FALSE);
}
}
@ -1130,7 +1130,7 @@ int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
/* Starts listening for connections for the given pipe. */
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
int uv__pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
uv_loop_t* loop = handle->loop;
int i;
@ -1162,7 +1162,7 @@ int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
assert(handle->pipe.serv.accept_reqs[0].pipeHandle != INVALID_HANDLE_VALUE);
for (i = 0; i < handle->pipe.serv.pending_instances; i++) {
uv_pipe_queue_accept(loop, handle, &handle->pipe.serv.accept_reqs[i], i == 0);
uv__pipe_queue_accept(loop, handle, &handle->pipe.serv.accept_reqs[i], i == 0);
}
return 0;
@ -1306,7 +1306,7 @@ static void CALLBACK post_completion_write_wait(void* context, BOOLEAN timed_out
}
static void uv_pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) {
static void uv__pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) {
uv_read_t* req;
int result;
@ -1365,15 +1365,15 @@ static void uv_pipe_queue_read(uv_loop_t* loop, uv_pipe_t* handle) {
return;
error:
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
handle->flags |= UV_HANDLE_READ_PENDING;
handle->reqs_pending++;
}
int uv_pipe_read_start(uv_pipe_t* handle,
uv_alloc_cb alloc_cb,
uv_read_cb read_cb) {
int uv__pipe_read_start(uv_pipe_t* handle,
uv_alloc_cb alloc_cb,
uv_read_cb read_cb) {
uv_loop_t* loop = handle->loop;
handle->flags |= UV_HANDLE_READING;
@ -1391,14 +1391,14 @@ int uv_pipe_read_start(uv_pipe_t* handle,
uv_fatal_error(GetLastError(), "CreateEvent");
}
}
uv_pipe_queue_read(loop, handle);
uv__pipe_queue_read(loop, handle);
}
return 0;
}
static void uv_insert_non_overlapped_write_req(uv_pipe_t* handle,
static void uv__insert_non_overlapped_write_req(uv_pipe_t* handle,
uv_write_t* req) {
req->next_req = NULL;
if (handle->pipe.conn.non_overlapped_writes_tail) {
@ -1434,7 +1434,7 @@ static uv_write_t* uv_remove_non_overlapped_write_req(uv_pipe_t* handle) {
}
static void uv_queue_non_overlapped_write(uv_pipe_t* handle) {
static void uv__queue_non_overlapped_write(uv_pipe_t* handle) {
uv_write_t* req = uv_remove_non_overlapped_write_req(handle);
if (req) {
if (!QueueUserWorkItem(&uv_pipe_writefile_thread_proc,
@ -1575,9 +1575,9 @@ static int uv__pipe_write_data(uv_loop_t* loop,
return 0;
} else if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) {
req->write_buffer = write_buf;
uv_insert_non_overlapped_write_req(handle, req);
uv__insert_non_overlapped_write_req(handle, req);
if (handle->stream.conn.write_reqs_pending == 0) {
uv_queue_non_overlapped_write(handle);
uv__queue_non_overlapped_write(handle);
}
/* Request queued by the kernel. */
@ -1790,7 +1790,7 @@ int uv__pipe_write(uv_loop_t* loop,
}
static void uv_pipe_read_eof(uv_loop_t* loop, uv_pipe_t* handle,
static void uv__pipe_read_eof(uv_loop_t* loop, uv_pipe_t* handle,
uv_buf_t buf) {
/* If there is an eof timer running, we don't need it any more, so discard
* it. */
@ -1802,7 +1802,7 @@ static void uv_pipe_read_eof(uv_loop_t* loop, uv_pipe_t* handle,
}
static void uv_pipe_read_error(uv_loop_t* loop, uv_pipe_t* handle, int error,
static void uv__pipe_read_error(uv_loop_t* loop, uv_pipe_t* handle, int error,
uv_buf_t buf) {
/* If there is an eof timer running, we don't need it any more, so discard
* it. */
@ -1814,12 +1814,12 @@ static void uv_pipe_read_error(uv_loop_t* loop, uv_pipe_t* handle, int error,
}
static void uv_pipe_read_error_or_eof(uv_loop_t* loop, uv_pipe_t* handle,
static void uv__pipe_read_error_or_eof(uv_loop_t* loop, uv_pipe_t* handle,
int error, uv_buf_t buf) {
if (error == ERROR_BROKEN_PIPE) {
uv_pipe_read_eof(loop, handle, buf);
uv__pipe_read_eof(loop, handle, buf);
} else {
uv_pipe_read_error(loop, handle, error, buf);
uv__pipe_read_error(loop, handle, error, buf);
}
}
@ -1890,7 +1890,7 @@ static DWORD uv__pipe_read_data(uv_loop_t* loop,
/* Read into the user buffer. */
if (!ReadFile(handle->handle, buf.base, max_bytes, &bytes_read, NULL)) {
uv_pipe_read_error_or_eof(loop, handle, GetLastError(), buf);
uv__pipe_read_error_or_eof(loop, handle, GetLastError(), buf);
return 0; /* Break out of read loop. */
}
@ -1977,14 +1977,14 @@ invalid:
err = WSAECONNABORTED; /* Maps to UV_ECONNABORTED. */
error:
uv_pipe_read_error_or_eof(loop, handle, err, uv_null_buf_);
uv__pipe_read_error_or_eof(loop, handle, err, uv_null_buf_);
return 0; /* Break out of read loop. */
}
void uv_process_pipe_read_req(uv_loop_t* loop,
uv_pipe_t* handle,
uv_req_t* req) {
void uv__process_pipe_read_req(uv_loop_t* loop,
uv_pipe_t* handle,
uv_req_t* req) {
assert(handle->type == UV_NAMED_PIPE);
handle->flags &= ~(UV_HANDLE_READ_PENDING | UV_HANDLE_CANCELLATION_PENDING);
@ -2005,7 +2005,7 @@ void uv_process_pipe_read_req(uv_loop_t* loop,
* indicate an ERROR_OPERATION_ABORTED error. This error isn't relevant to
* the user; we'll start a new zero-read at the end of this function. */
if (err != ERROR_OPERATION_ABORTED)
uv_pipe_read_error_or_eof(loop, handle, err, uv_null_buf_);
uv__pipe_read_error_or_eof(loop, handle, err, uv_null_buf_);
} else {
/* The zero-read completed without error, indicating there is data
@ -2015,7 +2015,7 @@ void uv_process_pipe_read_req(uv_loop_t* loop,
/* Get the number of bytes available. */
avail = 0;
if (!PeekNamedPipe(handle->handle, NULL, 0, NULL, &avail, NULL))
uv_pipe_read_error_or_eof(loop, handle, GetLastError(), uv_null_buf_);
uv__pipe_read_error_or_eof(loop, handle, GetLastError(), uv_null_buf_);
/* Read until we've either read all the bytes available, or the 'reading'
* flag is cleared. */
@ -2044,12 +2044,12 @@ void uv_process_pipe_read_req(uv_loop_t* loop,
/* Start another zero-read request if necessary. */
if ((handle->flags & UV_HANDLE_READING) &&
!(handle->flags & UV_HANDLE_READ_PENDING)) {
uv_pipe_queue_read(loop, handle);
uv__pipe_queue_read(loop, handle);
}
}
void uv_process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_write_t* req) {
int err;
@ -2091,26 +2091,26 @@ void uv_process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE &&
handle->pipe.conn.non_overlapped_writes_tail) {
assert(handle->stream.conn.write_reqs_pending > 0);
uv_queue_non_overlapped_write(handle);
uv__queue_non_overlapped_write(handle);
}
if (handle->stream.conn.shutdown_req != NULL &&
handle->stream.conn.write_reqs_pending == 0) {
uv_want_endgame(loop, (uv_handle_t*)handle);
uv__want_endgame(loop, (uv_handle_t*)handle);
}
DECREASE_PENDING_REQ_COUNT(handle);
}
void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_req_t* raw_req) {
uv_pipe_accept_t* req = (uv_pipe_accept_t*) raw_req;
assert(handle->type == UV_NAMED_PIPE);
if (handle->flags & UV_HANDLE_CLOSING) {
/* The req->pipeHandle should be freed already in uv_pipe_cleanup(). */
/* The req->pipeHandle should be freed already in uv__pipe_cleanup(). */
assert(req->pipeHandle == INVALID_HANDLE_VALUE);
DECREASE_PENDING_REQ_COUNT(handle);
return;
@ -2130,7 +2130,7 @@ void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
req->pipeHandle = INVALID_HANDLE_VALUE;
}
if (!(handle->flags & UV_HANDLE_CLOSING)) {
uv_pipe_queue_accept(loop, handle, req, FALSE);
uv__pipe_queue_accept(loop, handle, req, FALSE);
}
}
@ -2138,7 +2138,7 @@ void uv_process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
}
void uv_process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_connect_t* req) {
int err;
@ -2149,7 +2149,7 @@ void uv_process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
if (req->cb) {
err = 0;
if (REQ_SUCCESS(req)) {
uv_pipe_connection_init(handle);
uv__pipe_connection_init(handle);
} else {
err = GET_REQ_ERROR(req);
}
@ -2160,7 +2160,7 @@ void uv_process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
}
void uv_process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
void uv__process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
uv_shutdown_t* req) {
assert(handle->type == UV_NAMED_PIPE);
@ -2171,7 +2171,7 @@ void uv_process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
* is readable and we haven't seen EOF come in ourselves. */
eof_timer_init(handle);
/* If reading start the timer right now. Otherwise uv_pipe_queue_read will
/* If reading start the timer right now. Otherwise uv__pipe_queue_read will
* start it. */
if (handle->flags & UV_HANDLE_READ_PENDING) {
eof_timer_start(handle);
@ -2231,9 +2231,9 @@ static void eof_timer_cb(uv_timer_t* timer) {
assert(pipe->type == UV_NAMED_PIPE);
/* This should always be true, since we start the timer only in
* uv_pipe_queue_read after successfully calling ReadFile, or in
* uv_process_pipe_shutdown_req if a read is pending, and we always
* immediately stop the timer in uv_process_pipe_read_req. */
* uv__pipe_queue_read after successfully calling ReadFile, or in
* uv__process_pipe_shutdown_req if a read is pending, and we always
* immediately stop the timer in uv__process_pipe_read_req. */
assert(pipe->flags & UV_HANDLE_READ_PENDING);
/* If there are many packets coming off the iocp then the timer callback may
@ -2254,7 +2254,7 @@ static void eof_timer_cb(uv_timer_t* timer) {
/* Report the eof and update flags. This will get reported even if the user
* stopped reading in the meantime. TODO: is that okay? */
uv_pipe_read_eof(loop, pipe, uv_null_buf_);
uv__pipe_read_eof(loop, pipe, uv_null_buf_);
}
@ -2328,15 +2328,15 @@ int uv_pipe_open(uv_pipe_t* pipe, uv_file file) {
duplex_flags |= UV_HANDLE_READABLE;
if (os_handle == INVALID_HANDLE_VALUE ||
uv_set_pipe_handle(pipe->loop,
pipe,
os_handle,
file,
duplex_flags) == -1) {
uv__set_pipe_handle(pipe->loop,
pipe,
os_handle,
file,
duplex_flags) == -1) {
return UV_EINVAL;
}
uv_pipe_connection_init(pipe);
uv__pipe_connection_init(pipe);
if (pipe->ipc) {
assert(!(pipe->flags & UV_HANDLE_NON_OVERLAPPED_PIPE));

View File

@ -34,7 +34,9 @@ static const GUID uv_msafd_provider_ids[UV_MSAFD_PROVIDER_COUNT] = {
{0xf9eab0c0, 0x26d4, 0x11d0,
{0xbb, 0xbf, 0x00, 0xaa, 0x00, 0x6c, 0x34, 0xe4}},
{0x9fc48064, 0x7298, 0x43e4,
{0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}}
{0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}},
{0xa00943d9, 0x9c2e, 0x4633,
{0x9b, 0x59, 0x00, 0x57, 0xa3, 0x16, 0x09, 0x94}}
};
typedef struct uv_single_fd_set_s {
@ -122,14 +124,14 @@ static void uv__fast_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
memset(&req->u.io.overlapped, 0, sizeof req->u.io.overlapped);
result = uv_msafd_poll((SOCKET) handle->peer_socket,
afd_poll_info,
afd_poll_info,
&req->u.io.overlapped);
result = uv__msafd_poll((SOCKET) handle->peer_socket,
afd_poll_info,
afd_poll_info,
&req->u.io.overlapped);
if (result != 0 && WSAGetLastError() != WSA_IO_PENDING) {
/* Queue this req, reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, req);
uv__insert_pending_req(loop, req);
}
}
@ -195,7 +197,7 @@ static void uv__fast_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
} else if ((handle->flags & UV_HANDLE_CLOSING) &&
handle->submitted_events_1 == 0 &&
handle->submitted_events_2 == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
}
}
@ -357,7 +359,7 @@ static void uv__slow_poll_submit_poll_req(uv_loop_t* loop, uv_poll_t* handle) {
WT_EXECUTELONGFUNCTION)) {
/* Make this req pending, reporting an error. */
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, req);
uv__insert_pending_req(loop, req);
}
}
@ -400,7 +402,7 @@ static void uv__slow_poll_process_poll_req(uv_loop_t* loop, uv_poll_t* handle,
} else if ((handle->flags & UV_HANDLE_CLOSING) &&
handle->submitted_events_1 == 0 &&
handle->submitted_events_2 == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
}
}
@ -524,7 +526,7 @@ int uv_poll_stop(uv_poll_t* handle) {
}
void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
void uv__process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
if (!(handle->flags & UV_HANDLE_POLL_SLOW)) {
uv__fast_poll_process_poll_req(loop, handle, req);
} else {
@ -533,7 +535,7 @@ void uv_process_poll_req(uv_loop_t* loop, uv_poll_t* handle, uv_req_t* req) {
}
int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
int uv__poll_close(uv_loop_t* loop, uv_poll_t* handle) {
AFD_POLL_INFO afd_poll_info;
DWORD error;
int result;
@ -543,7 +545,7 @@ int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
if (handle->submitted_events_1 == 0 &&
handle->submitted_events_2 == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
return 0;
}
@ -559,10 +561,10 @@ int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
afd_poll_info.Handles[0].Status = 0;
afd_poll_info.Handles[0].Events = AFD_POLL_ALL;
result = uv_msafd_poll(handle->socket,
&afd_poll_info,
uv__get_afd_poll_info_dummy(),
uv__get_overlapped_dummy());
result = uv__msafd_poll(handle->socket,
&afd_poll_info,
uv__get_afd_poll_info_dummy(),
uv__get_overlapped_dummy());
if (result == SOCKET_ERROR) {
error = WSAGetLastError();
@ -574,7 +576,7 @@ int uv_poll_close(uv_loop_t* loop, uv_poll_t* handle) {
}
void uv_poll_endgame(uv_loop_t* loop, uv_poll_t* handle) {
void uv__poll_endgame(uv_loop_t* loop, uv_poll_t* handle) {
assert(handle->flags & UV_HANDLE_CLOSING);
assert(!(handle->flags & UV_HANDLE_CLOSED));

View File

@ -105,7 +105,7 @@ static void uv__init_global_job_handle(void) {
}
static int uv_utf8_to_utf16_alloc(const char* s, WCHAR** ws_ptr) {
static int uv__utf8_to_utf16_alloc(const char* s, WCHAR** ws_ptr) {
int ws_len, r;
WCHAR* ws;
@ -137,7 +137,7 @@ static int uv_utf8_to_utf16_alloc(const char* s, WCHAR** ws_ptr) {
}
static void uv_process_init(uv_loop_t* loop, uv_process_t* handle) {
static void uv__process_init(uv_loop_t* loop, uv_process_t* handle) {
uv__handle_init(loop, (uv_handle_t*) handle, UV_PROCESS);
handle->exit_cb = NULL;
handle->pid = 0;
@ -864,7 +864,7 @@ static void CALLBACK exit_wait_callback(void* data, BOOLEAN didTimeout) {
/* Called on main thread after a child process has exited. */
void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle) {
void uv__process_proc_exit(uv_loop_t* loop, uv_process_t* handle) {
int64_t exit_code;
DWORD status;
@ -874,7 +874,7 @@ void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle) {
/* If we're closing, don't call the exit callback. Just schedule a close
* callback now. */
if (handle->flags & UV_HANDLE_CLOSING) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
return;
}
@ -902,7 +902,7 @@ void uv_process_proc_exit(uv_loop_t* loop, uv_process_t* handle) {
}
void uv_process_close(uv_loop_t* loop, uv_process_t* handle) {
void uv__process_close(uv_loop_t* loop, uv_process_t* handle) {
uv__handle_closing(handle);
if (handle->wait_handle != INVALID_HANDLE_VALUE) {
@ -918,12 +918,12 @@ void uv_process_close(uv_loop_t* loop, uv_process_t* handle) {
}
if (!handle->exit_cb_pending) {
uv_want_endgame(loop, (uv_handle_t*)handle);
uv__want_endgame(loop, (uv_handle_t*)handle);
}
}
void uv_process_endgame(uv_loop_t* loop, uv_process_t* handle) {
void uv__process_endgame(uv_loop_t* loop, uv_process_t* handle) {
assert(!handle->exit_cb_pending);
assert(handle->flags & UV_HANDLE_CLOSING);
assert(!(handle->flags & UV_HANDLE_CLOSED));
@ -948,7 +948,7 @@ int uv_spawn(uv_loop_t* loop,
PROCESS_INFORMATION info;
DWORD process_flags;
uv_process_init(loop, process);
uv__process_init(loop, process);
process->exit_cb = options->exit_cb;
if (options->flags & (UV_PROCESS_SETGID | UV_PROCESS_SETUID)) {
@ -969,7 +969,7 @@ int uv_spawn(uv_loop_t* loop,
UV_PROCESS_WINDOWS_HIDE_GUI |
UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS)));
err = uv_utf8_to_utf16_alloc(options->file, &application);
err = uv__utf8_to_utf16_alloc(options->file, &application);
if (err)
goto done;
@ -988,7 +988,7 @@ int uv_spawn(uv_loop_t* loop,
if (options->cwd) {
/* Explicit cwd */
err = uv_utf8_to_utf16_alloc(options->cwd, &cwd);
err = uv__utf8_to_utf16_alloc(options->cwd, &cwd);
if (err)
goto done;
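
The uv__utf8_to_utf16_alloc() helper renamed above allocates a UTF-16 copy of a UTF-8 string (it is used for options->file and options->cwd in the hunks shown). The usual Win32 way to do that is a two-pass MultiByteToWideChar call; the following is a standalone sketch of that pattern, illustrative rather than libuv's actual implementation, with simplified error handling:

#include <stdio.h>
#include <stdlib.h>
#include <windows.h>

/* Convert UTF-8 to a freshly allocated UTF-16 string; NULL on failure. */
static WCHAR* utf8_to_utf16(const char* s) {
  int len;
  WCHAR* ws;

  /* Pass 1: a zero-sized buffer makes MultiByteToWideChar report the
   * required length in WCHARs; -1 tells it to include the trailing NUL. */
  len = MultiByteToWideChar(CP_UTF8, 0, s, -1, NULL, 0);
  if (len == 0)
    return NULL;

  ws = malloc(len * sizeof(WCHAR));
  if (ws == NULL)
    return NULL;

  /* Pass 2: the actual conversion into the buffer just allocated. */
  if (MultiByteToWideChar(CP_UTF8, 0, s, -1, ws, len) == 0) {
    free(ws);
    return NULL;
  }
  return ws;
}

int main(void) {
  WCHAR* ws = utf8_to_utf16("C:\\Program Files\\example");
  if (ws == NULL)
    return 1;
  printf("converted to %d UTF-16 code units\n", lstrlenW(ws) + 1);
  free(ws);
  return 0;
}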

View File

@ -50,7 +50,7 @@
(pRtlNtStatusToDosError(GET_REQ_STATUS((req))))
#define GET_REQ_SOCK_ERROR(req) \
(uv_ntstatus_to_winsock_error(GET_REQ_STATUS((req))))
(uv__ntstatus_to_winsock_error(GET_REQ_STATUS((req))))
#define REGISTER_HANDLE_REQ(loop, handle, req) \
@ -82,12 +82,12 @@
}
INLINE static uv_req_t* uv_overlapped_to_req(OVERLAPPED* overlapped) {
INLINE static uv_req_t* uv__overlapped_to_req(OVERLAPPED* overlapped) {
return CONTAINING_RECORD(overlapped, uv_req_t, u.io.overlapped);
}
INLINE static void uv_insert_pending_req(uv_loop_t* loop, uv_req_t* req) {
INLINE static void uv__insert_pending_req(uv_loop_t* loop, uv_req_t* req) {
req->next_req = NULL;
if (loop->pending_reqs_tail) {
#ifdef _DEBUG
@ -115,19 +115,19 @@ INLINE static void uv_insert_pending_req(uv_loop_t* loop, uv_req_t* req) {
do { \
switch (((uv_handle_t*) (req)->handle_at)->type) { \
case UV_TCP: \
uv_process_tcp_##method##_req(loop, \
uv__process_tcp_##method##_req(loop, \
(uv_tcp_t*) ((req)->handle_at), \
req); \
break; \
\
case UV_NAMED_PIPE: \
uv_process_pipe_##method##_req(loop, \
uv__process_pipe_##method##_req(loop, \
(uv_pipe_t*) ((req)->handle_at), \
req); \
break; \
\
case UV_TTY: \
uv_process_tty_##method##_req(loop, \
uv__process_tty_##method##_req(loop, \
(uv_tty_t*) ((req)->handle_at), \
req); \
break; \
@ -138,7 +138,7 @@ INLINE static void uv_insert_pending_req(uv_loop_t* loop, uv_req_t* req) {
} while (0)
INLINE static int uv_process_reqs(uv_loop_t* loop) {
INLINE static int uv__process_reqs(uv_loop_t* loop) {
uv_req_t* req;
uv_req_t* first;
uv_req_t* next;
@ -174,40 +174,40 @@ INLINE static int uv_process_reqs(uv_loop_t* loop) {
case UV_SHUTDOWN:
/* Tcp shutdown requests don't come here. */
assert(((uv_shutdown_t*) req)->handle->type == UV_NAMED_PIPE);
uv_process_pipe_shutdown_req(
uv__process_pipe_shutdown_req(
loop,
(uv_pipe_t*) ((uv_shutdown_t*) req)->handle,
(uv_shutdown_t*) req);
break;
case UV_UDP_RECV:
uv_process_udp_recv_req(loop, (uv_udp_t*) req->data, req);
uv__process_udp_recv_req(loop, (uv_udp_t*) req->data, req);
break;
case UV_UDP_SEND:
uv_process_udp_send_req(loop,
((uv_udp_send_t*) req)->handle,
(uv_udp_send_t*) req);
uv__process_udp_send_req(loop,
((uv_udp_send_t*) req)->handle,
(uv_udp_send_t*) req);
break;
case UV_WAKEUP:
uv_process_async_wakeup_req(loop, (uv_async_t*) req->data, req);
uv__process_async_wakeup_req(loop, (uv_async_t*) req->data, req);
break;
case UV_SIGNAL_REQ:
uv_process_signal_req(loop, (uv_signal_t*) req->data, req);
uv__process_signal_req(loop, (uv_signal_t*) req->data, req);
break;
case UV_POLL_REQ:
uv_process_poll_req(loop, (uv_poll_t*) req->data, req);
uv__process_poll_req(loop, (uv_poll_t*) req->data, req);
break;
case UV_PROCESS_EXIT:
uv_process_proc_exit(loop, (uv_process_t*) req->data);
uv__process_proc_exit(loop, (uv_process_t*) req->data);
break;
case UV_FS_EVENT_REQ:
uv_process_fs_event_req(loop, req, (uv_fs_event_t*) req->data);
uv__process_fs_event_req(loop, req, (uv_fs_event_t*) req->data);
break;
default:
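
The DELEGATE_STREAM_REQ-style switch above fans a completed request out to the per-handle-type uv__process_<handle>_<method>_req handler by token pasting. A toy, self-contained sketch of that dispatch idiom, with made-up types and handler names:

#include <stdio.h>

typedef enum { KIND_TCP, KIND_PIPE } kind_t;
typedef struct { kind_t kind; } request_t;

static void process_tcp_read_req(request_t* req)  { (void) req; puts("tcp read");  }
static void process_pipe_read_req(request_t* req) { (void) req; puts("pipe read"); }

/* ##method## pastes the method name into each handler call at preprocessing
 * time; the switch then picks the handler for the request's kind at run time. */
#define DELEGATE_REQ(req, method)                                             \
  do {                                                                        \
    switch ((req)->kind) {                                                    \
      case KIND_TCP:  process_tcp_##method##_req(req);  break;                \
      case KIND_PIPE: process_pipe_##method##_req(req); break;                \
    }                                                                         \
  } while (0)

int main(void) {
  request_t r = { KIND_PIPE };
  DELEGATE_REQ(&r, read);   /* calls process_pipe_read_req for this request */
  return 0;
}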

View File

@ -39,7 +39,7 @@ int uv__signal_start(uv_signal_t* handle,
int signum,
int oneshot);
void uv_signals_init(void) {
void uv__signals_init(void) {
InitializeCriticalSection(&uv__signal_lock);
if (!SetConsoleCtrlHandler(uv__signal_control_handler, TRUE))
abort();
@ -231,7 +231,7 @@ int uv__signal_start(uv_signal_t* handle,
}
void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
void uv__process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
uv_req_t* req) {
long dispatched_signum;
@ -254,22 +254,22 @@ void uv_process_signal_req(uv_loop_t* loop, uv_signal_t* handle,
if (handle->flags & UV_HANDLE_CLOSING) {
/* When it is closing, it must be stopped at this point. */
assert(handle->signum == 0);
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
}
}
void uv_signal_close(uv_loop_t* loop, uv_signal_t* handle) {
void uv__signal_close(uv_loop_t* loop, uv_signal_t* handle) {
uv_signal_stop(handle);
uv__handle_closing(handle);
if (handle->pending_signum == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
}
}
void uv_signal_endgame(uv_loop_t* loop, uv_signal_t* handle) {
void uv__signal_endgame(uv_loop_t* loop, uv_signal_t* handle) {
assert(handle->flags & UV_HANDLE_CLOSING);
assert(!(handle->flags & UV_HANDLE_CLOSED));

View File

@ -30,9 +30,9 @@
#include "req-inl.h"
INLINE static void uv_stream_init(uv_loop_t* loop,
uv_stream_t* handle,
uv_handle_type type) {
INLINE static void uv__stream_init(uv_loop_t* loop,
uv_stream_t* handle,
uv_handle_type type) {
uv__handle_init(loop, (uv_handle_t*) handle, type);
handle->write_queue_size = 0;
handle->activecnt = 0;
@ -46,7 +46,7 @@ INLINE static void uv_stream_init(uv_loop_t* loop,
}
INLINE static void uv_connection_init(uv_stream_t* handle) {
INLINE static void uv__connection_init(uv_stream_t* handle) {
handle->flags |= UV_HANDLE_CONNECTION;
}

View File

@ -33,10 +33,10 @@ int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
err = ERROR_INVALID_PARAMETER;
switch (stream->type) {
case UV_TCP:
err = uv_tcp_listen((uv_tcp_t*)stream, backlog, cb);
err = uv__tcp_listen((uv_tcp_t*)stream, backlog, cb);
break;
case UV_NAMED_PIPE:
err = uv_pipe_listen((uv_pipe_t*)stream, backlog, cb);
err = uv__pipe_listen((uv_pipe_t*)stream, backlog, cb);
break;
default:
assert(0);
@ -52,10 +52,10 @@ int uv_accept(uv_stream_t* server, uv_stream_t* client) {
err = ERROR_INVALID_PARAMETER;
switch (server->type) {
case UV_TCP:
err = uv_tcp_accept((uv_tcp_t*)server, (uv_tcp_t*)client);
err = uv__tcp_accept((uv_tcp_t*)server, (uv_tcp_t*)client);
break;
case UV_NAMED_PIPE:
err = uv_pipe_accept((uv_pipe_t*)server, client);
err = uv__pipe_accept((uv_pipe_t*)server, client);
break;
default:
assert(0);
@ -73,13 +73,13 @@ int uv__read_start(uv_stream_t* handle,
err = ERROR_INVALID_PARAMETER;
switch (handle->type) {
case UV_TCP:
err = uv_tcp_read_start((uv_tcp_t*)handle, alloc_cb, read_cb);
err = uv__tcp_read_start((uv_tcp_t*)handle, alloc_cb, read_cb);
break;
case UV_NAMED_PIPE:
err = uv_pipe_read_start((uv_pipe_t*)handle, alloc_cb, read_cb);
err = uv__pipe_read_start((uv_pipe_t*)handle, alloc_cb, read_cb);
break;
case UV_TTY:
err = uv_tty_read_start((uv_tty_t*) handle, alloc_cb, read_cb);
err = uv__tty_read_start((uv_tty_t*) handle, alloc_cb, read_cb);
break;
default:
assert(0);
@ -97,7 +97,7 @@ int uv_read_stop(uv_stream_t* handle) {
err = 0;
if (handle->type == UV_TTY) {
err = uv_tty_read_stop((uv_tty_t*) handle);
err = uv__tty_read_stop((uv_tty_t*) handle);
} else if (handle->type == UV_NAMED_PIPE) {
uv__pipe_read_stop((uv_pipe_t*) handle);
} else {
@ -124,14 +124,14 @@ int uv_write(uv_write_t* req,
err = ERROR_INVALID_PARAMETER;
switch (handle->type) {
case UV_TCP:
err = uv_tcp_write(loop, req, (uv_tcp_t*) handle, bufs, nbufs, cb);
err = uv__tcp_write(loop, req, (uv_tcp_t*) handle, bufs, nbufs, cb);
break;
case UV_NAMED_PIPE:
err = uv__pipe_write(
loop, req, (uv_pipe_t*) handle, bufs, nbufs, NULL, cb);
break;
case UV_TTY:
err = uv_tty_write(loop, req, (uv_tty_t*) handle, bufs, nbufs, cb);
err = uv__tty_write(loop, req, (uv_tty_t*) handle, bufs, nbufs, cb);
break;
default:
assert(0);
@ -217,7 +217,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
uv_want_endgame(loop, (uv_handle_t*)handle);
uv__want_endgame(loop, (uv_handle_t*)handle);
return 0;
}
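
uv_listen(), uv_accept(), uv__read_start() and uv_write() above are the public entry points that dispatch on the handle type to the renamed per-type implementations (uv__tcp_listen(), uv__pipe_accept(), uv__tty_write(), and so on). For context, a minimal sketch of the listen/accept side using only the public API; the port number and the choice to close each connection immediately are arbitrary illustration choices:

#include <stdio.h>
#include <stdlib.h>
#include <uv.h>

static uv_tcp_t server;

static void on_close(uv_handle_t* handle) {
  free(handle);
}

static void on_connection(uv_stream_t* srv, int status) {
  uv_tcp_t* client;

  if (status < 0)
    return;

  client = malloc(sizeof(*client));
  if (client == NULL)
    return;

  uv_tcp_init(srv->loop, client);
  if (uv_accept(srv, (uv_stream_t*) client) == 0)
    printf("accepted a connection\n");

  /* A real server would start reading here; the sketch just closes. */
  uv_close((uv_handle_t*) client, on_close);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  struct sockaddr_in addr;

  uv_ip4_addr("0.0.0.0", 7000, &addr);
  uv_tcp_init(loop, &server);
  uv_tcp_bind(&server, (const struct sockaddr*) &addr, 0);
  uv_listen((uv_stream_t*) &server, 128, on_connection);
  return uv_run(loop, UV_RUN_DEFAULT);
}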

View File

@ -78,11 +78,11 @@ static int uv__tcp_keepalive(uv_tcp_t* handle, SOCKET socket, int enable, unsign
}
static int uv_tcp_set_socket(uv_loop_t* loop,
uv_tcp_t* handle,
SOCKET socket,
int family,
int imported) {
static int uv__tcp_set_socket(uv_loop_t* loop,
uv_tcp_t* handle,
SOCKET socket,
int family,
int imported) {
DWORD yes = 1;
int non_ifs_lsp;
int err;
@ -162,7 +162,7 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* handle, unsigned int flags) {
if (flags & ~0xFF)
return UV_EINVAL;
uv_stream_init(loop, (uv_stream_t*) handle, UV_TCP);
uv__stream_init(loop, (uv_stream_t*) handle, UV_TCP);
handle->tcp.serv.accept_reqs = NULL;
handle->tcp.serv.pending_accepts = NULL;
handle->socket = INVALID_SOCKET;
@ -173,7 +173,7 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* handle, unsigned int flags) {
handle->delayed_error = 0;
/* If anything fails beyond this point we need to remove the handle from
* the handle queue, since it was added by uv__handle_init in uv_stream_init.
* the handle queue, since it was added by uv__handle_init in uv__stream_init.
*/
if (domain != AF_UNSPEC) {
@ -187,7 +187,7 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* handle, unsigned int flags) {
return uv_translate_sys_error(err);
}
err = uv_tcp_set_socket(handle->loop, handle, sock, domain, 0);
err = uv__tcp_set_socket(handle->loop, handle, sock, domain, 0);
if (err) {
closesocket(sock);
QUEUE_REMOVE(&handle->handle_queue);
@ -205,7 +205,7 @@ int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* handle) {
}
void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
void uv__tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
int err;
unsigned int i;
uv_tcp_accept_t* req;
@ -286,10 +286,10 @@ void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
* See issue #1360.
*
*/
static int uv_tcp_try_bind(uv_tcp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen,
unsigned int flags) {
static int uv__tcp_try_bind(uv_tcp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen,
unsigned int flags) {
DWORD err;
int r;
@ -305,7 +305,7 @@ static int uv_tcp_try_bind(uv_tcp_t* handle,
return WSAGetLastError();
}
err = uv_tcp_set_socket(handle->loop, handle, sock, addr->sa_family, 0);
err = uv__tcp_set_socket(handle->loop, handle, sock, addr->sa_family, 0);
if (err) {
closesocket(sock);
return err;
@ -385,7 +385,7 @@ static void CALLBACK post_write_completion(void* context, BOOLEAN timed_out) {
}
static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
static void uv__tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
uv_loop_t* loop = handle->loop;
BOOL success;
DWORD bytes;
@ -406,7 +406,7 @@ static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
accept_socket = socket(family, SOCK_STREAM, 0);
if (accept_socket == INVALID_SOCKET) {
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
handle->reqs_pending++;
return;
}
@ -414,7 +414,7 @@ static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
/* Make the socket non-inheritable */
if (!SetHandleInformation((HANDLE) accept_socket, HANDLE_FLAG_INHERIT, 0)) {
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
handle->reqs_pending++;
closesocket(accept_socket);
return;
@ -440,7 +440,7 @@ static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
/* Process the req without IOCP. */
req->accept_socket = accept_socket;
handle->reqs_pending++;
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
} else if (UV_SUCCEEDED_WITH_IOCP(success)) {
/* The req will be processed with IOCP. */
req->accept_socket = accept_socket;
@ -451,12 +451,12 @@ static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
req->event_handle, post_completion, (void*) req,
INFINITE, WT_EXECUTEINWAITTHREAD)) {
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
}
} else {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
handle->reqs_pending++;
/* Destroy the preallocated client socket. */
closesocket(accept_socket);
@ -469,7 +469,7 @@ static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
}
static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
static void uv__tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
uv_read_t* req;
uv_buf_t buf;
int result;
@ -524,7 +524,7 @@ static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
/* Process the req without IOCP. */
req->u.io.overlapped.InternalHigh = bytes;
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* The req will be processed with IOCP. */
if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
@ -533,12 +533,12 @@ static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
req->event_handle, post_completion, (void*) req,
INFINITE, WT_EXECUTEINWAITTHREAD)) {
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
}
} else {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
}
}
@ -558,7 +558,7 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
}
int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
int uv__tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
unsigned int i, simultaneous_accepts;
uv_tcp_accept_t* req;
int err;
@ -578,10 +578,10 @@ int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
}
if (!(handle->flags & UV_HANDLE_BOUND)) {
err = uv_tcp_try_bind(handle,
(const struct sockaddr*) &uv_addr_ip4_any_,
sizeof(uv_addr_ip4_any_),
0);
err = uv__tcp_try_bind(handle,
(const struct sockaddr*) &uv_addr_ip4_any_,
sizeof(uv_addr_ip4_any_),
0);
if (err)
return err;
if (handle->delayed_error)
@ -589,7 +589,7 @@ int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
}
if (!handle->tcp.serv.func_acceptex) {
if (!uv_get_acceptex_function(handle->socket, &handle->tcp.serv.func_acceptex)) {
if (!uv__get_acceptex_function(handle->socket, &handle->tcp.serv.func_acceptex)) {
return WSAEAFNOSUPPORT;
}
}
@ -630,7 +630,7 @@ int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
req->event_handle = NULL;
}
uv_tcp_queue_accept(handle, req);
uv__tcp_queue_accept(handle, req);
}
/* Initialize other unused requests too, because uv_tcp_endgame doesn't
@ -650,7 +650,7 @@ int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
}
int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
int uv__tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
uv_loop_t* loop = server->loop;
int err = 0;
int family;
@ -672,7 +672,7 @@ int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
family = AF_INET;
}
err = uv_tcp_set_socket(client->loop,
err = uv__tcp_set_socket(client->loop,
client,
req->accept_socket,
family,
@ -680,7 +680,7 @@ int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
if (err) {
closesocket(req->accept_socket);
} else {
uv_connection_init((uv_stream_t*) client);
uv__connection_init((uv_stream_t*) client);
/* AcceptEx() implicitly binds the accepted socket. */
client->flags |= UV_HANDLE_BOUND | UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
}
@ -693,7 +693,7 @@ int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
if (!(server->flags & UV_HANDLE_CLOSING)) {
/* Check if we're in a middle of changing the number of pending accepts. */
if (!(server->flags & UV_HANDLE_TCP_ACCEPT_STATE_CHANGING)) {
uv_tcp_queue_accept(server, req);
uv__tcp_queue_accept(server, req);
} else {
/* We better be switching to a single pending accept. */
assert(server->flags & UV_HANDLE_TCP_SINGLE_ACCEPT);
@ -706,7 +706,7 @@ int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
* All previously queued accept requests are now processed.
* We now switch to queueing just a single accept.
*/
uv_tcp_queue_accept(server, &server->tcp.serv.accept_reqs[0]);
uv__tcp_queue_accept(server, &server->tcp.serv.accept_reqs[0]);
server->flags &= ~UV_HANDLE_TCP_ACCEPT_STATE_CHANGING;
server->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
}
@ -719,7 +719,7 @@ int uv_tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
}
int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
int uv__tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb) {
uv_loop_t* loop = handle->loop;
@ -738,7 +738,7 @@ int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
uv_fatal_error(GetLastError(), "CreateEvent");
}
}
uv_tcp_queue_read(loop, handle);
uv__tcp_queue_read(loop, handle);
}
return 0;
@ -779,7 +779,7 @@ static int uv__is_fast_loopback_fail_supported(void) {
return os_info.dwBuildNumber >= 16299;
}
static int uv_tcp_try_connect(uv_connect_t* req,
static int uv__tcp_try_connect(uv_connect_t* req,
uv_tcp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen,
@ -807,7 +807,7 @@ static int uv_tcp_try_connect(uv_connect_t* req,
} else {
abort();
}
err = uv_tcp_try_bind(handle, bind_addr, addrlen, 0);
err = uv__tcp_try_bind(handle, bind_addr, addrlen, 0);
if (err)
return err;
if (handle->delayed_error != 0)
@ -815,7 +815,7 @@ static int uv_tcp_try_connect(uv_connect_t* req,
}
if (!handle->tcp.conn.func_connectex) {
if (!uv_get_connectex_function(handle->socket, &handle->tcp.conn.func_connectex)) {
if (!uv__get_connectex_function(handle->socket, &handle->tcp.conn.func_connectex)) {
return WSAEAFNOSUPPORT;
}
}
@ -850,7 +850,7 @@ out:
/* Process the req without IOCP. */
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
return 0;
}
@ -866,7 +866,7 @@ out:
/* Process the req without IOCP. */
handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
} else if (UV_SUCCEEDED_WITH_IOCP(success)) {
/* The req will be processed with IOCP. */
handle->reqs_pending++;
@ -903,7 +903,7 @@ int uv_tcp_getpeername(const uv_tcp_t* handle,
}
int uv_tcp_write(uv_loop_t* loop,
int uv__tcp_write(uv_loop_t* loop,
uv_write_t* req,
uv_tcp_t* handle,
const uv_buf_t bufs[],
@ -941,7 +941,7 @@ int uv_tcp_write(uv_loop_t* loop,
handle->reqs_pending++;
handle->stream.conn.write_reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
uv_insert_pending_req(loop, (uv_req_t*) req);
uv__insert_pending_req(loop, (uv_req_t*) req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* Request queued by the kernel. */
req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
@ -954,7 +954,7 @@ int uv_tcp_write(uv_loop_t* loop,
req->event_handle, post_write_completion, (void*) req,
INFINITE, WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE)) {
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
}
} else {
/* Send failed due to an error, report it later */
@ -963,7 +963,7 @@ int uv_tcp_write(uv_loop_t* loop,
handle->stream.conn.write_reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req);
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, (uv_req_t*) req);
uv__insert_pending_req(loop, (uv_req_t*) req);
}
return 0;
@ -994,7 +994,7 @@ int uv__tcp_try_write(uv_tcp_t* handle,
}
void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
void uv__process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_req_t* req) {
DWORD bytes, flags, err;
uv_buf_t buf;
@ -1115,7 +1115,7 @@ done:
/* Post another read if still reading and not closing. */
if ((handle->flags & UV_HANDLE_READING) &&
!(handle->flags & UV_HANDLE_READ_PENDING)) {
uv_tcp_queue_read(loop, handle);
uv__tcp_queue_read(loop, handle);
}
}
@ -1123,7 +1123,7 @@ done:
}
void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
void uv__process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_write_t* req) {
int err;
@ -1161,7 +1161,7 @@ void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
handle->socket = INVALID_SOCKET;
}
if (handle->stream.conn.shutdown_req != NULL) {
uv_want_endgame(loop, (uv_handle_t*)handle);
uv__want_endgame(loop, (uv_handle_t*)handle);
}
}
@ -1169,7 +1169,7 @@ void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
}
void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
void uv__process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_req_t* raw_req) {
uv_tcp_accept_t* req = (uv_tcp_accept_t*) raw_req;
int err;
@ -1209,7 +1209,7 @@ void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
closesocket(req->accept_socket);
req->accept_socket = INVALID_SOCKET;
if (handle->flags & UV_HANDLE_LISTENING) {
uv_tcp_queue_accept(handle, req);
uv__tcp_queue_accept(handle, req);
}
}
@ -1217,7 +1217,7 @@ void uv_process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
}
void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
void uv__process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
uv_connect_t* req) {
int err;
@ -1242,7 +1242,7 @@ void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
SO_UPDATE_CONNECT_CONTEXT,
NULL,
0) == 0) {
uv_connection_init((uv_stream_t*)handle);
uv__connection_init((uv_stream_t*)handle);
handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
loop->active_tcp_streams++;
} else {
@ -1312,7 +1312,7 @@ int uv__tcp_xfer_import(uv_tcp_t* tcp,
return WSAGetLastError();
}
err = uv_tcp_set_socket(
err = uv__tcp_set_socket(
tcp->loop, tcp, socket, xfer_info->socket_info.iAddressFamily, 1);
if (err) {
closesocket(socket);
@ -1323,7 +1323,7 @@ int uv__tcp_xfer_import(uv_tcp_t* tcp,
tcp->flags |= UV_HANDLE_BOUND | UV_HANDLE_SHARED_TCP_SOCKET;
if (xfer_type == UV__IPC_SOCKET_XFER_TCP_CONNECTION) {
uv_connection_init((uv_stream_t*)tcp);
uv__connection_init((uv_stream_t*)tcp);
tcp->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
}
@ -1404,7 +1404,7 @@ int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
}
static void uv_tcp_try_cancel_reqs(uv_tcp_t* tcp) {
static void uv__tcp_try_cancel_reqs(uv_tcp_t* tcp) {
SOCKET socket;
int non_ifs_lsp;
int reading;
@ -1456,9 +1456,9 @@ static void uv_tcp_try_cancel_reqs(uv_tcp_t* tcp) {
}
void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
void uv__tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
if (tcp->flags & UV_HANDLE_CONNECTION) {
uv_tcp_try_cancel_reqs(tcp);
uv__tcp_try_cancel_reqs(tcp);
if (tcp->flags & UV_HANDLE_READING) {
uv_read_stop((uv_stream_t*) tcp);
}
@ -1498,7 +1498,7 @@ void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
uv__handle_closing(tcp);
if (tcp->reqs_pending == 0) {
uv_want_endgame(tcp->loop, (uv_handle_t*)tcp);
uv__want_endgame(tcp->loop, (uv_handle_t*)tcp);
}
}
@ -1520,7 +1520,7 @@ int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
return uv_translate_sys_error(GetLastError());
}
err = uv_tcp_set_socket(handle->loop,
err = uv__tcp_set_socket(handle->loop,
handle,
sock,
protocol_info.iAddressFamily,
@ -1537,7 +1537,7 @@ int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
saddr_len = sizeof(saddr);
if (!uv_tcp_getpeername(handle, (struct sockaddr*) &saddr, &saddr_len)) {
/* Socket is already connected. */
uv_connection_init((uv_stream_t*) handle);
uv__connection_init((uv_stream_t*) handle);
handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
}
}
@ -1555,7 +1555,7 @@ int uv__tcp_bind(uv_tcp_t* handle,
unsigned int flags) {
int err;
err = uv_tcp_try_bind(handle, addr, addrlen, flags);
err = uv__tcp_try_bind(handle, addr, addrlen, flags);
if (err)
return uv_translate_sys_error(err);
@ -1573,7 +1573,7 @@ int uv__tcp_connect(uv_connect_t* req,
uv_connect_cb cb) {
int err;
err = uv_tcp_try_connect(req, handle, addr, addrlen, cb);
err = uv__tcp_try_connect(req, handle, addr, addrlen, cb);
if (err)
return uv_translate_sys_error(err);
@ -1634,7 +1634,7 @@ int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int f
goto wsaerror;
if (!SetHandleInformation((HANDLE) client1, HANDLE_FLAG_INHERIT, 0))
goto error;
if (!uv_get_acceptex_function(server, &func_acceptex)) {
if (!uv__get_acceptex_function(server, &func_acceptex)) {
err = WSAEAFNOSUPPORT;
goto cleanup;
}
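
On the connect side, the public uv_tcp_connect() and uv_write() calls end up in the renamed uv__tcp_try_connect() and uv__tcp_write() paths above (ConnectEx via func_connectex and the IOCP bookkeeping visible in these hunks). A minimal public-API sketch of that path; the loopback address, port and payload are arbitrary:

#include <stdio.h>
#include <uv.h>

static uv_tcp_t sock;
static uv_connect_t connect_req;
static uv_write_t write_req;
static char payload[] = "ping";   /* must stay alive until on_write fires */

static void on_write(uv_write_t* req, int status) {
  printf("write completed: %d\n", status);
  uv_close((uv_handle_t*) req->handle, NULL);
}

static void on_connect(uv_connect_t* req, int status) {
  uv_buf_t buf;

  if (status < 0) {
    fprintf(stderr, "connect failed: %s\n", uv_strerror(status));
    uv_close((uv_handle_t*) req->handle, NULL);
    return;
  }

  /* Queue one write; on Windows this is what uv__tcp_write() services. */
  buf = uv_buf_init(payload, 4);
  uv_write(&write_req, req->handle, &buf, 1, on_write);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  struct sockaddr_in dest;

  uv_ip4_addr("127.0.0.1", 7000, &dest);
  uv_tcp_init(loop, &sock);
  uv_tcp_connect(&connect_req, &sock, (const struct sockaddr*) &dest, on_connect);
  return uv_run(loop, UV_RUN_DEFAULT);
}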

View File

@ -182,8 +182,9 @@ int uv_thread_create_ex(uv_thread_t* tid,
uv_thread_t uv_thread_self(void) {
uv_thread_t key;
uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
uv_thread_t key = uv_key_get(&uv__current_thread_key);
key = uv_key_get(&uv__current_thread_key);
if (key == NULL) {
/* If the thread wasn't started by uv_thread_create (such as the main
* thread), we assign an id to it now. */
@ -248,113 +249,60 @@ void uv_mutex_unlock(uv_mutex_t* mutex) {
LeaveCriticalSection(mutex);
}
/* Ensure that the ABI for this type remains stable in v1.x */
#ifdef _WIN64
STATIC_ASSERT(sizeof(uv_rwlock_t) == 80);
#else
STATIC_ASSERT(sizeof(uv_rwlock_t) == 48);
#endif
int uv_rwlock_init(uv_rwlock_t* rwlock) {
/* Initialize the semaphore that acts as the write lock. */
HANDLE handle = CreateSemaphoreW(NULL, 1, 1, NULL);
if (handle == NULL)
return uv_translate_sys_error(GetLastError());
rwlock->state_.write_semaphore_ = handle;
/* Initialize the critical section protecting the reader count. */
InitializeCriticalSection(&rwlock->state_.num_readers_lock_);
/* Initialize the reader count. */
rwlock->state_.num_readers_ = 0;
memset(rwlock, 0, sizeof(*rwlock));
InitializeSRWLock(&rwlock->read_write_lock_);
return 0;
}
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
DeleteCriticalSection(&rwlock->state_.num_readers_lock_);
CloseHandle(rwlock->state_.write_semaphore_);
/* SRWLock does not need explicit destruction so long as there are no waiting threads.
   See: https://docs.microsoft.com/windows/win32/api/synchapi/nf-synchapi-initializesrwlock#remarks */
}
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
/* Acquire the lock that protects the reader count. */
EnterCriticalSection(&rwlock->state_.num_readers_lock_);
/* Increase the reader count, and lock for write if this is the first
* reader.
*/
if (++rwlock->state_.num_readers_ == 1) {
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
if (r != WAIT_OBJECT_0)
uv_fatal_error(GetLastError(), "WaitForSingleObject");
}
/* Release the lock that protects the reader count. */
LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
AcquireSRWLockShared(&rwlock->read_write_lock_);
}
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
int err;
if (!TryEnterCriticalSection(&rwlock->state_.num_readers_lock_))
if (!TryAcquireSRWLockShared(&rwlock->read_write_lock_))
return UV_EBUSY;
err = 0;
if (rwlock->state_.num_readers_ == 0) {
/* Currently there are no other readers, which means that the write lock
* needs to be acquired.
*/
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0);
if (r == WAIT_OBJECT_0)
rwlock->state_.num_readers_++;
else if (r == WAIT_TIMEOUT)
err = UV_EBUSY;
else if (r == WAIT_FAILED)
uv_fatal_error(GetLastError(), "WaitForSingleObject");
} else {
/* The write lock has already been acquired because there are other
* active readers.
*/
rwlock->state_.num_readers_++;
}
LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
return err;
return 0;
}
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
EnterCriticalSection(&rwlock->state_.num_readers_lock_);
if (--rwlock->state_.num_readers_ == 0) {
if (!ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL))
uv_fatal_error(GetLastError(), "ReleaseSemaphore");
}
LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
ReleaseSRWLockShared(&rwlock->read_write_lock_);
}
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
if (r != WAIT_OBJECT_0)
uv_fatal_error(GetLastError(), "WaitForSingleObject");
AcquireSRWLockExclusive(&rwlock->read_write_lock_);
}
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0);
if (r == WAIT_OBJECT_0)
return 0;
else if (r == WAIT_TIMEOUT)
if (!TryAcquireSRWLockExclusive(&rwlock->read_write_lock_))
return UV_EBUSY;
else
uv_fatal_error(GetLastError(), "WaitForSingleObject");
return 0;
}
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
if (!ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL))
uv_fatal_error(GetLastError(), "ReleaseSemaphore");
ReleaseSRWLockExclusive(&rwlock->read_write_lock_);
}
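
For orientation, a minimal sketch of how the reworked uv_rwlock API above is consumed (illustrative only; the reader/writer threads and shared counter are invented, but the uv_* calls are the public libuv functions that now map onto SRWLock on Windows):

#include <uv.h>
#include <stdio.h>

static uv_rwlock_t lock;
static int shared_value;

static void reader(void* arg) {
  (void) arg;
  uv_rwlock_rdlock(&lock);    /* AcquireSRWLockShared under the hood */
  printf("read %d\n", shared_value);
  uv_rwlock_rdunlock(&lock);  /* ReleaseSRWLockShared */
}

static void writer(void* arg) {
  (void) arg;
  uv_rwlock_wrlock(&lock);    /* AcquireSRWLockExclusive */
  shared_value++;
  uv_rwlock_wrunlock(&lock);  /* ReleaseSRWLockExclusive */
}

int main(void) {
  uv_thread_t r, w;
  if (uv_rwlock_init(&lock))  /* InitializeSRWLock; cannot fail on Windows */
    return 1;
  uv_thread_create(&w, writer, NULL);
  uv_thread_create(&r, reader, NULL);
  uv_thread_join(&w);
  uv_thread_join(&r);
  uv_rwlock_destroy(&lock);   /* a no-op for SRWLock, still required for portable code */
  return 0;
}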


@ -67,10 +67,10 @@
#define CURSOR_SIZE_SMALL 25
#define CURSOR_SIZE_LARGE 100
static void uv_tty_capture_initial_style(
static void uv__tty_capture_initial_style(
CONSOLE_SCREEN_BUFFER_INFO* screen_buffer_info,
CONSOLE_CURSOR_INFO* cursor_info);
static void uv_tty_update_virtual_window(CONSOLE_SCREEN_BUFFER_INFO* info);
static void uv__tty_update_virtual_window(CONSOLE_SCREEN_BUFFER_INFO* info);
static int uv__cancel_read_console(uv_tty_t* handle);
@ -163,7 +163,7 @@ static BOOL uv__need_check_vterm_state = TRUE;
static uv_tty_vtermstate_t uv__vterm_state = UV_TTY_UNSUPPORTED;
static void uv__determine_vterm_state(HANDLE handle);
void uv_console_init(void) {
void uv__console_init(void) {
if (uv_sem_init(&uv_tty_output_lock, 1))
abort();
uv__tty_console_handle = CreateFileW(L"CONOUT$",
@ -238,16 +238,16 @@ int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, uv_file fd, int unused) {
uv__determine_vterm_state(handle);
/* Remember the original console text attributes and cursor info. */
uv_tty_capture_initial_style(&screen_buffer_info, &cursor_info);
uv__tty_capture_initial_style(&screen_buffer_info, &cursor_info);
uv_tty_update_virtual_window(&screen_buffer_info);
uv__tty_update_virtual_window(&screen_buffer_info);
uv_sem_post(&uv_tty_output_lock);
}
uv_stream_init(loop, (uv_stream_t*) tty, UV_TTY);
uv_connection_init((uv_stream_t*) tty);
uv__stream_init(loop, (uv_stream_t*) tty, UV_TTY);
uv__connection_init((uv_stream_t*) tty);
tty->handle = handle;
tty->u.fd = fd;
@ -289,7 +289,7 @@ int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, uv_file fd, int unused) {
/* Set the default console text attributes based on how the console was
* configured when libuv started.
*/
static void uv_tty_capture_initial_style(
static void uv__tty_capture_initial_style(
CONSOLE_SCREEN_BUFFER_INFO* screen_buffer_info,
CONSOLE_CURSOR_INFO* cursor_info) {
static int style_captured = 0;
@ -380,7 +380,7 @@ int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
was_reading = 1;
alloc_cb = tty->alloc_cb;
read_cb = tty->read_cb;
err = uv_tty_read_stop(tty);
err = uv__tty_read_stop(tty);
if (err) {
return uv_translate_sys_error(err);
}
@ -404,7 +404,7 @@ int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
/* If we just stopped reading, restart. */
if (was_reading) {
err = uv_tty_read_start(tty, alloc_cb, read_cb);
err = uv__tty_read_start(tty, alloc_cb, read_cb);
if (err) {
return uv_translate_sys_error(err);
}
@ -422,7 +422,7 @@ int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) {
}
uv_sem_wait(&uv_tty_output_lock);
uv_tty_update_virtual_window(&info);
uv__tty_update_virtual_window(&info);
uv_sem_post(&uv_tty_output_lock);
*width = uv_tty_virtual_width;
@ -452,7 +452,7 @@ static void CALLBACK uv_tty_post_raw_read(void* data, BOOLEAN didTimeout) {
}
static void uv_tty_queue_read_raw(uv_loop_t* loop, uv_tty_t* handle) {
static void uv__tty_queue_read_raw(uv_loop_t* loop, uv_tty_t* handle) {
uv_read_t* req;
BOOL r;
@ -475,7 +475,7 @@ static void uv_tty_queue_read_raw(uv_loop_t* loop, uv_tty_t* handle) {
if (!r) {
handle->tty.rd.read_raw_wait = NULL;
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
}
handle->flags |= UV_HANDLE_READ_PENDING;
@ -579,7 +579,7 @@ static DWORD CALLBACK uv_tty_line_read_thread(void* data) {
}
static void uv_tty_queue_read_line(uv_loop_t* loop, uv_tty_t* handle) {
static void uv__tty_queue_read_line(uv_loop_t* loop, uv_tty_t* handle) {
uv_read_t* req;
BOOL r;
@ -611,7 +611,7 @@ static void uv_tty_queue_read_line(uv_loop_t* loop, uv_tty_t* handle) {
WT_EXECUTELONGFUNCTION);
if (!r) {
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
}
handle->flags |= UV_HANDLE_READ_PENDING;
@ -619,11 +619,11 @@ static void uv_tty_queue_read_line(uv_loop_t* loop, uv_tty_t* handle) {
}
static void uv_tty_queue_read(uv_loop_t* loop, uv_tty_t* handle) {
static void uv__tty_queue_read(uv_loop_t* loop, uv_tty_t* handle) {
if (handle->flags & UV_HANDLE_TTY_RAW) {
uv_tty_queue_read_raw(loop, handle);
uv__tty_queue_read_raw(loop, handle);
} else {
uv_tty_queue_read_line(loop, handle);
uv__tty_queue_read_line(loop, handle);
}
}
@ -947,7 +947,7 @@ void uv_process_tty_read_raw_req(uv_loop_t* loop, uv_tty_t* handle,
/* Wait for more input events. */
if ((handle->flags & UV_HANDLE_READING) &&
!(handle->flags & UV_HANDLE_READ_PENDING)) {
uv_tty_queue_read(loop, handle);
uv__tty_queue_read(loop, handle);
}
DECREASE_PENDING_REQ_COUNT(handle);
@ -992,14 +992,14 @@ void uv_process_tty_read_line_req(uv_loop_t* loop, uv_tty_t* handle,
/* Wait for more input events. */
if ((handle->flags & UV_HANDLE_READING) &&
!(handle->flags & UV_HANDLE_READ_PENDING)) {
uv_tty_queue_read(loop, handle);
uv__tty_queue_read(loop, handle);
}
DECREASE_PENDING_REQ_COUNT(handle);
}
void uv_process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
uv_req_t* req) {
assert(handle->type == UV_TTY);
assert(handle->flags & UV_HANDLE_TTY_READABLE);
@ -1015,7 +1015,7 @@ void uv_process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle,
}
int uv_tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb,
int uv__tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb,
uv_read_cb read_cb) {
uv_loop_t* loop = handle->loop;
@ -1038,20 +1038,20 @@ int uv_tty_read_start(uv_tty_t* handle, uv_alloc_cb alloc_cb,
* Short-circuit if this could be the case. */
if (handle->tty.rd.last_key_len > 0) {
SET_REQ_SUCCESS(&handle->read_req);
uv_insert_pending_req(handle->loop, (uv_req_t*) &handle->read_req);
uv__insert_pending_req(handle->loop, (uv_req_t*) &handle->read_req);
/* Make sure no attempt is made to insert it again until it's handled. */
handle->flags |= UV_HANDLE_READ_PENDING;
handle->reqs_pending++;
return 0;
}
uv_tty_queue_read(loop, handle);
uv__tty_queue_read(loop, handle);
return 0;
}
int uv_tty_read_stop(uv_tty_t* handle) {
int uv__tty_read_stop(uv_tty_t* handle) {
INPUT_RECORD record;
DWORD written, err;
@ -1137,7 +1137,7 @@ static int uv__cancel_read_console(uv_tty_t* handle) {
}
static void uv_tty_update_virtual_window(CONSOLE_SCREEN_BUFFER_INFO* info) {
static void uv__tty_update_virtual_window(CONSOLE_SCREEN_BUFFER_INFO* info) {
uv_tty_virtual_width = info->dwSize.X;
uv_tty_virtual_height = info->srWindow.Bottom - info->srWindow.Top + 1;
@ -1160,12 +1160,12 @@ static void uv_tty_update_virtual_window(CONSOLE_SCREEN_BUFFER_INFO* info) {
}
static COORD uv_tty_make_real_coord(uv_tty_t* handle,
static COORD uv__tty_make_real_coord(uv_tty_t* handle,
CONSOLE_SCREEN_BUFFER_INFO* info, int x, unsigned char x_relative, int y,
unsigned char y_relative) {
COORD result;
uv_tty_update_virtual_window(info);
uv__tty_update_virtual_window(info);
/* Adjust y position */
if (y_relative) {
@ -1197,7 +1197,7 @@ static COORD uv_tty_make_real_coord(uv_tty_t* handle,
}
static int uv_tty_emit_text(uv_tty_t* handle, WCHAR buffer[], DWORD length,
static int uv__tty_emit_text(uv_tty_t* handle, WCHAR buffer[], DWORD length,
DWORD* error) {
DWORD written;
@ -1218,7 +1218,7 @@ static int uv_tty_emit_text(uv_tty_t* handle, WCHAR buffer[], DWORD length,
}
static int uv_tty_move_caret(uv_tty_t* handle, int x, unsigned char x_relative,
static int uv__tty_move_caret(uv_tty_t* handle, int x, unsigned char x_relative,
int y, unsigned char y_relative, DWORD* error) {
CONSOLE_SCREEN_BUFFER_INFO info;
COORD pos;
@ -1232,7 +1232,7 @@ static int uv_tty_move_caret(uv_tty_t* handle, int x, unsigned char x_relative,
*error = GetLastError();
}
pos = uv_tty_make_real_coord(handle, &info, x, x_relative, y, y_relative);
pos = uv__tty_make_real_coord(handle, &info, x, x_relative, y, y_relative);
if (!SetConsoleCursorPosition(handle->handle, pos)) {
if (GetLastError() == ERROR_INVALID_PARAMETER) {
@ -1248,7 +1248,7 @@ static int uv_tty_move_caret(uv_tty_t* handle, int x, unsigned char x_relative,
}
static int uv_tty_reset(uv_tty_t* handle, DWORD* error) {
static int uv__tty_reset(uv_tty_t* handle, DWORD* error) {
const COORD origin = {0, 0};
const WORD char_attrs = uv_tty_default_text_attributes;
CONSOLE_SCREEN_BUFFER_INFO screen_buffer_info;
@ -1300,7 +1300,7 @@ static int uv_tty_reset(uv_tty_t* handle, DWORD* error) {
/* Move the virtual window up to the top. */
uv_tty_virtual_offset = 0;
uv_tty_update_virtual_window(&screen_buffer_info);
uv__tty_update_virtual_window(&screen_buffer_info);
/* Reset the cursor size and the cursor state. */
if (!SetConsoleCursorInfo(handle->handle, &uv_tty_default_cursor_info)) {
@ -1312,7 +1312,7 @@ static int uv_tty_reset(uv_tty_t* handle, DWORD* error) {
}
static int uv_tty_clear(uv_tty_t* handle, int dir, char entire_screen,
static int uv__tty_clear(uv_tty_t* handle, int dir, char entire_screen,
DWORD* error) {
CONSOLE_SCREEN_BUFFER_INFO info;
COORD start, end;
@ -1341,7 +1341,7 @@ static int uv_tty_clear(uv_tty_t* handle, int dir, char entire_screen,
x2r = 1;
} else {
/* Clear to end of row. We pretend the console is 65536 characters wide,
* uv_tty_make_real_coord will clip it to the actual console width. */
* uv__tty_make_real_coord will clip it to the actual console width. */
x2 = 0xffff;
x2r = 0;
}
@ -1364,8 +1364,8 @@ static int uv_tty_clear(uv_tty_t* handle, int dir, char entire_screen,
return -1;
}
start = uv_tty_make_real_coord(handle, &info, x1, x1r, y1, y1r);
end = uv_tty_make_real_coord(handle, &info, x2, x2r, y2, y2r);
start = uv__tty_make_real_coord(handle, &info, x1, x1r, y1, y1r);
end = uv__tty_make_real_coord(handle, &info, x2, x2r, y2, y2r);
count = (end.Y * info.dwSize.X + end.X) -
(start.Y * info.dwSize.X + start.X) + 1;
@ -1400,7 +1400,7 @@ static int uv_tty_clear(uv_tty_t* handle, int dir, char entire_screen,
info.wAttributes |= bg >> 4; \
} while (0)
static int uv_tty_set_style(uv_tty_t* handle, DWORD* error) {
static int uv__tty_set_style(uv_tty_t* handle, DWORD* error) {
unsigned short argc = handle->tty.wr.ansi_csi_argc;
unsigned short* argv = handle->tty.wr.ansi_csi_argv;
int i;
@ -1556,7 +1556,7 @@ static int uv_tty_set_style(uv_tty_t* handle, DWORD* error) {
}
static int uv_tty_save_state(uv_tty_t* handle, unsigned char save_attributes,
static int uv__tty_save_state(uv_tty_t* handle, unsigned char save_attributes,
DWORD* error) {
CONSOLE_SCREEN_BUFFER_INFO info;
@ -1569,10 +1569,11 @@ static int uv_tty_save_state(uv_tty_t* handle, unsigned char save_attributes,
return -1;
}
uv_tty_update_virtual_window(&info);
uv__tty_update_virtual_window(&info);
handle->tty.wr.saved_position.X = info.dwCursorPosition.X;
handle->tty.wr.saved_position.Y = info.dwCursorPosition.Y - uv_tty_virtual_offset;
handle->tty.wr.saved_position.Y = info.dwCursorPosition.Y -
uv_tty_virtual_offset;
handle->flags |= UV_HANDLE_TTY_SAVED_POSITION;
if (save_attributes) {
@ -1585,7 +1586,7 @@ static int uv_tty_save_state(uv_tty_t* handle, unsigned char save_attributes,
}
static int uv_tty_restore_state(uv_tty_t* handle,
static int uv__tty_restore_state(uv_tty_t* handle,
unsigned char restore_attributes, DWORD* error) {
CONSOLE_SCREEN_BUFFER_INFO info;
WORD new_attributes;
@ -1595,7 +1596,7 @@ static int uv_tty_restore_state(uv_tty_t* handle,
}
if (handle->flags & UV_HANDLE_TTY_SAVED_POSITION) {
if (uv_tty_move_caret(handle,
if (uv__tty_move_caret(handle,
handle->tty.wr.saved_position.X,
0,
handle->tty.wr.saved_position.Y,
@ -1625,7 +1626,7 @@ static int uv_tty_restore_state(uv_tty_t* handle,
return 0;
}
static int uv_tty_set_cursor_visibility(uv_tty_t* handle,
static int uv__tty_set_cursor_visibility(uv_tty_t* handle,
BOOL visible,
DWORD* error) {
CONSOLE_CURSOR_INFO cursor_info;
@ -1645,7 +1646,7 @@ static int uv_tty_set_cursor_visibility(uv_tty_t* handle,
return 0;
}
static int uv_tty_set_cursor_shape(uv_tty_t* handle, int style, DWORD* error) {
static int uv__tty_set_cursor_shape(uv_tty_t* handle, int style, DWORD* error) {
CONSOLE_CURSOR_INFO cursor_info;
if (!GetConsoleCursorInfo(handle->handle, &cursor_info)) {
@ -1670,7 +1671,7 @@ static int uv_tty_set_cursor_shape(uv_tty_t* handle, int style, DWORD* error) {
}
static int uv_tty_write_bufs(uv_tty_t* handle,
static int uv__tty_write_bufs(uv_tty_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs,
DWORD* error) {
@ -1683,7 +1684,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
#define FLUSH_TEXT() \
do { \
if (utf16_buf_used > 0) { \
uv_tty_emit_text(handle, utf16_buf, utf16_buf_used, error); \
uv__tty_emit_text(handle, utf16_buf, utf16_buf_used, error); \
utf16_buf_used = 0; \
} \
} while (0)
@ -1802,21 +1803,21 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
case 'c':
/* Full console reset. */
FLUSH_TEXT();
uv_tty_reset(handle, error);
uv__tty_reset(handle, error);
ansi_parser_state = ANSI_NORMAL;
continue;
case '7':
/* Save the cursor position and text attributes. */
FLUSH_TEXT();
uv_tty_save_state(handle, 1, error);
uv__tty_save_state(handle, 1, error);
ansi_parser_state = ANSI_NORMAL;
continue;
case '8':
/* Restore the cursor position and text attributes */
FLUSH_TEXT();
uv_tty_restore_state(handle, 1, error);
uv__tty_restore_state(handle, 1, error);
ansi_parser_state = ANSI_NORMAL;
continue;
@ -1849,7 +1850,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
? handle->tty.wr.ansi_csi_argv[0] : 1;
if (style >= 0 && style <= 6) {
FLUSH_TEXT();
uv_tty_set_cursor_shape(handle, style, error);
uv__tty_set_cursor_shape(handle, style, error);
}
}
@ -1947,7 +1948,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
if (handle->tty.wr.ansi_csi_argc == 1 &&
handle->tty.wr.ansi_csi_argv[0] == 25) {
FLUSH_TEXT();
uv_tty_set_cursor_visibility(handle, 0, error);
uv__tty_set_cursor_visibility(handle, 0, error);
}
break;
@ -1956,7 +1957,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
if (handle->tty.wr.ansi_csi_argc == 1 &&
handle->tty.wr.ansi_csi_argv[0] == 25) {
FLUSH_TEXT();
uv_tty_set_cursor_visibility(handle, 1, error);
uv__tty_set_cursor_visibility(handle, 1, error);
}
break;
}
@ -1970,7 +1971,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
FLUSH_TEXT();
y = -(handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 1);
uv_tty_move_caret(handle, 0, 1, y, 1, error);
uv__tty_move_caret(handle, 0, 1, y, 1, error);
break;
case 'B':
@ -1978,7 +1979,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
FLUSH_TEXT();
y = handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 1;
uv_tty_move_caret(handle, 0, 1, y, 1, error);
uv__tty_move_caret(handle, 0, 1, y, 1, error);
break;
case 'C':
@ -1986,7 +1987,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
FLUSH_TEXT();
x = handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 1;
uv_tty_move_caret(handle, x, 1, 0, 1, error);
uv__tty_move_caret(handle, x, 1, 0, 1, error);
break;
case 'D':
@ -1994,7 +1995,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
FLUSH_TEXT();
x = -(handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 1);
uv_tty_move_caret(handle, x, 1, 0, 1, error);
uv__tty_move_caret(handle, x, 1, 0, 1, error);
break;
case 'E':
@ -2002,7 +2003,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
FLUSH_TEXT();
y = handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 1;
uv_tty_move_caret(handle, 0, 0, y, 1, error);
uv__tty_move_caret(handle, 0, 0, y, 1, error);
break;
case 'F':
@ -2010,7 +2011,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
FLUSH_TEXT();
y = -(handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 1);
uv_tty_move_caret(handle, 0, 0, y, 1, error);
uv__tty_move_caret(handle, 0, 0, y, 1, error);
break;
case 'G':
@ -2019,7 +2020,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
x = (handle->tty.wr.ansi_csi_argc >= 1 &&
handle->tty.wr.ansi_csi_argv[0])
? handle->tty.wr.ansi_csi_argv[0] - 1 : 0;
uv_tty_move_caret(handle, x, 0, 0, 1, error);
uv__tty_move_caret(handle, x, 0, 0, 1, error);
break;
case 'H':
@ -2032,7 +2033,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
x = (handle->tty.wr.ansi_csi_argc >= 2 &&
handle->tty.wr.ansi_csi_argv[1])
? handle->tty.wr.ansi_csi_argv[1] - 1 : 0;
uv_tty_move_caret(handle, x, 0, y, 0, error);
uv__tty_move_caret(handle, x, 0, y, 0, error);
break;
case 'J':
@ -2041,7 +2042,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
d = handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 0;
if (d >= 0 && d <= 2) {
uv_tty_clear(handle, d, 1, error);
uv__tty_clear(handle, d, 1, error);
}
break;
@ -2051,26 +2052,26 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
d = handle->tty.wr.ansi_csi_argc
? handle->tty.wr.ansi_csi_argv[0] : 0;
if (d >= 0 && d <= 2) {
uv_tty_clear(handle, d, 0, error);
uv__tty_clear(handle, d, 0, error);
}
break;
case 'm':
/* Set style */
FLUSH_TEXT();
uv_tty_set_style(handle, error);
uv__tty_set_style(handle, error);
break;
case 's':
/* Save the cursor position. */
FLUSH_TEXT();
uv_tty_save_state(handle, 0, error);
uv__tty_save_state(handle, 0, error);
break;
case 'u':
/* Restore the cursor position */
FLUSH_TEXT();
uv_tty_restore_state(handle, 0, error);
uv__tty_restore_state(handle, 0, error);
break;
}
}
@ -2179,7 +2180,7 @@ static int uv_tty_write_bufs(uv_tty_t* handle,
}
int uv_tty_write(uv_loop_t* loop,
int uv__tty_write(uv_loop_t* loop,
uv_write_t* req,
uv_tty_t* handle,
const uv_buf_t bufs[],
@ -2197,13 +2198,13 @@ int uv_tty_write(uv_loop_t* loop,
req->u.io.queued_bytes = 0;
if (!uv_tty_write_bufs(handle, bufs, nbufs, &error)) {
if (!uv__tty_write_bufs(handle, bufs, nbufs, &error)) {
SET_REQ_SUCCESS(req);
} else {
SET_REQ_ERROR(req, error);
}
uv_insert_pending_req(loop, (uv_req_t*) req);
uv__insert_pending_req(loop, (uv_req_t*) req);
return 0;
}
@ -2217,14 +2218,14 @@ int uv__tty_try_write(uv_tty_t* handle,
if (handle->stream.conn.write_reqs_pending > 0)
return UV_EAGAIN;
if (uv_tty_write_bufs(handle, bufs, nbufs, &error))
if (uv__tty_write_bufs(handle, bufs, nbufs, &error))
return uv_translate_sys_error(error);
return uv__count_bufs(bufs, nbufs);
}
void uv_process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
uv_write_t* req) {
int err;
@ -2239,17 +2240,17 @@ void uv_process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
handle->stream.conn.write_reqs_pending--;
if (handle->stream.conn.shutdown_req != NULL &&
handle->stream.conn.write_reqs_pending == 0) {
uv_want_endgame(loop, (uv_handle_t*)handle);
uv__want_endgame(loop, (uv_handle_t*)handle);
}
DECREASE_PENDING_REQ_COUNT(handle);
}
void uv_tty_close(uv_tty_t* handle) {
void uv__tty_close(uv_tty_t* handle) {
assert(handle->u.fd == -1 || handle->u.fd > 2);
if (handle->flags & UV_HANDLE_READING)
uv_tty_read_stop(handle);
uv__tty_read_stop(handle);
if (handle->u.fd == -1)
CloseHandle(handle->handle);
@ -2262,12 +2263,12 @@ void uv_tty_close(uv_tty_t* handle) {
uv__handle_closing(handle);
if (handle->reqs_pending == 0) {
uv_want_endgame(handle->loop, (uv_handle_t*) handle);
uv__want_endgame(handle->loop, (uv_handle_t*) handle);
}
}
void uv_tty_endgame(uv_loop_t* loop, uv_tty_t* handle) {
void uv__tty_endgame(uv_loop_t* loop, uv_tty_t* handle) {
if (!(handle->flags & UV_HANDLE_TTY_READABLE) &&
handle->stream.conn.shutdown_req != NULL &&
handle->stream.conn.write_reqs_pending == 0) {
@ -2302,20 +2303,20 @@ void uv_tty_endgame(uv_loop_t* loop, uv_tty_t* handle) {
/*
* uv_process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working
* uv__process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working
* TODO: find a way to remove it
*/
void uv_process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
uv_req_t* raw_req) {
abort();
}
/*
* uv_process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working
* uv__process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working
* TODO: find a way to remove it
*/
void uv_process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
void uv__process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
uv_connect_t* req) {
abort();
}


@ -60,7 +60,7 @@ int uv_udp_getsockname(const uv_udp_t* handle,
}
static int uv_udp_set_socket(uv_loop_t* loop, uv_udp_t* handle, SOCKET socket,
static int uv__udp_set_socket(uv_loop_t* loop, uv_udp_t* handle, SOCKET socket,
int family) {
DWORD yes = 1;
WSAPROTOCOL_INFOW info;
@ -106,8 +106,8 @@ static int uv_udp_set_socket(uv_loop_t* loop, uv_udp_t* handle, SOCKET socket,
FILE_SKIP_SET_EVENT_ON_HANDLE |
FILE_SKIP_COMPLETION_PORT_ON_SUCCESS)) {
handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP;
handle->func_wsarecv = uv_wsarecv_workaround;
handle->func_wsarecvfrom = uv_wsarecvfrom_workaround;
handle->func_wsarecv = uv__wsarecv_workaround;
handle->func_wsarecvfrom = uv__wsarecvfrom_workaround;
} else if (GetLastError() != ERROR_INVALID_FUNCTION) {
return GetLastError();
}
@ -155,7 +155,7 @@ int uv__udp_init_ex(uv_loop_t* loop,
return uv_translate_sys_error(err);
}
err = uv_udp_set_socket(handle->loop, handle, sock, domain);
err = uv__udp_set_socket(handle->loop, handle, sock, domain);
if (err) {
closesocket(sock);
QUEUE_REMOVE(&handle->handle_queue);
@ -167,7 +167,7 @@ int uv__udp_init_ex(uv_loop_t* loop,
}
void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle) {
void uv__udp_close(uv_loop_t* loop, uv_udp_t* handle) {
uv_udp_recv_stop(handle);
closesocket(handle->socket);
handle->socket = INVALID_SOCKET;
@ -175,12 +175,12 @@ void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle) {
uv__handle_closing(handle);
if (handle->reqs_pending == 0) {
uv_want_endgame(loop, (uv_handle_t*) handle);
uv__want_endgame(loop, (uv_handle_t*) handle);
}
}
void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle) {
void uv__udp_endgame(uv_loop_t* loop, uv_udp_t* handle) {
if (handle->flags & UV_HANDLE_CLOSING &&
handle->reqs_pending == 0) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
@ -194,10 +194,10 @@ int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
}
static int uv_udp_maybe_bind(uv_udp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen,
unsigned int flags) {
static int uv__udp_maybe_bind(uv_udp_t* handle,
const struct sockaddr* addr,
unsigned int addrlen,
unsigned int flags) {
int r;
int err;
DWORD no = 0;
@ -216,7 +216,7 @@ static int uv_udp_maybe_bind(uv_udp_t* handle,
return WSAGetLastError();
}
err = uv_udp_set_socket(handle->loop, handle, sock, addr->sa_family);
err = uv__udp_set_socket(handle->loop, handle, sock, addr->sa_family);
if (err) {
closesocket(sock);
return err;
@ -264,7 +264,7 @@ static int uv_udp_maybe_bind(uv_udp_t* handle,
}
static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
static void uv__udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
uv_req_t* req;
uv_buf_t buf;
DWORD bytes, flags;
@ -311,7 +311,7 @@ static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
handle->flags |= UV_HANDLE_READ_PENDING;
req->u.io.overlapped.InternalHigh = bytes;
handle->reqs_pending++;
uv_insert_pending_req(loop, req);
uv__insert_pending_req(loop, req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* The req will be processed with IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
@ -319,7 +319,7 @@ static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
} else {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, req);
uv__insert_pending_req(loop, req);
handle->reqs_pending++;
}
@ -343,7 +343,7 @@ static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
handle->flags |= UV_HANDLE_READ_PENDING;
req->u.io.overlapped.InternalHigh = bytes;
handle->reqs_pending++;
uv_insert_pending_req(loop, req);
uv__insert_pending_req(loop, req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* The req will be processed with IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
@ -351,7 +351,7 @@ static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
} else {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv_insert_pending_req(loop, req);
uv__insert_pending_req(loop, req);
handle->reqs_pending++;
}
}
@ -367,10 +367,10 @@ int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb,
return UV_EALREADY;
}
err = uv_udp_maybe_bind(handle,
(const struct sockaddr*) &uv_addr_ip4_any_,
sizeof(uv_addr_ip4_any_),
0);
err = uv__udp_maybe_bind(handle,
(const struct sockaddr*) &uv_addr_ip4_any_,
sizeof(uv_addr_ip4_any_),
0);
if (err)
return uv_translate_sys_error(err);
@ -384,7 +384,7 @@ int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb,
/* If reading was stopped and then started again, there could still be a recv
* request pending. */
if (!(handle->flags & UV_HANDLE_READ_PENDING))
uv_udp_queue_recv(loop, handle);
uv__udp_queue_recv(loop, handle);
return 0;
}
@ -433,7 +433,7 @@ static int uv__send(uv_udp_send_t* req,
handle->send_queue_size += req->u.io.queued_bytes;
handle->send_queue_count++;
REGISTER_HANDLE_REQ(loop, handle, req);
uv_insert_pending_req(loop, (uv_req_t*)req);
uv__insert_pending_req(loop, (uv_req_t*)req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* Request queued by the kernel. */
req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
@ -450,7 +450,7 @@ static int uv__send(uv_udp_send_t* req,
}
void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle,
void uv__process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle,
uv_req_t* req) {
uv_buf_t buf;
int partial;
@ -554,14 +554,14 @@ done:
/* Post another read if still reading and not closing. */
if ((handle->flags & UV_HANDLE_READING) &&
!(handle->flags & UV_HANDLE_READ_PENDING)) {
uv_udp_queue_recv(loop, handle);
uv__udp_queue_recv(loop, handle);
}
DECREASE_PENDING_REQ_COUNT(handle);
}
void uv_process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
void uv__process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
uv_udp_send_t* req) {
int err;
@ -598,10 +598,10 @@ static int uv__udp_set_membership4(uv_udp_t* handle,
return UV_EINVAL;
/* If the socket is unbound, bind to inaddr_any. */
err = uv_udp_maybe_bind(handle,
(const struct sockaddr*) &uv_addr_ip4_any_,
sizeof(uv_addr_ip4_any_),
UV_UDP_REUSEADDR);
err = uv__udp_maybe_bind(handle,
(const struct sockaddr*) &uv_addr_ip4_any_,
sizeof(uv_addr_ip4_any_),
UV_UDP_REUSEADDR);
if (err)
return uv_translate_sys_error(err);
@ -652,10 +652,10 @@ int uv__udp_set_membership6(uv_udp_t* handle,
if ((handle->flags & UV_HANDLE_BOUND) && !(handle->flags & UV_HANDLE_IPV6))
return UV_EINVAL;
err = uv_udp_maybe_bind(handle,
(const struct sockaddr*) &uv_addr_ip6_any_,
sizeof(uv_addr_ip6_any_),
UV_UDP_REUSEADDR);
err = uv__udp_maybe_bind(handle,
(const struct sockaddr*) &uv_addr_ip6_any_,
sizeof(uv_addr_ip6_any_),
UV_UDP_REUSEADDR);
if (err)
return uv_translate_sys_error(err);
@ -708,10 +708,10 @@ static int uv__udp_set_source_membership4(uv_udp_t* handle,
return UV_EINVAL;
/* If the socket is unbound, bind to inaddr_any. */
err = uv_udp_maybe_bind(handle,
(const struct sockaddr*) &uv_addr_ip4_any_,
sizeof(uv_addr_ip4_any_),
UV_UDP_REUSEADDR);
err = uv__udp_maybe_bind(handle,
(const struct sockaddr*) &uv_addr_ip4_any_,
sizeof(uv_addr_ip4_any_),
UV_UDP_REUSEADDR);
if (err)
return uv_translate_sys_error(err);
@ -763,10 +763,10 @@ int uv__udp_set_source_membership6(uv_udp_t* handle,
if ((handle->flags & UV_HANDLE_BOUND) && !(handle->flags & UV_HANDLE_IPV6))
return UV_EINVAL;
err = uv_udp_maybe_bind(handle,
(const struct sockaddr*) &uv_addr_ip6_any_,
sizeof(uv_addr_ip6_any_),
UV_UDP_REUSEADDR);
err = uv__udp_maybe_bind(handle,
(const struct sockaddr*) &uv_addr_ip6_any_,
sizeof(uv_addr_ip6_any_),
UV_UDP_REUSEADDR);
if (err)
return uv_translate_sys_error(err);
@ -962,10 +962,10 @@ int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
return uv_translate_sys_error(GetLastError());
}
err = uv_udp_set_socket(handle->loop,
handle,
sock,
protocol_info.iAddressFamily);
err = uv__udp_set_socket(handle->loop,
handle,
sock,
protocol_info.iAddressFamily);
if (err)
return uv_translate_sys_error(err);
@ -1044,7 +1044,7 @@ int uv__udp_bind(uv_udp_t* handle,
unsigned int flags) {
int err;
err = uv_udp_maybe_bind(handle, addr, addrlen, flags);
err = uv__udp_maybe_bind(handle, addr, addrlen, flags);
if (err)
return uv_translate_sys_error(err);
@ -1066,7 +1066,7 @@ int uv__udp_connect(uv_udp_t* handle,
else
return UV_EINVAL;
err = uv_udp_maybe_bind(handle, bind_addr, addrlen, 0);
err = uv__udp_maybe_bind(handle, bind_addr, addrlen, 0);
if (err)
return uv_translate_sys_error(err);
}
@ -1117,7 +1117,7 @@ int uv__udp_send(uv_udp_send_t* req,
else
return UV_EINVAL;
err = uv_udp_maybe_bind(handle, bind_addr, addrlen, 0);
err = uv__udp_maybe_bind(handle, bind_addr, addrlen, 0);
if (err)
return uv_translate_sys_error(err);
}
@ -1159,7 +1159,7 @@ int uv__udp_try_send(uv_udp_t* handle,
bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_;
else
return UV_EINVAL;
err = uv_udp_maybe_bind(handle, bind_addr, addrlen, 0);
err = uv__udp_maybe_bind(handle, bind_addr, addrlen, 0);
if (err)
return uv_translate_sys_error(err);
}


@ -531,103 +531,25 @@ int uv_resident_set_memory(size_t* rss) {
int uv_uptime(double* uptime) {
BYTE stack_buffer[4096];
BYTE* malloced_buffer = NULL;
BYTE* buffer = (BYTE*) stack_buffer;
size_t buffer_size = sizeof(stack_buffer);
DWORD data_size;
*uptime = GetTickCount64() / 1000.0;
return 0;
}
PERF_DATA_BLOCK* data_block;
PERF_OBJECT_TYPE* object_type;
PERF_COUNTER_DEFINITION* counter_definition;
DWORD i;
unsigned int uv_available_parallelism(void) {
SYSTEM_INFO info;
unsigned rc;
for (;;) {
LONG result;
/* TODO(bnoordhuis) Use GetLogicalProcessorInformationEx() to support systems
* with > 64 CPUs? See https://github.com/libuv/libuv/pull/3458
*/
GetSystemInfo(&info);
data_size = (DWORD) buffer_size;
result = RegQueryValueExW(HKEY_PERFORMANCE_DATA,
L"2",
NULL,
NULL,
buffer,
&data_size);
if (result == ERROR_SUCCESS) {
break;
} else if (result != ERROR_MORE_DATA) {
*uptime = 0;
return uv_translate_sys_error(result);
}
rc = info.dwNumberOfProcessors;
if (rc < 1)
rc = 1;
buffer_size *= 2;
/* Don't let the buffer grow infinitely. */
if (buffer_size > 1 << 20) {
goto internalError;
}
uv__free(malloced_buffer);
buffer = malloced_buffer = (BYTE*) uv__malloc(buffer_size);
if (malloced_buffer == NULL) {
*uptime = 0;
return UV_ENOMEM;
}
}
if (data_size < sizeof(*data_block))
goto internalError;
data_block = (PERF_DATA_BLOCK*) buffer;
if (wmemcmp(data_block->Signature, L"PERF", 4) != 0)
goto internalError;
if (data_size < data_block->HeaderLength + sizeof(*object_type))
goto internalError;
object_type = (PERF_OBJECT_TYPE*) (buffer + data_block->HeaderLength);
if (object_type->NumInstances != PERF_NO_INSTANCES)
goto internalError;
counter_definition = (PERF_COUNTER_DEFINITION*) (buffer +
data_block->HeaderLength + object_type->HeaderLength);
for (i = 0; i < object_type->NumCounters; i++) {
if ((BYTE*) counter_definition + sizeof(*counter_definition) >
buffer + data_size) {
break;
}
if (counter_definition->CounterNameTitleIndex == 674 &&
counter_definition->CounterSize == sizeof(uint64_t)) {
if (counter_definition->CounterOffset + sizeof(uint64_t) > data_size ||
!(counter_definition->CounterType & PERF_OBJECT_TIMER)) {
goto internalError;
} else {
BYTE* address = (BYTE*) object_type + object_type->DefinitionLength +
counter_definition->CounterOffset;
uint64_t value = *((uint64_t*) address);
*uptime = floor((double) (object_type->PerfTime.QuadPart - value) /
(double) object_type->PerfFreq.QuadPart);
uv__free(malloced_buffer);
return 0;
}
}
counter_definition = (PERF_COUNTER_DEFINITION*)
((BYTE*) counter_definition + counter_definition->ByteLength);
}
/* If we get here, the uptime value was not found. */
uv__free(malloced_buffer);
*uptime = 0;
return UV_ENOSYS;
internalError:
uv__free(malloced_buffer);
*uptime = 0;
return UV_EIO;
return rc;
}
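
A short usage sketch for the two functions touched above; uv_uptime() and uv_available_parallelism() are public libuv API (the latter is new in 1.44.0), while the printf/error handling here is illustrative only:

#include <uv.h>
#include <stdio.h>

int main(void) {
  double uptime;
  int r;

  r = uv_uptime(&uptime);  /* on Windows this is now just GetTickCount64() / 1000.0 */
  if (r != 0) {
    fprintf(stderr, "uv_uptime: %s\n", uv_strerror(r));
    return 1;
  }
  printf("uptime: %.0f s\n", uptime);

  /* Clamped to at least 1, so it is always a usable worker/pool count. */
  printf("available parallelism: %u\n", uv_available_parallelism());
  return 0;
}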


@ -48,7 +48,7 @@ sSetWinEventHook pSetWinEventHook;
/* ws2_32.dll function pointer */
uv_sGetHostNameW pGetHostNameW;
void uv_winapi_init(void) {
void uv__winapi_init(void) {
HMODULE ntdll_module;
HMODULE powrprof_module;
HMODULE user32_module;
@ -126,19 +126,19 @@ void uv_winapi_init(void) {
kernel32_module,
"GetQueuedCompletionStatusEx");
powrprof_module = LoadLibraryA("powrprof.dll");
powrprof_module = LoadLibraryExA("powrprof.dll", NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
if (powrprof_module != NULL) {
pPowerRegisterSuspendResumeNotification = (sPowerRegisterSuspendResumeNotification)
GetProcAddress(powrprof_module, "PowerRegisterSuspendResumeNotification");
}
user32_module = LoadLibraryA("user32.dll");
user32_module = GetModuleHandleA("user32.dll");
if (user32_module != NULL) {
pSetWinEventHook = (sSetWinEventHook)
GetProcAddress(user32_module, "SetWinEventHook");
}
ws2_32_module = LoadLibraryA("ws2_32.dll");
ws2_32_module = GetModuleHandleA("ws2_32.dll");
if (ws2_32_module != NULL) {
pGetHostNameW = (uv_sGetHostNameW) GetProcAddress(
ws2_32_module,
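
The hunk above replaces LoadLibraryA with two safer lookups: GetModuleHandleA for modules the process already has mapped (user32.dll, ws2_32.dll) and LoadLibraryExA with LOAD_LIBRARY_SEARCH_SYSTEM32 for powrprof.dll, so that DLL can only be resolved from System32. A minimal sketch of the pattern, with a hypothetical helper name and without the per-DLL choices libuv actually makes:

#include <windows.h>

static FARPROC find_system_export(const char* dll, const char* symbol) {
  /* Prefer a module that is already mapped into the process. */
  HMODULE module = GetModuleHandleA(dll);
  if (module == NULL) {
    /* Otherwise load it from System32 only, which defeats DLL
     * search-order hijacking via a DLL planted next to the executable. */
    module = LoadLibraryExA(dll, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
    if (module == NULL)
      return NULL;
  }
  return GetProcAddress(module, symbol);
}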


@ -38,7 +38,7 @@ struct sockaddr_in6 uv_addr_ip6_any_;
/*
* Retrieves the pointer to a winsock extension function.
*/
static BOOL uv_get_extension_function(SOCKET socket, GUID guid,
static BOOL uv__get_extension_function(SOCKET socket, GUID guid,
void **target) {
int result;
DWORD bytes;
@ -62,20 +62,20 @@ static BOOL uv_get_extension_function(SOCKET socket, GUID guid,
}
BOOL uv_get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target) {
BOOL uv__get_acceptex_function(SOCKET socket, LPFN_ACCEPTEX* target) {
const GUID wsaid_acceptex = WSAID_ACCEPTEX;
return uv_get_extension_function(socket, wsaid_acceptex, (void**)target);
return uv__get_extension_function(socket, wsaid_acceptex, (void**)target);
}
BOOL uv_get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target) {
BOOL uv__get_connectex_function(SOCKET socket, LPFN_CONNECTEX* target) {
const GUID wsaid_connectex = WSAID_CONNECTEX;
return uv_get_extension_function(socket, wsaid_connectex, (void**)target);
return uv__get_extension_function(socket, wsaid_connectex, (void**)target);
}
void uv_winsock_init(void) {
void uv__winsock_init(void) {
WSADATA wsa_data;
int errorno;
SOCKET dummy;
@ -134,7 +134,7 @@ void uv_winsock_init(void) {
}
int uv_ntstatus_to_winsock_error(NTSTATUS status) {
int uv__ntstatus_to_winsock_error(NTSTATUS status) {
switch (status) {
case STATUS_SUCCESS:
return ERROR_SUCCESS;
@ -267,7 +267,7 @@ int uv_ntstatus_to_winsock_error(NTSTATUS status) {
* the user to use the default msafd driver, doesn't work when other LSPs are
* stacked on top of it.
*/
int WSAAPI uv_wsarecv_workaround(SOCKET socket, WSABUF* buffers,
int WSAAPI uv__wsarecv_workaround(SOCKET socket, WSABUF* buffers,
DWORD buffer_count, DWORD* bytes, DWORD* flags, WSAOVERLAPPED *overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine) {
NTSTATUS status;
@ -346,7 +346,7 @@ int WSAAPI uv_wsarecv_workaround(SOCKET socket, WSABUF* buffers,
break;
default:
error = uv_ntstatus_to_winsock_error(status);
error = uv__ntstatus_to_winsock_error(status);
break;
}
@ -360,8 +360,8 @@ int WSAAPI uv_wsarecv_workaround(SOCKET socket, WSABUF* buffers,
}
/* See description of uv_wsarecv_workaround. */
int WSAAPI uv_wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
/* See description of uv__wsarecv_workaround. */
int WSAAPI uv__wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
DWORD buffer_count, DWORD* bytes, DWORD* flags, struct sockaddr* addr,
int* addr_len, WSAOVERLAPPED *overlapped,
LPWSAOVERLAPPED_COMPLETION_ROUTINE completion_routine) {
@ -444,7 +444,7 @@ int WSAAPI uv_wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
break;
default:
error = uv_ntstatus_to_winsock_error(status);
error = uv__ntstatus_to_winsock_error(status);
break;
}
@ -458,7 +458,7 @@ int WSAAPI uv_wsarecvfrom_workaround(SOCKET socket, WSABUF* buffers,
}
int WSAAPI uv_msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in,
int WSAAPI uv__msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in,
AFD_POLL_INFO* info_out, OVERLAPPED* overlapped) {
IO_STATUS_BLOCK iosb;
IO_STATUS_BLOCK* iosb_ptr;
@ -531,7 +531,7 @@ int WSAAPI uv_msafd_poll(SOCKET socket, AFD_POLL_INFO* info_in,
break;
default:
error = uv_ntstatus_to_winsock_error(status);
error = uv__ntstatus_to_winsock_error(status);
break;
}


@ -23,7 +23,9 @@ BENCHMARK_DECLARE (sizes)
BENCHMARK_DECLARE (loop_count)
BENCHMARK_DECLARE (loop_count_timed)
BENCHMARK_DECLARE (ping_pongs)
BENCHMARK_DECLARE (ping_udp)
BENCHMARK_DECLARE (ping_udp1)
BENCHMARK_DECLARE (ping_udp10)
BENCHMARK_DECLARE (ping_udp100)
BENCHMARK_DECLARE (tcp_write_batch)
BENCHMARK_DECLARE (tcp4_pound_100)
BENCHMARK_DECLARE (tcp4_pound_1000)
@ -72,6 +74,7 @@ BENCHMARK_DECLARE (async_pummel_1)
BENCHMARK_DECLARE (async_pummel_2)
BENCHMARK_DECLARE (async_pummel_4)
BENCHMARK_DECLARE (async_pummel_8)
BENCHMARK_DECLARE (queue_work)
BENCHMARK_DECLARE (spawn)
BENCHMARK_DECLARE (thread_create)
BENCHMARK_DECLARE (million_async)
@ -90,6 +93,10 @@ TASK_LIST_START
BENCHMARK_ENTRY (ping_pongs)
BENCHMARK_HELPER (ping_pongs, tcp4_echo_server)
BENCHMARK_ENTRY (ping_udp1)
BENCHMARK_ENTRY (ping_udp10)
BENCHMARK_ENTRY (ping_udp100)
BENCHMARK_ENTRY (tcp_write_batch)
BENCHMARK_HELPER (tcp_write_batch, tcp4_blackhole_server)
@ -155,6 +162,7 @@ TASK_LIST_START
BENCHMARK_ENTRY (async_pummel_2)
BENCHMARK_ENTRY (async_pummel_4)
BENCHMARK_ENTRY (async_pummel_8)
BENCHMARK_ENTRY (queue_work)
BENCHMARK_ENTRY (spawn)
BENCHMARK_ENTRY (thread_create)


@ -94,6 +94,9 @@ static void pinger_read_cb(uv_udp_t* udp,
pinger_t* pinger;
pinger = (pinger_t*)udp->data;
/* No data here means something went wrong */
ASSERT(nread > 0);
/* Now we count the pings */
for (i = 0; i < nread; i++) {
ASSERT(buf->base[i] == PING[pinger->state]);
@ -108,7 +111,8 @@ static void pinger_read_cb(uv_udp_t* udp,
}
}
buf_free(buf);
if (buf && !(flags & UV_UDP_MMSG_CHUNK))
buf_free(buf);
}
static void udp_pinger_new(void) {
@ -122,6 +126,8 @@ static void udp_pinger_new(void) {
/* Try to do NUM_PINGS ping-pongs (connection-less). */
r = uv_udp_init(loop, &pinger->udp);
ASSERT(r == 0);
r = uv_udp_bind(&pinger->udp, (const struct sockaddr*) &pinger->server_addr, 0);
ASSERT(r == 0);
pinger->udp.data = pinger;

68
deps/libuv/test/benchmark-queue-work.c vendored Normal file

@ -0,0 +1,68 @@
/* Copyright libuv contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "task.h"
#include "uv.h"
static int done = 0;
static unsigned events = 0;
static unsigned result;
static unsigned fastrand(void) {
static unsigned g = 0;
g = g * 214013 + 2531011;
return g;
}
static void work_cb(uv_work_t* req) {
req->data = &result;
*(unsigned*)req->data = fastrand();
}
static void after_work_cb(uv_work_t* req, int status) {
events++;
if (!done)
ASSERT_EQ(0, uv_queue_work(req->loop, req, work_cb, after_work_cb));
}
static void timer_cb(uv_timer_t* handle) { done = 1; }
BENCHMARK_IMPL(queue_work) {
uv_timer_t timer_handle;
uv_work_t work;
uv_loop_t* loop;
int timeout;
loop = uv_default_loop();
timeout = 5000;
ASSERT_EQ(0, uv_timer_init(loop, &timer_handle));
ASSERT_EQ(0, uv_timer_start(&timer_handle, timer_cb, timeout, 0));
ASSERT_EQ(0, uv_queue_work(loop, &work, work_cb, after_work_cb));
ASSERT_EQ(0, uv_run(loop, UV_RUN_DEFAULT));
printf("%s async jobs in %.1f seconds (%s/s)\n", fmt(events), timeout / 1000.,
fmt(events / (timeout / 1000.)));
MAKE_VALGRIND_HAPPY();
return 0;
}


@ -49,7 +49,6 @@ __attribute__((constructor)) void init() {
int ipc_helper(int listen_after_write);
int ipc_helper_heavy_traffic_deadlock_bug(void);
int ipc_helper_tcp_connection(void);
int ipc_helper_closed_handle(void);
int ipc_send_recv_helper(void);
int ipc_helper_bind_twice(void);
int ipc_helper_send_zero(void);
@ -119,10 +118,6 @@ static int maybe_run_test(int argc, char **argv) {
return ipc_helper_tcp_connection();
}
if (strcmp(argv[1], "ipc_helper_closed_handle") == 0) {
return ipc_helper_closed_handle();
}
if (strcmp(argv[1], "ipc_helper_bind_twice") == 0) {
return ipc_helper_bind_twice();
}


@ -333,8 +333,8 @@ int process_wait(process_info_t* vec, int n, int timeout) {
abort();
terminate:
close(args.pipe[0]);
close(args.pipe[1]);
closefd(args.pipe[0]);
closefd(args.pipe[1]);
return retval;
}


@ -851,7 +851,12 @@ static void check_utime(const char* path,
#endif
st_atim = s->st_atim.tv_sec + s->st_atim.tv_nsec / 1e9;
st_mtim = s->st_mtim.tv_sec + s->st_mtim.tv_nsec / 1e9;
ASSERT_DOUBLE_EQ(st_atim, atime);
/*
* Linux does not allow reliably reading the atime of a symlink,
* since readlink() can update it.
*/
if (!test_lutime)
ASSERT_DOUBLE_EQ(st_atim, atime);
ASSERT_DOUBLE_EQ(st_mtim, mtime);
}


@ -22,6 +22,10 @@
#include "uv.h"
#include "task.h"
#include <string.h>
#ifndef _WIN32
#include <unistd.h>
#include <sys/types.h>
#endif
TEST_IMPL(get_passwd) {
/* TODO(gengjiawen): Fix test on QEMU. */
@ -64,11 +68,15 @@ TEST_IMPL(get_passwd) {
#endif
#ifdef _WIN32
ASSERT(pwd.uid == -1);
ASSERT(pwd.gid == -1);
ASSERT_EQ(pwd.uid, (unsigned)-1);
ASSERT_EQ(pwd.gid, (unsigned)-1);
#else
ASSERT(pwd.uid >= 0);
ASSERT(pwd.gid >= 0);
ASSERT_NE(pwd.uid, (unsigned)-1);
ASSERT_NE(pwd.gid, (unsigned)-1);
ASSERT_EQ(pwd.uid, geteuid());
if (pwd.uid != 0 && pwd.gid != getgid())
/* This is likely to be true, as only root could have changed it. */
ASSERT_EQ(pwd.gid, getegid());
#endif
/* Test uv_os_free_passwd() */


@ -308,8 +308,12 @@ static void read_cb(uv_stream_t* handle,
return;
}
ASSERT_GE(nread, 0);
pipe = (uv_pipe_t*) handle;
do {
ASSERT_EQ(pipe, &ctx2.channel);
while (uv_pipe_pending_count(pipe) > 0) {
if (++read_cb_count == 2) {
recv = &ctx2.recv;
write_req = &ctx2.write_req;
@ -318,10 +322,6 @@ static void read_cb(uv_stream_t* handle,
write_req = &ctx2.write_req2;
}
ASSERT(pipe == &ctx2.channel);
ASSERT(nread >= 0);
ASSERT(uv_pipe_pending_count(pipe) > 0);
pending = uv_pipe_pending_type(pipe);
ASSERT(pending == UV_NAMED_PIPE || pending == UV_TCP);
@ -344,7 +344,7 @@ static void read_cb(uv_stream_t* handle,
&recv->stream,
write2_cb);
ASSERT(r == 0);
} while (uv_pipe_pending_count(pipe) > 0);
}
}
static void send_recv_start(void) {


@ -45,8 +45,6 @@ static int close_cb_called;
static int connection_accepted;
static int tcp_conn_read_cb_called;
static int tcp_conn_write_cb_called;
static int closed_handle_data_read;
static int closed_handle_write;
static int send_zero_write;
typedef struct {
@ -57,15 +55,6 @@ typedef struct {
#define CONN_COUNT 100
#define BACKLOG 128
#define LARGE_SIZE 100000
static uv_buf_t large_buf;
static char buffer[LARGE_SIZE];
static uv_write_t write_reqs[300];
static int write_reqs_completed;
static unsigned int write_until_data_queued(void);
static void send_handle_and_close(void);
static void close_server_conn_cb(uv_handle_t* handle) {
@ -417,26 +406,6 @@ static void on_read_connection(uv_stream_t* handle,
}
#ifndef _WIN32
static void on_read_closed_handle(uv_stream_t* handle,
ssize_t nread,
const uv_buf_t* buf) {
if (nread == 0 || nread == UV_EOF) {
free(buf->base);
return;
}
if (nread < 0) {
printf("error recving on channel: %s\n", uv_strerror(nread));
abort();
}
closed_handle_data_read += nread;
free(buf->base);
}
#endif
static void on_read_send_zero(uv_stream_t* handle,
ssize_t nread,
const uv_buf_t* buf) {
@ -498,15 +467,6 @@ TEST_IMPL(ipc_tcp_connection) {
return r;
}
#ifndef _WIN32
TEST_IMPL(ipc_closed_handle) {
int r;
r = run_ipc_test("ipc_helper_closed_handle", on_read_closed_handle);
ASSERT_EQ(r, 0);
return 0;
}
#endif
#ifdef _WIN32
TEST_IMPL(listen_with_simultaneous_accepts) {
@ -602,23 +562,6 @@ static void tcp_connection_write_cb(uv_write_t* req, int status) {
}
static void closed_handle_large_write_cb(uv_write_t* req, int status) {
ASSERT_EQ(status, 0);
ASSERT(closed_handle_data_read = LARGE_SIZE);
if (++write_reqs_completed == ARRAY_SIZE(write_reqs)) {
write_reqs_completed = 0;
if (write_until_data_queued() > 0)
send_handle_and_close();
}
}
static void closed_handle_write_cb(uv_write_t* req, int status) {
ASSERT_EQ(status, UV_EBADF);
closed_handle_write = 1;
}
static void send_zero_write_cb(uv_write_t* req, int status) {
ASSERT_EQ(status, 0);
send_zero_write++;
@ -835,76 +778,6 @@ int ipc_helper_tcp_connection(void) {
return 0;
}
static unsigned int write_until_data_queued() {
unsigned int i;
int r;
i = 0;
do {
r = uv_write(&write_reqs[i],
(uv_stream_t*)&channel,
&large_buf,
1,
closed_handle_large_write_cb);
ASSERT_EQ(r, 0);
i++;
} while (channel.write_queue_size == 0 &&
i < ARRAY_SIZE(write_reqs));
return channel.write_queue_size;
}
static void send_handle_and_close() {
int r;
struct sockaddr_in addr;
r = uv_tcp_init(uv_default_loop(), &tcp_server);
ASSERT_EQ(r, 0);
ASSERT_EQ(0, uv_ip4_addr("0.0.0.0", TEST_PORT, &addr));
r = uv_tcp_bind(&tcp_server, (const struct sockaddr*) &addr, 0);
ASSERT_EQ(r, 0);
r = uv_write2(&write_req,
(uv_stream_t*)&channel,
&large_buf,
1,
(uv_stream_t*)&tcp_server,
closed_handle_write_cb);
ASSERT_EQ(r, 0);
uv_close((uv_handle_t*)&tcp_server, NULL);
}
int ipc_helper_closed_handle(void) {
int r;
memset(buffer, '.', LARGE_SIZE);
large_buf = uv_buf_init(buffer, LARGE_SIZE);
r = uv_pipe_init(uv_default_loop(), &channel, 1);
ASSERT_EQ(r, 0);
uv_pipe_open(&channel, 0);
ASSERT_EQ(1, uv_is_readable((uv_stream_t*) &channel));
ASSERT_EQ(1, uv_is_writable((uv_stream_t*) &channel));
ASSERT_EQ(0, uv_is_closing((uv_handle_t*) &channel));
if (write_until_data_queued() > 0)
send_handle_and_close();
r = uv_run(uv_default_loop(), UV_RUN_DEFAULT);
ASSERT_EQ(r, 0);
ASSERT_EQ(closed_handle_write, 1);
MAKE_VALGRIND_HAPPY();
return 0;
}
int ipc_helper_bind_twice(void) {
/*
* This is launched from test-ipc.c. stdin is a duplex channel


@ -91,9 +91,6 @@ TEST_DECLARE (ipc_send_recv_tcp)
TEST_DECLARE (ipc_send_recv_tcp_inprocess)
TEST_DECLARE (ipc_tcp_connection)
TEST_DECLARE (ipc_send_zero)
#ifndef _WIN32
TEST_DECLARE (ipc_closed_handle)
#endif
TEST_DECLARE (tcp_alloc_cb_fail)
TEST_DECLARE (tcp_ping_pong)
TEST_DECLARE (tcp_ping_pong_vec)
@ -320,6 +317,7 @@ TEST_DECLARE (spawn_reads_child_path)
TEST_DECLARE (spawn_inherit_streams)
TEST_DECLARE (spawn_quoted_path)
TEST_DECLARE (spawn_tcp_server)
TEST_DECLARE (spawn_exercise_sigchld_issue)
TEST_DECLARE (fs_poll)
TEST_DECLARE (fs_poll_getpath)
TEST_DECLARE (fs_poll_close_request)
@ -627,9 +625,6 @@ TASK_LIST_START
TEST_ENTRY (ipc_send_recv_tcp_inprocess)
TEST_ENTRY (ipc_tcp_connection)
TEST_ENTRY (ipc_send_zero)
#ifndef _WIN32
TEST_ENTRY (ipc_closed_handle)
#endif
TEST_ENTRY (tcp_alloc_cb_fail)
@ -950,6 +945,7 @@ TASK_LIST_START
TEST_ENTRY (spawn_inherit_streams)
TEST_ENTRY (spawn_quoted_path)
TEST_ENTRY (spawn_tcp_server)
TEST_ENTRY (spawn_exercise_sigchld_issue)
TEST_ENTRY (fs_poll)
TEST_ENTRY (fs_poll_getpath)
TEST_ENTRY (fs_poll_close_request)


@ -28,7 +28,7 @@ TEST_IMPL(loop_update_time) {
start = uv_now(uv_default_loop());
while (uv_now(uv_default_loop()) - start < 1000)
ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_NOWAIT));
ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_NOWAIT));
MAKE_VALGRIND_HAPPY();
return 0;
@ -43,20 +43,26 @@ TEST_IMPL(loop_backend_timeout) {
uv_timer_t timer;
int r;
r = uv_timer_init(loop, &timer);
ASSERT(r == 0);
/* The default loop has some internal watchers to initialize. */
loop->active_handles++;
r = uv_run(loop, UV_RUN_NOWAIT);
ASSERT_EQ(r, 1);
loop->active_handles--;
ASSERT_EQ(uv_loop_alive(loop), 0);
ASSERT(!uv_loop_alive(loop));
ASSERT(uv_backend_timeout(loop) == 0);
r = uv_timer_init(loop, &timer);
ASSERT_EQ(r, 0);
ASSERT_EQ(uv_loop_alive(loop), 0);
ASSERT_EQ(uv_backend_timeout(loop), 0);
r = uv_timer_start(&timer, cb, 1000, 0); /* 1 sec */
ASSERT(r == 0);
ASSERT(uv_backend_timeout(loop) > 100); /* 0.1 sec */
ASSERT(uv_backend_timeout(loop) <= 1000); /* 1 sec */
ASSERT_EQ(r, 0);
ASSERT_EQ(uv_backend_timeout(loop), 1000);
r = uv_run(loop, UV_RUN_DEFAULT);
ASSERT(r == 0);
ASSERT(uv_backend_timeout(loop) == 0);
ASSERT_EQ(r, 0);
ASSERT_EQ(uv_backend_timeout(loop), 0);
MAKE_VALGRIND_HAPPY();
return 0;


@ -46,11 +46,7 @@ static void thread_main(void* arg) {
uv_fs_req_cleanup(&req);
} while (n > 0 || (n == -1 && uv_errno == UV_EINTR));
#ifdef _WIN32
ASSERT(n == UV_EOF);
#else
ASSERT(n == 0);
#endif
}


@ -41,6 +41,7 @@ TEST_IMPL(platform_output) {
uv_interface_address_t* interfaces;
uv_passwd_t pwd;
uv_utsname_t uname;
unsigned par;
int count;
int i;
int err;
@ -88,6 +89,10 @@ TEST_IMPL(platform_output) {
printf(" maximum resident set size: %llu\n",
(unsigned long long) rusage.ru_maxrss);
par = uv_available_parallelism();
ASSERT_GE(par, 1);
printf("uv_available_parallelism: %u\n", par);
err = uv_cpu_info(&cpus, &count);
#if defined(__CYGWIN__) || defined(__MSYS__)
ASSERT(err == UV_ENOSYS);


@ -1891,6 +1891,44 @@ TEST_IMPL(spawn_quoted_path) {
#endif
}
TEST_IMPL(spawn_exercise_sigchld_issue) {
int r;
int i;
uv_process_options_t dummy_options = {0};
uv_process_t dummy_processes[100];
char* args[2];
init_process_options("spawn_helper1", exit_cb);
r = uv_spawn(uv_default_loop(), &process, &options);
ASSERT_EQ(r, 0);
// This test exercises a bug in the darwin kernel that causes SIGCHLD not to
// be delivered sometimes. Calling posix_spawn many times increases the
// likelihood of encountering this issue, so spin a few times to make this
// test more reliable.
dummy_options.file = args[0] = "program-that-had-better-not-exist";
args[1] = NULL;
dummy_options.args = args;
dummy_options.exit_cb = fail_cb;
dummy_options.flags = 0;
for (i = 0; i < 100; i++) {
r = uv_spawn(uv_default_loop(), &dummy_processes[i], &dummy_options);
if (r != UV_ENOENT)
ASSERT_EQ(r, UV_EACCES);
uv_close((uv_handle_t*) &dummy_processes[i], close_cb);
}
r = uv_run(uv_default_loop(), UV_RUN_DEFAULT);
ASSERT_EQ(r, 0);
ASSERT_EQ(exit_cb_called, 1);
ASSERT_EQ(close_cb_called, 101);
MAKE_VALGRIND_HAPPY();
return 0;
}
/* Helper for child process of spawn_inherit_streams */
#ifndef _WIN32
void spawn_stdin_stdout(void) {


@ -273,6 +273,11 @@ TEST_IMPL(thread_stack_size_explicit) {
thread_check_stack, &options));
ASSERT(0 == uv_thread_join(&thread));
options.stack_size = 42;
ASSERT(0 == uv_thread_create_ex(&thread, &options,
thread_check_stack, &options));
ASSERT(0 == uv_thread_join(&thread));
#ifdef PTHREAD_STACK_MIN
options.stack_size = PTHREAD_STACK_MIN - 42; /* unaligned size */
ASSERT(0 == uv_thread_create_ex(&thread, &options,


@ -29,14 +29,15 @@
#define CHECK_HANDLE(handle) \
ASSERT((uv_udp_t*)(handle) == &recver || (uv_udp_t*)(handle) == &sender)
#define BUFFER_MULTIPLIER 4
#define BUFFER_MULTIPLIER 20
#define MAX_DGRAM_SIZE (64 * 1024)
#define NUM_SENDS 8
#define NUM_SENDS 40
#define EXPECTED_MMSG_ALLOCS (NUM_SENDS / BUFFER_MULTIPLIER)
static uv_udp_t recver;
static uv_udp_t sender;
static int recv_cb_called;
static int received_datagrams;
static int close_cb_called;
static int alloc_cb_called;
@ -68,10 +69,10 @@ static void close_cb(uv_handle_t* handle) {
static void recv_cb(uv_udp_t* handle,
ssize_t nread,
const uv_buf_t* rcvbuf,
const struct sockaddr* addr,
unsigned flags) {
ssize_t nread,
const uv_buf_t* rcvbuf,
const struct sockaddr* addr,
unsigned flags) {
ASSERT_GE(nread, 0);
/* free and return if this is a mmsg free-only callback invocation */
@ -82,14 +83,20 @@ static void recv_cb(uv_udp_t* handle,
return;
}
ASSERT_EQ(nread, 4);
ASSERT_NOT_NULL(addr);
ASSERT_MEM_EQ("PING", rcvbuf->base, nread);
if (nread == 0) {
/* There is no more data available for the time being. */
ASSERT_NULL(addr);
} else {
ASSERT_EQ(nread, 4);
ASSERT_NOT_NULL(addr);
ASSERT_MEM_EQ("PING", rcvbuf->base, nread);
received_datagrams++;
}
recv_cb_called++;
if (recv_cb_called == NUM_SENDS) {
uv_close((uv_handle_t*)handle, close_cb);
uv_close((uv_handle_t*)&sender, close_cb);
if (received_datagrams == NUM_SENDS) {
uv_close((uv_handle_t*) handle, close_cb);
uv_close((uv_handle_t*) &sender, close_cb);
}
/* Don't free if the buffer could be reused via mmsg */
@ -124,7 +131,7 @@ TEST_IMPL(udp_mmsg) {
ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
ASSERT_EQ(close_cb_called, 2);
ASSERT_EQ(recv_cb_called, NUM_SENDS);
ASSERT_EQ(received_datagrams, NUM_SENDS);
ASSERT_EQ(sender.send_queue_size, 0);
ASSERT_EQ(recver.send_queue_size, 0);