libuv 1.45.0, #include cleanup, and related build and CI updates.

git-svn-id: https://www.unprompted.com/svn/projects/tildefriends/trunk@4308 ed5197a5-7fde-0310-b194-c3ffbd925b24
Cory McWilliams 2023-05-21 21:36:51 +00:00
parent 1ccb9183b4
commit f421606e21
299 changed files with 7167 additions and 4918 deletions


@@ -142,13 +142,10 @@ UV_SOURCES_unix := \
     deps/libuv/src/unix/async.c \
     deps/libuv/src/unix/core.c \
     deps/libuv/src/unix/dl.c \
-    deps/libuv/src/unix/epoll.c \
     deps/libuv/src/unix/fs.c \
     deps/libuv/src/unix/getaddrinfo.c \
     deps/libuv/src/unix/getnameinfo.c \
-    deps/libuv/src/unix/linux-core.c \
-    deps/libuv/src/unix/linux-inotify.c \
-    deps/libuv/src/unix/linux-syscalls.c \
+    deps/libuv/src/unix/linux.c \
     deps/libuv/src/unix/loop-watcher.c \
     deps/libuv/src/unix/loop.c \
     deps/libuv/src/unix/pipe.c \
@@ -166,7 +163,6 @@ UV_SOURCES_unix := \
     deps/libuv/src/unix/tty.c \
     deps/libuv/src/unix/udp.c
 UV_SOURCES_android := \
-    deps/libuv/src/unix/pthread-fixes.c \
     deps/libuv/src/unix/random-getentropy.c
 UV_SOURCES_win := \
     deps/libuv/src/win/async.c \
@@ -198,12 +194,13 @@ UV_OBJS := $(call get_objs,UV_SOURCES)
 $(UV_OBJS): CFLAGS += \
     -Ideps/libuv/include \
     -Ideps/libuv/src \
-    -Wno-unused-but-set-variable \
-    -Wno-incompatible-pointer-types \
-    -Wno-sign-compare \
-    -Wno-unused-variable \
     -Wno-dangling-pointer \
+    -Wno-incompatible-pointer-types \
     -Wno-maybe-uninitialized \
+    -Wno-sign-compare \
+    -Wno-unused-but-set-variable \
+    -Wno-unused-result \
+    -Wno-unused-variable \
     -D_GNU_SOURCE
 SODIUM_SOURCES := \
@@ -376,15 +373,17 @@ debug release: LDFLAGS += \
     -lssl \
     -lcrypto
 windebug winrelease: LDFLAGS += \
-    -lwsock32 \
-    -lws2_32 \
-    -lkernel32 \
-    -liphlpapi \
-    -luserenv \
     -lssl \
     -lcrypto \
+    -lcrypt32 \
+    -ldbghelp \
+    -liphlpapi \
+    -lkernel32 \
+    -lole32 \
+    -luserenv \
+    -luuid \
     -lws2_32 \
-    -lcrypt32
+    -lwsock32
 $(ANDROID_TARGETS): LDFLAGS += \
     -target $(ANDROID_NDK_TARGET_TRIPLE)$(ANDROID_NDK_API_VERSION) \
     -ldl \
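
The warning suppressions above work through GNU make target-specific variable appends, so the extra -Wno-* flags apply only to the libuv objects and leave the rest of the tree untouched. A minimal, self-contained illustration of that pattern (not taken from the project's GNUmakefile; all names are made up):

```bash
# Standalone demo of target-specific variables: the append affects only the
# listed objects, mirroring how "$(UV_OBJS): CFLAGS += ..." scopes the
# suppressions to libuv's objects.
cat > /tmp/demo.mk <<'EOF'
CFLAGS := -O2
objs := a.o b.o
$(objs): CFLAGS += -Wno-unused-variable
all: $(objs)
$(objs): ; @echo "building $@ with $(CFLAGS)"
EOF
make -f /tmp/demo.mk all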


@@ -608,6 +608,9 @@ async function useAppHandler(response, handler_blob_id, path) {
     let process = await getProcessBlob(handler_blob_id, 'handler_' + g_handler_index++, {
         script: 'handler.js',
         imports: {
+            request: {
+                path: path,
+            },
             respond: do_resolve,
         },
     });


@@ -0,0 +1,25 @@
name: CI-docs

on:
  pull_request:
    paths:
      - 'docs/**'
      - '!docs/code/**'
      - '.github/workflows/CI-docs.yml'

jobs:
  docs-src:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: '3.9'
          cache: 'pip' # caching pip dependencies
      - run: pip install -r docs/requirements.txt
      - name: html
        run: |
          make -C docs html
      - name: linkcheck
        run: |
          make -C docs linkcheck
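
The new workflow only runs the Sphinx build and the link checker, so a docs failure can be reproduced locally with the same commands (assuming a Python 3.9 environment with pip, as the workflow sets up):

```bash
# Mirror of the CI-docs steps above, run from the libuv source root.
pip install -r docs/requirements.txt
make -C docs html
make -C docs linkcheck
```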


@@ -14,6 +14,19 @@ on:
       - master
 jobs:
+  build-linux:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: configure
+        run: |
+          ./autogen.sh
+          mkdir build
+          (cd build && ../configure)
+      - name: distcheck
+        run: |
+          make -C build distcheck
   build-android:
     runs-on: ubuntu-latest
     container: reactnativecommunity/react-native-android:2020-5-20
@@ -33,14 +46,14 @@ jobs:
           ls -lh build
   build-macos:
-    runs-on: macos-10.15
+    runs-on: macos-11
     steps:
       - uses: actions/checkout@v2
       - name: Envinfo
         run: npx envinfo
       - name: Setup
         run: |
-          brew install ninja
+          brew install ninja automake libtool
       - name: Configure
         run: |
           mkdir build
@@ -59,9 +72,16 @@ jobs:
       - name: Test
         run: |
           cd build && ctest -V
+      - name: Autotools configure
+        if: always()
+        run: |
+          ./autogen.sh
+          mkdir build-auto
+          (cd build-auto && ../configure)
+          make -C build-auto -j4
   build-ios:
-    runs-on: macos-10.15
+    runs-on: macos-11
     steps:
       - uses: actions/checkout@v2
       - name: Configure
@@ -94,7 +114,6 @@ jobs:
           - {target: mips64, toolchain: gcc-mips64-linux-gnuabi64, cc: mips64-linux-gnuabi64-gcc, qemu: qemu-mips64-static }
           - {target: mipsel, toolchain: gcc-mipsel-linux-gnu, cc: mipsel-linux-gnu-gcc, qemu: qemu-mipsel-static }
           - {target: mips64el,toolchain: gcc-mips64el-linux-gnuabi64, cc: mips64el-linux-gnuabi64-gcc,qemu: qemu-mips64el-static }
-          - {target: alpha, toolchain: gcc-alpha-linux-gnu, cc: alpha-linux-gnu-gcc, qemu: qemu-alpha-static }
           - {target: arm (u64 slots), toolchain: gcc-arm-linux-gnueabi, cc: arm-linux-gnueabi-gcc, qemu: qemu-arm-static}
           - {target: aarch64 (u64 slots), toolchain: gcc-aarch64-linux-gnu, cc: aarch64-linux-gnu-gcc, qemu: qemu-aarch64-static}
           - {target: ppc (u64 slots), toolchain: gcc-powerpc-linux-gnu, cc: powerpc-linux-gnu-gcc, qemu: qemu-ppc-static}
@@ -106,7 +125,7 @@ jobs:
         # this ensure install latest qemu on ubuntu, apt get version is old
         env:
           QEMU_SRC: "http://archive.ubuntu.com/ubuntu/pool/universe/q/qemu"
-          QEMU_VER: "qemu-user-static_4\\.2-.*_amd64.deb$"
+          QEMU_VER: "qemu-user-static_7\\.0+dfsg-.*_amd64.deb$"
         run: |
           DEB=`curl -s $QEMU_SRC/ | grep -o -E 'href="([^"#]+)"' | cut -d'"' -f2 | grep $QEMU_VER | tail -1`
           wget $QEMU_SRC/$DEB
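
The added build-linux job is a plain autotools distcheck; the same sequence can be run locally from a libuv checkout (assuming autoconf, automake, and libtool are installed):

```bash
# Local equivalent of the new distcheck job, commands copied from the workflow above.
./autogen.sh
mkdir build
(cd build && ../configure)
make -C build distcheck
```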


@@ -16,7 +16,7 @@ on:
 jobs:
   build-windows:
     runs-on: windows-${{ matrix.config.server }}
-    name: build-${{ matrix.config.toolchain}}-${{ matrix.config.arch}}
+    name: build-${{ join(matrix.config.*, '-') }}
     strategy:
       fail-fast: false
       matrix:
@@ -25,27 +25,98 @@ jobs:
           - {toolchain: Visual Studio 16 2019, arch: x64, server: 2019}
           - {toolchain: Visual Studio 17 2022, arch: Win32, server: 2022}
           - {toolchain: Visual Studio 17 2022, arch: x64, server: 2022}
+          - {toolchain: Visual Studio 17 2022, arch: x64, server: 2022, config: ASAN}
     steps:
       - uses: actions/checkout@v2
       - name: Envinfo
         run: npx envinfo
       - name: Build
         shell: cmd
-        run: |
-          mkdir -p build
-          cd build
-          cmake .. -DBUILD_TESTING=ON -G "${{ matrix.config.toolchain }}" -A ${{ matrix.config.arch }}
-          cmake --build .
+        run:
+          cmake -S . -B build -DBUILD_TESTING=ON
+          -G "${{ matrix.config.toolchain }}" -A ${{ matrix.config.arch }}
+          ${{ matrix.config.config == 'ASAN' && '-DASAN=on -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded' || '' }}
+          cmake --build build --config RelWithDebInfo
+          ls -l build
       - name: platform_output
         shell: cmd
-        run: |
-          build\\Debug\\uv_run_tests.exe platform_output
+        run:
+          build\\RelWithDebInfo\\uv_run_tests.exe platform_output
       - name: platform_output_a
         shell: cmd
-        run: |
-          build\\Debug\\uv_run_tests_a.exe platform_output
+        run:
+          build\\RelWithDebInfo\\uv_run_tests_a.exe platform_output
       - name: Test
+        # only valid with libuv-master with the fix for
+        # https://github.com/libuv/leps/blob/master/005-windows-handles-not-fd.md
+        if: ${{ matrix.config.config != 'ASAN' }}
         shell: cmd
-        run: |
+        run:
           cd build
-          ctest -C Debug -V
+          ctest -C RelWithDebInfo -V
+      - name: Test only static
+        if: ${{ matrix.config.config == 'ASAN' }}
+        shell: cmd
+        run:
+          build\\RelWithDebInfo\\uv_run_tests_a.exe
+  build-mingw:
+    runs-on: ubuntu-latest
+    name: build-mingw-${{ matrix.config.arch }}
+    strategy:
+      fail-fast: false
+      matrix:
+        config:
+          - {arch: i686, server: 2022, libgcc: dw2 }
+          - {arch: x86_64, server: 2022, libgcc: seh }
+    steps:
+      - uses: actions/checkout@v3
+      - name: Install mingw32 environment
+        run: |
+          sudo apt update
+          sudo apt install mingw-w64 ninja-build -y
+      - name: Build
+        run: |
+          cmake -S . -B build -G Ninja -DHOST_ARCH=${{ matrix.config.arch }} -DBUILD_TESTING=ON -DCMAKE_TOOLCHAIN_FILE=cmake-toolchains/cross-mingw32.cmake
+          cmake --build build
+          cmake --install build --prefix "`pwd`/build/usr"
+          mkdir -p build/usr/test build/usr/bin
+          cp -av test/fixtures build/usr/test
+          cp -av build/uv_run_tests_a.exe build/uv_run_tests.exe \
+            `${{ matrix.config.arch }}-w64-mingw32-gcc -print-file-name=libgcc_s_${{ matrix.config.libgcc }}-1.dll` \
+            `${{ matrix.config.arch }}-w64-mingw32-gcc -print-file-name=libwinpthread-1.dll` \
+            `${{ matrix.config.arch }}-w64-mingw32-gcc -print-file-name=libatomic-1.dll` \
+            build/usr/bin
+      - name: Upload build artifacts
+        uses: actions/upload-artifact@v3
+        with:
+          name: mingw-${{ matrix.config.arch }}
+          path: build/usr/**/*
+          retention-days: 2
+  test-mingw:
+    runs-on: windows-${{ matrix.config.server }}
+    name: test-mingw-${{ matrix.config.arch }}
+    needs: build-mingw
+    strategy:
+      fail-fast: false
+      matrix:
+        config:
+          - {arch: i686, server: 2022}
+          - {arch: x86_64, server: 2022}
+    steps:
+      - name: Download build artifacts
+        uses: actions/download-artifact@v2
+        with:
+          name: mingw-${{ matrix.config.arch }}
+      - name: Test
+        shell: cmd
+        run: |
+          bin\uv_run_tests_a.exe
+      - name: Test
+        shell: cmd
+        run: |
+          bin\uv_run_tests.exe
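
The new ASAN matrix entry only changes the configure flags; a rough local equivalent, with the options copied from the workflow above and run from a Visual Studio 2022 developer prompt, would look like the sketch below (paths and generator name are taken from the workflow, not verified here):

```bash
cmake -S . -B build -DBUILD_TESTING=ON \
      -G "Visual Studio 17 2022" -A x64 \
      -DASAN=on -DCMAKE_MSVC_RUNTIME_LIBRARY=MultiThreaded
cmake --build build --config RelWithDebInfo
./build/RelWithDebInfo/uv_run_tests_a.exe
```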


@@ -14,7 +14,7 @@ on:
 jobs:
   sanitizers:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     steps:
       - uses: actions/checkout@v2
       - name: Setup
@@ -22,15 +22,7 @@ jobs:
           sudo apt-get install ninja-build
       - name: Envinfo
         run: npx envinfo
-      - name: TSAN Build
-        run: |
-          mkdir build-tsan
-          (cd build-tsan && cmake .. -G Ninja -DBUILD_TESTING=ON -DTSAN=ON -DCMAKE_BUILD_TYPE=Release)
-          cmake --build build-tsan
-      - name: TSAN Test
-        continue-on-error: true # currently permit failures
-        run: |
-          ./build-tsan/uv_run_tests_a
       - name: ASAN Build
         run: |
           mkdir build-asan
@@ -39,3 +31,32 @@ jobs:
       - name: ASAN Test
         run: |
           ./build-asan/uv_run_tests_a
+      - name: MSAN Build
+        run: |
+          mkdir build-msan
+          (cd build-msan && cmake .. -G Ninja -DBUILD_TESTING=ON -DMSAN=ON -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_COMPILER=clang)
+          cmake --build build-msan
+      - name: MSAN Test
+        run: |
+          ./build-msan/uv_run_tests_a
+      - name: TSAN Build
+        run: |
+          mkdir build-tsan
+          (cd build-tsan && cmake .. -G Ninja -DBUILD_TESTING=ON -DTSAN=ON -DCMAKE_BUILD_TYPE=Release)
+          cmake --build build-tsan
+      - name: TSAN Test
+        # Note: path must be absolute because some tests chdir.
+        # TSan exits with an error when it can't find the file.
+        run: |
+          env TSAN_OPTIONS="suppressions=$PWD/tsansupp.txt" ./build-tsan/uv_run_tests_a
+      - name: UBSAN Build
+        run: |
+          mkdir build-ubsan
+          (cd build-ubsan && cmake .. -G Ninja -DBUILD_TESTING=ON -DUBSAN=ON -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_COMPILER=clang)
+          cmake --build build-ubsan
+      - name: UBSAN Test
+        run: |
+          ./build-ubsan/uv_run_tests_a
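
Each sanitizer job is an independent out-of-tree build, so any one of them can be reproduced locally; for example, the new UBSAN configuration (commands copied from the workflow above, requires clang and ninja):

```bash
mkdir build-ubsan
(cd build-ubsan && cmake .. -G Ninja -DBUILD_TESTING=ON -DUBSAN=ON \
    -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_COMPILER=clang)
cmake --build build-ubsan
./build-ubsan/uv_run_tests_a
```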

deps/libuv/.mailmap (vendored, 3 changes)

@@ -29,6 +29,7 @@ Keno Fischer <kenof@stanford.edu> <kfischer+github@college.harvard.edu>
 Keno Fischer <kenof@stanford.edu> <kfischer@college.harvard.edu>
 Leith Bade <leith@leithalweapon.geek.nz> <leith@mapbox.com>
 Leonard Hecker <leonard.hecker91@gmail.com> <leonard@hecker.io>
+Lewis Russell <me@lewisr.dev> <lewis6991@gmail.com>
 Maciej Małecki <maciej.malecki@notimplemented.org> <me@mmalecki.com>
 Marc Schlaich <marc.schlaich@googlemail.com> <marc.schlaich@gmail.com>
 Michael <michael_dawson@ca.ibm.com>
@@ -60,5 +61,7 @@ gengjiawen <technicalcute@gmail.com>
 jBarz <jBarz@users.noreply.github.com> <jbarboza@ca.ibm.com>
 jBarz <jBarz@users.noreply.github.com> <jbarz@users.noreply.github.com>
 ptlomholt <pt@lomholt.com>
+theanarkh <2923878201@qq.com> <theratliter@gmail.com>
 tjarlama <59913901+tjarlama@users.noreply.github.com> <tjarlama@gmail.com>
+ywave620 <rogertyang@tencent.com> <60539365+ywave620@users.noreply.github.com>
 zlargon <zlargon1988@gmail.com>


@@ -5,7 +5,10 @@ sphinx:
   configuration: null
   fail_on_warning: false
 
+build:
+  tools:
+    python: "3.9"
+
 python:
-  version: 3.8
   install:
     - requirements: docs/requirements.txt

deps/libuv/AUTHORS (vendored, 25 changes)

@@ -517,3 +517,28 @@ chucksilvers <chuq@chuq.com>
Sergey Fedorov <vital.had@gmail.com>
theanarkh <2923878201@qq.com>
Samuel Cabrero <samuelcabrero@gmail.com>
自发对称破缺 <429839446@qq.com>
Luan Devecchi <luan@engineer.com>
Steven Schveighoffer <schveiguy@gmail.com>
number201724 <number201724@me.com>
Daniel <reymond315qq@gmail.com>
Christian Clason <christian.clason@uni-due.de>
ywave620 <rogertyang@tencent.com>
jensbjorgensen <jbj1@ultraemail.net>
daomingq <daoming.qiu@intel.com>
Qix <Qix-@users.noreply.github.com>
Edward Humes <29870961+aurxenon@users.noreply.github.com>
Tim Besard <tim.besard@gmail.com>
Sergey Rubanov <chi187@gmail.com>
Stefan Stojanovic <StefanStojanovic@users.noreply.github.com>
Zvicii <zvicii@qq.com>
dundargoc <33953936+dundargoc@users.noreply.github.com>
Jack·Boos·Yu <47264268+JackBoosY@users.noreply.github.com>
panran <310762957@qq.com>
Tamás Bálint Misius <lbphacker@gmail.com>
Bruno Passeri <Varstahl@users.noreply.github.com>
Jason Zhang <xzha4350@gmail.com>
Lewis Russell <me@lewisr.dev>
sivadeilra <arlie.davis@gmail.com>
cui fliter <imcusg@gmail.com>
Mohammed Keyvanzadeh <mohammadkeyvanzade94@gmail.com>


@@ -1,8 +1,13 @@
 cmake_minimum_required(VERSION 3.4)
-project(libuv LANGUAGES C)
 
-cmake_policy(SET CMP0057 NEW) # Enable IN_LIST operator
-cmake_policy(SET CMP0064 NEW) # Support if (TEST) operator
+if(POLICY CMP0091)
+  cmake_policy(SET CMP0091 NEW) # Enable MSVC_RUNTIME_LIBRARY setting
+endif()
+if(POLICY CMP0092)
+  cmake_policy(SET CMP0092 NEW) # disable /W3 warning, if possible
+endif()
+
+project(libuv LANGUAGES C)
 
 list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake")
@@ -17,9 +22,13 @@ set(CMAKE_C_STANDARD_REQUIRED ON)
 set(CMAKE_C_EXTENSIONS ON)
 set(CMAKE_C_STANDARD 90)
 
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
+
+option(LIBUV_BUILD_SHARED "Build shared lib" ON)
+
 cmake_dependent_option(LIBUV_BUILD_TESTS
   "Build the unit tests when BUILD_TESTING is enabled and we are the root project" ON
-  "BUILD_TESTING;CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR" OFF)
+  "BUILD_TESTING;LIBUV_BUILD_SHARED;CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR" OFF)
 cmake_dependent_option(LIBUV_BUILD_BENCH
   "Build the benchmarks when building unit tests and we are the root project" ON
   "LIBUV_BUILD_TESTS" OFF)
@@ -27,28 +36,61 @@ cmake_dependent_option(LIBUV_BUILD_BENCH
 # Qemu Build
 option(QEMU "build for qemu" OFF)
 if(QEMU)
-  add_definitions(-D__QEMU__=1)
+  list(APPEND uv_defines __QEMU__=1)
 endif()
 
+# Note: these are mutually exclusive.
 option(ASAN "Enable AddressSanitizer (ASan)" OFF)
+option(MSAN "Enable MemorySanitizer (MSan)" OFF)
 option(TSAN "Enable ThreadSanitizer (TSan)" OFF)
+option(UBSAN "Enable UndefinedBehaviorSanitizer (UBSan)" OFF)
 
-if((ASAN OR TSAN) AND NOT (CMAKE_C_COMPILER_ID MATCHES "AppleClang|GNU|Clang"))
-  message(SEND_ERROR "Sanitizer support requires clang or gcc. Try again with -DCMAKE_C_COMPILER.")
+if(MSAN AND NOT CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang")
+  message(SEND_ERROR "MemorySanitizer requires clang. Try again with -DCMAKE_C_COMPILER=clang")
 endif()
 
 if(ASAN)
-  add_definitions(-D__ASAN__=1)
-  set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer -fsanitize=address")
-  set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address")
-  set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address")
+  list(APPEND uv_defines __ASAN__=1)
+  if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|GNU|Clang")
+    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer -fsanitize=address")
+    set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address")
+    set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address")
+  elseif(MSVC)
+    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /fsanitize=address")
+  else()
+    message(SEND_ERROR "AddressSanitizer support requires clang, gcc, or msvc. Try again with -DCMAKE_C_COMPILER.")
+  endif()
+endif()
+
+if(MSAN)
+  list(APPEND uv_defines __MSAN__=1)
+  set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer -fsanitize=memory")
+  set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=memory")
+  set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=memory")
 endif()
 
 if(TSAN)
-  add_definitions(-D__TSAN__=1)
-  set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer -fsanitize=thread")
-  set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=thread")
-  set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=thread")
+  list(APPEND uv_defines __TSAN__=1)
+  if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|GNU|Clang")
+    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer -fsanitize=thread")
+    set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=thread")
+    set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=thread")
+  else()
+    message(SEND_ERROR "ThreadSanitizer support requires clang or gcc. Try again with -DCMAKE_C_COMPILER.")
+  endif()
+endif()
+
+if(UBSAN)
+  list(APPEND uv_defines __UBSAN__=1)
+  if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|GNU|Clang")
+    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer -fsanitize=undefined")
+    set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=undefined")
+    set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=undefined")
+  elseif(MSVC)
+    set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /fsanitize=undefined")
+  else()
+    message(SEND_ERROR "UndefinedBehaviorSanitizer support requires clang, gcc, or msvc. Try again with -DCMAKE_C_COMPILER.")
+  endif()
 endif()
 
 # Compiler check
@@ -126,6 +168,7 @@ set(uv_sources
     src/random.c
     src/strscpy.c
     src/strtok.c
+    src/thread-common.c
     src/threadpool.c
     src/timer.c
     src/uv-common.c
@@ -140,7 +183,10 @@ if(WIN32)
       advapi32
       iphlpapi
       userenv
-      ws2_32)
+      ws2_32
+      dbghelp
+      ole32
+      uuid)
   list(APPEND uv_sources
       src/win/async.c
       src/win/core.c
@@ -216,15 +262,11 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Android")
   list(APPEND uv_defines _GNU_SOURCE)
   list(APPEND uv_libraries dl)
   list(APPEND uv_sources
-      src/unix/linux-core.c
-      src/unix/linux-inotify.c
-      src/unix/linux-syscalls.c
+      src/unix/linux.c
       src/unix/procfs-exepath.c
-      src/unix/pthread-fixes.c
       src/unix/random-getentropy.c
       src/unix/random-getrandom.c
-      src/unix/random-sysctl-linux.c
-      src/unix/epoll.c)
+      src/unix/random-sysctl-linux.c)
 endif()
 
 if(APPLE OR CMAKE_SYSTEM_NAME MATCHES "Android|Linux")
@@ -270,22 +312,14 @@ if(CMAKE_SYSTEM_NAME STREQUAL "GNU")
       src/unix/hurd.c)
 endif()
 
-if(CMAKE_SYSTEM_NAME STREQUAL "kFreeBSD")
-  list(APPEND uv_defines _GNU_SOURCE)
-  list(APPEND uv_libraries dl freebsd-glue)
-endif()
-
 if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
   list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112)
   list(APPEND uv_libraries dl rt)
   list(APPEND uv_sources
-      src/unix/linux-core.c
-      src/unix/linux-inotify.c
-      src/unix/linux-syscalls.c
+      src/unix/linux.c
       src/unix/procfs-exepath.c
      src/unix/random-getrandom.c
-      src/unix/random-sysctl-linux.c
-      src/unix/epoll.c)
+      src/unix/random-sysctl-linux.c)
 endif()
 
 if(CMAKE_SYSTEM_NAME STREQUAL "NetBSD")
@@ -316,7 +350,6 @@ if(CMAKE_SYSTEM_NAME STREQUAL "OS390")
   list(APPEND uv_defines _XOPEN_SOURCE=600)
   list(APPEND uv_defines _XOPEN_SOURCE_EXTENDED)
   list(APPEND uv_sources
-      src/unix/pthread-fixes.c
       src/unix/os390.c
       src/unix/os390-syscalls.c
       src/unix/os390-proctitle.c)
@@ -354,6 +387,10 @@ if(CMAKE_SYSTEM_NAME STREQUAL "OS400")
 endif()
 
 if(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
+  if(CMAKE_SYSTEM_VERSION STREQUAL "5.10")
+    list(APPEND uv_defines SUNOS_NO_IFADDRS)
+    list(APPEND uv_libraries rt)
+  endif()
   list(APPEND uv_defines __EXTENSIONS__ _XOPEN_SOURCE=500 _REENTRANT)
   list(APPEND uv_libraries kstat nsl sendfile socket)
   list(APPEND uv_sources
@@ -388,25 +425,42 @@ if(APPLE OR CMAKE_SYSTEM_NAME MATCHES "DragonFly|FreeBSD|Linux|NetBSD|OpenBSD")
   list(APPEND uv_test_libraries util)
 endif()
 
-add_library(uv SHARED ${uv_sources})
-target_compile_definitions(uv
-  INTERFACE
-    USING_UV_SHARED=1
-  PRIVATE
-    BUILDING_UV_SHARED=1
-    ${uv_defines})
-target_compile_options(uv PRIVATE ${uv_cflags})
-target_include_directories(uv
-  PUBLIC
-    $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
-    $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
-  PRIVATE
-    $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/src>)
-if(CMAKE_SYSTEM_NAME STREQUAL "OS390")
-  target_include_directories(uv PUBLIC $<BUILD_INTERFACE:${ZOSLIB_DIR}/include>)
-  set_target_properties(uv PROPERTIES LINKER_LANGUAGE CXX)
+if(CYGWIN OR MSYS)
+  list(APPEND uv_defines _GNU_SOURCE)
+  list(APPEND uv_sources
+      src/unix/cygwin.c
+      src/unix/bsd-ifaddrs.c
+      src/unix/no-fsevents.c
+      src/unix/no-proctitle.c
+      src/unix/posix-hrtime.c
+      src/unix/posix-poll.c
+      src/unix/procfs-exepath.c
+      src/unix/sysinfo-loadavg.c
+      src/unix/sysinfo-memory.c)
+endif()
+
+if(LIBUV_BUILD_SHARED)
+  add_library(uv SHARED ${uv_sources})
+  target_compile_definitions(uv
+    INTERFACE
+      USING_UV_SHARED=1
+    PRIVATE
+      BUILDING_UV_SHARED=1
+      ${uv_defines})
+  target_compile_options(uv PRIVATE ${uv_cflags})
+  target_include_directories(uv
+    PUBLIC
+      $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
+      $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+    PRIVATE
+      $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/src>)
+  if(CMAKE_SYSTEM_NAME STREQUAL "OS390")
+    target_include_directories(uv PUBLIC $<BUILD_INTERFACE:${ZOSLIB_DIR}/include>)
+    set_target_properties(uv PROPERTIES LINKER_LANGUAGE CXX)
+  endif()
+  target_link_libraries(uv ${uv_libraries})
+  set_target_properties(uv PROPERTIES OUTPUT_NAME "uv")
 endif()
-target_link_libraries(uv ${uv_libraries})
 
 add_library(uv_a STATIC ${uv_sources})
 target_compile_definitions(uv_a PRIVATE ${uv_defines})
@@ -422,6 +476,10 @@ if(CMAKE_SYSTEM_NAME STREQUAL "OS390")
   set_target_properties(uv_a PROPERTIES LINKER_LANGUAGE CXX)
 endif()
 target_link_libraries(uv_a ${uv_libraries})
+set_target_properties(uv_a PROPERTIES OUTPUT_NAME "uv")
+if(MSVC)
+  set_target_properties(uv_a PROPERTIES PREFIX "lib")
+endif()
 
 if(LIBUV_BUILD_TESTS)
   # Small hack: use ${uv_test_sources} now to get the runner skeleton,
@@ -584,6 +642,7 @@ if(LIBUV_BUILD_TESTS)
     test/test-tcp-rst.c
     test/test-tcp-shutdown-after-write.c
     test/test-tcp-try-write.c
+    test/test-tcp-write-in-a-row.c
     test/test-tcp-try-write-error.c
     test/test-tcp-unexpected-read.c
     test/test-tcp-write-after-connect.c
@@ -592,6 +651,7 @@ if(LIBUV_BUILD_TESTS)
     test/test-tcp-write-to-half-open-connection.c
     test/test-tcp-writealot.c
     test/test-test-macros.c
+    test/test-thread-affinity.c
     test/test-thread-equal.c
     test/test-thread.c
     test/test-threadpool-cancel.c
@@ -624,6 +684,7 @@ if(LIBUV_BUILD_TESTS)
     test/test-udp-sendmmsg-error.c
     test/test-udp-send-unreachable.c
     test/test-udp-try-send.c
+    test/test-udp-recv-in-a-row.c
     test/test-uname.c
     test/test-walk-handles.c
     test/test-watcher-cross-stop.c)
@@ -667,27 +728,36 @@ string(REPLACE ";" " " LIBS "${LIBS}")
 file(STRINGS configure.ac configure_ac REGEX ^AC_INIT)
 string(REGEX MATCH "([0-9]+)[.][0-9]+[.][0-9]+" PACKAGE_VERSION "${configure_ac}")
 set(UV_VERSION_MAJOR "${CMAKE_MATCH_1}")
-# The version in the filename is mirroring the behaviour of autotools.
-set_target_properties(uv PROPERTIES
-  VERSION ${UV_VERSION_MAJOR}.0.0
-  SOVERSION ${UV_VERSION_MAJOR})
 
 set(includedir ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR})
 set(libdir ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR})
 set(prefix ${CMAKE_INSTALL_PREFIX})
-configure_file(libuv.pc.in libuv.pc @ONLY)
 configure_file(libuv-static.pc.in libuv-static.pc @ONLY)
 
 install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
 install(FILES LICENSE DESTINATION ${CMAKE_INSTALL_DOCDIR})
-install(FILES ${PROJECT_BINARY_DIR}/libuv.pc ${PROJECT_BINARY_DIR}/libuv-static.pc
+install(FILES LICENSE-extra DESTINATION ${CMAKE_INSTALL_DOCDIR})
+install(FILES ${PROJECT_BINARY_DIR}/libuv-static.pc
         DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
-install(TARGETS uv EXPORT libuvConfig
-        RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
-        LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
-        ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
 install(TARGETS uv_a EXPORT libuvConfig
         ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
-install(EXPORT libuvConfig DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libuv)
+install(EXPORT libuvConfig
+        DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libuv
+        NAMESPACE libuv::)
+
+if(LIBUV_BUILD_SHARED)
+  # The version in the filename is mirroring the behaviour of autotools.
+  set_target_properties(uv PROPERTIES
+    VERSION ${UV_VERSION_MAJOR}.0.0
+    SOVERSION ${UV_VERSION_MAJOR})
+  configure_file(libuv.pc.in libuv.pc @ONLY)
+  install(FILES ${PROJECT_BINARY_DIR}/libuv.pc
+          DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
+  install(TARGETS uv EXPORT libuvConfig
+          RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+          LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+          ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
+endif()
 
 if(MSVC)
   set(CMAKE_DEBUG_POSTFIX d)
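
With the new LIBUV_BUILD_SHARED option gating the shared `uv` target and its install/pkg-config rules, a static-only build becomes a single switch. A minimal sketch (option name from the diff above; the build directory name is arbitrary):

```bash
cmake -S . -B build-static -DLIBUV_BUILD_SHARED=OFF
cmake --build build-static   # produces only the static uv_a target ("libuv.a" on Unix)
```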

deps/libuv/ChangeLog (vendored, 295 changes)

@@ -1,4 +1,297 @@
-2022.07.12, Version 1.44.2 (Stable)
+2023.05.19, Version 1.45.0 (Stable)
Changes since version 1.44.2:
* win: remove stdint-msvc2008.h (Ben Noordhuis)
* android: remove pthread-fixes.c (Ben Noordhuis)
* build: enable MSVC_RUNTIME_LIBRARY setting (自发对称破缺)
* unix: switch to c11 atomics (Ben Noordhuis)
* unix: don't accept() connections in a loop (Ben Noordhuis)
* win: fix off-by-1 buffer overrun in uv_exepath() (Ben Noordhuis)
* build: switch ci from macos-10.15 to macos-11 (Ben Noordhuis)
* win: fix thread race in uv_cwd() and uv_chdir() (Ben Noordhuis)
* unix,win: remove UV_HANDLE_SHUTTING flag (Santiago Gimeno)
* win: support Windows 11 in uv_os_uname() (Luan Devecchi)
* unix: fix uv_getrusage() ru_maxrss reporting (Ben Noordhuis)
* doc: add note about offset -1 in uv_fs_read/write (Steven Schveighoffer)
* test: fix musl libc.a dlerror() test expectation (Ben Noordhuis)
* kqueue: DRY file descriptor deletion logic (Ben Noordhuis)
* linux: teach uv_get_constrained_memory() cgroupsv2 (Ben Noordhuis)
* build: upgrade qemu-user-static package (Ben Noordhuis)
* linux: move epoll.c back into linux-core.c (Ben Noordhuis)
* unix: remove pre-macos 10.8 compatibility hack (Ben Noordhuis)
* unix,win: fix memory leak in uv_fs_scandir() (Ben Noordhuis)
* build: restore qemu download logic (Ben Noordhuis)
* win: fix uv__pipe_accept memory leak (number201724)
* doc: update LINKS.md (Daniel)
* unix: simplify atomic op in uv_tty_reset_mode() (Ben Noordhuis)
* build: add LIBUV_BUILD_SHARED cmake option (Christian Clason)
* linux: remove unused or obsolete syscall wrappers (Ben Noordhuis)
* linux: merge files back into single file (Ben Noordhuis)
* stream: process more than one write req per loop tick (ywave620)
* unix,win: give thread pool threads an 8 MB stack (Ben Noordhuis)
* build: add MemorySanitizer (MSAN) support (Ben Noordhuis)
* doc: add uv_poll_cb status==UV_EBADF note (jensbjorgensen)
* build: support AddressSanitizer on MSVC (Jameson Nash)
* win,pipe: improve method of obtaining pid for ipc (number201724)
* thread: add support for affinity (daomingq)
* include: map ENODATA error code (Ben Noordhuis)
* build: remove bashism from autogen.sh (Santiago Gimeno)
* win,tcp,udp: remove "active streams" optimization (Saúl Ibarra Corretgé)
* win: drop code checking for Windows XP / Server 2k3 (Saúl Ibarra Corretgé)
* unix,win: fix 'sprintf' is deprecated warning (twosee)
* doc: mention close_cb can be NULL (Qix)
* win: optimize udp receive performance (ywave620)
* win: fix an incompatible types warning (twosee)
* doc: document 0 return value for free/total memory (Ben Noordhuis)
* darwin: use hw.cpufrequency again for frequency info (Jameson Nash)
* win,test: change format of TEST_PIPENAME's (Santiago Gimeno)
* win,pipe: fixes in uv_pipe_connect() (Santiago Gimeno)
* misc: fix return value of memory functions (theanarkh)
* src: add new metrics APIs (Trevor Norris)
* thread: add uv_thread_getcpu() (daomingq)
* build: don't use ifaddrs.h on solaris 10 (Edward Humes)
* unix,win: add uv_get_available_memory() (Tim Besard)
* test: fix -Wunused-but-set-variable warnings (Ben Noordhuis)
* doc: bump min supported linux and freebsd versions (Ben Noordhuis)
* Add Socket Runtime to the LINKS.md (Sergey Rubanov)
* unix: drop kfreebsd support (Ben Noordhuis)
* win: fix fstat for pipes and character files (Stefan Stojanovic)
* win: fix -Wunused-variable warning (Ben Noordhuis)
* win: fix -Wunused-function warning (Ben Noordhuis)
* build: drop qemu-alpha from ci matrix (Ben Noordhuis)
* win: move child_stdio_buffer out of uv_process_t (Santiago Gimeno)
* test: fix some unreachable code warnings (Santiago Gimeno)
* linux: simplify uv_uptime() (Ben Noordhuis)
* test: unflake fs_event_watch_dir test (Ben Noordhuis)
* darwin: remove unused fsevents symbol lookups (Ben Noordhuis)
* build: add define guard around UV_EXTERN (Zvicii)
* build: add UndefinedBehaviorSanitizer support (Ben Noordhuis)
* build: enable platform_output test on qemu (Ben Noordhuis)
* linux: handle cpu hotplugging in uv_cpu_info() (Ben Noordhuis)
* build: remove unnecessary policy setting (dundargoc)
* docs: add vcpkg instruction step (Jack·Boos·Yu)
* win,fs: fix readlink errno for a non-symlink file (Darshan Sen)
* misc: extend getpw to take uid as an argument (Jameson Nash)
* unix,win: use static_assert when available (Ben Noordhuis)
* docs: delete code Makefile (Jameson Nash)
* docs: add CI for docs PRs (Jameson Nash)
* docs: update Sphinx version on RTD (Jameson Nash)
* doc: clean up license file (Ben Noordhuis)
* test: fix some warnings when compiling tests (panran)
* build,win: add mingw-w64 CI configuration (Jameson Nash)
* build: add CI for distcheck (Jameson Nash)
* unix: remove busy loop from uv_async_send (Jameson Nash)
* doc: document uv_fs_cb type (Tamás Bálint Misius)
* build: Improve build by cmake for Cygwin (erw7)
* build: add libuv:: namespace to libuvConfig.cmake (AJ Heller)
* test: fix ThreadSanitizer thread leak warning (Ben Noordhuis)
* test: fix ThreadSanitizer data race warning (Ben Noordhuis)
* test: fix ThreadSanitizer data race warning (Ben Noordhuis)
* test: fix ThreadSanitizer data race warning (Ben Noordhuis)
* test: cond-skip fork_threadpool_queue_work_simple (Ben Noordhuis)
* test: cond-skip signal_multiple_loops (Ben Noordhuis)
* test: cond-skip tcp_writealot (Ben Noordhuis)
* build: promote tsan ci to must-pass (Ben Noordhuis)
* build: add CI for OpenBSD and FreeBSD (James McCoy)
* build,test: fix distcheck errors (Jameson Nash)
* test: remove bad tty window size assumption (Ben Noordhuis)
* darwin,process: feed kevent the signal to reap children (Jameson Nash)
* unix: abort on clock_gettime() error (Ben Noordhuis)
* test: remove timing-sensitive check (Ben Noordhuis)
* unix: DRY and fix tcp bind error path (Jameson Nash)
* macos: fix fsevents thread race conditions (Ben Noordhuis)
* win: fix leak in uv_chdir (Trevor Norris)
* test: make valgrind happy (Trevor Norris)
* barrier: wait for prior out before next in (Jameson Nash)
* test: fix visual studio 2015 build error (Ben Noordhuis)
* linux: fix ceph copy error truncating readonly files (Bruno Passeri)
* test: silence more valgrind warnings (Trevor Norris)
* doc: add entries to LINKS.md (Trevor Norris)
* win,unix: change execution order of timers (Trevor Norris)
* doc: add trevnorris to maintainers (Trevor Norris)
* linux: remove epoll_pwait() emulation code path (Ben Noordhuis)
* linux: replace unsafe macro with inline function (Ben Noordhuis)
* linux: remove arm oabi support (Ben Noordhuis)
* unix,sunos: SO_REUSEPORT not valid on all sockets (Stacey Marshall)
* doc: consistent single backquote in misc.rst (Jason Zhang)
* src: switch to use C11 atomics where available (Trevor Norris)
* test: don't use static buffer for formatting (Ben Noordhuis)
* linux: introduce io_uring support (Ben Noordhuis)
* linux: fix academic valgrind warning (Ben Noordhuis)
* test: disable signal test under ASan and MSan (Ben Noordhuis)
* linux: add IORING_OP_OPENAT support (Ben Noordhuis)
* linux: add IORING_OP_CLOSE support (Ben Noordhuis)
* linux: remove bug workaround for obsolete kernels (Ben Noordhuis)
* doc: update active maintainers list (Ben Noordhuis)
* test: add ASSERT_OK (Trevor Norris)
* src: fix events/events_waiting metrics counter (Trevor Norris)
* unix,win: add uv_clock_gettime() (Ben Noordhuis)
* build: remove freebsd and openbsd buildbots (Ben Noordhuis)
* win: fix race condition in uv__init_console() (sivadeilra)
* linux: fix logic bug in sqe ring space check (Ben Noordhuis)
* linux: use io_uring to batch epoll_ctl calls (Ben Noordhuis)
* macos: update minimum supported version (Santiago Gimeno)
* docs: fix some typos (cui fliter)
* unix: use memcpy() instead of type punning (Ben Noordhuis)
* test: add additional assert (Mohammed Keyvanzadeh)
* build: export compile_commands.json (Lewis Russell)
* win,process: write minidumps when sending SIGQUIT (Elliot Saba)
* unix: constrained_memory should return UINT64_MAX (Tim Besard)
* unix: handle CQ overflow in iou ring (Santiago Gimeno)
* unix: remove clang compiler warning pragmas (Ben Noordhuis)
* win: fix mingw build (gengjiawen)
* test: fix -Wbool-compare compiler warning (Ben Noordhuis)
* win: define MiniDumpWithAvxXStateContext always (Santiago Gimeno)
* freebsd: hard-code UV_ENODATA definition (Santiago Gimeno)
* linux: work around EOWNERDEAD io_uring kernel bug (Ben Noordhuis)
* linux: fix WRITEV with lots of bufs using io_uring (Santiago Gimeno)
2022.07.12, Version 1.44.2 (Stable), 0c1fa696aa502eb749c2c4735005f41ba00a27b8
Changes since version 1.44.1:

deps/libuv/LICENSE (vendored, 47 changes)

@@ -1,6 +1,3 @@
-libuv is licensed for use as follows:
-====
Copyright (c) 2015-present libuv project contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -20,47 +17,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
====
This license applies to parts of libuv originating from the
https://github.com/joyent/libuv repository:
====
Copyright Joyent, Inc. and other Node contributors. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
====
This license applies to all parts of libuv that are not externally
maintained libraries.
The externally maintained libraries used by libuv are:
- tree.h (from FreeBSD), copyright Niels Provos. Two clause BSD license.
- inet_pton and inet_ntop implementations, contained in src/inet.c, are
copyright the Internet Systems Consortium, Inc., and licensed under the ISC
license.
- stdint-msvc2008.h (from msinttypes), copyright Alexander Chemeris. Three
clause BSD license.
- pthread-fixes.c, copyright Google Inc. and Sony Mobile Communications AB.
Three clause BSD license.

deps/libuv/LICENSE-extra (vendored, new file, 36 lines)

@ -0,0 +1,36 @@
This license applies to parts of libuv originating from the
https://github.com/joyent/libuv repository:
====
Copyright Joyent, Inc. and other Node contributors. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
====
This license applies to all parts of libuv that are not externally
maintained libraries.
The externally maintained libraries used by libuv are:
- tree.h (from FreeBSD), copyright Niels Provos. Two clause BSD license.
- inet_pton and inet_ntop implementations, contained in src/inet.c, are
copyright the Internet Systems Consortium, Inc., and licensed under the ISC
license.

deps/libuv/LINKS.md (vendored, 9 changes)

@@ -1,8 +1,11 @@
 ### Apps / VM
+* [AliceO2](https://github.com/AliceO2Group/AliceO2): The framework and detector specific code for the reconstruction, calibration and simulation for the ALICE experiment at CERN.
+* [Beam](https://github.com/BeamMW/beam): A scalable, confidential cryptocurrency based on the Mimblewimble protocol.
 * [BIND 9](https://bind.isc.org/): DNS software system including an authoritative server, a recursive resolver and related utilities.
 * [cjdns](https://github.com/cjdelisle/cjdns): Encrypted self-configuring network/VPN routing engine
 * [clearskies_core](https://github.com/larroy/clearskies_core): Clearskies file synchronization program. (C++11)
 * [CMake](https://cmake.org) open-source, cross-platform family of tools designed to build, test and package software
+* [Cocos-Engine](https://github.com/cocos/cocos-engine): The runtime framework for Cocos Creator editor.
 * [Coherence](https://github.com/liesware/coherence/): Cryptographic server for modern web apps.
 * [DPS-For-IoT](https://github.com/intel/dps-for-iot/wiki): Fully distributed publish/subscribe protocol.
 * [HashLink](https://github.com/HaxeFoundation/hashlink): Haxe run-time with libuv support included.
@@ -10,7 +13,7 @@
 * [H2O](https://github.com/h2o/h2o): An optimized HTTP server with support for HTTP/1.x and HTTP/2.
 * [Igropyr](https://github.com/guenchi/Igropyr): a async Scheme http server base on libuv.
 * [Julia](http://julialang.org/): Scientific computing programming language
-* [Kestrel](https://github.com/aspnet/AspNetCore/tree/master/src/Servers/Kestrel): web server (C# + libuv + [ASP.NET Core](http://github.com/aspnet))
+* [Kestrel](https://github.com/dotnet/aspnetcore/tree/main/src/Servers/Kestrel): web server (C# + libuv + [ASP.NET Core](http://github.com/aspnet))
 * [Knot DNS Resolver](https://www.knot-resolver.cz/): A minimalistic DNS caching resolver
 * [Lever](http://leverlanguage.com): runtime, libuv at the 0.9.0 release
 * [libnode](https://github.com/plenluno/libnode): C++ implementation of Node.js
@@ -30,8 +33,10 @@
 * [phastlight](https://github.com/phastlight/phastlight): Command line tool and web server written in PHP 5.3+ inspired by Node.js
 * [pilight](https://www.pilight.org/): home automation ("domotica")
 * [pixie](https://github.com/pixie-lang/pixie): clojure-inspired lisp with a tracing JIT
+* [Pixie-io](https://github.com/pixie-io/pixie): Open-source observability tool for Kubernetes applications.
 * [potion](https://github.com/perl11/potion)/[p2](https://github.com/perl11/p2): runtime
 * [racer](https://libraries.io/rubygems/racer): Ruby web server written as an C extension
+* [Socket Runtime](https://sockets.sh): A runtime for creating native cross-platform software on mobile and desktop using HTML, CSS, and JavaScript
 * [spider-gazelle](https://github.com/cotag/spider-gazelle): Ruby web server using libuv bindings
 * [Suave](http://suave.io/): A simple web development F# library providing a lightweight web server and a set of combinators to manipulate route flow and task composition
 * [Swish](https://github.com/becls/swish/): Concurrency engine with Erlang-like concepts. Includes a web server.
@@ -39,6 +44,7 @@
 * [Urbit](http://urbit.org): runtime
 * [uv_callback](https://github.com/litesync/uv_callback) libuv thread communication
 * [uvloop](https://github.com/MagicStack/uvloop): Ultra fast implementation of python's asyncio event loop on top of libuv
+* [WPILib](https://github.com/wpilibsuite/allwpilib): Libraries for creating robot programs for the roboRIO.
 * [Wren CLI](https://github.com/wren-lang/wren-cli): For io, process, scheduler and timer modules
 
 ### Other
@@ -59,6 +65,7 @@
 * [lluv](https://github.com/moteus/lua-lluv)
 * C++11
   * [uvpp](https://github.com/larroy/uvpp) - Not complete, exposes very few aspects of `libuv`
+  * [nsuv](https://github.com/nodesource/nsuv) - Template wrapper focused on enforcing compile-time type safety when propagating data
 * C++17
   * [uvw](https://github.com/skypjack/uvw) - Header-only, event based, tiny and easy to use *libuv* wrapper in modern C++.
 * Python


@@ -4,12 +4,9 @@ libuv is currently managed by the following individuals:
 * **Ben Noordhuis** ([@bnoordhuis](https://github.com/bnoordhuis))
   - GPG key: D77B 1E34 243F BAF0 5F8E 9CC3 4F55 C8C8 46AB 89B9 (pubkey-bnoordhuis)
-* **Bert Belder** ([@piscisaureus](https://github.com/piscisaureus))
 * **Colin Ihrig** ([@cjihrig](https://github.com/cjihrig))
   - GPG key: 94AE 3667 5C46 4D64 BAFA 68DD 7434 390B DBE9 B9C5 (pubkey-cjihrig)
   - GPG key: 5735 3E0D BDAA A7E8 39B6 6A1A FF47 D5E4 AD8B 4FDC (pubkey-cjihrig-kb)
-* **Fedor Indutny** ([@indutny](https://github.com/indutny))
-  - GPG key: AF2E EA41 EC34 47BF DD86 FED9 D706 3CCE 19B7 E890 (pubkey-indutny)
 * **Jameson Nash** ([@vtjnash](https://github.com/vtjnash))
   - GPG key: AEAD 0A4B 6867 6775 1A0E 4AEF 34A2 5FB1 2824 6514 (pubkey-vtjnash)
   - GPG key: CFBB 9CA9 A5BE AFD7 0E2B 3C5A 79A6 7C55 A367 9C8B (pubkey2022-vtjnash)
@@ -22,11 +19,16 @@ libuv is currently managed by the following individuals:
   - GPG key: 612F 0EAD 9401 6223 79DF 4402 F28C 3C8D A33C 03BE (pubkey-santigimeno)
 * **Saúl Ibarra Corretgé** ([@saghul](https://github.com/saghul))
   - GPG key: FDF5 1936 4458 319F A823 3DC9 410E 5553 AE9B C059 (pubkey-saghul)
+* **Trevor Norris** ([@trevnorris](https://github.com/trevnorris))
+  - GPG key: AEFC 279A 0C93 0676 7E58 29A1 251C A676 820D C7F3 (pubkey-trevnorris)
 
 ## Project Maintainers emeriti
 
 * **Anna Henningsen** ([@addaleax](https://github.com/addaleax))
 * **Bartosz Sosnowski** ([@bzoz](https://github.com/bzoz))
+* **Bert Belder** ([@piscisaureus](https://github.com/piscisaureus))
+* **Fedor Indutny** ([@indutny](https://github.com/indutny))
+  - GPG key: AF2E EA41 EC34 47BF DD86 FED9 D706 3CCE 19B7 E890 (pubkey-indutny)
 * **Imran Iqbal** ([@imran-iq](https://github.com/imran-iq))
 * **John Barboza** ([@jbarz](https://github.com/jbarz))


@@ -38,6 +38,7 @@ libuv_la_SOURCES = src/fs-poll.c \
     src/random.c \
     src/strscpy.c \
     src/strscpy.h \
+    src/thread-common.c \
     src/threadpool.c \
     src/timer.c \
     src/uv-data-getter-setters.c \
@@ -96,7 +97,6 @@ else # WINNT
 uvinclude_HEADERS += include/uv/unix.h
 AM_CPPFLAGS += -I$(top_srcdir)/src/unix
 libuv_la_SOURCES += src/unix/async.c \
-    src/unix/atomic-ops.h \
     src/unix/core.c \
     src/unix/dl.c \
     src/unix/fs.c \
@@ -110,7 +110,6 @@ libuv_la_SOURCES += src/unix/async.c \
     src/unix/process.c \
     src/unix/random-devurandom.c \
     src/unix/signal.c \
-    src/unix/spinlock.h \
     src/unix/stream.c \
     src/unix/tcp.c \
     src/unix/thread.c \
@@ -122,11 +121,13 @@ endif # WINNT
 EXTRA_DIST = test/fixtures/empty_file \
     test/fixtures/load_error.node \
     test/fixtures/lorem_ipsum.txt \
+    test/fixtures/one_file/one_file \
     include \
     docs \
     img \
     CONTRIBUTING.md \
     LICENSE \
+    LICENSE-extra \
     README.md
@@ -278,11 +279,13 @@ test_run_tests_SOURCES = test/blackhole-server.c \
     test/test-tcp-writealot.c \
     test/test-tcp-write-fail.c \
     test/test-tcp-try-write.c \
+    test/test-tcp-write-in-a-row.c \
     test/test-tcp-try-write-error.c \
     test/test-tcp-write-queue-order.c \
     test/test-test-macros.c \
     test/test-thread-equal.c \
     test/test-thread.c \
+    test/test-thread-affinity.c \
     test/test-threadpool-cancel.c \
     test/test-threadpool.c \
     test/test-timer-again.c \
@@ -313,6 +316,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \
     test/test-udp-sendmmsg-error.c \
     test/test-udp-send-unreachable.c \
     test/test-udp-try-send.c \
+    test/test-udp-recv-in-a-row.c \
     test/test-uname.c \
     test/test-walk-handles.c \
     test/test-watcher-cross-stop.c
@@ -393,7 +397,6 @@ endif
 if ANDROID
 libuv_la_CFLAGS += -D_GNU_SOURCE
-libuv_la_SOURCES += src/unix/pthread-fixes.c
 endif
 
 if CYGWIN
@@ -467,22 +470,14 @@ libuv_la_SOURCES += src/unix/bsd-ifaddrs.c \
     src/unix/hurd.c
 endif
 
-if KFREEBSD
-libuv_la_CFLAGS += -D_GNU_SOURCE
-endif
-
 if LINUX
 uvinclude_HEADERS += include/uv/linux.h
 libuv_la_CFLAGS += -D_GNU_SOURCE
-libuv_la_SOURCES += src/unix/linux-core.c \
-    src/unix/linux-inotify.c \
-    src/unix/linux-syscalls.c \
-    src/unix/linux-syscalls.h \
+libuv_la_SOURCES += src/unix/linux.c \
     src/unix/procfs-exepath.c \
     src/unix/proctitle.c \
     src/unix/random-getrandom.c \
-    src/unix/random-sysctl-linux.c \
-    src/unix/epoll.c
+    src/unix/random-sysctl-linux.c
 test_run_tests_LDFLAGS += -lutil
 endif
@@ -546,8 +541,7 @@ libuv_la_CFLAGS += -D_UNIX03_THREADS \
     -qXPLINK \
     -qFLOAT=IEEE
 libuv_la_LDFLAGS += -qXPLINK
-libuv_la_SOURCES += src/unix/pthread-fixes.c \
-    src/unix/os390.c \
+libuv_la_SOURCES += src/unix/os390.c \
     src/unix/os390-syscalls.c \
     src/unix/proctitle.c
 endif

16
deps/libuv/README.md vendored
View File

@ -43,8 +43,11 @@ The ABI/API changes can be tracked [here](http://abi-laboratory.pro/tracker/time
## Licensing ## Licensing
libuv is licensed under the MIT license. Check the [LICENSE file](LICENSE). libuv is licensed under the MIT license. Check the [LICENSE](LICENSE) and
The documentation is licensed under the CC BY 4.0 license. Check the [LICENSE-docs file](LICENSE-docs). [LICENSE-extra](LICENSE-extra) files.
The documentation is licensed under the CC BY 4.0 license. Check the
[LICENSE-docs file](LICENSE-docs).
## Community ## Community
@ -220,6 +223,15 @@ Make sure that you specify the architecture you wish to build for in the
"ARCHS" flag. You can specify more than one by delimiting with a space "ARCHS" flag. You can specify more than one by delimiting with a space
(e.g. "x86_64 i386"). (e.g. "x86_64 i386").
### Install with vcpkg
```bash
$ git clone https://github.com/microsoft/vcpkg.git
$ ./bootstrap-vcpkg.bat # for powershell
$ ./bootstrap-vcpkg.sh # for bash
$ ./vcpkg install libuv
```
### Running tests ### Running tests
Some tests are timing sensitive. Relaxing test timeouts may be necessary Some tests are timing sensitive. Relaxing test timeouts may be necessary

View File

@ -2,10 +2,10 @@
| System | Support type | Supported versions | Notes | | System | Support type | Supported versions | Notes |
|---|---|---|---| |---|---|---|---|
| GNU/Linux | Tier 1 | Linux >= 2.6.32 with glibc >= 2.12 | | | GNU/Linux | Tier 1 | Linux >= 3.10 with glibc >= 2.17 | |
| macOS | Tier 1 | macOS >= 10.15 | Current and previous macOS release | | macOS | Tier 1 | macOS >= 11 | Currently supported macOS releases |
| Windows | Tier 1 | >= Windows 8 | VS 2015 and later are supported | | Windows | Tier 1 | >= Windows 8 | VS 2015 and later are supported |
| FreeBSD | Tier 1 | >= 10 | | | FreeBSD | Tier 2 | >= 12 | |
| AIX | Tier 2 | >= 6 | Maintainers: @libuv/aix | | AIX | Tier 2 | >= 6 | Maintainers: @libuv/aix |
| IBM i | Tier 2 | >= IBM i 7.2 | Maintainers: @libuv/ibmi | | IBM i | Tier 2 | >= IBM i 7.2 | Maintainers: @libuv/ibmi |
| z/OS | Tier 2 | >= V2R2 | Maintainers: @libuv/zos | | z/OS | Tier 2 | >= V2R2 | Maintainers: @libuv/zos |

View File

@ -17,7 +17,7 @@
set -eu set -eu
cd `dirname "$0"` cd `dirname "$0"`
if [ "${1:-dev}" == "release" ]; then if [ "${1:-dev}" = "release" ]; then
export LIBUV_RELEASE=true export LIBUV_RELEASE=true
else else
export LIBUV_RELEASE=false export LIBUV_RELEASE=false

View File

@ -0,0 +1,17 @@
if(NOT HOST_ARCH)
message(SEND_ERROR "-DHOST_ARCH required to be specified")
endif()
list(APPEND CMAKE_TRY_COMPILE_PLATFORM_VARIABLES
HOST_ARCH
)
SET(CMAKE_SYSTEM_NAME Windows)
set(COMPILER_PREFIX "${HOST_ARCH}-w64-mingw32")
find_program(CMAKE_RC_COMPILER NAMES ${COMPILER_PREFIX}-windres)
find_program(CMAKE_C_COMPILER NAMES ${COMPILER_PREFIX}-gcc)
find_program(CMAKE_CXX_COMPILER NAMES ${COMPILER_PREFIX}-g++)
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)

View File

@ -13,7 +13,7 @@
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
AC_PREREQ(2.57) AC_PREREQ(2.57)
AC_INIT([libuv], [1.44.2], [https://github.com/libuv/libuv/issues]) AC_INIT([libuv], [1.45.0], [https://github.com/libuv/libuv/issues])
AC_CONFIG_MACRO_DIR([m4]) AC_CONFIG_MACRO_DIR([m4])
m4_include([m4/libuv-extra-automake-flags.m4]) m4_include([m4/libuv-extra-automake-flags.m4])
m4_include([m4/as_case.m4]) m4_include([m4/as_case.m4])
@ -61,8 +61,7 @@ AM_CONDITIONAL([ANDROID], [AS_CASE([$host_os],[linux-android*],[true], [false])
AM_CONDITIONAL([CYGWIN], [AS_CASE([$host_os],[cygwin*], [true], [false])]) AM_CONDITIONAL([CYGWIN], [AS_CASE([$host_os],[cygwin*], [true], [false])])
AM_CONDITIONAL([DARWIN], [AS_CASE([$host_os],[darwin*], [true], [false])]) AM_CONDITIONAL([DARWIN], [AS_CASE([$host_os],[darwin*], [true], [false])])
AM_CONDITIONAL([DRAGONFLY],[AS_CASE([$host_os],[dragonfly*], [true], [false])]) AM_CONDITIONAL([DRAGONFLY],[AS_CASE([$host_os],[dragonfly*], [true], [false])])
AM_CONDITIONAL([FREEBSD], [AS_CASE([$host_os],[*freebsd*], [true], [false])]) AM_CONDITIONAL([FREEBSD], [AS_CASE([$host_os],[freebsd*], [true], [false])])
AM_CONDITIONAL([KFREEBSD], [AS_CASE([$host_os],[kfreebsd*], [true], [false])])
AM_CONDITIONAL([HAIKU], [AS_CASE([$host_os],[haiku], [true], [false])]) AM_CONDITIONAL([HAIKU], [AS_CASE([$host_os],[haiku], [true], [false])])
AM_CONDITIONAL([HURD], [AS_CASE([$host_os],[gnu*], [true], [false])]) AM_CONDITIONAL([HURD], [AS_CASE([$host_os],[gnu*], [true], [false])])
AM_CONDITIONAL([LINUX], [AS_CASE([$host_os],[linux*], [true], [false])]) AM_CONDITIONAL([LINUX], [AS_CASE([$host_os],[linux*], [true], [false])])
@ -74,12 +73,12 @@ AM_CONDITIONAL([OS400], [AS_CASE([$host_os],[os400], [true], [false])
AM_CONDITIONAL([SUNOS], [AS_CASE([$host_os],[solaris*], [true], [false])]) AM_CONDITIONAL([SUNOS], [AS_CASE([$host_os],[solaris*], [true], [false])])
AM_CONDITIONAL([WINNT], [AS_CASE([$host_os],[mingw*], [true], [false])]) AM_CONDITIONAL([WINNT], [AS_CASE([$host_os],[mingw*], [true], [false])])
AS_CASE([$host_os],[mingw*], [ AS_CASE([$host_os],[mingw*], [
LIBS="$LIBS -lws2_32 -lpsapi -liphlpapi -lshell32 -luserenv -luser32" LIBS="$LIBS -lws2_32 -lpsapi -liphlpapi -lshell32 -luserenv -luser32 -ldbghelp -lole32 -luuid"
])
AS_CASE([$host_os], [solaris2.10], [
CFLAGS="$CFLAGS -DSUNOS_NO_IFADDRS"
]) ])
AS_CASE([$host_os], [netbsd*], [AC_CHECK_LIB([kvm], [kvm_open])]) AS_CASE([$host_os], [netbsd*], [AC_CHECK_LIB([kvm], [kvm_open])])
AS_CASE([$host_os], [kfreebsd*], [
LIBS="$LIBS -lfreebsd-glue"
])
AS_CASE([$host_os], [haiku], [ AS_CASE([$host_os], [haiku], [
LIBS="$LIBS -lnetwork" LIBS="$LIBS -lnetwork"
]) ])
@ -88,4 +87,5 @@ AC_CONFIG_FILES([Makefile libuv.pc])
AC_CONFIG_LINKS([test/fixtures/empty_file:test/fixtures/empty_file]) AC_CONFIG_LINKS([test/fixtures/empty_file:test/fixtures/empty_file])
AC_CONFIG_LINKS([test/fixtures/load_error.node:test/fixtures/load_error.node]) AC_CONFIG_LINKS([test/fixtures/load_error.node:test/fixtures/load_error.node])
AC_CONFIG_LINKS([test/fixtures/lorem_ipsum.txt:test/fixtures/lorem_ipsum.txt]) AC_CONFIG_LINKS([test/fixtures/lorem_ipsum.txt:test/fixtures/lorem_ipsum.txt])
AC_CONFIG_LINKS([test/fixtures/one_file/one_file:test/fixtures/one_file/one_file])
AC_OUTPUT AC_OUTPUT

View File

@ -1,82 +0,0 @@
examples=\
helloworld\
default-loop\
idle-basic\
uvcat\
uvtee\
onchange\
thread-create\
queue-work\
progress\
tcp-echo-server\
dns\
udp-dhcp\
idle-compute\
ref-timer\
spawn\
detach\
proc-streams\
cgi\
pipe-echo-server\
multi-echo-server\
tty\
tty-gravity\
interfaces\
locks \
signal \
uvstop \
queue-cancel
UV_PATH=$(shell pwd)/../..
UV_LIB=$(UV_PATH)/.libs/libuv.a
CFLAGS=-g -Wall -I$(UV_PATH)/include
LIBS=
uname_S=$(shell uname -s)
ifeq (Darwin, $(uname_S))
CFLAGS+=-framework CoreServices
SHARED_LIB_FLAGS=-bundle -undefined dynamic_lookup -o plugin/libhello.dylib
endif
ifeq (Linux, $(uname_S))
LIBS=-lrt -ldl -lm -pthread -lcurl
SHARED_LIB_FLAGS=-shared -Wl,-soname,libhello.so -o plugin/libhello.so
PLUGIN_EXE_FLAGS=-Wl,-export-dynamic
endif
all: $(examples) plugin/plugin proc-streams/test cgi/tick multi-echo-server/worker uvwget/uvwget
$(examples): % : %/main.c
gcc $(CFLAGS) -o $@/$@ $< $(UV_LIB) $(LIBS)
plugin: plugin/plugin
plugin/plugin: plugin/*.c
gcc $(CFLAGS) $(PLUGIN_EXE_FLAGS) -o plugin/plugin plugin/main.c $(UV_LIB) $(LIBS)
gcc -g -Wall -c -fPIC -o plugin/hello.o plugin/hello.c
gcc $(SHARED_LIB_FLAGS) plugin/hello.o
proc-streams/test: proc-streams/test.c
gcc -g -Wall -o proc-streams/test proc-streams/test.c
cgi/tick: cgi/tick.c
gcc -g -Wall -o cgi/tick cgi/tick.c
multi-echo-server/worker: multi-echo-server/worker.c
gcc $(CFLAGS) -o multi-echo-server/worker multi-echo-server/worker.c $(UV_LIB) $(LIBS)
uvwget: uvwget/uvwget
uvwget/uvwget: uvwget/main.c
gcc $(CFLAGS) `curl-config --cflags --libs` -o uvwget/uvwget uvwget/main.c $(UV_LIB) $(LIBS)
clean:
for dir in $(examples); do cd $$dir; rm -f $$dir; rm -rf $$dir.dSYM; cd ..; done
rm -rf plugin/*.o plugin/libhello.*
rm -rf plugin/plugin plugin/plugin.dSYM
rm -rf proc-streams/test proc-streams/test.dSYM
rm -rf cgi/tick cgi/tick.dSYM
rm -rf multi-echo-server/worker multi-echo-server/worker.dSYM
rm -rf uvwget/uvwget uvwget/uvwget.dSYM
.PHONY: clean all $(examples) plugin uvwget

View File

@ -1,42 +1,27 @@
# primary # primary
Sphinx==3.5.4 sphinx==6.1.3
# dependencies # dependencies
alabaster==0.7.12 alabaster==0.7.13
appdirs==1.4.3 Babel==2.11.0
Babel==2.9.0 certifi==2022.12.7
CacheControl==0.12.6 charset-normalizer==3.0.1
certifi==2019.11.28 docutils==0.19
chardet==3.0.4 idna==3.4
colorama==0.4.3 imagesize==1.4.1
contextlib2==0.6.0 importlib-metadata==6.0.0
distlib==0.3.0 Jinja2==3.1.2
distro==1.4.0 MarkupSafe==2.1.2
docutils==0.16 packaging==23.0
html5lib==1.0.1 Pygments==2.14.0
idna==2.8 pytz==2022.7.1
imagesize==1.2.0 requests==2.28.2
ipaddr==2.2.0 snowballstemmer==2.2.0
Jinja2==2.11.3 sphinxcontrib-applehelp==1.0.3
lockfile==0.12.2
MarkupSafe==1.1.1
msgpack==0.6.2
packaging==20.3
pep517==0.8.2
progress==1.5
Pygments==2.8.1
pyparsing==2.4.6
pytoml==0.1.21
pytz==2021.1
requests==2.22.0
retrying==1.3.3
six==1.14.0
snowballstemmer==2.1.0
sphinxcontrib-applehelp==1.0.2
sphinxcontrib-devhelp==1.0.2 sphinxcontrib-devhelp==1.0.2
sphinxcontrib-htmlhelp==1.0.3 sphinxcontrib-htmlhelp==2.0.0
sphinxcontrib-jsmath==1.0.1 sphinxcontrib-jsmath==1.0.1
sphinxcontrib-qthelp==1.0.3 sphinxcontrib-qthelp==1.0.3
sphinxcontrib-serializinghtml==1.1.4 sphinxcontrib-serializinghtml==1.1.5
urllib3==1.25.8 urllib3==1.26.14
webencodings==0.5.1 zipp==3.11.0

View File

@ -60,16 +60,15 @@ stages of a loop iteration:
:align: center :align: center
#. The loop concept of 'now' is updated. The event loop caches the current time at the start of #. The loop concept of 'now' is initially set.
the event loop tick in order to reduce the number of time-related system calls.
#. Due timers are run if the loop was run with ``UV_RUN_DEFAULT``. All active timers scheduled
for a time before the loop's concept of *now* get their callbacks called.
#. If the loop is *alive* an iteration is started, otherwise the loop will exit immediately. So, #. If the loop is *alive* an iteration is started, otherwise the loop will exit immediately. So,
when is a loop considered to be *alive*? If a loop has active and ref'd handles, active when is a loop considered to be *alive*? If a loop has active and ref'd handles, active
requests or closing handles it's considered to be *alive*. requests or closing handles it's considered to be *alive*.
#. Due timers are run. All active timers scheduled for a time before the loop's concept of *now*
get their callbacks called.
#. Pending callbacks are called. All I/O callbacks are called right after polling for I/O, for the #. Pending callbacks are called. All I/O callbacks are called right after polling for I/O, for the
most part. There are cases, however, in which calling such a callback is deferred for the next most part. There are cases, however, in which calling such a callback is deferred for the next
loop iteration. If the previous iteration deferred any I/O callback it will be run at this point. loop iteration. If the previous iteration deferred any I/O callback it will be run at this point.
@ -101,9 +100,11 @@ stages of a loop iteration:
#. Close callbacks are called. If a handle was closed by calling :c:func:`uv_close` it will #. Close callbacks are called. If a handle was closed by calling :c:func:`uv_close` it will
get the close callback called. get the close callback called.
#. Special case in case the loop was run with ``UV_RUN_ONCE``, as it implies forward progress. #. The loop concept of 'now' is updated.
It's possible that no I/O callbacks were fired after blocking for I/O, but some time has passed
so there might be timers which are due, those timers get their callbacks called. #. Due timers are run. Note that 'now' is not updated again until the next loop iteration.
So if a timer became due while other timers were being processed, it won't be run until
the following event loop iteration.
#. Iteration ends. If the loop was run with ``UV_RUN_NOWAIT`` or ``UV_RUN_ONCE`` modes the #. Iteration ends. If the loop was run with ``UV_RUN_NOWAIT`` or ``UV_RUN_ONCE`` modes the
iteration ends and :c:func:`uv_run` will return. If the loop was run with ``UV_RUN_DEFAULT`` iteration ends and :c:func:`uv_run` will return. If the loop was run with ``UV_RUN_DEFAULT``
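
A minimal sketch (not part of this diff) of how the reordered stages show up from user code: a repeating timer reads the loop's cached notion of 'now' via `uv_now()`, which is refreshed once per iteration rather than between callbacks. The 100 ms interval and three-tick cutoff are arbitrary illustration choices.

```c
#include <stdio.h>
#include <uv.h>

static void on_timer(uv_timer_t* handle) {
  static int ticks;
  /* uv_now() returns the loop's cached timestamp; it only advances once per
   * loop iteration, not between individual timer callbacks. */
  printf("tick at %llu ms\n", (unsigned long long) uv_now(handle->loop));
  if (++ticks == 3)
    uv_close((uv_handle_t*) handle, NULL);  /* closing also stops the timer */
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_timer_t timer;

  uv_timer_init(loop, &timer);
  uv_timer_start(&timer, on_timer, 100, 100);  /* 100 ms timeout and repeat */
  uv_run(loop, UV_RUN_DEFAULT);                /* returns once the timer is closed */
  return uv_loop_close(loop);
}
```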

View File

@ -12,6 +12,12 @@ otherwise it will be performed asynchronously.
All file operations are run on the threadpool. See :ref:`threadpool` for information All file operations are run on the threadpool. See :ref:`threadpool` for information
on the threadpool size. on the threadpool size.
Starting with libuv v1.45.0, some file operations on Linux are handed off to
`io_uring <https://en.wikipedia.org/wiki/Io_uring>`_ when possible. Apart from
a (sometimes significant) increase in throughput there should be no change in
observable behavior. Libuv reverts to using its threadpool when the necessary
kernel features are unavailable or unsuitable.
.. note:: .. note::
On Windows `uv_fs_*` functions use utf-8 encoding. On Windows `uv_fs_*` functions use utf-8 encoding.
@ -24,7 +30,8 @@ Data types
.. c:type:: uv_timespec_t .. c:type:: uv_timespec_t
Portable equivalent of ``struct timespec``. Y2K38-unsafe data type for storing times with nanosecond resolution.
Will be replaced with :c:type:`uv_timespec64_t` in libuv v2.0.
:: ::
@ -160,6 +167,10 @@ Data types
size_t nentries; size_t nentries;
} uv_dir_t; } uv_dir_t;
.. c:type:: void (*uv_fs_cb)(uv_fs_t* req)
Callback called when a request is completed asynchronously.
Public members Public members
^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^
@ -218,7 +229,8 @@ API
.. c:function:: int uv_fs_read(uv_loop_t* loop, uv_fs_t* req, uv_file file, const uv_buf_t bufs[], unsigned int nbufs, int64_t offset, uv_fs_cb cb) .. c:function:: int uv_fs_read(uv_loop_t* loop, uv_fs_t* req, uv_file file, const uv_buf_t bufs[], unsigned int nbufs, int64_t offset, uv_fs_cb cb)
Equivalent to :man:`preadv(2)`. Equivalent to :man:`preadv(2)`. If the `offset` argument is `-1`, then
the current file offset is used and updated.
.. warning:: .. warning::
On Windows, under non-MSVC environments (e.g. when GCC or Clang is used On Windows, under non-MSVC environments (e.g. when GCC or Clang is used
@ -231,7 +243,8 @@ API
.. c:function:: int uv_fs_write(uv_loop_t* loop, uv_fs_t* req, uv_file file, const uv_buf_t bufs[], unsigned int nbufs, int64_t offset, uv_fs_cb cb) .. c:function:: int uv_fs_write(uv_loop_t* loop, uv_fs_t* req, uv_file file, const uv_buf_t bufs[], unsigned int nbufs, int64_t offset, uv_fs_cb cb)
Equivalent to :man:`pwritev(2)`. Equivalent to :man:`pwritev(2)`. If the `offset` argument is `-1`, then
the current file offset is used and updated.
.. warning:: .. warning::
On Windows, under non-MSVC environments (e.g. when GCC or Clang is used On Windows, under non-MSVC environments (e.g. when GCC or Clang is used
@ -463,10 +476,6 @@ API
The background story and some more details on these issues can be checked The background story and some more details on these issues can be checked
`here <https://github.com/nodejs/node/issues/7726>`_. `here <https://github.com/nodejs/node/issues/7726>`_.
.. note::
This function is not implemented on Windows XP and Windows Server 2003.
On these systems, UV_ENOSYS is returned.
.. versionadded:: 1.8.0 .. versionadded:: 1.8.0
.. c:function:: int uv_fs_chown(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_uid_t uid, uv_gid_t gid, uv_fs_cb cb) .. c:function:: int uv_fs_chown(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_uid_t uid, uv_gid_t gid, uv_fs_cb cb)

View File

@ -164,7 +164,7 @@ IPv6 stack only
IPv6 sockets can be used for both IPv4 and IPv6 communication. If you want to IPv6 sockets can be used for both IPv4 and IPv6 communication. If you want to
restrict the socket to IPv6 only, pass the ``UV_UDP_IPV6ONLY`` flag to restrict the socket to IPv6 only, pass the ``UV_UDP_IPV6ONLY`` flag to
``uv_udp_bind`` [#]_. ``uv_udp_bind``.
Multicast Multicast
~~~~~~~~~ ~~~~~~~~~
@ -250,7 +250,6 @@ times, with each address being reported once.
---- ----
.. [#] https://beej.us/guide/bgnet/html/#broadcast-packetshello-world .. [#] https://beej.us/guide/bgnet/html/#broadcast-packetshello-world
.. [#] on Windows only supported on Windows Vista and later.
.. [#] https://www.tldp.org/HOWTO/Multicast-HOWTO-6.html#ss6.1 .. [#] https://www.tldp.org/HOWTO/Multicast-HOWTO-6.html#ss6.1
.. [#] libuv use the system ``getaddrinfo`` in the libuv threadpool. libuv .. [#] libuv use the system ``getaddrinfo`` in the libuv threadpool. libuv
v0.8.0 and earlier also included c-ares_ as an alternative, but this has been v0.8.0 and earlier also included c-ares_ as an alternative, but this has been

View File

@ -235,7 +235,7 @@ Our downloader is to be invoked as::
$ ./uvwget [url1] [url2] ... $ ./uvwget [url1] [url2] ...
So we add each argument as an URL So we add each argument as a URL
.. rubric:: uvwget/main.c - Adding urls .. rubric:: uvwget/main.c - Adding urls
.. literalinclude:: ../../code/uvwget/main.c .. literalinclude:: ../../code/uvwget/main.c

View File

@ -153,6 +153,9 @@ API
In-progress requests, like uv_connect_t or uv_write_t, are cancelled and In-progress requests, like uv_connect_t or uv_write_t, are cancelled and
have their callbacks called asynchronously with status=UV_ECANCELED. have their callbacks called asynchronously with status=UV_ECANCELED.
`close_cb` can be `NULL` in cases where no cleanup or deallocation is
necessary.
.. c:function:: void uv_ref(uv_handle_t* handle) .. c:function:: void uv_ref(uv_handle_t* handle)
Reference the given handle. References are idempotent, that is, if a handle Reference the given handle. References are idempotent, that is, if a handle
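
A short sketch (not part of the diff) of the newly documented `close_cb == NULL` allowance: a stack-allocated handle needs no cleanup, while a heap-allocated one still wants a callback that frees it.

```c
#include <stdlib.h>
#include <uv.h>

static void on_close(uv_handle_t* handle) {
  free(handle);  /* heap-allocated handle: release it once the close completes */
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();

  /* Stack-allocated handle: nothing to free, so close_cb can be NULL. */
  uv_idle_t idle;
  uv_idle_init(loop, &idle);
  uv_close((uv_handle_t*) &idle, NULL);

  /* Heap-allocated handle: pass a callback that frees the memory. */
  uv_timer_t* timer = malloc(sizeof(*timer));
  if (timer == NULL)
    return 1;
  uv_timer_init(loop, timer);
  uv_close((uv_handle_t*) timer, on_close);

  uv_run(loop, UV_RUN_DEFAULT);  /* lets the close callbacks run */
  return uv_loop_close(loop);
}
```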

View File

@ -4,8 +4,46 @@
Metrics operations Metrics operations
====================== ======================
libuv provides a metrics API to track the amount of time the event loop has libuv provides a metrics API to track various internal operations of the event
spent idle in the kernel's event provider. loop.
Data types
----------
.. c:type:: uv_metrics_t
The struct that contains event loop metrics. It is recommended to retrieve
these metrics in a :c:type:`uv_prepare_cb` in order to make sure there are
no inconsistencies with the metrics counters.
::
typedef struct {
uint64_t loop_count;
uint64_t events;
uint64_t events_waiting;
/* private */
uint64_t* reserved[13];
} uv_metrics_t;
Public members
^^^^^^^^^^^^^^
.. c:member:: uint64_t uv_metrics_t.loop_count
Number of event loop iterations.
.. c:member:: uint64_t uv_metrics_t.events
Number of events that have been processed by the event handler.
.. c:member:: uint64_t uv_metrics_t.events_waiting
Number of events that were waiting to be processed when the event provider
was called.
API API
--- ---
@ -25,3 +63,9 @@ API
:c:type:`UV_METRICS_IDLE_TIME`. :c:type:`UV_METRICS_IDLE_TIME`.
.. versionadded:: 1.39.0 .. versionadded:: 1.39.0
.. c:function:: int uv_metrics_info(uv_loop_t* loop, uv_metrics_t* metrics)
Copy the current set of event loop metrics to the ``metrics`` pointer.
.. versionadded:: 1.45.0
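
A minimal sketch of sampling the new counters from a prepare handle, per the recommendation above (the field names match the `uv_metrics_s` declaration added to `include/uv.h` later in this diff):

```c
#include <stdio.h>
#include <uv.h>

static void on_prepare(uv_prepare_t* handle) {
  uv_metrics_t m;
  /* Sampling from a prepare callback keeps the counters consistent with the
   * current loop iteration. */
  if (uv_metrics_info(handle->loop, &m) == 0)
    printf("iterations=%llu events=%llu waiting=%llu\n",
           (unsigned long long) m.loop_count,
           (unsigned long long) m.events,
           (unsigned long long) m.events_waiting);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_prepare_t prepare;

  uv_prepare_init(loop, &prepare);
  uv_prepare_start(&prepare, on_prepare);
  uv_run(loop, UV_RUN_NOWAIT);   /* a single non-blocking iteration for the sketch */
  uv_close((uv_handle_t*) &prepare, NULL);
  uv_run(loop, UV_RUN_DEFAULT);  /* drain the close callback */
  return uv_loop_close(loop);
}
```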

View File

@ -73,7 +73,8 @@ Data types
.. c:type:: uv_timeval_t .. c:type:: uv_timeval_t
Data type for storing times. Y2K38-unsafe data type for storing times with microsecond resolution.
Will be replaced with :c:type:`uv_timeval64_t` in libuv v2.0.
:: ::
@ -84,7 +85,7 @@ Data types
.. c:type:: uv_timeval64_t .. c:type:: uv_timeval64_t
Alternative data type for storing times. Y2K38-safe data type for storing times with microsecond resolution.
:: ::
@ -93,6 +94,28 @@ Data types
int32_t tv_usec; int32_t tv_usec;
} uv_timeval64_t; } uv_timeval64_t;
.. c:type:: uv_timespec64_t
Y2K38-safe data type for storing times with nanosecond resolution.
::
typedef struct {
int64_t tv_sec;
int32_t tv_nsec;
} uv_timespec64_t;
.. c:enum:: uv_clock_id
Clock source for :c:func:`uv_clock_gettime`.
::
typedef enum {
UV_CLOCK_MONOTONIC,
UV_CLOCK_REALTIME
} uv_clock_id;
.. c:type:: uv_rusage_t .. c:type:: uv_rusage_t
Data type for resource usage results. Data type for resource usage results.
@ -119,7 +142,10 @@ Data types
} uv_rusage_t; } uv_rusage_t;
Members marked with `(X)` are unsupported on Windows. Members marked with `(X)` are unsupported on Windows.
See :man:`getrusage(2)` for supported fields on Unix See :man:`getrusage(2)` for supported fields on UNIX-like platforms.
The maximum resident set size is reported in kilobytes, the unit most
platforms use natively.
.. c:type:: uv_cpu_info_t .. c:type:: uv_cpu_info_t
@ -211,7 +237,7 @@ API
type of the stdio streams. type of the stdio streams.
For :man:`isatty(3)` equivalent functionality use this function and test For :man:`isatty(3)` equivalent functionality use this function and test
for ``UV_TTY``. for `UV_TTY`.
.. c:function:: int uv_replace_allocator(uv_malloc_func malloc_func, uv_realloc_func realloc_func, uv_calloc_func calloc_func, uv_free_func free_func) .. c:function:: int uv_replace_allocator(uv_malloc_func malloc_func, uv_realloc_func realloc_func, uv_calloc_func calloc_func, uv_free_func free_func)
@ -225,8 +251,8 @@ API
after all resources have been freed and thus libuv doesn't reference after all resources have been freed and thus libuv doesn't reference
any allocated memory chunk. any allocated memory chunk.
On success, it returns 0, if any of the function pointers is NULL it On success, it returns 0, if any of the function pointers is `NULL` it
returns UV_EINVAL. returns `UV_EINVAL`.
.. warning:: There is no protection against changing the allocator multiple .. warning:: There is no protection against changing the allocator multiple
times. If the user changes it they are responsible for making times. If the user changes it they are responsible for making
@ -362,6 +388,13 @@ API
Frees the `cpu_infos` array previously allocated with :c:func:`uv_cpu_info`. Frees the `cpu_infos` array previously allocated with :c:func:`uv_cpu_info`.
.. c:function:: int uv_cpumask_size(void)
Returns the maximum size of the mask used for process/thread affinities,
or `UV_ENOTSUP` if affinities are not supported on the current platform.
.. versionadded:: 1.45.0
.. c:function:: int uv_interface_addresses(uv_interface_address_t** addresses, int* count) .. c:function:: int uv_interface_addresses(uv_interface_address_t** addresses, int* count)
Gets address information about the network interfaces on the system. An Gets address information about the network interfaces on the system. An
@ -541,18 +574,21 @@ API
.. c:function:: uint64_t uv_get_free_memory(void) .. c:function:: uint64_t uv_get_free_memory(void)
Gets the amount of free memory available in the system, as reported by the kernel (in bytes). Gets the amount of free memory available in the system, as reported by
the kernel (in bytes). Returns 0 when unknown.
.. c:function:: uint64_t uv_get_total_memory(void) .. c:function:: uint64_t uv_get_total_memory(void)
Gets the total amount of physical memory in the system (in bytes). Gets the total amount of physical memory in the system (in bytes).
Returns 0 when unknown.
.. c:function:: uint64_t uv_get_constrained_memory(void) .. c:function:: uint64_t uv_get_constrained_memory(void)
Gets the amount of memory available to the process (in bytes) based on Gets the total amount of memory available to the process (in bytes) based on
limits imposed by the OS. If there is no such constraint, or the constraint limits imposed by the OS. If there is no such constraint, or the constraint
is unknown, `0` is returned. Note that it is not unusual for this value to is unknown, `0` is returned. If there is a constraining mechanism, but there
be less than or greater than :c:func:`uv_get_total_memory`. is no constraint set, `UINT64_MAX` is returned. Note that it is not unusual
for this value to be less than or greater than :c:func:`uv_get_total_memory`.
.. note:: .. note::
This function currently only returns a non-zero value on Linux, based This function currently only returns a non-zero value on Linux, based
@ -560,9 +596,23 @@ API
.. versionadded:: 1.29.0 .. versionadded:: 1.29.0
.. c:function:: uint64_t uv_get_available_memory(void)
Gets the amount of free memory that is still available to the process (in bytes).
This differs from :c:func:`uv_get_free_memory` in that it takes into account any
limits imposed by the OS. If there is no such constraint, or the constraint
is unknown, the amount returned will be identical to :c:func:`uv_get_free_memory`.
.. note::
This function currently only returns a value that is different from
what :c:func:`uv_get_free_memory` reports on Linux, based
on cgroups if it is present.
.. versionadded:: 1.45.0
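
The four memory queries side by side, as a sketch (all values are in bytes; the sentinel values are the ones documented above):

```c
#include <inttypes.h>
#include <stdio.h>
#include <uv.h>

int main(void) {
  /* 0 means "unknown"; uv_get_constrained_memory() additionally returns
   * UINT64_MAX when a constraining mechanism exists but no limit is set. */
  printf("total:       %" PRIu64 " bytes\n", uv_get_total_memory());
  printf("free:        %" PRIu64 " bytes\n", uv_get_free_memory());
  printf("constrained: %" PRIu64 " bytes\n", uv_get_constrained_memory());
  printf("available:   %" PRIu64 " bytes\n", uv_get_available_memory());
  return 0;
}
```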
.. c:function:: uint64_t uv_hrtime(void) .. c:function:: uint64_t uv_hrtime(void)
Returns the current high-resolution real time. This is expressed in Returns the current high-resolution timestamp. This is expressed in
nanoseconds. It is relative to an arbitrary time in the past. It is not nanoseconds. It is relative to an arbitrary time in the past. It is not
related to the time of day and therefore not subject to clock drift. The related to the time of day and therefore not subject to clock drift. The
primary use is for measuring performance between intervals. primary use is for measuring performance between intervals.
@ -571,6 +621,19 @@ API
Not every platform can support nanosecond resolution; however, this value will always Not every platform can support nanosecond resolution; however, this value will always
be in nanoseconds. be in nanoseconds.
.. c:function:: int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts)
Obtain the current system time from a high-resolution real-time or monotonic
clock source.
The real-time clock counts from the UNIX epoch (1970-01-01) and is subject
to time adjustments; it can jump back in time.
The monotonic clock counts from an arbitrary point in the past and never
jumps back in time.
.. versionadded:: 1.45.0
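
A sketch of the new clock API (names taken from the `uv_clock_id` and `uv_timespec64_t` declarations added in `include/uv.h` later in this diff):

```c
#include <inttypes.h>
#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_timespec64_t wall, mono;

  if (uv_clock_gettime(UV_CLOCK_REALTIME, &wall) == 0)
    printf("realtime:  %" PRId64 ".%09" PRId32 " s\n", wall.tv_sec, wall.tv_nsec);

  /* The monotonic clock never jumps backwards, so it is the right choice
   * for measuring elapsed time. */
  if (uv_clock_gettime(UV_CLOCK_MONOTONIC, &mono) == 0)
    printf("monotonic: %" PRId64 ".%09" PRId32 " s\n", mono.tv_sec, mono.tv_nsec);

  return 0;
}
```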
.. c:function:: void uv_print_all_handles(uv_loop_t* loop, FILE* stream) .. c:function:: void uv_print_all_handles(uv_loop_t* loop, FILE* stream)
Prints all handles associated with the given `loop` to the given `stream`. Prints all handles associated with the given `loop` to the given `stream`.

View File

@ -101,7 +101,9 @@ API
with one of the `UV_E*` error codes (see :ref:`errors`). The user should with one of the `UV_E*` error codes (see :ref:`errors`). The user should
not close the socket while the handle is active. If the user does that not close the socket while the handle is active. If the user does that
anyway, the callback *may* be called reporting an error status, but this is anyway, the callback *may* be called reporting an error status, but this is
**not** guaranteed. **not** guaranteed. If `status == UV_EBADF` polling is discontinued for the
file handle and no further events will be reported. The user should
then call :c:func:`uv_close` on the handle.
.. note:: .. note::
Calling :c:func:`uv_poll_start` on a handle that is already active is Calling :c:func:`uv_poll_start` on a handle that is already active is
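
A sketch of a poll callback that follows the clarified error contract (assumption: fd 0 is something pollable such as a terminal or pipe; the setup in `main` is purely illustrative):

```c
#include <stdio.h>
#include <uv.h>

static void on_poll(uv_poll_t* handle, int status, int events) {
  if (status == UV_EBADF) {
    /* Polling has been discontinued for this fd; close the handle. */
    fprintf(stderr, "fd went away: %s\n", uv_strerror(status));
    uv_close((uv_handle_t*) handle, NULL);
    return;
  }
  if (status < 0) {
    fprintf(stderr, "poll error: %s\n", uv_strerror(status));
    return;
  }
  if (events & UV_READABLE) {
    /* read from the underlying fd here */
  }
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_poll_t poll_handle;

  uv_poll_init(loop, &poll_handle, 0);  /* fd 0, just for the sketch */
  uv_poll_start(&poll_handle, UV_READABLE, on_poll);
  uv_run(loop, UV_RUN_NOWAIT);          /* one non-blocking iteration */
  if (!uv_is_closing((uv_handle_t*) &poll_handle))
    uv_close((uv_handle_t*) &poll_handle, NULL);
  uv_run(loop, UV_RUN_DEFAULT);
  return uv_loop_close(loop);
}
```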

Binary file not shown.

Before: 79 KiB, After: 64 KiB

View File

@ -88,6 +88,46 @@ Threads
.. versionadded:: 1.26.0 .. versionadded:: 1.26.0
.. c:function:: int uv_thread_setaffinity(uv_thread_t* tid, char* cpumask, char* oldmask, size_t mask_size)
Sets the specified thread's affinity to cpumask, which is specified in
bytes. Optionally returns the previous affinity setting in oldmask.
On Unix, uses :man:`pthread_getaffinity_np(3)` to get the affinity setting
and maps the cpu_set_t to bytes in oldmask. Then maps the bytes in cpumask
to a cpu_set_t and uses :man:`pthread_setaffinity_np(3)`. On Windows, maps
the bytes in cpumask to a bitmask and uses SetThreadAffinityMask() which
returns the previous affinity setting.
The mask_size specifies the number of entries (bytes) in cpumask / oldmask,
and must be greater-than-or-equal-to :c:func:`uv_cpumask_size`.
.. note::
Thread affinity setting is not atomic on Windows. Unsupported on macOS.
.. versionadded:: 1.45.0
.. c:function:: int uv_thread_getaffinity(uv_thread_t* tid, char* cpumask, size_t mask_size)
Gets the specified thread's affinity setting. On Unix, this maps the
cpu_set_t returned by :man:`pthread_getaffinity_np(3)` to bytes in cpumask.
The mask_size specifies the number of entries (bytes) in cpumask,
and must be greater-than-or-equal-to :c:func:`uv_cpumask_size`.
.. note::
Thread affinity getting is not atomic on Windows. Unsupported on macOS.
.. versionadded:: 1.45.0
.. c:function:: int uv_thread_getcpu(void)
Gets the CPU number on which the calling thread is running.
.. note::
Currently only implemented on Windows, Linux and FreeBSD.
.. versionadded:: 1.45.0
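
A sketch tying the three new thread APIs together (assumptions: the platform supports affinity, so not macOS, and the short sleep only keeps the worker alive while the mask is applied):

```c
#include <stdio.h>
#include <stdlib.h>
#include <uv.h>

static void worker(void* arg) {
  (void) arg;
  uv_sleep(100);  /* keep the thread around while main sets its affinity */
  printf("worker running on CPU %d\n", uv_thread_getcpu());
}

int main(void) {
  int size = uv_cpumask_size();
  if (size < 0) {  /* UV_ENOTSUP where affinity is unsupported (e.g. macOS) */
    printf("affinity not supported: %s\n", uv_strerror(size));
    return 0;
  }

  uv_thread_t tid;
  if (uv_thread_create(&tid, worker, NULL) != 0)
    return 1;

  /* One byte per CPU; pin the worker to CPU 0 and keep the previous mask. */
  char* cpumask = calloc(size, 1);
  char* oldmask = calloc(size, 1);
  cpumask[0] = 1;
  if (uv_thread_setaffinity(&tid, cpumask, oldmask, size) == 0 &&
      uv_thread_getaffinity(&tid, cpumask, size) == 0)
    printf("pinned to CPU 0: %d\n", cpumask[0]);

  uv_thread_join(&tid);
  free(cpumask);
  free(oldmask);
  return 0;
}
```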
.. c:function:: uv_thread_t uv_thread_self(void) .. c:function:: uv_thread_t uv_thread_self(void)
.. c:function:: int uv_thread_join(uv_thread_t *tid) .. c:function:: int uv_thread_join(uv_thread_t *tid)
.. c:function:: int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) .. c:function:: int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2)

View File

@ -14,6 +14,9 @@ is 1024).
.. versionchanged:: 1.30.0 the maximum UV_THREADPOOL_SIZE allowed was increased from 128 to 1024. .. versionchanged:: 1.30.0 the maximum UV_THREADPOOL_SIZE allowed was increased from 128 to 1024.
.. versionchanged:: 1.45.0 threads now have an 8 MB stack instead of the
(sometimes too low) platform default.
The threadpool is global and shared across all event loops. When a particular The threadpool is global and shared across all event loops. When a particular
function makes use of the threadpool (i.e. when using :c:func:`uv_queue_work`) function makes use of the threadpool (i.e. when using :c:func:`uv_queue_work`)
libuv preallocates and initializes the maximum number of threads allowed by libuv preallocates and initializes the maximum number of threads allowed by
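
A sketch of the threadpool in use: the work callback runs on one of the pool threads (which now get an 8 MB stack), the after-work callback runs back on the loop thread, and since the pool is created lazily on first use, `UV_THREADPOOL_SIZE` has to be set in the environment before that first use.

```c
#include <stdio.h>
#include <uv.h>

/* Runs on a threadpool thread (now with an 8 MB stack). */
static void do_work(uv_work_t* req) {
  (void) req;
  printf("working on the threadpool\n");
}

/* Runs back on the event loop thread once the work is done. */
static void after_work(uv_work_t* req, int status) {
  (void) req;
  printf("done, status=%d\n", status);
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_work_t req;

  uv_queue_work(loop, &req, do_work, after_work);
  uv_run(loop, UV_RUN_DEFAULT);
  return uv_loop_close(loop);
}
```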

View File

@ -56,7 +56,7 @@ Data types
/* /*
* Indicates if IP_RECVERR/IPV6_RECVERR will be set when binding the handle. * Indicates if IP_RECVERR/IPV6_RECVERR will be set when binding the handle.
* This sets IP_RECVERR for IPv4 and IPV6_RECVERR for IPv6 UDP sockets on * This sets IP_RECVERR for IPv4 and IPV6_RECVERR for IPv6 UDP sockets on
* Linux. This stops the Linux kernel from supressing some ICMP error messages * Linux. This stops the Linux kernel from suppressing some ICMP error messages
* and enables full ICMP error reporting for faster failover. * and enables full ICMP error reporting for faster failover.
* This flag is no-op on platforms other than Linux. * This flag is no-op on platforms other than Linux.
*/ */

View File

@ -31,6 +31,7 @@ extern "C" {
#error "Define either BUILDING_UV_SHARED or USING_UV_SHARED, not both." #error "Define either BUILDING_UV_SHARED or USING_UV_SHARED, not both."
#endif #endif
#ifndef UV_EXTERN
#ifdef _WIN32 #ifdef _WIN32
/* Windows - set up dll import/export decorators. */ /* Windows - set up dll import/export decorators. */
# if defined(BUILDING_UV_SHARED) # if defined(BUILDING_UV_SHARED)
@ -50,17 +51,13 @@ extern "C" {
#else #else
# define UV_EXTERN /* nothing */ # define UV_EXTERN /* nothing */
#endif #endif
#endif /* UV_EXTERN */
#include "uv/errno.h" #include "uv/errno.h"
#include "uv/version.h" #include "uv/version.h"
#include <stddef.h> #include <stddef.h>
#include <stdio.h> #include <stdio.h>
#include <stdint.h>
#if defined(_MSC_VER) && _MSC_VER < 1600
# include "uv/stdint-msvc2008.h"
#else
# include <stdint.h>
#endif
#if defined(_WIN32) #if defined(_WIN32)
# include "uv/win.h" # include "uv/win.h"
@ -152,6 +149,7 @@ extern "C" {
XX(EFTYPE, "inappropriate file type or format") \ XX(EFTYPE, "inappropriate file type or format") \
XX(EILSEQ, "illegal byte sequence") \ XX(EILSEQ, "illegal byte sequence") \
XX(ESOCKTNOSUPPORT, "socket type not supported") \ XX(ESOCKTNOSUPPORT, "socket type not supported") \
XX(ENODATA, "no data available") \
#define UV_HANDLE_TYPE_MAP(XX) \ #define UV_HANDLE_TYPE_MAP(XX) \
XX(ASYNC, async) \ XX(ASYNC, async) \
@ -247,9 +245,12 @@ typedef struct uv_cpu_info_s uv_cpu_info_t;
typedef struct uv_interface_address_s uv_interface_address_t; typedef struct uv_interface_address_s uv_interface_address_t;
typedef struct uv_dirent_s uv_dirent_t; typedef struct uv_dirent_s uv_dirent_t;
typedef struct uv_passwd_s uv_passwd_t; typedef struct uv_passwd_s uv_passwd_t;
typedef struct uv_group_s uv_group_t;
typedef struct uv_utsname_s uv_utsname_t; typedef struct uv_utsname_s uv_utsname_t;
typedef struct uv_statfs_s uv_statfs_t; typedef struct uv_statfs_s uv_statfs_t;
typedef struct uv_metrics_s uv_metrics_t;
typedef enum { typedef enum {
UV_LOOP_BLOCK_SIGNAL = 0, UV_LOOP_BLOCK_SIGNAL = 0,
UV_METRICS_IDLE_TIME UV_METRICS_IDLE_TIME
@ -344,11 +345,32 @@ typedef void (*uv_random_cb)(uv_random_t* req,
void* buf, void* buf,
size_t buflen); size_t buflen);
typedef enum {
UV_CLOCK_MONOTONIC,
UV_CLOCK_REALTIME
} uv_clock_id;
/* XXX(bnoordhuis) not 2038-proof, https://github.com/libuv/libuv/issues/3864 */
typedef struct { typedef struct {
long tv_sec; long tv_sec;
long tv_nsec; long tv_nsec;
} uv_timespec_t; } uv_timespec_t;
typedef struct {
int64_t tv_sec;
int32_t tv_nsec;
} uv_timespec64_t;
/* XXX(bnoordhuis) not 2038-proof, https://github.com/libuv/libuv/issues/3864 */
typedef struct {
long tv_sec;
long tv_usec;
} uv_timeval_t;
typedef struct {
int64_t tv_sec;
int32_t tv_usec;
} uv_timeval64_t;
typedef struct { typedef struct {
uint64_t st_dev; uint64_t st_dev;
@ -1139,6 +1161,12 @@ struct uv_passwd_s {
char* homedir; char* homedir;
}; };
struct uv_group_s {
char* groupname;
unsigned long gid;
char** members;
};
struct uv_utsname_s { struct uv_utsname_s {
char sysname[256]; char sysname[256];
char release[256]; char release[256];
@ -1184,16 +1212,6 @@ UV_EXTERN int uv_uptime(double* uptime);
UV_EXTERN uv_os_fd_t uv_get_osfhandle(int fd); UV_EXTERN uv_os_fd_t uv_get_osfhandle(int fd);
UV_EXTERN int uv_open_osfhandle(uv_os_fd_t os_fd); UV_EXTERN int uv_open_osfhandle(uv_os_fd_t os_fd);
typedef struct {
long tv_sec;
long tv_usec;
} uv_timeval_t;
typedef struct {
int64_t tv_sec;
int32_t tv_usec;
} uv_timeval64_t;
typedef struct { typedef struct {
uv_timeval_t ru_utime; /* user CPU time used */ uv_timeval_t ru_utime; /* user CPU time used */
uv_timeval_t ru_stime; /* system CPU time used */ uv_timeval_t ru_stime; /* system CPU time used */
@ -1219,6 +1237,9 @@ UV_EXTERN int uv_os_homedir(char* buffer, size_t* size);
UV_EXTERN int uv_os_tmpdir(char* buffer, size_t* size); UV_EXTERN int uv_os_tmpdir(char* buffer, size_t* size);
UV_EXTERN int uv_os_get_passwd(uv_passwd_t* pwd); UV_EXTERN int uv_os_get_passwd(uv_passwd_t* pwd);
UV_EXTERN void uv_os_free_passwd(uv_passwd_t* pwd); UV_EXTERN void uv_os_free_passwd(uv_passwd_t* pwd);
UV_EXTERN int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid);
UV_EXTERN int uv_os_get_group(uv_group_t* grp, uv_uid_t gid);
UV_EXTERN void uv_os_free_group(uv_group_t* grp);
UV_EXTERN uv_pid_t uv_os_getpid(void); UV_EXTERN uv_pid_t uv_os_getpid(void);
UV_EXTERN uv_pid_t uv_os_getppid(void); UV_EXTERN uv_pid_t uv_os_getppid(void);
@ -1245,6 +1266,7 @@ UV_EXTERN int uv_os_setpriority(uv_pid_t pid, int priority);
UV_EXTERN unsigned int uv_available_parallelism(void); UV_EXTERN unsigned int uv_available_parallelism(void);
UV_EXTERN int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count); UV_EXTERN int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count);
UV_EXTERN void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count); UV_EXTERN void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count);
UV_EXTERN int uv_cpumask_size(void);
UV_EXTERN int uv_interface_addresses(uv_interface_address_t** addresses, UV_EXTERN int uv_interface_addresses(uv_interface_address_t** addresses,
int* count); int* count);
@ -1277,6 +1299,15 @@ UV_EXTERN int uv_os_gethostname(char* buffer, size_t* size);
UV_EXTERN int uv_os_uname(uv_utsname_t* buffer); UV_EXTERN int uv_os_uname(uv_utsname_t* buffer);
struct uv_metrics_s {
uint64_t loop_count;
uint64_t events;
uint64_t events_waiting;
/* private */
uint64_t* reserved[13];
};
UV_EXTERN int uv_metrics_info(uv_loop_t* loop, uv_metrics_t* metrics);
UV_EXTERN uint64_t uv_metrics_idle_time(uv_loop_t* loop); UV_EXTERN uint64_t uv_metrics_idle_time(uv_loop_t* loop);
typedef enum { typedef enum {
@ -1710,7 +1741,9 @@ UV_EXTERN int uv_chdir(const char* dir);
UV_EXTERN uint64_t uv_get_free_memory(void); UV_EXTERN uint64_t uv_get_free_memory(void);
UV_EXTERN uint64_t uv_get_total_memory(void); UV_EXTERN uint64_t uv_get_total_memory(void);
UV_EXTERN uint64_t uv_get_constrained_memory(void); UV_EXTERN uint64_t uv_get_constrained_memory(void);
UV_EXTERN uint64_t uv_get_available_memory(void);
UV_EXTERN int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts);
UV_EXTERN uint64_t uv_hrtime(void); UV_EXTERN uint64_t uv_hrtime(void);
UV_EXTERN void uv_sleep(unsigned int msec); UV_EXTERN void uv_sleep(unsigned int msec);
@ -1787,6 +1820,14 @@ UV_EXTERN int uv_thread_create_ex(uv_thread_t* tid,
const uv_thread_options_t* params, const uv_thread_options_t* params,
uv_thread_cb entry, uv_thread_cb entry,
void* arg); void* arg);
UV_EXTERN int uv_thread_setaffinity(uv_thread_t* tid,
char* cpumask,
char* oldmask,
size_t mask_size);
UV_EXTERN int uv_thread_getaffinity(uv_thread_t* tid,
char* cpumask,
size_t mask_size);
UV_EXTERN int uv_thread_getcpu(void);
UV_EXTERN uv_thread_t uv_thread_self(void); UV_EXTERN uv_thread_t uv_thread_self(void);
UV_EXTERN int uv_thread_join(uv_thread_t *tid); UV_EXTERN int uv_thread_join(uv_thread_t *tid);
UV_EXTERN int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2); UV_EXTERN int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2);

View File

@ -413,7 +413,6 @@
#elif defined(__APPLE__) || \ #elif defined(__APPLE__) || \
defined(__DragonFly__) || \ defined(__DragonFly__) || \
defined(__FreeBSD__) || \ defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__NetBSD__) || \ defined(__NetBSD__) || \
defined(__OpenBSD__) defined(__OpenBSD__)
# define UV__EHOSTDOWN (-64) # define UV__EHOSTDOWN (-64)
@ -457,4 +456,16 @@
# define UV__ESOCKTNOSUPPORT (-4025) # define UV__ESOCKTNOSUPPORT (-4025)
#endif #endif
/* FreeBSD defines ENODATA in /usr/include/c++/v1/errno.h which is only visible
* if C++ is being used. Define it directly to avoid problems when integrating
* libuv in a C++ project.
*/
#if defined(ENODATA) && !defined(_WIN32)
# define UV__ENODATA UV__ERR(ENODATA)
#elif defined(__FreeBSD__)
# define UV__ENODATA (-9919)
#else
# define UV__ENODATA (-4024)
#endif
#endif /* UV_ERRNO_H_ */ #endif /* UV_ERRNO_H_ */

View File

@ -1,247 +0,0 @@
// ISO C9x compliant stdint.h for Microsoft Visual Studio
// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
//
// Copyright (c) 2006-2008 Alexander Chemeris
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. The name of the author may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef _MSC_VER // [
#error "Use this header only with Microsoft Visual C++ compilers!"
#endif // _MSC_VER ]
#ifndef _MSC_STDINT_H_ // [
#define _MSC_STDINT_H_
#if _MSC_VER > 1000
#pragma once
#endif
#include <limits.h>
// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
// or compiler give many errors like this:
// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
#ifdef __cplusplus
extern "C" {
#endif
# include <wchar.h>
#ifdef __cplusplus
}
#endif
// Define _W64 macros to mark types changing their size, like intptr_t.
#ifndef _W64
# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
# define _W64 __w64
# else
# define _W64
# endif
#endif
// 7.18.1 Integer types
// 7.18.1.1 Exact-width integer types
// Visual Studio 6 and Embedded Visual C++ 4 doesn't
// realize that, e.g. char has the same size as __int8
// so we give up on __intX for them.
#if (_MSC_VER < 1300)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
#else
typedef signed __int8 int8_t;
typedef signed __int16 int16_t;
typedef signed __int32 int32_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
#endif
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
// 7.18.1.2 Minimum-width integer types
typedef int8_t int_least8_t;
typedef int16_t int_least16_t;
typedef int32_t int_least32_t;
typedef int64_t int_least64_t;
typedef uint8_t uint_least8_t;
typedef uint16_t uint_least16_t;
typedef uint32_t uint_least32_t;
typedef uint64_t uint_least64_t;
// 7.18.1.3 Fastest minimum-width integer types
typedef int8_t int_fast8_t;
typedef int16_t int_fast16_t;
typedef int32_t int_fast32_t;
typedef int64_t int_fast64_t;
typedef uint8_t uint_fast8_t;
typedef uint16_t uint_fast16_t;
typedef uint32_t uint_fast32_t;
typedef uint64_t uint_fast64_t;
// 7.18.1.4 Integer types capable of holding object pointers
#ifdef _WIN64 // [
typedef signed __int64 intptr_t;
typedef unsigned __int64 uintptr_t;
#else // _WIN64 ][
typedef _W64 signed int intptr_t;
typedef _W64 unsigned int uintptr_t;
#endif // _WIN64 ]
// 7.18.1.5 Greatest-width integer types
typedef int64_t intmax_t;
typedef uint64_t uintmax_t;
// 7.18.2 Limits of specified-width integer types
#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
// 7.18.2.1 Limits of exact-width integer types
#define INT8_MIN ((int8_t)_I8_MIN)
#define INT8_MAX _I8_MAX
#define INT16_MIN ((int16_t)_I16_MIN)
#define INT16_MAX _I16_MAX
#define INT32_MIN ((int32_t)_I32_MIN)
#define INT32_MAX _I32_MAX
#define INT64_MIN ((int64_t)_I64_MIN)
#define INT64_MAX _I64_MAX
#define UINT8_MAX _UI8_MAX
#define UINT16_MAX _UI16_MAX
#define UINT32_MAX _UI32_MAX
#define UINT64_MAX _UI64_MAX
// 7.18.2.2 Limits of minimum-width integer types
#define INT_LEAST8_MIN INT8_MIN
#define INT_LEAST8_MAX INT8_MAX
#define INT_LEAST16_MIN INT16_MIN
#define INT_LEAST16_MAX INT16_MAX
#define INT_LEAST32_MIN INT32_MIN
#define INT_LEAST32_MAX INT32_MAX
#define INT_LEAST64_MIN INT64_MIN
#define INT_LEAST64_MAX INT64_MAX
#define UINT_LEAST8_MAX UINT8_MAX
#define UINT_LEAST16_MAX UINT16_MAX
#define UINT_LEAST32_MAX UINT32_MAX
#define UINT_LEAST64_MAX UINT64_MAX
// 7.18.2.3 Limits of fastest minimum-width integer types
#define INT_FAST8_MIN INT8_MIN
#define INT_FAST8_MAX INT8_MAX
#define INT_FAST16_MIN INT16_MIN
#define INT_FAST16_MAX INT16_MAX
#define INT_FAST32_MIN INT32_MIN
#define INT_FAST32_MAX INT32_MAX
#define INT_FAST64_MIN INT64_MIN
#define INT_FAST64_MAX INT64_MAX
#define UINT_FAST8_MAX UINT8_MAX
#define UINT_FAST16_MAX UINT16_MAX
#define UINT_FAST32_MAX UINT32_MAX
#define UINT_FAST64_MAX UINT64_MAX
// 7.18.2.4 Limits of integer types capable of holding object pointers
#ifdef _WIN64 // [
# define INTPTR_MIN INT64_MIN
# define INTPTR_MAX INT64_MAX
# define UINTPTR_MAX UINT64_MAX
#else // _WIN64 ][
# define INTPTR_MIN INT32_MIN
# define INTPTR_MAX INT32_MAX
# define UINTPTR_MAX UINT32_MAX
#endif // _WIN64 ]
// 7.18.2.5 Limits of greatest-width integer types
#define INTMAX_MIN INT64_MIN
#define INTMAX_MAX INT64_MAX
#define UINTMAX_MAX UINT64_MAX
// 7.18.3 Limits of other integer types
#ifdef _WIN64 // [
# define PTRDIFF_MIN _I64_MIN
# define PTRDIFF_MAX _I64_MAX
#else // _WIN64 ][
# define PTRDIFF_MIN _I32_MIN
# define PTRDIFF_MAX _I32_MAX
#endif // _WIN64 ]
#define SIG_ATOMIC_MIN INT_MIN
#define SIG_ATOMIC_MAX INT_MAX
#ifndef SIZE_MAX // [
# ifdef _WIN64 // [
# define SIZE_MAX _UI64_MAX
# else // _WIN64 ][
# define SIZE_MAX _UI32_MAX
# endif // _WIN64 ]
#endif // SIZE_MAX ]
// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
#ifndef WCHAR_MIN // [
# define WCHAR_MIN 0
#endif // WCHAR_MIN ]
#ifndef WCHAR_MAX // [
# define WCHAR_MAX _UI16_MAX
#endif // WCHAR_MAX ]
#define WINT_MIN 0
#define WINT_MAX _UI16_MAX
#endif // __STDC_LIMIT_MACROS ]
// 7.18.4 Limits of other integer types
#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
// 7.18.4.1 Macros for minimum-width integer constants
#define INT8_C(val) val##i8
#define INT16_C(val) val##i16
#define INT32_C(val) val##i32
#define INT64_C(val) val##i64
#define UINT8_C(val) val##ui8
#define UINT16_C(val) val##ui16
#define UINT32_C(val) val##ui32
#define UINT64_C(val) val##ui64
// 7.18.4.2 Macros for greatest-width integer constants
#define INTMAX_C INT64_C
#define UINTMAX_C UINT64_C
#endif // __STDC_CONSTANT_MACROS ]
#endif // _MSC_STDINT_H_ ]

View File

@ -59,7 +59,6 @@
# include "uv/darwin.h" # include "uv/darwin.h"
#elif defined(__DragonFly__) || \ #elif defined(__DragonFly__) || \
defined(__FreeBSD__) || \ defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__OpenBSD__) || \ defined(__OpenBSD__) || \
defined(__NetBSD__) defined(__NetBSD__)
# include "uv/bsd.h" # include "uv/bsd.h"

View File

@ -31,8 +31,8 @@
*/ */
#define UV_VERSION_MAJOR 1 #define UV_VERSION_MAJOR 1
#define UV_VERSION_MINOR 44 #define UV_VERSION_MINOR 45
#define UV_VERSION_PATCH 2 #define UV_VERSION_PATCH 0
#define UV_VERSION_IS_RELEASE 1 #define UV_VERSION_IS_RELEASE 1
#define UV_VERSION_SUFFIX "" #define UV_VERSION_SUFFIX ""

View File

@ -59,12 +59,7 @@ typedef struct pollfd {
#include <signal.h> #include <signal.h>
#include <fcntl.h> #include <fcntl.h>
#include <sys/stat.h> #include <sys/stat.h>
#include <stdint.h>
#if defined(_MSC_VER) && _MSC_VER < 1600
# include "uv/stdint-msvc2008.h"
#else
# include <stdint.h>
#endif
#include "uv/tree.h" #include "uv/tree.h"
#include "uv/threadpool.h" #include "uv/threadpool.h"
@ -75,6 +70,11 @@ typedef struct pollfd {
# define S_IFLNK 0xA000 # define S_IFLNK 0xA000
#endif #endif
// Define missing in Windows Kit Include\{VERSION}\ucrt\sys\stat.h
#if defined(_CRT_INTERNAL_NONSTDC_NAMES) && _CRT_INTERNAL_NONSTDC_NAMES && !defined(S_IFIFO)
# define S_IFIFO _S_IFIFO
#endif
/* Additional signals supported by uv_signal and or uv_kill. The CRT defines /* Additional signals supported by uv_signal and or uv_kill. The CRT defines
* the following signals already: * the following signals already:
* *
@ -91,6 +91,7 @@ typedef struct pollfd {
* variants (Linux and Darwin) * variants (Linux and Darwin)
*/ */
#define SIGHUP 1 #define SIGHUP 1
#define SIGQUIT 3
#define SIGKILL 9 #define SIGKILL 9
#define SIGWINCH 28 #define SIGWINCH 28
@ -274,11 +275,12 @@ typedef struct {
} uv_rwlock_t; } uv_rwlock_t;
typedef struct { typedef struct {
unsigned int n; unsigned threshold;
unsigned int count; unsigned in;
uv_mutex_t mutex; uv_mutex_t mutex;
uv_sem_t turnstile1; /* TODO: in v2 make this a uv_cond_t, without unused_ */
uv_sem_t turnstile2; CONDITION_VARIABLE cond;
unsigned out;
} uv_barrier_t; } uv_barrier_t;
typedef struct { typedef struct {
@ -348,9 +350,9 @@ typedef struct {
uv_idle_t* next_idle_handle; \ uv_idle_t* next_idle_handle; \
/* This handle holds the peer sockets for the fast variant of uv_poll_t */ \ /* This handle holds the peer sockets for the fast variant of uv_poll_t */ \
SOCKET poll_peer_sockets[UV_MSAFD_PROVIDER_COUNT]; \ SOCKET poll_peer_sockets[UV_MSAFD_PROVIDER_COUNT]; \
/* Counter to keep track of active tcp streams */ \ /* No longer used. */ \
unsigned int active_tcp_streams; \ unsigned int active_tcp_streams; \
/* Counter to keep track of active udp streams */ \ /* No longer used. */ \
unsigned int active_udp_streams; \ unsigned int active_udp_streams; \
/* Counter to started timer */ \ /* Counter to started timer */ \
uint64_t timer_counter; \ uint64_t timer_counter; \
@ -382,6 +384,7 @@ typedef struct {
ULONG_PTR result; /* overlapped.Internal is reused to hold the result */\ ULONG_PTR result; /* overlapped.Internal is reused to hold the result */\
HANDLE pipeHandle; \ HANDLE pipeHandle; \
DWORD duplex_flags; \ DWORD duplex_flags; \
WCHAR* name; \
} connect; \ } connect; \
} u; \ } u; \
struct uv_req_s* next_req; struct uv_req_s* next_req;
@ -497,7 +500,7 @@ typedef struct {
struct { uv_pipe_connection_fields } conn; \ struct { uv_pipe_connection_fields } conn; \
} pipe; } pipe;
/* TODO: put the parser states in an union - TTY handles are always half-duplex /* TODO: put the parser states in a union - TTY handles are always half-duplex
* so read-state can safely overlap write-state. */ * so read-state can safely overlap write-state. */
#define UV_TTY_PRIVATE_FIELDS \ #define UV_TTY_PRIVATE_FIELDS \
HANDLE handle; \ HANDLE handle; \
@ -605,7 +608,7 @@ typedef struct {
struct uv_process_exit_s { \ struct uv_process_exit_s { \
UV_REQ_FIELDS \ UV_REQ_FIELDS \
} exit_req; \ } exit_req; \
BYTE* child_stdio_buffer; \ void* unused; /* TODO: retained for ABI compat; remove this in v2.x. */ \
int exit_signal; \ int exit_signal; \
HANDLE wait_handle; \ HANDLE wait_handle; \
HANDLE process_handle; \ HANDLE process_handle; \

View File

@ -8,5 +8,5 @@ Version: @PACKAGE_VERSION@
Description: multi-platform support library with a focus on asynchronous I/O. Description: multi-platform support library with a focus on asynchronous I/O.
URL: http://libuv.org/ URL: http://libuv.org/
Libs: -L${libdir} -luv_a @LIBS@ Libs: -L${libdir} -l:libuv.a @LIBS@
Cflags: -I${includedir} Cflags: -I${includedir}

View File

@ -2,6 +2,7 @@ prefix=@prefix@
exec_prefix=${prefix} exec_prefix=${prefix}
libdir=@libdir@ libdir=@libdir@
includedir=@includedir@ includedir=@includedir@
LIBUV_STATIC=-L${libdir} -l:libuv.a @LIBS@
Name: libuv Name: libuv
Version: @PACKAGE_VERSION@ Version: @PACKAGE_VERSION@

View File

@ -17,12 +17,7 @@
#include <stdio.h> #include <stdio.h>
#include <string.h> #include <string.h>
#include <stdint.h>
#if defined(_MSC_VER) && _MSC_VER < 1600
# include "uv/stdint-msvc2008.h"
#else
# include <stdint.h>
#endif
#include "uv.h" #include "uv.h"
#include "uv-common.h" #include "uv-common.h"
@ -135,7 +130,7 @@ static int inet_ntop6(const unsigned char *src, char *dst, size_t size) {
tp += strlen(tp); tp += strlen(tp);
break; break;
} }
tp += sprintf(tp, "%x", words[i]); tp += snprintf(tp, sizeof tmp - (tp - tmp), "%x", words[i]);
} }
/* Was it a trailing run of 0x00's? */ /* Was it a trailing run of 0x00's? */
if (best.base != -1 && (best.base + best.len) == ARRAY_SIZE(words)) if (best.base != -1 && (best.base + best.len) == ARRAY_SIZE(words))

175
deps/libuv/src/thread-common.c vendored Normal file
View File

@ -0,0 +1,175 @@
/* Copyright libuv project contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "uv-common.h"
#include <stdlib.h>
#ifndef _WIN32
#include <pthread.h>
#endif
#if defined(PTHREAD_BARRIER_SERIAL_THREAD)
STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
#endif
/* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */
#if defined(_AIX) || \
defined(__OpenBSD__) || \
!defined(PTHREAD_BARRIER_SERIAL_THREAD)
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
int rc;
#ifdef _WIN32
uv_barrier_t* b;
b = barrier;
if (barrier == NULL || count == 0)
return UV_EINVAL;
#else
struct _uv_barrier* b;
if (barrier == NULL || count == 0)
return UV_EINVAL;
b = uv__malloc(sizeof(*b));
if (b == NULL)
return UV_ENOMEM;
#endif
b->in = 0;
b->out = 0;
b->threshold = count;
rc = uv_mutex_init(&b->mutex);
if (rc != 0)
goto error2;
/* TODO(vjnash): remove these uv_cond_t casts in v2. */
rc = uv_cond_init((uv_cond_t*) &b->cond);
if (rc != 0)
goto error;
#ifndef _WIN32
barrier->b = b;
#endif
return 0;
error:
uv_mutex_destroy(&b->mutex);
error2:
#ifndef _WIN32
uv__free(b);
#endif
return rc;
}
int uv_barrier_wait(uv_barrier_t* barrier) {
int last;
#ifdef _WIN32
uv_barrier_t* b;
b = barrier;
#else
struct _uv_barrier* b;
if (barrier == NULL || barrier->b == NULL)
return UV_EINVAL;
b = barrier->b;
#endif
uv_mutex_lock(&b->mutex);
while (b->out != 0)
uv_cond_wait((uv_cond_t*) &b->cond, &b->mutex);
if (++b->in == b->threshold) {
b->in = 0;
b->out = b->threshold;
uv_cond_broadcast((uv_cond_t*) &b->cond);
} else {
do
uv_cond_wait((uv_cond_t*) &b->cond, &b->mutex);
while (b->in != 0);
}
last = (--b->out == 0);
if (last)
uv_cond_broadcast((uv_cond_t*) &b->cond);
uv_mutex_unlock(&b->mutex);
return last;
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
#ifdef _WIN32
uv_barrier_t* b;
b = barrier;
#else
struct _uv_barrier* b;
b = barrier->b;
#endif
uv_mutex_lock(&b->mutex);
assert(b->in == 0);
while (b->out != 0)
uv_cond_wait((uv_cond_t*) &b->cond, &b->mutex);
if (b->in != 0)
abort();
uv_mutex_unlock(&b->mutex);
uv_mutex_destroy(&b->mutex);
uv_cond_destroy((uv_cond_t*) &b->cond);
#ifndef _WIN32
uv__free(barrier->b);
barrier->b = NULL;
#endif
}
#else
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
return UV__ERR(pthread_barrier_init(barrier, NULL, count));
}
int uv_barrier_wait(uv_barrier_t* barrier) {
int rc;
rc = pthread_barrier_wait(barrier);
if (rc != 0)
if (rc != PTHREAD_BARRIER_SERIAL_THREAD)
abort();
return rc == PTHREAD_BARRIER_SERIAL_THREAD;
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
if (pthread_barrier_destroy(barrier))
abort();
}
#endif
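
For context, a small consumer of the barrier API implemented above (a sketch, not part of the patch): three workers plus the main thread rendezvous once, and exactly one waiter gets a non-zero return value, which it can use to do serial cleanup.

```c
#include <stdio.h>
#include <uv.h>

#define NUM_WORKERS 3

static uv_barrier_t barrier;

static void worker(void* arg) {
  (void) arg;
  /* Blocks until all NUM_WORKERS + 1 participants have arrived. */
  if (uv_barrier_wait(&barrier) > 0)
    printf("this waiter was singled out for serial work\n");
}

int main(void) {
  uv_thread_t threads[NUM_WORKERS];
  int i;

  uv_barrier_init(&barrier, NUM_WORKERS + 1);  /* workers + main thread */

  for (i = 0; i < NUM_WORKERS; i++)
    uv_thread_create(&threads[i], worker, NULL);

  uv_barrier_wait(&barrier);  /* main thread participates too */

  for (i = 0; i < NUM_WORKERS; i++)
    uv_thread_join(&threads[i]);

  uv_barrier_destroy(&barrier);
  return 0;
}
```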

View File

@ -191,6 +191,7 @@ void uv__threadpool_cleanup(void) {
static void init_threads(void) { static void init_threads(void) {
uv_thread_options_t config;
unsigned int i; unsigned int i;
const char* val; const char* val;
uv_sem_t sem; uv_sem_t sem;
@ -226,8 +227,11 @@ static void init_threads(void) {
if (uv_sem_init(&sem, 0)) if (uv_sem_init(&sem, 0))
abort(); abort();
config.flags = UV_THREAD_HAS_STACK_SIZE;
config.stack_size = 8u << 20; /* 8 MB */
for (i = 0; i < nthreads; i++) for (i = 0; i < nthreads; i++)
if (uv_thread_create(threads + i, worker, &sem)) if (uv_thread_create_ex(threads + i, &config, worker, &sem))
abort(); abort();
for (i = 0; i < nthreads; i++) for (i = 0; i < nthreads; i++)
@ -271,9 +275,13 @@ void uv__work_submit(uv_loop_t* loop,
} }
/* TODO(bnoordhuis) teach libuv how to cancel file operations
* that go through io_uring instead of the thread pool.
*/
static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) { static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
int cancelled; int cancelled;
uv_once(&once, init_once); /* Ensure |mutex| is initialized. */
uv_mutex_lock(&mutex); uv_mutex_lock(&mutex);
uv_mutex_lock(&w->loop->wq_mutex); uv_mutex_lock(&w->loop->wq_mutex);
@@ -303,12 +311,15 @@ void uv__work_done(uv_async_t* handle) {
QUEUE* q; QUEUE* q;
QUEUE wq; QUEUE wq;
int err; int err;
int nevents;
loop = container_of(handle, uv_loop_t, wq_async); loop = container_of(handle, uv_loop_t, wq_async);
uv_mutex_lock(&loop->wq_mutex); uv_mutex_lock(&loop->wq_mutex);
QUEUE_MOVE(&loop->wq, &wq); QUEUE_MOVE(&loop->wq, &wq);
uv_mutex_unlock(&loop->wq_mutex); uv_mutex_unlock(&loop->wq_mutex);
nevents = 0;
while (!QUEUE_EMPTY(&wq)) { while (!QUEUE_EMPTY(&wq)) {
q = QUEUE_HEAD(&wq); q = QUEUE_HEAD(&wq);
QUEUE_REMOVE(q); QUEUE_REMOVE(q);
@@ -316,6 +327,20 @@ void uv__work_done(uv_async_t* handle) {
w = container_of(q, struct uv__work, wq); w = container_of(q, struct uv__work, wq);
err = (w->work == uv__cancelled) ? UV_ECANCELED : 0; err = (w->work == uv__cancelled) ? UV_ECANCELED : 0;
w->done(w, err); w->done(w, err);
nevents++;
}
/* This check accomplishes 2 things:
* 1. Even if the queue was empty, the call to uv__work_done() should count
* as an event. Which will have been added by the event loop when
* calling this callback.
* 2. Prevents accidental wrap around in case nevents == 0 events == 0.
*/
if (nevents > 1) {
/* Subtract 1 to counter the call to uv__work_done(). */
uv__metrics_inc_events(loop, nevents - 1);
if (uv__get_internal_fields(loop)->current_timeout == 0)
uv__metrics_inc_events_waiting(loop, nevents - 1);
} }
} }
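
The uv__metrics_inc_events()/uv__metrics_inc_events_waiting() counters added here feed the loop metrics that this libuv release exposes publicly through uv_metrics_info(). A small sketch of reading them from an application (assumes the default loop; not part of the diff):

#include <uv.h>
#include <stdio.h>

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_metrics_t metrics;

  uv_run(loop, UV_RUN_NOWAIT);  /* let the loop turn at least once */

  if (uv_metrics_info(loop, &metrics) == 0)
    printf("iterations=%llu events=%llu events_waiting=%llu\n",
           (unsigned long long) metrics.loop_count,
           (unsigned long long) metrics.events,
           (unsigned long long) metrics.events_waiting);

  return uv_loop_close(loop);
}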

View File

@@ -131,6 +131,7 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) {
void uv__io_poll(uv_loop_t* loop, int timeout) { void uv__io_poll(uv_loop_t* loop, int timeout) {
uv__loop_internal_fields_t* lfields;
struct pollfd events[1024]; struct pollfd events[1024];
struct pollfd pqry; struct pollfd pqry;
struct pollfd* pe; struct pollfd* pe;
@@ -154,6 +155,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
return; return;
} }
lfields = uv__get_internal_fields(loop);
while (!QUEUE_EMPTY(&loop->watcher_queue)) { while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue); q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q); QUEUE_REMOVE(q);
@@ -217,7 +220,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
base = loop->time; base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */ count = 48; /* Benchmarks suggest this gives the best throughput. */
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1; reset_timeout = 1;
user_timeout = timeout; user_timeout = timeout;
timeout = 0; timeout = 0;
@@ -232,6 +235,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (timeout != 0) if (timeout != 0)
uv__metrics_set_provider_entry_time(loop); uv__metrics_set_provider_entry_time(loop);
/* Store the current timeout in a location that's globally accessible so
* other locations like uv__work_done() can determine whether the queue
* of events in the callback were waiting when poll was called.
*/
lfields->current_timeout = timeout;
nfds = pollset_poll(loop->backend_fd, nfds = pollset_poll(loop->backend_fd,
events, events,
ARRAY_SIZE(events), ARRAY_SIZE(events),
@@ -321,9 +330,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
nevents++; nevents++;
} }
uv__metrics_inc_events(loop, nevents);
if (reset_timeout != 0) { if (reset_timeout != 0) {
timeout = user_timeout; timeout = user_timeout;
reset_timeout = 0; reset_timeout = 0;
uv__metrics_inc_events_waiting(loop, nevents);
} }
if (have_signals != 0) { if (have_signals != 0) {
@@ -389,6 +400,11 @@ uint64_t uv_get_constrained_memory(void) {
} }
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
void uv_loadavg(double avg[3]) { void uv_loadavg(double avg[3]) {
perfstat_cpu_total_t ps_total; perfstat_cpu_total_t ps_total;
int result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1); int result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
@@ -425,7 +441,7 @@ static char* uv__rawname(const char* cp, char (*dst)[FILENAME_MAX+1]) {
static int uv__path_is_a_directory(char* filename) { static int uv__path_is_a_directory(char* filename) {
struct stat statbuf; struct stat statbuf;
if (stat(filename, &statbuf) < 0) if (uv__stat(filename, &statbuf) < 0)
return -1; /* failed: not a directory, assume it is a file */ return -1; /* failed: not a directory, assume it is a file */
if (statbuf.st_type == VDIR) if (statbuf.st_type == VDIR)

View File

@@ -24,9 +24,9 @@
#include "uv.h" #include "uv.h"
#include "internal.h" #include "internal.h"
#include "atomic-ops.h"
#include <errno.h> #include <errno.h>
#include <stdatomic.h>
#include <stdio.h> /* snprintf() */ #include <stdio.h> /* snprintf() */
#include <assert.h> #include <assert.h>
#include <stdlib.h> #include <stdlib.h>
@@ -40,6 +40,7 @@
static void uv__async_send(uv_loop_t* loop); static void uv__async_send(uv_loop_t* loop);
static int uv__async_start(uv_loop_t* loop); static int uv__async_start(uv_loop_t* loop);
static void uv__cpu_relax(void);
int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) { int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
@@ -52,6 +53,7 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC); uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
handle->async_cb = async_cb; handle->async_cb = async_cb;
handle->pending = 0; handle->pending = 0;
handle->u.fd = 0; /* This will be used as a busy flag. */
QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue); QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue);
uv__handle_start(handle); uv__handle_start(handle);
@@ -61,46 +63,54 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
int uv_async_send(uv_async_t* handle) { int uv_async_send(uv_async_t* handle) {
_Atomic int* pending;
_Atomic int* busy;
pending = (_Atomic int*) &handle->pending;
busy = (_Atomic int*) &handle->u.fd;
/* Do a cheap read first. */ /* Do a cheap read first. */
if (ACCESS_ONCE(int, handle->pending) != 0) if (atomic_load_explicit(pending, memory_order_relaxed) != 0)
return 0; return 0;
/* Tell the other thread we're busy with the handle. */ /* Set the loop to busy. */
if (cmpxchgi(&handle->pending, 0, 1) != 0) atomic_fetch_add(busy, 1);
return 0;
/* Wake up the other thread's event loop. */ /* Wake up the other thread's event loop. */
uv__async_send(handle->loop); if (atomic_exchange(pending, 1) == 0)
uv__async_send(handle->loop);
/* Tell the other thread we're done. */ /* Set the loop to not-busy. */
if (cmpxchgi(&handle->pending, 1, 2) != 1) atomic_fetch_add(busy, -1);
abort();
return 0; return 0;
} }
/* Only call this from the event loop thread. */ /* Wait for the busy flag to clear before closing.
static int uv__async_spin(uv_async_t* handle) { * Only call this from the event loop thread. */
static void uv__async_spin(uv_async_t* handle) {
_Atomic int* pending;
_Atomic int* busy;
int i; int i;
int rc;
pending = (_Atomic int*) &handle->pending;
busy = (_Atomic int*) &handle->u.fd;
/* Set the pending flag first, so no new events will be added by other
* threads after this function returns. */
atomic_store(pending, 1);
for (;;) { for (;;) {
/* 997 is not completely chosen at random. It's a prime number, acyclical /* 997 is not completely chosen at random. It's a prime number, acyclic by
* by nature, and should therefore hopefully dampen sympathetic resonance. * nature, and should therefore hopefully dampen sympathetic resonance.
*/ */
for (i = 0; i < 997; i++) { for (i = 0; i < 997; i++) {
/* rc=0 -- handle is not pending. if (atomic_load(busy) == 0)
* rc=1 -- handle is pending, other thread is still working with it. return;
* rc=2 -- handle is pending, other thread is done.
*/
rc = cmpxchgi(&handle->pending, 2, 0);
if (rc != 1)
return rc;
/* Other thread is busy with this handle, spin until it's done. */ /* Other thread is busy with this handle, spin until it's done. */
cpu_relax(); uv__cpu_relax();
} }
/* Yield the CPU. We may have preempted the other thread while it's /* Yield the CPU. We may have preempted the other thread while it's
@@ -125,6 +135,7 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
QUEUE queue; QUEUE queue;
QUEUE* q; QUEUE* q;
uv_async_t* h; uv_async_t* h;
_Atomic int *pending;
assert(w == &loop->async_io_watcher); assert(w == &loop->async_io_watcher);
@@ -154,8 +165,10 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
QUEUE_REMOVE(q); QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&loop->async_handles, q); QUEUE_INSERT_TAIL(&loop->async_handles, q);
if (0 == uv__async_spin(h)) /* Atomically fetch and clear pending flag */
continue; /* Not pending. */ pending = (_Atomic int*) &h->pending;
if (atomic_exchange(pending, 0) == 0)
continue;
if (h->async_cb == NULL) if (h->async_cb == NULL)
continue; continue;
@@ -227,20 +240,28 @@ static int uv__async_start(uv_loop_t* loop) {
} }
int uv__async_fork(uv_loop_t* loop) {
if (loop->async_io_watcher.fd == -1) /* never started */
return 0;
uv__async_stop(loop);
return uv__async_start(loop);
}
void uv__async_stop(uv_loop_t* loop) { void uv__async_stop(uv_loop_t* loop) {
QUEUE queue;
QUEUE* q;
uv_async_t* h;
if (loop->async_io_watcher.fd == -1) if (loop->async_io_watcher.fd == -1)
return; return;
/* Make sure no other thread is accessing the async handle fd after the loop
* cleanup.
*/
QUEUE_MOVE(&loop->async_handles, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
h = QUEUE_DATA(q, uv_async_t, queue);
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&loop->async_handles, q);
uv__async_spin(h);
}
if (loop->async_wfd != -1) { if (loop->async_wfd != -1) {
if (loop->async_wfd != loop->async_io_watcher.fd) if (loop->async_wfd != loop->async_io_watcher.fd)
uv__close(loop->async_wfd); uv__close(loop->async_wfd);
@@ -251,3 +272,58 @@ void uv__async_stop(uv_loop_t* loop) {
uv__close(loop->async_io_watcher.fd); uv__close(loop->async_io_watcher.fd);
loop->async_io_watcher.fd = -1; loop->async_io_watcher.fd = -1;
} }
int uv__async_fork(uv_loop_t* loop) {
QUEUE queue;
QUEUE* q;
uv_async_t* h;
if (loop->async_io_watcher.fd == -1) /* never started */
return 0;
QUEUE_MOVE(&loop->async_handles, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
h = QUEUE_DATA(q, uv_async_t, queue);
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&loop->async_handles, q);
/* The state of any thread that set pending is now likely corrupt in this
* child because the user called fork, so just clear these flags and move
* on. Calling most libc functions after `fork` is declared to be undefined
* behavior anyways, unless async-signal-safe, for multithreaded programs
* like libuv, and nothing interesting in pthreads is async-signal-safe.
*/
h->pending = 0;
/* This is the busy flag, and we just abruptly lost all other threads. */
h->u.fd = 0;
}
/* Recreate these, since they still exist, but belong to the wrong pid now. */
if (loop->async_wfd != -1) {
if (loop->async_wfd != loop->async_io_watcher.fd)
uv__close(loop->async_wfd);
loop->async_wfd = -1;
}
uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
uv__close(loop->async_io_watcher.fd);
loop->async_io_watcher.fd = -1;
return uv__async_start(loop);
}
static void uv__cpu_relax(void) {
#if defined(__i386__) || defined(__x86_64__)
__asm__ __volatile__ ("rep; nop" ::: "memory"); /* a.k.a. PAUSE */
#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
__asm__ __volatile__ ("yield" ::: "memory");
#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
__asm volatile ("" : : : "memory");
#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
__asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
#endif
}
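
The rewrite above replaces the hand-rolled cmpxchgi()/ACCESS_ONCE() pattern with C11 atomics and a busy counter, but the public contract is unchanged: uv_async_send() may be called from any thread and multiple sends can be coalesced into a single callback. A minimal cross-thread wake-up sketch using only the public API (not from this commit):

#include <uv.h>
#include <stdio.h>

static uv_async_t async_handle;

static void on_wakeup(uv_async_t* handle) {
  printf("woken up from another thread\n");
  uv_close((uv_handle_t*) handle, NULL);  /* lets the loop exit */
}

static void worker(void* arg) {
  (void) arg;
  /* Two sends before the callback runs may be delivered as one call. */
  uv_async_send(&async_handle);
  uv_async_send(&async_handle);
}

int main(void) {
  uv_thread_t thread;
  uv_loop_t* loop = uv_default_loop();

  uv_async_init(loop, &async_handle, on_wakeup);
  uv_thread_create(&thread, worker, NULL);
  uv_run(loop, UV_RUN_DEFAULT);
  uv_thread_join(&thread);
  return uv_loop_close(loop);
}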

View File

@@ -1,64 +0,0 @@
/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef UV_ATOMIC_OPS_H_
#define UV_ATOMIC_OPS_H_
#include "internal.h" /* UV_UNUSED */
#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#include <atomic.h>
#endif
UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval));
UV_UNUSED(static void cpu_relax(void));
/* Prefer hand-rolled assembly over the gcc builtins because the latter also
* issue full memory barriers.
*/
UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) {
#if defined(__i386__) || defined(__x86_64__)
int out;
__asm__ __volatile__ ("lock; cmpxchg %2, %1;"
: "=a" (out), "+m" (*(volatile int*) ptr)
: "r" (newval), "0" (oldval)
: "memory");
return out;
#elif defined(__MVS__)
/* Use hand-rolled assembly because codegen from builtin __plo_CSST results in
* a runtime bug.
*/
__asm(" cs %0,%2,%1 \n " : "+r"(oldval), "+m"(*ptr) : "r"(newval) :);
return oldval;
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
return atomic_cas_uint((uint_t *)ptr, (uint_t)oldval, (uint_t)newval);
#else
return __sync_val_compare_and_swap(ptr, oldval, newval);
#endif
}
UV_UNUSED(static void cpu_relax(void)) {
#if defined(__i386__) || defined(__x86_64__)
__asm__ __volatile__ ("rep; nop" ::: "memory"); /* a.k.a. PAUSE */
#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
__asm__ __volatile__ ("yield" ::: "memory");
#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
__asm volatile ("" : : : "memory");
#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
__asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
#endif
}
#endif /* UV_ATOMIC_OPS_H_ */
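
atomic-ops.h can go away because C11 <stdatomic.h> now covers both primitives: cpu_relax() survives as the static uv__cpu_relax() shown in async.c above, and cmpxchgi() maps onto atomic_compare_exchange_strong(). A rough equivalence sketch (illustrative only, not code from this commit):

#include <stdatomic.h>
#include <stdio.h>

/* Roughly what cmpxchgi(ptr, oldval, newval) did: try to swap oldval for
 * newval and report the value that was actually observed. */
static int cas_int(_Atomic int* ptr, int oldval, int newval) {
  int expected = oldval;
  atomic_compare_exchange_strong(ptr, &expected, newval);
  return expected;  /* == oldval on success, the current value otherwise */
}

int main(void) {
  _Atomic int flag = 0;

  if (cas_int(&flag, 0, 1) == 0)
    printf("acquired, flag is now %d\n", atomic_load(&flag));
  return 0;
}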

View File

@@ -41,12 +41,13 @@
#include <sys/uio.h> /* writev */ #include <sys/uio.h> /* writev */
#include <sys/resource.h> /* getrusage */ #include <sys/resource.h> /* getrusage */
#include <pwd.h> #include <pwd.h>
#include <grp.h>
#include <sys/utsname.h> #include <sys/utsname.h>
#include <sys/time.h> #include <sys/time.h>
#include <time.h> /* clock_gettime */
#ifdef __sun #ifdef __sun
# include <sys/filio.h> # include <sys/filio.h>
# include <sys/types.h>
# include <sys/wait.h> # include <sys/wait.h>
#endif #endif
@@ -66,13 +67,14 @@ extern char** environ;
#if defined(__DragonFly__) || \ #if defined(__DragonFly__) || \
defined(__FreeBSD__) || \ defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__NetBSD__) || \ defined(__NetBSD__) || \
defined(__OpenBSD__) defined(__OpenBSD__)
# include <sys/sysctl.h> # include <sys/sysctl.h>
# include <sys/filio.h> # include <sys/filio.h>
# include <sys/wait.h> # include <sys/wait.h>
# include <sys/param.h>
# if defined(__FreeBSD__) # if defined(__FreeBSD__)
# include <sys/cpuset.h>
# define uv__accept4 accept4 # define uv__accept4 accept4
# endif # endif
# if defined(__NetBSD__) # if defined(__NetBSD__)
@@ -107,6 +109,35 @@ STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len)); STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
/* https://github.com/libuv/libuv/issues/1674 */
int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts) {
struct timespec t;
int r;
if (ts == NULL)
return UV_EFAULT;
switch (clock_id) {
default:
return UV_EINVAL;
case UV_CLOCK_MONOTONIC:
r = clock_gettime(CLOCK_MONOTONIC, &t);
break;
case UV_CLOCK_REALTIME:
r = clock_gettime(CLOCK_REALTIME, &t);
break;
}
if (r)
return UV__ERR(errno);
ts->tv_sec = t.tv_sec;
ts->tv_nsec = t.tv_nsec;
return 0;
}
uint64_t uv_hrtime(void) { uint64_t uv_hrtime(void) {
return uv__hrtime(UV_CLOCK_PRECISE); return uv__hrtime(UV_CLOCK_PRECISE);
} }
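
uv_clock_gettime() added in the hunk above is new public API in this release. A short usage sketch (not part of the diff):

#include <uv.h>
#include <stdio.h>

int main(void) {
  uv_timespec64_t ts;

  if (uv_clock_gettime(UV_CLOCK_MONOTONIC, &ts) == 0)
    printf("monotonic: %lld.%09d\n",
           (long long) ts.tv_sec, (int) ts.tv_nsec);
  return 0;
}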
@@ -232,10 +263,10 @@ int uv__getiovmax(void) {
#if defined(IOV_MAX) #if defined(IOV_MAX)
return IOV_MAX; return IOV_MAX;
#elif defined(_SC_IOV_MAX) #elif defined(_SC_IOV_MAX)
static int iovmax_cached = -1; static _Atomic int iovmax_cached = -1;
int iovmax; int iovmax;
iovmax = uv__load_relaxed(&iovmax_cached); iovmax = atomic_load_explicit(&iovmax_cached, memory_order_relaxed);
if (iovmax != -1) if (iovmax != -1)
return iovmax; return iovmax;
@@ -247,7 +278,7 @@ int uv__getiovmax(void) {
if (iovmax == -1) if (iovmax == -1)
iovmax = 1; iovmax = 1;
uv__store_relaxed(&iovmax_cached, iovmax); atomic_store_explicit(&iovmax_cached, iovmax, memory_order_relaxed);
return iovmax; return iovmax;
#else #else
@@ -360,6 +391,7 @@ static int uv__backend_timeout(const uv_loop_t* loop) {
(uv__has_active_handles(loop) || uv__has_active_reqs(loop)) && (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->pending_queue) &&
QUEUE_EMPTY(&loop->idle_handles) && QUEUE_EMPTY(&loop->idle_handles) &&
(loop->flags & UV_LOOP_REAP_CHILDREN) == 0 &&
loop->closing_handles == NULL) loop->closing_handles == NULL)
return uv__next_timeout(loop); return uv__next_timeout(loop);
return 0; return 0;
@@ -388,10 +420,17 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
if (!r) if (!r)
uv__update_time(loop); uv__update_time(loop);
while (r != 0 && loop->stop_flag == 0) { /* Maintain backwards compatibility by processing timers before entering the
uv__update_time(loop); * while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed
* once, which should be done after polling in order to maintain proper
* execution order of the conceptual event loop. */
if (mode == UV_RUN_DEFAULT) {
if (r)
uv__update_time(loop);
uv__run_timers(loop); uv__run_timers(loop);
}
while (r != 0 && loop->stop_flag == 0) {
can_sleep = can_sleep =
QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->idle_handles); QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->idle_handles);
@@ -403,6 +442,8 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT) if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
timeout = uv__backend_timeout(loop); timeout = uv__backend_timeout(loop);
uv__metrics_inc_loop_count(loop);
uv__io_poll(loop, timeout); uv__io_poll(loop, timeout);
/* Process immediate callbacks (e.g. write_cb) a small fixed number of /* Process immediate callbacks (e.g. write_cb) a small fixed number of
@@ -420,18 +461,8 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) {
uv__run_check(loop); uv__run_check(loop);
uv__run_closing_handles(loop); uv__run_closing_handles(loop);
if (mode == UV_RUN_ONCE) { uv__update_time(loop);
/* UV_RUN_ONCE implies forward progress: at least one callback must have uv__run_timers(loop);
* been invoked when it returns. uv__io_poll() can return without doing
* I/O (meaning: no callbacks) when its timeout expires - which means we
* have pending timers that satisfy the forward progress constraint.
*
* UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
* the check.
*/
uv__update_time(loop);
uv__run_timers(loop);
}
r = uv__loop_alive(loop); r = uv__loop_alive(loop);
if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT) if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
@@ -867,11 +898,6 @@ void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
w->fd = fd; w->fd = fd;
w->events = 0; w->events = 0;
w->pevents = 0; w->pevents = 0;
#if defined(UV_HAVE_KQUEUE)
w->rcount = 0;
w->wcount = 0;
#endif /* defined(UV_HAVE_KQUEUE) */
} }
@@ -991,6 +1017,15 @@ int uv_getrusage(uv_rusage_t* rusage) {
rusage->ru_nivcsw = usage.ru_nivcsw; rusage->ru_nivcsw = usage.ru_nivcsw;
#endif #endif
/* Most platforms report ru_maxrss in kilobytes; macOS and Solaris are
* the outliers because of course they are.
*/
#if defined(__APPLE__) && !TARGET_OS_IPHONE
rusage->ru_maxrss /= 1024; /* macOS reports bytes. */
#elif defined(__sun)
rusage->ru_maxrss /= getpagesize() / 1024; /* Solaris reports pages. */
#endif
return 0; return 0;
} }
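
With the normalization above, ru_maxrss now aims to be kilobytes on every platform (macOS converts from bytes, Solaris from pages). A tiny check through the public struct (not from the commit):

#include <uv.h>
#include <stdio.h>

int main(void) {
  uv_rusage_t usage;

  if (uv_getrusage(&usage) == 0)
    printf("peak RSS: %llu kB\n", (unsigned long long) usage.ru_maxrss);
  return 0;
}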
@@ -1090,8 +1125,8 @@ int uv_os_homedir(char* buffer, size_t* size) {
if (r != UV_ENOENT) if (r != UV_ENOENT)
return r; return r;
/* HOME is not set, so call uv__getpwuid_r() */ /* HOME is not set, so call uv_os_get_passwd() */
r = uv__getpwuid_r(&pwd); r = uv_os_get_passwd(&pwd);
if (r != 0) { if (r != 0) {
return r; return r;
@@ -1164,11 +1199,10 @@ return_buffer:
} }
int uv__getpwuid_r(uv_passwd_t* pwd) { static int uv__getpwuid_r(uv_passwd_t *pwd, uid_t uid) {
struct passwd pw; struct passwd pw;
struct passwd* result; struct passwd* result;
char* buf; char* buf;
uid_t uid;
size_t bufsize; size_t bufsize;
size_t name_size; size_t name_size;
size_t homedir_size; size_t homedir_size;
@@ -1178,8 +1212,6 @@ int uv__getpwuid_r(uv_passwd_t* pwd) {
if (pwd == NULL) if (pwd == NULL)
return UV_EINVAL; return UV_EINVAL;
uid = geteuid();
/* Calling sysconf(_SC_GETPW_R_SIZE_MAX) would get the suggested size, but it /* Calling sysconf(_SC_GETPW_R_SIZE_MAX) would get the suggested size, but it
* is frequently 1024 or 4096, so we can just use that directly. The pwent * is frequently 1024 or 4096, so we can just use that directly. The pwent
* will not usually be large. */ * will not usually be large. */
@@ -1238,24 +1270,93 @@ int uv__getpwuid_r(uv_passwd_t* pwd) {
} }
void uv_os_free_passwd(uv_passwd_t* pwd) { int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) {
if (pwd == NULL) struct group gp;
return; struct group* result;
char* buf;
char* gr_mem;
size_t bufsize;
size_t name_size;
long members;
size_t mem_size;
int r;
/* if (grp == NULL)
The memory for name, shell, and homedir are allocated in a single return UV_EINVAL;
uv__malloc() call. The base of the pointer is stored in pwd->username, so
that is the field that needs to be freed. /* Calling sysconf(_SC_GETGR_R_SIZE_MAX) would get the suggested size, but it
*/ * is frequently 1024 or 4096, so we can just use that directly. The pwent
uv__free(pwd->username); * will not usually be large. */
pwd->username = NULL; for (bufsize = 2000;; bufsize *= 2) {
pwd->shell = NULL; buf = uv__malloc(bufsize);
pwd->homedir = NULL;
if (buf == NULL)
return UV_ENOMEM;
do
r = getgrgid_r(gid, &gp, buf, bufsize, &result);
while (r == EINTR);
if (r != 0 || result == NULL)
uv__free(buf);
if (r != ERANGE)
break;
}
if (r != 0)
return UV__ERR(r);
if (result == NULL)
return UV_ENOENT;
/* Allocate memory for the groupname and members. */
name_size = strlen(gp.gr_name) + 1;
members = 0;
mem_size = sizeof(char*);
for (r = 0; gp.gr_mem[r] != NULL; r++) {
mem_size += strlen(gp.gr_mem[r]) + 1 + sizeof(char*);
members++;
}
gr_mem = uv__malloc(name_size + mem_size);
if (gr_mem == NULL) {
uv__free(buf);
return UV_ENOMEM;
}
/* Copy the members */
grp->members = (char**) gr_mem;
grp->members[members] = NULL;
gr_mem = (char*) &grp->members[members + 1];
for (r = 0; r < members; r++) {
grp->members[r] = gr_mem;
strcpy(gr_mem, gp.gr_mem[r]);
gr_mem += strlen(gr_mem) + 1;
}
assert(gr_mem == (char*)grp->members + mem_size);
/* Copy the groupname */
grp->groupname = gr_mem;
memcpy(grp->groupname, gp.gr_name, name_size);
gr_mem += name_size;
/* Copy the gid */
grp->gid = gp.gr_gid;
uv__free(buf);
return 0;
} }
int uv_os_get_passwd(uv_passwd_t* pwd) { int uv_os_get_passwd(uv_passwd_t* pwd) {
return uv__getpwuid_r(pwd); return uv__getpwuid_r(pwd, geteuid());
}
int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid) {
return uv__getpwuid_r(pwd, uid);
} }
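
uv_os_get_group() and uv_os_get_passwd2() introduced in this hunk are the new uid/gid-based lookups. A POSIX-only usage sketch (error handling trimmed; not part of the diff):

#include <uv.h>
#include <stdio.h>
#include <unistd.h>  /* getuid, getgid */

int main(void) {
  uv_passwd_t pwd;
  uv_group_t grp;

  if (uv_os_get_passwd2(&pwd, getuid()) == 0) {  /* lookup by explicit uid */
    printf("user: %s home: %s\n", pwd.username, pwd.homedir);
    uv_os_free_passwd(&pwd);
  }

  if (uv_os_get_group(&grp, getgid()) == 0) {
    printf("group: %s\n", grp.groupname);
    uv_os_free_group(&grp);
  }
  return 0;
}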
@@ -1416,6 +1517,13 @@ uv_pid_t uv_os_getppid(void) {
return getppid(); return getppid();
} }
int uv_cpumask_size(void) {
#if UV__CPU_AFFINITY_SUPPORTED
return CPU_SETSIZE;
#else
return UV_ENOTSUP;
#endif
}
int uv_os_getpriority(uv_pid_t pid, int* priority) { int uv_os_getpriority(uv_pid_t pid, int* priority) {
int r; int r;

View File

@@ -51,3 +51,7 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
uint64_t uv_get_constrained_memory(void) { uint64_t uv_get_constrained_memory(void) {
return 0; /* Memory constraints are unknown. */ return 0; /* Memory constraints are unknown. */
} }
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
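
uv_get_available_memory() is another addition in this release; on platforms without a better number it simply falls back to uv_get_free_memory(), as above. Note also that the memory queries now return 0 instead of an error code when the value is unknown. A sketch of the public calls (not from the commit):

#include <uv.h>
#include <stdio.h>

int main(void) {
  printf("total:       %llu\n", (unsigned long long) uv_get_total_memory());
  printf("free:        %llu\n", (unsigned long long) uv_get_free_memory());
  printf("constrained: %llu\n", (unsigned long long) uv_get_constrained_memory());
  printf("available:   %llu\n", (unsigned long long) uv_get_available_memory());
  return 0;
}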

View File

@@ -27,7 +27,6 @@
struct CFArrayCallBacks; struct CFArrayCallBacks;
struct CFRunLoopSourceContext; struct CFRunLoopSourceContext;
struct FSEventStreamContext; struct FSEventStreamContext;
struct CFRange;
typedef double CFAbsoluteTime; typedef double CFAbsoluteTime;
typedef double CFTimeInterval; typedef double CFTimeInterval;
@@ -43,23 +42,13 @@ typedef unsigned CFStringEncoding;
typedef void* CFAllocatorRef; typedef void* CFAllocatorRef;
typedef void* CFArrayRef; typedef void* CFArrayRef;
typedef void* CFBundleRef; typedef void* CFBundleRef;
typedef void* CFDataRef;
typedef void* CFDictionaryRef; typedef void* CFDictionaryRef;
typedef void* CFMutableDictionaryRef;
typedef struct CFRange CFRange;
typedef void* CFRunLoopRef; typedef void* CFRunLoopRef;
typedef void* CFRunLoopSourceRef; typedef void* CFRunLoopSourceRef;
typedef void* CFStringRef; typedef void* CFStringRef;
typedef void* CFTypeRef; typedef void* CFTypeRef;
typedef void* FSEventStreamRef; typedef void* FSEventStreamRef;
typedef uint32_t IOOptionBits;
typedef unsigned int io_iterator_t;
typedef unsigned int io_object_t;
typedef unsigned int io_service_t;
typedef unsigned int io_registry_entry_t;
typedef void (*FSEventStreamCallback)(const FSEventStreamRef, typedef void (*FSEventStreamCallback)(const FSEventStreamRef,
void*, void*,
size_t, size_t,
@@ -80,11 +69,6 @@ struct FSEventStreamContext {
void* pad[3]; void* pad[3];
}; };
struct CFRange {
CFIndex location;
CFIndex length;
};
static const CFStringEncoding kCFStringEncodingUTF8 = 0x8000100; static const CFStringEncoding kCFStringEncodingUTF8 = 0x8000100;
static const OSStatus noErr = 0; static const OSStatus noErr = 0;

View File

@@ -33,13 +33,10 @@
#include <sys/sysctl.h> #include <sys/sysctl.h>
#include <unistd.h> /* sysconf */ #include <unistd.h> /* sysconf */
#include "darwin-stub.h"
static uv_once_t once = UV_ONCE_INIT; static uv_once_t once = UV_ONCE_INIT;
static uint64_t (*time_func)(void); static uint64_t (*time_func)(void);
static mach_timebase_info_data_t timebase; static mach_timebase_info_data_t timebase;
typedef unsigned char UInt8;
int uv__platform_loop_init(uv_loop_t* loop) { int uv__platform_loop_init(uv_loop_t* loop) {
loop->cf_state = NULL; loop->cf_state = NULL;
@@ -110,7 +107,7 @@ uint64_t uv_get_free_memory(void) {
if (host_statistics(mach_host_self(), HOST_VM_INFO, if (host_statistics(mach_host_self(), HOST_VM_INFO,
(host_info_t)&info, &count) != KERN_SUCCESS) { (host_info_t)&info, &count) != KERN_SUCCESS) {
return UV_EINVAL; /* FIXME(bnoordhuis) Translate error. */ return 0;
} }
return (uint64_t) info.free_count * sysconf(_SC_PAGESIZE); return (uint64_t) info.free_count * sysconf(_SC_PAGESIZE);
@@ -123,7 +120,7 @@ uint64_t uv_get_total_memory(void) {
size_t size = sizeof(info); size_t size = sizeof(info);
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0)) if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return UV__ERR(errno); return 0;
return (uint64_t) info; return (uint64_t) info;
} }
@@ -134,6 +131,11 @@ uint64_t uv_get_constrained_memory(void) {
} }
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
void uv_loadavg(double avg[3]) { void uv_loadavg(double avg[3]) {
struct loadavg info; struct loadavg info;
size_t size = sizeof(info); size_t size = sizeof(info);
@@ -183,159 +185,17 @@ int uv_uptime(double* uptime) {
return 0; return 0;
} }
static int uv__get_cpu_speed(uint64_t* speed) {
/* IOKit */
void (*pIOObjectRelease)(io_object_t);
kern_return_t (*pIOMasterPort)(mach_port_t, mach_port_t*);
CFMutableDictionaryRef (*pIOServiceMatching)(const char*);
kern_return_t (*pIOServiceGetMatchingServices)(mach_port_t,
CFMutableDictionaryRef,
io_iterator_t*);
io_service_t (*pIOIteratorNext)(io_iterator_t);
CFTypeRef (*pIORegistryEntryCreateCFProperty)(io_registry_entry_t,
CFStringRef,
CFAllocatorRef,
IOOptionBits);
/* CoreFoundation */
CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef,
const char*,
CFStringEncoding);
CFStringEncoding (*pCFStringGetSystemEncoding)(void);
UInt8 *(*pCFDataGetBytePtr)(CFDataRef);
CFIndex (*pCFDataGetLength)(CFDataRef);
void (*pCFDataGetBytes)(CFDataRef, CFRange, UInt8*);
void (*pCFRelease)(CFTypeRef);
void* core_foundation_handle;
void* iokit_handle;
int err;
kern_return_t kr;
mach_port_t mach_port;
io_iterator_t it;
io_object_t service;
mach_port = 0;
err = UV_ENOENT;
core_foundation_handle = dlopen("/System/Library/Frameworks/"
"CoreFoundation.framework/"
"CoreFoundation",
RTLD_LAZY | RTLD_LOCAL);
iokit_handle = dlopen("/System/Library/Frameworks/IOKit.framework/"
"IOKit",
RTLD_LAZY | RTLD_LOCAL);
if (core_foundation_handle == NULL || iokit_handle == NULL)
goto out;
#define V(handle, symbol) \
do { \
*(void **)(&p ## symbol) = dlsym((handle), #symbol); \
if (p ## symbol == NULL) \
goto out; \
} \
while (0)
V(iokit_handle, IOMasterPort);
V(iokit_handle, IOServiceMatching);
V(iokit_handle, IOServiceGetMatchingServices);
V(iokit_handle, IOIteratorNext);
V(iokit_handle, IOObjectRelease);
V(iokit_handle, IORegistryEntryCreateCFProperty);
V(core_foundation_handle, CFStringCreateWithCString);
V(core_foundation_handle, CFStringGetSystemEncoding);
V(core_foundation_handle, CFDataGetBytePtr);
V(core_foundation_handle, CFDataGetLength);
V(core_foundation_handle, CFDataGetBytes);
V(core_foundation_handle, CFRelease);
#undef V
#define S(s) pCFStringCreateWithCString(NULL, (s), kCFStringEncodingUTF8)
kr = pIOMasterPort(MACH_PORT_NULL, &mach_port);
assert(kr == KERN_SUCCESS);
CFMutableDictionaryRef classes_to_match
= pIOServiceMatching("IOPlatformDevice");
kr = pIOServiceGetMatchingServices(mach_port, classes_to_match, &it);
assert(kr == KERN_SUCCESS);
service = pIOIteratorNext(it);
CFStringRef device_type_str = S("device_type");
CFStringRef clock_frequency_str = S("clock-frequency");
while (service != 0) {
CFDataRef data;
data = pIORegistryEntryCreateCFProperty(service,
device_type_str,
NULL,
0);
if (data) {
const UInt8* raw = pCFDataGetBytePtr(data);
if (strncmp((char*)raw, "cpu", 3) == 0 ||
strncmp((char*)raw, "processor", 9) == 0) {
CFDataRef freq_ref;
freq_ref = pIORegistryEntryCreateCFProperty(service,
clock_frequency_str,
NULL,
0);
if (freq_ref) {
const UInt8* freq_ref_ptr = pCFDataGetBytePtr(freq_ref);
CFIndex len = pCFDataGetLength(freq_ref);
if (len == 8)
memcpy(speed, freq_ref_ptr, 8);
else if (len == 4) {
uint32_t v;
memcpy(&v, freq_ref_ptr, 4);
*speed = v;
} else {
*speed = 0;
}
pCFRelease(freq_ref);
pCFRelease(data);
break;
}
}
pCFRelease(data);
}
service = pIOIteratorNext(it);
}
pIOObjectRelease(it);
err = 0;
if (device_type_str != NULL)
pCFRelease(device_type_str);
if (clock_frequency_str != NULL)
pCFRelease(clock_frequency_str);
out:
if (core_foundation_handle != NULL)
dlclose(core_foundation_handle);
if (iokit_handle != NULL)
dlclose(iokit_handle);
mach_port_deallocate(mach_task_self(), mach_port);
return err;
}
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK), unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
multiplier = ((uint64_t)1000L / ticks); multiplier = ((uint64_t)1000L / ticks);
char model[512]; char model[512];
uint64_t cpuspeed;
size_t size; size_t size;
unsigned int i; unsigned int i;
natural_t numcpus; natural_t numcpus;
mach_msg_type_number_t msg_type; mach_msg_type_number_t msg_type;
processor_cpu_load_info_data_t *info; processor_cpu_load_info_data_t *info;
uv_cpu_info_t* cpu_info; uv_cpu_info_t* cpu_info;
uint64_t cpuspeed;
int err;
size = sizeof(model); size = sizeof(model);
if (sysctlbyname("machdep.cpu.brand_string", &model, &size, NULL, 0) && if (sysctlbyname("machdep.cpu.brand_string", &model, &size, NULL, 0) &&
@@ -343,9 +203,13 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
return UV__ERR(errno); return UV__ERR(errno);
} }
err = uv__get_cpu_speed(&cpuspeed); cpuspeed = 0;
if (err < 0) size = sizeof(cpuspeed);
return err; sysctlbyname("hw.cpufrequency", &cpuspeed, &size, NULL, 0);
if (cpuspeed == 0)
/* If sysctl hw.cputype == CPU_TYPE_ARM64, the correct value is unavailable
* from Apple, but we can hard-code it here to a plausible value. */
cpuspeed = 2400000000;
if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &numcpus, if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &numcpus,
(processor_info_array_t*)&info, (processor_info_array_t*)&info,

View File

@@ -1,422 +0,0 @@
/* Copyright libuv contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "internal.h"
#include <errno.h>
#include <sys/epoll.h>
int uv__epoll_init(uv_loop_t* loop) {
int fd;
fd = epoll_create1(O_CLOEXEC);
/* epoll_create1() can fail either because it's not implemented (old kernel)
* or because it doesn't understand the O_CLOEXEC flag.
*/
if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
fd = epoll_create(256);
if (fd != -1)
uv__cloexec(fd, 1);
}
loop->backend_fd = fd;
if (fd == -1)
return UV__ERR(errno);
return 0;
}
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
struct epoll_event* events;
struct epoll_event dummy;
uintptr_t i;
uintptr_t nfds;
assert(loop->watchers != NULL);
assert(fd >= 0);
events = (struct epoll_event*) loop->watchers[loop->nwatchers];
nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
if (events != NULL)
/* Invalidate events with same file descriptor */
for (i = 0; i < nfds; i++)
if (events[i].data.fd == fd)
events[i].data.fd = -1;
/* Remove the file descriptor from the epoll.
* This avoids a problem where the same file description remains open
* in another process, causing repeated junk epoll events.
*
* We pass in a dummy epoll_event, to work around a bug in old kernels.
*/
if (loop->backend_fd >= 0) {
/* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
* has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
*/
memset(&dummy, 0, sizeof(dummy));
epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
}
}
int uv__io_check_fd(uv_loop_t* loop, int fd) {
struct epoll_event e;
int rc;
memset(&e, 0, sizeof(e));
e.events = POLLIN;
e.data.fd = -1;
rc = 0;
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
if (errno != EEXIST)
rc = UV__ERR(errno);
if (rc == 0)
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
abort();
return rc;
}
void uv__io_poll(uv_loop_t* loop, int timeout) {
/* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
* effectively infinite on 32 bits architectures. To avoid blocking
* indefinitely, we cap the timeout and poll again if necessary.
*
* Note that "30 minutes" is a simplification because it depends on
* the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200,
* that being the largest value I have seen in the wild (and only once.)
*/
static const int max_safe_timeout = 1789569;
static int no_epoll_pwait_cached;
static int no_epoll_wait_cached;
int no_epoll_pwait;
int no_epoll_wait;
struct epoll_event events[1024];
struct epoll_event* pe;
struct epoll_event e;
int real_timeout;
QUEUE* q;
uv__io_t* w;
sigset_t sigset;
uint64_t sigmask;
uint64_t base;
int have_signals;
int nevents;
int count;
int nfds;
int fd;
int op;
int i;
int user_timeout;
int reset_timeout;
if (loop->nfds == 0) {
assert(QUEUE_EMPTY(&loop->watcher_queue));
return;
}
memset(&e, 0, sizeof(e));
while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue);
QUEUE_REMOVE(q);
QUEUE_INIT(q);
w = QUEUE_DATA(q, uv__io_t, watcher_queue);
assert(w->pevents != 0);
assert(w->fd >= 0);
assert(w->fd < (int) loop->nwatchers);
e.events = w->pevents;
e.data.fd = w->fd;
if (w->events == 0)
op = EPOLL_CTL_ADD;
else
op = EPOLL_CTL_MOD;
/* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
* events, skip the syscall and squelch the events after epoll_wait().
*/
if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
if (errno != EEXIST)
abort();
assert(op == EPOLL_CTL_ADD);
/* We've reactivated a file descriptor that's been watched before. */
if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
abort();
}
w->events = w->pevents;
}
sigmask = 0;
if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
sigemptyset(&sigset);
sigaddset(&sigset, SIGPROF);
sigmask |= 1 << (SIGPROF - 1);
}
assert(timeout >= -1);
base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */
real_timeout = timeout;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1;
user_timeout = timeout;
timeout = 0;
} else {
reset_timeout = 0;
user_timeout = 0;
}
/* You could argue there is a dependency between these two but
* ultimately we don't care about their ordering with respect
* to one another. Worst case, we make a few system calls that
* could have been avoided because another thread already knows
* they fail with ENOSYS. Hardly the end of the world.
*/
no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached);
no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);
for (;;) {
/* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/
if (timeout != 0)
uv__metrics_set_provider_entry_time(loop);
/* See the comment for max_safe_timeout for an explanation of why
* this is necessary. Executive summary: kernel bug workaround.
*/
if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
timeout = max_safe_timeout;
if (sigmask != 0 && no_epoll_pwait != 0)
if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
abort();
if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
nfds = epoll_pwait(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout,
&sigset);
if (nfds == -1 && errno == ENOSYS) {
uv__store_relaxed(&no_epoll_pwait_cached, 1);
no_epoll_pwait = 1;
}
} else {
nfds = epoll_wait(loop->backend_fd,
events,
ARRAY_SIZE(events),
timeout);
if (nfds == -1 && errno == ENOSYS) {
uv__store_relaxed(&no_epoll_wait_cached, 1);
no_epoll_wait = 1;
}
}
if (sigmask != 0 && no_epoll_pwait != 0)
if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
abort();
/* Update loop->time unconditionally. It's tempting to skip the update when
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall.
*/
SAVE_ERRNO(uv__update_time(loop));
if (nfds == 0) {
assert(timeout != -1);
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == -1)
continue;
if (timeout == 0)
return;
/* We may have been inside the system call for longer than |timeout|
* milliseconds so we need to update the timestamp to avoid drift.
*/
goto update_timeout;
}
if (nfds == -1) {
if (errno == ENOSYS) {
/* epoll_wait() or epoll_pwait() failed, try the other system call. */
assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
continue;
}
if (errno != EINTR)
abort();
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (timeout == -1)
continue;
if (timeout == 0)
return;
/* Interrupted by a signal. Update timeout and poll again. */
goto update_timeout;
}
have_signals = 0;
nevents = 0;
{
/* Squelch a -Waddress-of-packed-member warning with gcc >= 9. */
union {
struct epoll_event* events;
uv__io_t* watchers;
} x;
x.events = events;
assert(loop->watchers != NULL);
loop->watchers[loop->nwatchers] = x.watchers;
loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
}
for (i = 0; i < nfds; i++) {
pe = events + i;
fd = pe->data.fd;
/* Skip invalidated events, see uv__platform_invalidate_fd */
if (fd == -1)
continue;
assert(fd >= 0);
assert((unsigned) fd < loop->nwatchers);
w = loop->watchers[fd];
if (w == NULL) {
/* File descriptor that we've stopped watching, disarm it.
*
* Ignore all errors because we may be racing with another thread
* when the file descriptor is closed.
*/
epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
continue;
}
/* Give users only events they're interested in. Prevents spurious
* callbacks when previous callback invocation in this loop has stopped
* the current watcher. Also, filters out events that users has not
* requested us to watch.
*/
pe->events &= w->pevents | POLLERR | POLLHUP;
/* Work around an epoll quirk where it sometimes reports just the
* EPOLLERR or EPOLLHUP event. In order to force the event loop to
* move forward, we merge in the read/write events that the watcher
* is interested in; uv__read() and uv__write() will then deal with
* the error or hangup in the usual fashion.
*
* Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
* reads the available data, calls uv_read_stop(), then sometime later
* calls uv_read_start() again. By then, libuv has forgotten about the
* hangup and the kernel won't report EPOLLIN again because there's
* nothing left to read. If anything, libuv is to blame here. The
* current hack is just a quick bandaid; to properly fix it, libuv
* needs to remember the error/hangup event. We should get that for
* free when we switch over to edge-triggered I/O.
*/
if (pe->events == POLLERR || pe->events == POLLHUP)
pe->events |=
w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
if (pe->events != 0) {
/* Run signal watchers last. This also affects child process watchers
* because those are implemented in terms of signal watchers.
*/
if (w == &loop->signal_io_watcher) {
have_signals = 1;
} else {
uv__metrics_update_idle_time(loop);
w->cb(loop, w, pe->events);
}
nevents++;
}
}
if (reset_timeout != 0) {
timeout = user_timeout;
reset_timeout = 0;
}
if (have_signals != 0) {
uv__metrics_update_idle_time(loop);
loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
}
loop->watchers[loop->nwatchers] = NULL;
loop->watchers[loop->nwatchers + 1] = NULL;
if (have_signals != 0)
return; /* Event loop should cycle now so don't poll again. */
if (nevents != 0) {
if (nfds == ARRAY_SIZE(events) && --count != 0) {
/* Poll for more events but don't block this time. */
timeout = 0;
continue;
}
return;
}
if (timeout == 0)
return;
if (timeout == -1)
continue;
update_timeout:
assert(timeout > 0);
real_timeout -= (loop->time - base);
if (real_timeout <= 0)
return;
timeout = real_timeout;
}
}
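
Stripped of the kernel-bug workarounds and signal handling, the epoll engine deleted above is built around the standard epoll_create1/epoll_ctl/epoll_wait cycle. A bare-bones sketch of that cycle for orientation (no libuv internals; hypothetical helper, not from this commit):

#include <sys/epoll.h>
#include <stdio.h>
#include <unistd.h>

/* Watch one fd for readability for up to one second. */
int watch_fd_once(int fd) {
  struct epoll_event ev;
  struct epoll_event out;
  int epfd;
  int nfds;

  epfd = epoll_create1(EPOLL_CLOEXEC);
  if (epfd == -1)
    return -1;

  ev.events = EPOLLIN;
  ev.data.fd = fd;
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) == -1) {
    close(epfd);
    return -1;
  }

  nfds = epoll_wait(epfd, &out, 1, 1000);  /* 1 s timeout */
  if (nfds == 1)
    printf("fd %d is readable\n", out.data.fd);

  close(epfd);
  return nfds;
}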

View File

@@ -91,7 +91,7 @@ uint64_t uv_get_free_memory(void) {
size_t size = sizeof(freecount); size_t size = sizeof(freecount);
if (sysctlbyname("vm.stats.vm.v_free_count", &freecount, &size, NULL, 0)) if (sysctlbyname("vm.stats.vm.v_free_count", &freecount, &size, NULL, 0))
return UV__ERR(errno); return 0;
return (uint64_t) freecount * sysconf(_SC_PAGESIZE); return (uint64_t) freecount * sysconf(_SC_PAGESIZE);
@@ -105,7 +105,7 @@ uint64_t uv_get_total_memory(void) {
size_t size = sizeof(info); size_t size = sizeof(info);
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0)) if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return UV__ERR(errno); return 0;
return (uint64_t) info; return (uint64_t) info;
} }
@@ -116,6 +116,11 @@ uint64_t uv_get_constrained_memory(void) {
} }
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
void uv_loadavg(double avg[3]) { void uv_loadavg(double avg[3]) {
struct loadavg info; struct loadavg info;
size_t size = sizeof(info); size_t size = sizeof(info);
@@ -264,30 +269,6 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
} }
int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if __FreeBSD__ >= 11 && !defined(__DragonFly__)
return sendmmsg(fd,
(struct mmsghdr*) mmsg,
vlen,
0 /* flags */);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if __FreeBSD__ >= 11 && !defined(__DragonFly__)
return recvmmsg(fd,
(struct mmsghdr*) mmsg,
vlen,
0 /* flags */,
NULL /* timeout */);
#else
return errno = ENOSYS, -1;
#endif
}
ssize_t ssize_t
uv__fs_copy_file_range(int fd_in, uv__fs_copy_file_range(int fd_in,
off_t* off_in, off_t* off_in,

View File

@@ -48,7 +48,6 @@
#if defined(__DragonFly__) || \ #if defined(__DragonFly__) || \
defined(__FreeBSD__) || \ defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__OpenBSD__) || \ defined(__OpenBSD__) || \
defined(__NetBSD__) defined(__NetBSD__)
# define HAVE_PREADV 1 # define HAVE_PREADV 1
@@ -57,10 +56,11 @@
#endif #endif
#if defined(__linux__) #if defined(__linux__)
# include "sys/utsname.h" # include <sys/sendfile.h>
# include <sys/utsname.h>
#endif #endif
#if defined(__linux__) || defined(__sun) #if defined(__sun)
# include <sys/sendfile.h> # include <sys/sendfile.h>
# include <sys/sysmacros.h> # include <sys/sysmacros.h>
#endif #endif
@@ -79,7 +79,6 @@
#if defined(__APPLE__) || \ #if defined(__APPLE__) || \
defined(__DragonFly__) || \ defined(__DragonFly__) || \
defined(__FreeBSD__) || \ defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__OpenBSD__) || \ defined(__OpenBSD__) || \
defined(__NetBSD__) defined(__NetBSD__)
# include <sys/param.h> # include <sys/param.h>
@@ -256,7 +255,6 @@ static ssize_t uv__fs_futime(uv_fs_t* req) {
#elif defined(__APPLE__) \ #elif defined(__APPLE__) \
|| defined(__DragonFly__) \ || defined(__DragonFly__) \
|| defined(__FreeBSD__) \ || defined(__FreeBSD__) \
|| defined(__FreeBSD_kernel__) \
|| defined(__NetBSD__) \ || defined(__NetBSD__) \
|| defined(__OpenBSD__) \ || defined(__OpenBSD__) \
|| defined(__sun) || defined(__sun)
@@ -311,7 +309,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
static uv_once_t once = UV_ONCE_INIT; static uv_once_t once = UV_ONCE_INIT;
int r; int r;
#ifdef O_CLOEXEC #ifdef O_CLOEXEC
static int no_cloexec_support; static _Atomic int no_cloexec_support;
#endif #endif
static const char pattern[] = "XXXXXX"; static const char pattern[] = "XXXXXX";
static const size_t pattern_size = sizeof(pattern) - 1; static const size_t pattern_size = sizeof(pattern) - 1;
@@ -336,7 +334,8 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
uv_once(&once, uv__mkostemp_initonce); uv_once(&once, uv__mkostemp_initonce);
#ifdef O_CLOEXEC #ifdef O_CLOEXEC
if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) { if (atomic_load_explicit(&no_cloexec_support, memory_order_relaxed) == 0 &&
uv__mkostemp != NULL) {
r = uv__mkostemp(path, O_CLOEXEC); r = uv__mkostemp(path, O_CLOEXEC);
if (r >= 0) if (r >= 0)
@@ -349,7 +348,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) {
/* We set the static variable so that next calls don't even /* We set the static variable so that next calls don't even
try to use mkostemp. */ try to use mkostemp. */
uv__store_relaxed(&no_cloexec_support, 1); atomic_store_explicit(&no_cloexec_support, 1, memory_order_relaxed);
} }
#endif /* O_CLOEXEC */ #endif /* O_CLOEXEC */
@@ -459,7 +458,7 @@ static ssize_t uv__fs_preadv(uv_file fd,
static ssize_t uv__fs_read(uv_fs_t* req) { static ssize_t uv__fs_read(uv_fs_t* req) {
#if defined(__linux__) #if defined(__linux__)
static int no_preadv; static _Atomic int no_preadv;
#endif #endif
unsigned int iovmax; unsigned int iovmax;
ssize_t result; ssize_t result;
@@ -483,19 +482,19 @@ static ssize_t uv__fs_read(uv_fs_t* req) {
result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off); result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else #else
# if defined(__linux__) # if defined(__linux__)
if (uv__load_relaxed(&no_preadv)) retry: if (atomic_load_explicit(&no_preadv, memory_order_relaxed)) retry:
# endif # endif
{ {
result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off); result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
} }
# if defined(__linux__) # if defined(__linux__)
else { else {
result = uv__preadv(req->file, result = preadv(req->file,
(struct iovec*)req->bufs, (struct iovec*) req->bufs,
req->nbufs, req->nbufs,
req->off); req->off);
if (result == -1 && errno == ENOSYS) { if (result == -1 && errno == ENOSYS) {
uv__store_relaxed(&no_preadv, 1); atomic_store_explicit(&no_preadv, 1, memory_order_relaxed);
goto retry; goto retry;
} }
} }
@@ -516,7 +515,7 @@ done:
if (result == -1 && errno == EOPNOTSUPP) { if (result == -1 && errno == EOPNOTSUPP) {
struct stat buf; struct stat buf;
ssize_t rc; ssize_t rc;
rc = fstat(req->file, &buf); rc = uv__fstat(req->file, &buf);
if (rc == 0 && S_ISDIR(buf.st_mode)) { if (rc == 0 && S_ISDIR(buf.st_mode)) {
errno = EISDIR; errno = EISDIR;
} }
@@ -527,19 +526,12 @@ done:
} }
#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8) static int uv__fs_scandir_filter(const uv__dirent_t* dent) {
#define UV_CONST_DIRENT uv__dirent_t
#else
#define UV_CONST_DIRENT const uv__dirent_t
#endif
static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) {
return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0; return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
} }
static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) { static int uv__fs_scandir_sort(const uv__dirent_t** a, const uv__dirent_t** b) {
return strcmp((*a)->d_name, (*b)->d_name); return strcmp((*a)->d_name, (*b)->d_name);
} }
@@ -715,7 +707,7 @@ static ssize_t uv__fs_readlink(uv_fs_t* req) {
/* We may not have a real PATH_MAX. Read size of link. */ /* We may not have a real PATH_MAX. Read size of link. */
struct stat st; struct stat st;
int ret; int ret;
ret = lstat(req->path, &st); ret = uv__lstat(req->path, &st);
if (ret != 0) if (ret != 0)
return -1; return -1;
if (!S_ISLNK(st.st_mode)) { if (!S_ISLNK(st.st_mode)) {
@@ -908,14 +900,14 @@ out:
#ifdef __linux__ #ifdef __linux__
static unsigned uv__kernel_version(void) { static unsigned uv__kernel_version(void) {
static unsigned cached_version; static _Atomic unsigned cached_version;
struct utsname u; struct utsname u;
unsigned version; unsigned version;
unsigned major; unsigned major;
unsigned minor; unsigned minor;
unsigned patch; unsigned patch;
version = uv__load_relaxed(&cached_version); version = atomic_load_explicit(&cached_version, memory_order_relaxed);
if (version != 0) if (version != 0)
return version; return version;
@@ -926,7 +918,7 @@ static unsigned uv__kernel_version(void) {
return 0; return 0;
version = major * 65536 + minor * 256 + patch; version = major * 65536 + minor * 256 + patch;
uv__store_relaxed(&cached_version, version); atomic_store_explicit(&cached_version, version, memory_order_relaxed);
return version; return version;
} }
@@ -968,10 +960,10 @@ static int uv__is_cifs_or_smb(int fd) {
static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off, static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
int out_fd, size_t len) { int out_fd, size_t len) {
static int no_copy_file_range_support; static _Atomic int no_copy_file_range_support;
ssize_t r; ssize_t r;
if (uv__load_relaxed(&no_copy_file_range_support)) { if (atomic_load_explicit(&no_copy_file_range_support, memory_order_relaxed)) {
errno = ENOSYS; errno = ENOSYS;
return -1; return -1;
} }
@@ -990,7 +982,7 @@ static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
errno = ENOSYS; /* Use fallback. */ errno = ENOSYS; /* Use fallback. */
break; break;
case ENOSYS: case ENOSYS:
uv__store_relaxed(&no_copy_file_range_support, 1); atomic_store_explicit(&no_copy_file_range_support, 1, memory_order_relaxed);
break; break;
case EPERM: case EPERM:
/* It's been reported that CIFS spuriously fails. /* It's been reported that CIFS spuriously fails.
@@ -1061,10 +1053,7 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
return -1; return -1;
} }
#elif defined(__APPLE__) || \ #elif defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__)
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__)
{ {
off_t len; off_t len;
ssize_t r; ssize_t r;
@@ -1088,15 +1077,6 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
#endif #endif
len = 0; len = 0;
r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0); r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
#elif defined(__FreeBSD_kernel__)
len = 0;
r = bsd_sendfile(in_fd,
out_fd,
req->off,
req->bufsml[0].len,
NULL,
&len,
0);
#else #else
/* The darwin sendfile takes len as an input for the length to send, /* The darwin sendfile takes len as an input for the length to send,
* so make sure to initialize it with the caller's value. */ * so make sure to initialize it with the caller's value. */
@@ -1148,7 +1128,6 @@ static ssize_t uv__fs_utime(uv_fs_t* req) {
#elif defined(__APPLE__) \ #elif defined(__APPLE__) \
|| defined(__DragonFly__) \ || defined(__DragonFly__) \
|| defined(__FreeBSD__) \ || defined(__FreeBSD__) \
|| defined(__FreeBSD_kernel__) \
|| defined(__NetBSD__) \ || defined(__NetBSD__) \
|| defined(__OpenBSD__) || defined(__OpenBSD__)
struct timeval tv[2]; struct timeval tv[2];
@@ -1190,7 +1169,6 @@ static ssize_t uv__fs_lutime(uv_fs_t* req) {
#elif defined(__APPLE__) || \ #elif defined(__APPLE__) || \
defined(__DragonFly__) || \ defined(__DragonFly__) || \
defined(__FreeBSD__) || \ defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__NetBSD__) defined(__NetBSD__)
struct timeval tv[2]; struct timeval tv[2];
tv[0] = uv__fs_to_timeval(req->atime); tv[0] = uv__fs_to_timeval(req->atime);
@@ -1241,10 +1219,10 @@ static ssize_t uv__fs_write(uv_fs_t* req) {
} }
# if defined(__linux__) # if defined(__linux__)
else { else {
r = uv__pwritev(req->file, r = pwritev(req->file,
(struct iovec*) req->bufs, (struct iovec*) req->bufs,
req->nbufs, req->nbufs,
req->off); req->off);
if (r == -1 && errno == ENOSYS) { if (r == -1 && errno == ENOSYS) {
no_pwritev = 1; no_pwritev = 1;
goto retry; goto retry;
@@ -1288,7 +1266,7 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
return srcfd; return srcfd;
/* Get the source file's mode. */ /* Get the source file's mode. */
if (fstat(srcfd, &src_statsbuf)) { if (uv__fstat(srcfd, &src_statsbuf)) {
err = UV__ERR(errno); err = UV__ERR(errno);
goto out; goto out;
} }
@@ -1316,7 +1294,7 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
destination are not the same file. If they are the same, bail out early. */ destination are not the same file. If they are the same, bail out early. */
if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) { if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
/* Get the destination file's mode. */ /* Get the destination file's mode. */
if (fstat(dstfd, &dst_statsbuf)) { if (uv__fstat(dstfd, &dst_statsbuf)) {
err = UV__ERR(errno); err = UV__ERR(errno);
goto out; goto out;
} }
@@ -1330,7 +1308,19 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
/* Truncate the file in case the destination already existed. */ /* Truncate the file in case the destination already existed. */
if (ftruncate(dstfd, 0) != 0) { if (ftruncate(dstfd, 0) != 0) {
err = UV__ERR(errno); err = UV__ERR(errno);
goto out;
/* ftruncate() on ceph-fuse fails with EACCES when the file is created
* with read only permissions. Since ftruncate() on a newly created
* file is a meaningless operation anyway, detect that condition
* and squelch the error.
*/
if (err != UV_EACCES)
goto out;
if (dst_statsbuf.st_size > 0)
goto out;
err = 0;
} }
} }
@@ -1514,14 +1504,14 @@ static int uv__fs_statx(int fd,
uv_stat_t* buf) { uv_stat_t* buf) {
STATIC_ASSERT(UV_ENOSYS != -1); STATIC_ASSERT(UV_ENOSYS != -1);
#ifdef __linux__ #ifdef __linux__
static int no_statx; static _Atomic int no_statx;
struct uv__statx statxbuf; struct uv__statx statxbuf;
int dirfd; int dirfd;
int flags; int flags;
int mode; int mode;
int rc; int rc;
if (uv__load_relaxed(&no_statx)) if (atomic_load_explicit(&no_statx, memory_order_relaxed))
return UV_ENOSYS; return UV_ENOSYS;
dirfd = AT_FDCWD; dirfd = AT_FDCWD;
@ -1555,30 +1545,11 @@ static int uv__fs_statx(int fd,
* implemented, rc might return 1 with 0 set as the error code in which * implemented, rc might return 1 with 0 set as the error code in which
* case we return ENOSYS. * case we return ENOSYS.
*/ */
uv__store_relaxed(&no_statx, 1); atomic_store_explicit(&no_statx, 1, memory_order_relaxed);
return UV_ENOSYS; return UV_ENOSYS;
} }
buf->st_dev = makedev(statxbuf.stx_dev_major, statxbuf.stx_dev_minor); uv__statx_to_stat(&statxbuf, buf);
buf->st_mode = statxbuf.stx_mode;
buf->st_nlink = statxbuf.stx_nlink;
buf->st_uid = statxbuf.stx_uid;
buf->st_gid = statxbuf.stx_gid;
buf->st_rdev = makedev(statxbuf.stx_rdev_major, statxbuf.stx_rdev_minor);
buf->st_ino = statxbuf.stx_ino;
buf->st_size = statxbuf.stx_size;
buf->st_blksize = statxbuf.stx_blksize;
buf->st_blocks = statxbuf.stx_blocks;
buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec;
buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec;
buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec;
buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec;
buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec;
buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec;
buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec;
buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec;
buf->st_flags = 0;
buf->st_gen = 0;
return 0; return 0;
#else #else
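
The field-by-field statx-to-uv_stat_t copy removed above is now factored into a shared uv__statx_to_stat() helper (declared later in this diff, in the unix internal header). Below is a minimal standalone sketch of that kind of conversion, using illustrative struct and function names rather than libuv's, and only a subset of the fields visible in the removed lines:

#include <stdint.h>
#include <stdio.h>

struct sketch_timestamp { int64_t tv_sec; uint32_t tv_nsec; };

struct sketch_statx {           /* subset of the stx_* fields above */
  uint16_t stx_mode;
  uint64_t stx_ino;
  uint64_t stx_size;
  struct sketch_timestamp stx_mtime;
};

struct sketch_stat {            /* subset of uv_stat_t-like fields */
  unsigned int st_mode;
  uint64_t st_ino;
  uint64_t st_size;
  struct { int64_t tv_sec; long tv_nsec; } st_mtim;
};

static void sketch_statx_to_stat(const struct sketch_statx* in,
                                 struct sketch_stat* out) {
  out->st_mode = in->stx_mode;
  out->st_ino = in->stx_ino;
  out->st_size = in->stx_size;
  out->st_mtim.tv_sec = in->stx_mtime.tv_sec;
  out->st_mtim.tv_nsec = in->stx_mtime.tv_nsec;
}

int main(void) {
  struct sketch_statx in = { 0644, 42, 1024, { 1700000000, 0 } };
  struct sketch_stat out;
  sketch_statx_to_stat(&in, &out);
  printf("ino=%llu size=%llu\n",
         (unsigned long long) out.st_ino, (unsigned long long) out.st_size);
  return 0;
}
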
@ -1595,7 +1566,7 @@ static int uv__fs_stat(const char *path, uv_stat_t *buf) {
if (ret != UV_ENOSYS) if (ret != UV_ENOSYS)
return ret; return ret;
ret = stat(path, &pbuf); ret = uv__stat(path, &pbuf);
if (ret == 0) if (ret == 0)
uv__to_stat(&pbuf, buf); uv__to_stat(&pbuf, buf);
@ -1611,7 +1582,7 @@ static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
if (ret != UV_ENOSYS) if (ret != UV_ENOSYS)
return ret; return ret;
ret = lstat(path, &pbuf); ret = uv__lstat(path, &pbuf);
if (ret == 0) if (ret == 0)
uv__to_stat(&pbuf, buf); uv__to_stat(&pbuf, buf);
@ -1627,7 +1598,7 @@ static int uv__fs_fstat(int fd, uv_stat_t *buf) {
if (ret != UV_ENOSYS) if (ret != UV_ENOSYS)
return ret; return ret;
ret = fstat(fd, &pbuf); ret = uv__fstat(fd, &pbuf);
if (ret == 0) if (ret == 0)
uv__to_stat(&pbuf, buf); uv__to_stat(&pbuf, buf);
@ -1822,6 +1793,9 @@ int uv_fs_chown(uv_loop_t* loop,
int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(CLOSE); INIT(CLOSE);
req->file = file; req->file = file;
if (cb != NULL)
if (uv__iou_fs_close(loop, req))
return 0;
POST; POST;
} }
@ -1869,6 +1843,9 @@ int uv_fs_lchown(uv_loop_t* loop,
int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(FDATASYNC); INIT(FDATASYNC);
req->file = file; req->file = file;
if (cb != NULL)
if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* IORING_FSYNC_DATASYNC */ 1))
return 0;
POST; POST;
} }
@ -1876,6 +1853,9 @@ int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(FSTAT); INIT(FSTAT);
req->file = file; req->file = file;
if (cb != NULL)
if (uv__iou_fs_statx(loop, req, /* is_fstat */ 1, /* is_lstat */ 0))
return 0;
POST; POST;
} }
@ -1883,6 +1863,9 @@ int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
INIT(FSYNC); INIT(FSYNC);
req->file = file; req->file = file;
if (cb != NULL)
if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* no flags */ 0))
return 0;
POST; POST;
} }
@ -1929,6 +1912,9 @@ int uv_fs_lutime(uv_loop_t* loop,
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
INIT(LSTAT); INIT(LSTAT);
PATH; PATH;
if (cb != NULL)
if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 1))
return 0;
POST; POST;
} }
@ -1990,6 +1976,9 @@ int uv_fs_open(uv_loop_t* loop,
PATH; PATH;
req->flags = flags; req->flags = flags;
req->mode = mode; req->mode = mode;
if (cb != NULL)
if (uv__iou_fs_open(loop, req))
return 0;
POST; POST;
} }
@ -2018,6 +2007,11 @@ int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
memcpy(req->bufs, bufs, nbufs * sizeof(*bufs)); memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
req->off = off; req->off = off;
if (cb != NULL)
if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 1))
return 0;
POST; POST;
} }
@ -2125,6 +2119,9 @@ int uv_fs_sendfile(uv_loop_t* loop,
int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
INIT(STAT); INIT(STAT);
PATH; PATH;
if (cb != NULL)
if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 0))
return 0;
POST; POST;
} }
@ -2188,6 +2185,11 @@ int uv_fs_write(uv_loop_t* loop,
memcpy(req->bufs, bufs, nbufs * sizeof(*bufs)); memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
req->off = off; req->off = off;
if (cb != NULL)
if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 0))
return 0;
POST; POST;
} }
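
Each of the uv_fs_* wrappers touched above gains the same shape: when a callback is supplied (an asynchronous request), the request is first offered to io_uring via one of the uv__iou_fs_* helpers, which return nonzero once the operation has been queued on the ring; otherwise execution falls through to POST, the existing thread-pool path. A standalone sketch of that dispatch shape, with stand-in names (not libuv's macros or internals):

#include <stdio.h>

typedef void (*fs_cb)(int result);

/* Stand-in for uv__iou_fs_*(): returns 1 when the request was queued on the
 * ring, 0 when the caller must fall back to the default path. */
static int try_io_uring_submit(fs_cb cb) {
  (void) cb;
  return 0;                     /* pretend the ring is unavailable */
}

/* Stand-in for the POST macro's thread-pool submission. */
static void thread_pool_submit(fs_cb cb) {
  cb(0);                        /* pretend the blocking syscall succeeded */
}

static int fs_request(fs_cb cb) {
  if (cb != NULL)
    if (try_io_uring_submit(cb))
      return 0;                 /* handled by io_uring */
  thread_pool_submit(cb);       /* fallback, same path as before this change */
  return 0;
}

static void on_done(int result) {
  printf("done: %d\n", result);
}

int main(void) {
  return fs_request(on_done);
}
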
@ -2196,7 +2198,7 @@ void uv_fs_req_cleanup(uv_fs_t* req) {
if (req == NULL) if (req == NULL)
return; return;
/* Only necessary for asychronous requests, i.e., requests with a callback. /* Only necessary for asynchronous requests, i.e., requests with a callback.
* Synchronous ones don't copy their arguments and have req->path and * Synchronous ones don't copy their arguments and have req->path and
* req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
* UV_FS_MKSTEMP are the exception to the rule, they always allocate memory. * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.

View File

@ -132,7 +132,6 @@ static void (*pCFRunLoopWakeUp)(CFRunLoopRef);
static CFStringRef (*pCFStringCreateWithFileSystemRepresentation)( static CFStringRef (*pCFStringCreateWithFileSystemRepresentation)(
CFAllocatorRef, CFAllocatorRef,
const char*); const char*);
static CFStringEncoding (*pCFStringGetSystemEncoding)(void);
static CFStringRef (*pkCFRunLoopDefaultMode); static CFStringRef (*pkCFRunLoopDefaultMode);
static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef, static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef,
FSEventStreamCallback, FSEventStreamCallback,
@ -141,7 +140,6 @@ static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef,
FSEventStreamEventId, FSEventStreamEventId,
CFTimeInterval, CFTimeInterval,
FSEventStreamCreateFlags); FSEventStreamCreateFlags);
static void (*pFSEventStreamFlushSync)(FSEventStreamRef);
static void (*pFSEventStreamInvalidate)(FSEventStreamRef); static void (*pFSEventStreamInvalidate)(FSEventStreamRef);
static void (*pFSEventStreamRelease)(FSEventStreamRef); static void (*pFSEventStreamRelease)(FSEventStreamRef);
static void (*pFSEventStreamScheduleWithRunLoop)(FSEventStreamRef, static void (*pFSEventStreamScheduleWithRunLoop)(FSEventStreamRef,
@ -331,8 +329,9 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef,
/* Runs in CF thread */ /* Runs in CF thread */
static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) { static int uv__fsevents_create_stream(uv__cf_loop_state_t* state,
uv__cf_loop_state_t* state; uv_loop_t* loop,
CFArrayRef paths) {
FSEventStreamContext ctx; FSEventStreamContext ctx;
FSEventStreamRef ref; FSEventStreamRef ref;
CFAbsoluteTime latency; CFAbsoluteTime latency;
@ -373,10 +372,7 @@ static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) {
flags); flags);
assert(ref != NULL); assert(ref != NULL);
state = loop->cf_state; pFSEventStreamScheduleWithRunLoop(ref, state->loop, *pkCFRunLoopDefaultMode);
pFSEventStreamScheduleWithRunLoop(ref,
state->loop,
*pkCFRunLoopDefaultMode);
if (!pFSEventStreamStart(ref)) { if (!pFSEventStreamStart(ref)) {
pFSEventStreamInvalidate(ref); pFSEventStreamInvalidate(ref);
pFSEventStreamRelease(ref); pFSEventStreamRelease(ref);
@ -389,11 +385,7 @@ static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) {
/* Runs in CF thread */ /* Runs in CF thread */
static void uv__fsevents_destroy_stream(uv_loop_t* loop) { static void uv__fsevents_destroy_stream(uv__cf_loop_state_t* state) {
uv__cf_loop_state_t* state;
state = loop->cf_state;
if (state->fsevent_stream == NULL) if (state->fsevent_stream == NULL)
return; return;
@ -408,9 +400,9 @@ static void uv__fsevents_destroy_stream(uv_loop_t* loop) {
/* Runs in CF thread, when there're new fsevent handles to add to stream */ /* Runs in CF thread, when there're new fsevent handles to add to stream */
static void uv__fsevents_reschedule(uv_fs_event_t* handle, static void uv__fsevents_reschedule(uv__cf_loop_state_t* state,
uv_loop_t* loop,
uv__cf_loop_signal_type_t type) { uv__cf_loop_signal_type_t type) {
uv__cf_loop_state_t* state;
QUEUE* q; QUEUE* q;
uv_fs_event_t* curr; uv_fs_event_t* curr;
CFArrayRef cf_paths; CFArrayRef cf_paths;
@ -419,7 +411,6 @@ static void uv__fsevents_reschedule(uv_fs_event_t* handle,
int err; int err;
unsigned int path_count; unsigned int path_count;
state = handle->loop->cf_state;
paths = NULL; paths = NULL;
cf_paths = NULL; cf_paths = NULL;
err = 0; err = 0;
@ -438,7 +429,7 @@ static void uv__fsevents_reschedule(uv_fs_event_t* handle,
uv_mutex_unlock(&state->fsevent_mutex); uv_mutex_unlock(&state->fsevent_mutex);
/* Destroy previous FSEventStream */ /* Destroy previous FSEventStream */
uv__fsevents_destroy_stream(handle->loop); uv__fsevents_destroy_stream(state);
/* Any failure below will be a memory failure */ /* Any failure below will be a memory failure */
err = UV_ENOMEM; err = UV_ENOMEM;
@ -478,7 +469,7 @@ static void uv__fsevents_reschedule(uv_fs_event_t* handle,
err = UV_ENOMEM; err = UV_ENOMEM;
goto final; goto final;
} }
err = uv__fsevents_create_stream(handle->loop, cf_paths); err = uv__fsevents_create_stream(state, loop, cf_paths);
} }
final: final:
@ -563,10 +554,8 @@ static int uv__fsevents_global_init(void) {
V(core_foundation_handle, CFRunLoopStop); V(core_foundation_handle, CFRunLoopStop);
V(core_foundation_handle, CFRunLoopWakeUp); V(core_foundation_handle, CFRunLoopWakeUp);
V(core_foundation_handle, CFStringCreateWithFileSystemRepresentation); V(core_foundation_handle, CFStringCreateWithFileSystemRepresentation);
V(core_foundation_handle, CFStringGetSystemEncoding);
V(core_foundation_handle, kCFRunLoopDefaultMode); V(core_foundation_handle, kCFRunLoopDefaultMode);
V(core_services_handle, FSEventStreamCreate); V(core_services_handle, FSEventStreamCreate);
V(core_services_handle, FSEventStreamFlushSync);
V(core_services_handle, FSEventStreamInvalidate); V(core_services_handle, FSEventStreamInvalidate);
V(core_services_handle, FSEventStreamRelease); V(core_services_handle, FSEventStreamRelease);
V(core_services_handle, FSEventStreamScheduleWithRunLoop); V(core_services_handle, FSEventStreamScheduleWithRunLoop);
@ -767,7 +756,7 @@ static void uv__cf_loop_cb(void* arg) {
if (s->handle == NULL) if (s->handle == NULL)
pCFRunLoopStop(state->loop); pCFRunLoopStop(state->loop);
else else
uv__fsevents_reschedule(s->handle, s->type); uv__fsevents_reschedule(state, loop, s->type);
uv__free(s); uv__free(s);
} }
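
The fsevents hunks above are a mechanical refactor: helpers such as uv__fsevents_create_stream(), uv__fsevents_destroy_stream() and uv__fsevents_reschedule() now receive the per-loop CF state as a parameter instead of re-deriving it from loop->cf_state inside every function. A tiny illustrative sketch of the before/after shape (types and names invented, not libuv's):

#include <stdio.h>

struct demo_loop_state { int stream_count; };
struct demo_loop { struct demo_loop_state* cf_state; };

/* Before: each helper reached back into the loop to find its state. */
static void destroy_stream_old(struct demo_loop* loop) {
  struct demo_loop_state* state = loop->cf_state;
  state->stream_count = 0;
}

/* After: the caller, which already holds the state, passes it in directly. */
static void destroy_stream_new(struct demo_loop_state* state) {
  state->stream_count = 0;
}

int main(void) {
  struct demo_loop_state state = { 1 };
  struct demo_loop loop = { &state };
  destroy_stream_old(&loop);
  destroy_stream_new(&state);
  printf("%d\n", state.stream_count);
  return 0;
}
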

View File

@ -84,6 +84,11 @@ uint64_t uv_get_constrained_memory(void) {
} }
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
int uv_resident_set_memory(size_t* rss) { int uv_resident_set_memory(size_t* rss) {
area_info area; area_info area;
ssize_t cookie; ssize_t cookie;

View File

@ -165,3 +165,8 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
uint64_t uv_get_constrained_memory(void) { uint64_t uv_get_constrained_memory(void) {
return 0; /* Memory constraints are unknown. */ return 0; /* Memory constraints are unknown. */
} }
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
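
libuv 1.45.0 introduces uv_get_available_memory(); on platforms without a more precise source (as in the hunks above and below) it simply forwards to uv_get_free_memory(). A small caller-side example, assuming libuv >= 1.45.0 headers are installed and the program is linked with -luv:

#include <stdio.h>
#include <uv.h>

int main(void) {
  printf("free:        %llu\n", (unsigned long long) uv_get_free_memory());
  printf("available:   %llu\n", (unsigned long long) uv_get_available_memory());
  printf("constrained: %llu\n", (unsigned long long) uv_get_constrained_memory());
  return 0;
}
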

View File

@ -249,6 +249,11 @@ uint64_t uv_get_constrained_memory(void) {
} }
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
void uv_loadavg(double avg[3]) { void uv_loadavg(double avg[3]) {
SSTS0200 rcvr; SSTS0200 rcvr;

View File

@ -26,21 +26,34 @@
#include <assert.h> #include <assert.h>
#include <limits.h> /* _POSIX_PATH_MAX, PATH_MAX */ #include <limits.h> /* _POSIX_PATH_MAX, PATH_MAX */
#include <stdint.h>
#include <stdlib.h> /* abort */ #include <stdlib.h> /* abort */
#include <string.h> /* strrchr */ #include <string.h> /* strrchr */
#include <fcntl.h> /* O_CLOEXEC and O_NONBLOCK, if supported. */ #include <fcntl.h> /* O_CLOEXEC and O_NONBLOCK, if supported. */
#include <stdio.h> #include <stdio.h>
#include <errno.h> #include <errno.h>
#include <sys/socket.h> #include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#define uv__msan_unpoison(p, n) \
do { \
(void) (p); \
(void) (n); \
} while (0)
#if defined(__has_feature)
# if __has_feature(memory_sanitizer)
# include <sanitizer/msan_interface.h>
# undef uv__msan_unpoison
# define uv__msan_unpoison __msan_unpoison
# endif
#endif
#if defined(__STRICT_ANSI__) #if defined(__STRICT_ANSI__)
# define inline __inline # define inline __inline
#endif #endif
#if defined(__linux__)
# include "linux-syscalls.h"
#endif /* __linux__ */
#if defined(__MVS__) #if defined(__MVS__)
# include "os390-syscalls.h" # include "os390-syscalls.h"
#endif /* __MVS__ */ #endif /* __MVS__ */
@ -79,13 +92,11 @@
# define UV__PATH_MAX 8192 # define UV__PATH_MAX 8192
#endif #endif
#if defined(__ANDROID__) union uv__sockaddr {
int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset); struct sockaddr_in6 in6;
# ifdef pthread_sigmask struct sockaddr_in in;
# undef pthread_sigmask struct sockaddr addr;
# endif };
# define pthread_sigmask(how, set, oldset) uv__pthread_sigmask(how, set, oldset)
#endif
#define ACCESS_ONCE(type, var) \ #define ACCESS_ONCE(type, var) \
(*(volatile type*) &(var)) (*(volatile type*) &(var))
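
Alongside dropping the Android pthread_sigmask wrapper, the header gains a union uv__sockaddr so address-handling code can hold either an IPv4 or an IPv6 sockaddr in one suitably sized, suitably aligned object. An illustrative (non-libuv) use of such a union:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

union demo_sockaddr {
  struct sockaddr_in6 in6;
  struct sockaddr_in in;
  struct sockaddr addr;
};

static void print_port(const union demo_sockaddr* sa) {
  if (sa->addr.sa_family == AF_INET6)
    printf("v6 port %u\n", ntohs(sa->in6.sin6_port));
  else if (sa->addr.sa_family == AF_INET)
    printf("v4 port %u\n", ntohs(sa->in.sin_port));
}

int main(void) {
  union demo_sockaddr sa;
  memset(&sa, 0, sizeof(sa));
  sa.in.sin_family = AF_INET;        /* one buffer serves both families */
  sa.in.sin_port = htons(8080);
  print_port(&sa);
  return 0;
}
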
@ -166,12 +177,42 @@ struct uv__stream_queued_fds_s {
int fds[1]; int fds[1];
}; };
#ifdef __linux__
struct uv__statx_timestamp {
int64_t tv_sec;
uint32_t tv_nsec;
int32_t unused0;
};
struct uv__statx {
uint32_t stx_mask;
uint32_t stx_blksize;
uint64_t stx_attributes;
uint32_t stx_nlink;
uint32_t stx_uid;
uint32_t stx_gid;
uint16_t stx_mode;
uint16_t unused0;
uint64_t stx_ino;
uint64_t stx_size;
uint64_t stx_blocks;
uint64_t stx_attributes_mask;
struct uv__statx_timestamp stx_atime;
struct uv__statx_timestamp stx_btime;
struct uv__statx_timestamp stx_ctime;
struct uv__statx_timestamp stx_mtime;
uint32_t stx_rdev_major;
uint32_t stx_rdev_minor;
uint32_t stx_dev_major;
uint32_t stx_dev_minor;
uint64_t unused1[14];
};
#endif /* __linux__ */
#if defined(_AIX) || \ #if defined(_AIX) || \
defined(__APPLE__) || \ defined(__APPLE__) || \
defined(__DragonFly__) || \ defined(__DragonFly__) || \
defined(__FreeBSD__) || \ defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__linux__) || \ defined(__linux__) || \
defined(__OpenBSD__) || \ defined(__OpenBSD__) || \
defined(__NetBSD__) defined(__NetBSD__)
@ -258,10 +299,10 @@ int uv__signal_loop_fork(uv_loop_t* loop);
/* platform specific */ /* platform specific */
uint64_t uv__hrtime(uv_clocktype_t type); uint64_t uv__hrtime(uv_clocktype_t type);
int uv__kqueue_init(uv_loop_t* loop); int uv__kqueue_init(uv_loop_t* loop);
int uv__epoll_init(uv_loop_t* loop);
int uv__platform_loop_init(uv_loop_t* loop); int uv__platform_loop_init(uv_loop_t* loop);
void uv__platform_loop_delete(uv_loop_t* loop); void uv__platform_loop_delete(uv_loop_t* loop);
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd); void uv__platform_invalidate_fd(uv_loop_t* loop, int fd);
int uv__process_init(uv_loop_t* loop);
/* various */ /* various */
void uv__async_close(uv_async_t* handle); void uv__async_close(uv_async_t* handle);
@ -278,7 +319,6 @@ size_t uv__thread_stack_size(void);
void uv__udp_close(uv_udp_t* handle); void uv__udp_close(uv_udp_t* handle);
void uv__udp_finish_close(uv_udp_t* handle); void uv__udp_finish_close(uv_udp_t* handle);
FILE* uv__open_file(const char* path); FILE* uv__open_file(const char* path);
int uv__getpwuid_r(uv_passwd_t* pwd);
int uv__search_path(const char* prog, char* buf, size_t* buflen); int uv__search_path(const char* prog, char* buf, size_t* buflen);
void uv__wait_children(uv_loop_t* loop); void uv__wait_children(uv_loop_t* loop);
@ -289,6 +329,28 @@ int uv__random_getentropy(void* buf, size_t buflen);
int uv__random_readpath(const char* path, void* buf, size_t buflen); int uv__random_readpath(const char* path, void* buf, size_t buflen);
int uv__random_sysctl(void* buf, size_t buflen); int uv__random_sysctl(void* buf, size_t buflen);
/* io_uring */
#ifdef __linux__
int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop,
uv_fs_t* req,
uint32_t fsync_flags);
int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req);
int uv__iou_fs_read_or_write(uv_loop_t* loop,
uv_fs_t* req,
int is_read);
int uv__iou_fs_statx(uv_loop_t* loop,
uv_fs_t* req,
int is_fstat,
int is_lstat);
#else
#define uv__iou_fs_close(loop, req) 0
#define uv__iou_fs_fsync_or_fdatasync(loop, req, fsync_flags) 0
#define uv__iou_fs_open(loop, req) 0
#define uv__iou_fs_read_or_write(loop, req, is_read) 0
#define uv__iou_fs_statx(loop, req, is_fstat, is_lstat) 0
#endif
#if defined(__APPLE__) #if defined(__APPLE__)
int uv___stream_fd(const uv_stream_t* handle); int uv___stream_fd(const uv_stream_t* handle);
#define uv__stream_fd(handle) (uv___stream_fd((const uv_stream_t*) (handle))) #define uv__stream_fd(handle) (uv___stream_fd((const uv_stream_t*) (handle)))
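
On non-Linux builds the uv__iou_fs_* names above become constant-0 macros, so the call sites added in fs.c compile unchanged and always take the thread-pool path. A compact sketch of that stub-macro idiom (illustrative names only, not libuv's):

#include <stdio.h>

#ifdef __linux__
static int try_fast_path(int arg) { (void) arg; return 0; /* real probe here */ }
#else
#define try_fast_path(arg) 0   /* feature absent: call sites still compile */
#endif

int main(void) {
  if (try_fast_path(42))
    puts("handled by fast path");
  else
    puts("handled by default path");
  return 0;
}
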
@ -322,8 +384,51 @@ UV_UNUSED(static char* uv__basename_r(const char* path)) {
return s + 1; return s + 1;
} }
UV_UNUSED(static int uv__fstat(int fd, struct stat* s)) {
int rc;
rc = fstat(fd, s);
if (rc >= 0)
uv__msan_unpoison(s, sizeof(*s));
return rc;
}
UV_UNUSED(static int uv__lstat(const char* path, struct stat* s)) {
int rc;
rc = lstat(path, s);
if (rc >= 0)
uv__msan_unpoison(s, sizeof(*s));
return rc;
}
UV_UNUSED(static int uv__stat(const char* path, struct stat* s)) {
int rc;
rc = stat(path, s);
if (rc >= 0)
uv__msan_unpoison(s, sizeof(*s));
return rc;
}
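
These wrappers exist so that, under MemorySanitizer, buffers filled in by the kernel are explicitly marked as initialized; in normal builds uv__msan_unpoison() compiles away to nothing. A standalone demo of the same idea, using its own names rather than libuv's:

#include <stdio.h>
#include <sys/stat.h>

#define demo_msan_unpoison(p, n) do { (void) (p); (void) (n); } while (0)
#if defined(__has_feature)
# if __has_feature(memory_sanitizer)
#  include <sanitizer/msan_interface.h>
#  undef demo_msan_unpoison
#  define demo_msan_unpoison __msan_unpoison
# endif
#endif

static int demo_fstat(int fd, struct stat* s) {
  int rc = fstat(fd, s);
  if (rc >= 0)
    demo_msan_unpoison(s, sizeof(*s));   /* tell MSan the kernel wrote it */
  return rc;
}

int main(void) {
  struct stat s;
  if (demo_fstat(0, &s) == 0)
    printf("stdin size: %lld\n", (long long) s.st_size);
  return 0;
}
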
#if defined(__linux__) #if defined(__linux__)
int uv__inotify_fork(uv_loop_t* loop, void* old_watchers); ssize_t
uv__fs_copy_file_range(int fd_in,
off_t* off_in,
int fd_out,
off_t* off_out,
size_t len,
unsigned int flags);
int uv__statx(int dirfd,
const char* path,
int flags,
unsigned int mask,
struct uv__statx* statxbuf);
void uv__statx_to_stat(const struct uv__statx* statxbuf, uv_stat_t* buf);
ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags);
#endif #endif
typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*); typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*);
@ -333,22 +438,6 @@ int uv__getsockpeername(const uv_handle_t* handle,
struct sockaddr* name, struct sockaddr* name,
int* namelen); int* namelen);
#if defined(__linux__) || \
defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__) || \
defined(__DragonFly__)
#define HAVE_MMSG 1
struct uv__mmsghdr {
struct msghdr msg_hdr;
unsigned int msg_len;
};
int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
#else
#define HAVE_MMSG 0
#endif
#if defined(__sun) #if defined(__sun)
#if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L #if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L
size_t strnlen(const char* s, size_t maxlen); size_t strnlen(const char* s, size_t maxlen);
@ -365,5 +454,10 @@ uv__fs_copy_file_range(int fd_in,
unsigned int flags); unsigned int flags);
#endif #endif
#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 1301000)
#define UV__CPU_AFFINITY_SUPPORTED 1
#else
#define UV__CPU_AFFINITY_SUPPORTED 0
#endif
#endif /* UV_UNIX_INTERNAL_H_ */ #endif /* UV_UNIX_INTERNAL_H_ */

View File

@ -60,7 +60,7 @@ int uv__kqueue_init(uv_loop_t* loop) {
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
static int uv__has_forked_with_cfrunloop; static _Atomic int uv__has_forked_with_cfrunloop;
#endif #endif
int uv__io_fork(uv_loop_t* loop) { int uv__io_fork(uv_loop_t* loop) {
@ -82,7 +82,9 @@ int uv__io_fork(uv_loop_t* loop) {
process. So we sidestep the issue by pretending like we never process. So we sidestep the issue by pretending like we never
started it in the first place. started it in the first place.
*/ */
uv__store_relaxed(&uv__has_forked_with_cfrunloop, 1); atomic_store_explicit(&uv__has_forked_with_cfrunloop,
1,
memory_order_relaxed);
uv__free(loop->cf_state); uv__free(loop->cf_state);
loop->cf_state = NULL; loop->cf_state = NULL;
} }
@ -109,7 +111,23 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) {
} }
static void uv__kqueue_delete(int kqfd, const struct kevent *ev) {
struct kevent change;
EV_SET(&change, ev->ident, ev->filter, EV_DELETE, 0, 0, 0);
if (0 == kevent(kqfd, &change, 1, NULL, 0, NULL))
return;
if (errno == EBADF || errno == ENOENT)
return;
abort();
}
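
uv__kqueue_delete() centralizes the EV_DELETE cleanup that was previously repeated inline in uv__io_poll(), tolerating EBADF/ENOENT for descriptors or filters that are already gone. A BSD/macOS-only sketch of the same pattern (illustrative names, not a drop-in for libuv):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/event.h>

static void demo_kqueue_delete(int kqfd, uintptr_t ident, int16_t filter) {
  struct kevent change;

  EV_SET(&change, ident, filter, EV_DELETE, 0, 0, 0);

  if (kevent(kqfd, &change, 1, NULL, 0, NULL) == 0)
    return;
  if (errno == EBADF || errno == ENOENT)
    return;          /* fd already closed or filter already gone: fine */
  abort();           /* anything else is a logic error */
}

int main(void) {
  int kqfd = kqueue();
  if (kqfd == -1)
    return 1;
  demo_kqueue_delete(kqfd, 0, EVFILT_READ);  /* nothing registered: ENOENT path */
  return 0;
}
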
void uv__io_poll(uv_loop_t* loop, int timeout) { void uv__io_poll(uv_loop_t* loop, int timeout) {
uv__loop_internal_fields_t* lfields;
struct kevent events[1024]; struct kevent events[1024];
struct kevent* ev; struct kevent* ev;
struct timespec spec; struct timespec spec;
@ -138,6 +156,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
return; return;
} }
lfields = uv__get_internal_fields(loop);
nevents = 0; nevents = 0;
while (!QUEUE_EMPTY(&loop->watcher_queue)) { while (!QUEUE_EMPTY(&loop->watcher_queue)) {
@ -205,7 +224,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
base = loop->time; base = loop->time;
count = 48; /* Benchmarks suggest this gives the best throughput. */ count = 48; /* Benchmarks suggest this gives the best throughput. */
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1; reset_timeout = 1;
user_timeout = timeout; user_timeout = timeout;
timeout = 0; timeout = 0;
@ -228,6 +247,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (pset != NULL) if (pset != NULL)
pthread_sigmask(SIG_BLOCK, pset, NULL); pthread_sigmask(SIG_BLOCK, pset, NULL);
/* Store the current timeout in a location that's globally accessible so
* other locations like uv__work_done() can determine whether the queue
* of events in the callback were waiting when poll was called.
*/
lfields->current_timeout = timeout;
nfds = kevent(loop->backend_fd, nfds = kevent(loop->backend_fd,
events, events,
nevents, nevents,
@ -235,6 +260,9 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
ARRAY_SIZE(events), ARRAY_SIZE(events),
timeout == -1 ? NULL : &spec); timeout == -1 ? NULL : &spec);
if (nfds == -1)
assert(errno == EINTR);
if (pset != NULL) if (pset != NULL)
pthread_sigmask(SIG_UNBLOCK, pset, NULL); pthread_sigmask(SIG_UNBLOCK, pset, NULL);
@ -242,36 +270,26 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
* timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
* operating system didn't reschedule our process while in the syscall. * operating system didn't reschedule our process while in the syscall.
*/ */
SAVE_ERRNO(uv__update_time(loop)); uv__update_time(loop);
if (nfds == 0) { if (nfds == 0 || nfds == -1) {
if (reset_timeout != 0) { /* If kqueue is empty or interrupted, we might still have children ready
timeout = user_timeout; * to reap immediately. */
reset_timeout = 0; if (loop->flags & UV_LOOP_REAP_CHILDREN) {
if (timeout == -1) loop->flags &= ~UV_LOOP_REAP_CHILDREN;
continue; uv__wait_children(loop);
if (timeout > 0) assert((reset_timeout == 0 ? timeout : user_timeout) == 0);
goto update_timeout; return; /* Equivalent to fall-through behavior. */
} }
assert(timeout != -1);
return;
}
if (nfds == -1) {
if (errno != EINTR)
abort();
if (reset_timeout != 0) { if (reset_timeout != 0) {
timeout = user_timeout; timeout = user_timeout;
reset_timeout = 0; reset_timeout = 0;
} } else if (nfds == 0) {
/* Reached the user timeout value. */
if (timeout == 0) assert(timeout != -1);
return; return;
}
if (timeout == -1)
continue;
/* Interrupted by a signal. Update timeout and poll again. */ /* Interrupted by a signal. Update timeout and poll again. */
goto update_timeout; goto update_timeout;
@ -307,15 +325,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
w = loop->watchers[fd]; w = loop->watchers[fd];
if (w == NULL) { if (w == NULL) {
/* File descriptor that we've stopped watching, disarm it. /* File descriptor that we've stopped watching, disarm it. */
* TODO: batch up. */ uv__kqueue_delete(loop->backend_fd, ev);
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != EBADF && errno != ENOENT)
abort();
continue; continue;
} }
@ -331,47 +342,27 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
revents = 0; revents = 0;
if (ev->filter == EVFILT_READ) { if (ev->filter == EVFILT_READ) {
if (w->pevents & POLLIN) { if (w->pevents & POLLIN)
revents |= POLLIN; revents |= POLLIN;
w->rcount = ev->data; else
} else { uv__kqueue_delete(loop->backend_fd, ev);
/* TODO batch up */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != ENOENT)
abort();
}
if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP)) if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
revents |= UV__POLLRDHUP; revents |= UV__POLLRDHUP;
} }
if (ev->filter == EV_OOBAND) { if (ev->filter == EV_OOBAND) {
if (w->pevents & UV__POLLPRI) { if (w->pevents & UV__POLLPRI)
revents |= UV__POLLPRI; revents |= UV__POLLPRI;
w->rcount = ev->data; else
} else { uv__kqueue_delete(loop->backend_fd, ev);
/* TODO batch up */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != ENOENT)
abort();
}
} }
if (ev->filter == EVFILT_WRITE) { if (ev->filter == EVFILT_WRITE) {
if (w->pevents & POLLOUT) { if (w->pevents & POLLOUT)
revents |= POLLOUT; revents |= POLLOUT;
w->wcount = ev->data; else
} else { uv__kqueue_delete(loop->backend_fd, ev);
/* TODO batch up */
struct kevent events[1];
EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
if (errno != ENOENT)
abort();
}
} }
if (ev->flags & EV_ERROR) if (ev->flags & EV_ERROR)
@ -398,9 +389,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
uv__wait_children(loop); uv__wait_children(loop);
} }
uv__metrics_inc_events(loop, nevents);
if (reset_timeout != 0) { if (reset_timeout != 0) {
timeout = user_timeout; timeout = user_timeout;
reset_timeout = 0; reset_timeout = 0;
uv__metrics_inc_events_waiting(loop, nevents);
} }
if (have_signals != 0) { if (have_signals != 0) {
@ -423,13 +416,13 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
return; return;
} }
update_timeout:
if (timeout == 0) if (timeout == 0)
return; return;
if (timeout == -1) if (timeout == -1)
continue; continue;
update_timeout:
assert(timeout > 0); assert(timeout > 0);
diff = loop->time - base; diff = loop->time - base;
@ -541,13 +534,14 @@ int uv_fs_event_start(uv_fs_event_t* handle,
handle->realpath_len = 0; handle->realpath_len = 0;
handle->cf_flags = flags; handle->cf_flags = flags;
if (fstat(fd, &statbuf)) if (uv__fstat(fd, &statbuf))
goto fallback; goto fallback;
/* FSEvents works only with directories */ /* FSEvents works only with directories */
if (!(statbuf.st_mode & S_IFDIR)) if (!(statbuf.st_mode & S_IFDIR))
goto fallback; goto fallback;
if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) { if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop,
memory_order_relaxed)) {
int r; int r;
/* The fallback fd is no longer needed */ /* The fallback fd is no longer needed */
uv__close_nocheckstdio(fd); uv__close_nocheckstdio(fd);
@ -582,7 +576,8 @@ int uv_fs_event_stop(uv_fs_event_t* handle) {
uv__handle_stop(handle); uv__handle_stop(handle);
#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop,
memory_order_relaxed))
if (handle->cf_cb != NULL) if (handle->cf_cb != NULL)
r = uv__fsevents_close(handle); r = uv__fsevents_close(handle);
#endif #endif
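
Several of the hunks above (no_statx in fs.c, uv__has_forked_with_cfrunloop here) replace libuv's uv__load_relaxed()/uv__store_relaxed() helpers with C11 <stdatomic.h> operations on _Atomic variables. A minimal sketch of that once-only "feature unavailable" flag pattern, with invented names:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int no_fast_path;   /* zero-initialized: fast path allowed */

static int do_work(void) {
  if (atomic_load_explicit(&no_fast_path, memory_order_relaxed))
    return -1;                     /* skip the fast path from now on */

  /* ... try the fast path; on an ENOSYS-style failure, remember that ... */
  atomic_store_explicit(&no_fast_path, 1, memory_order_relaxed);
  return -1;
}

int main(void) {
  printf("%d %d\n", do_work(), do_work());
  return 0;
}
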

View File

@ -1,834 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their
* EPOLL* counterparts. We use the POLL* variants in this file because that
* is what libuv uses elsewhere.
*/
#include "uv.h"
#include "internal.h"
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <net/if.h>
#include <sys/epoll.h>
#include <sys/param.h>
#include <sys/prctl.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#define HAVE_IFADDRS_H 1
# if defined(__ANDROID_API__) && __ANDROID_API__ < 24
# undef HAVE_IFADDRS_H
#endif
#ifdef __UCLIBC__
# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32
# undef HAVE_IFADDRS_H
# endif
#endif
#ifdef HAVE_IFADDRS_H
# include <ifaddrs.h>
# include <sys/socket.h>
# include <net/ethernet.h>
# include <netpacket/packet.h>
#endif /* HAVE_IFADDRS_H */
/* Available from 2.6.32 onwards. */
#ifndef CLOCK_MONOTONIC_COARSE
# define CLOCK_MONOTONIC_COARSE 6
#endif
/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't
* include that file because it conflicts with <time.h>. We'll just have to
* define it ourselves.
*/
#ifndef CLOCK_BOOTTIME
# define CLOCK_BOOTTIME 7
#endif
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci);
static int read_times(FILE* statfile_fp,
unsigned int numcpus,
uv_cpu_info_t* ci);
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
static uint64_t read_cpufreq(unsigned int cpunum);
int uv__platform_loop_init(uv_loop_t* loop) {
loop->inotify_fd = -1;
loop->inotify_watchers = NULL;
return uv__epoll_init(loop);
}
int uv__io_fork(uv_loop_t* loop) {
int err;
void* old_watchers;
old_watchers = loop->inotify_watchers;
uv__close(loop->backend_fd);
loop->backend_fd = -1;
uv__platform_loop_delete(loop);
err = uv__platform_loop_init(loop);
if (err)
return err;
return uv__inotify_fork(loop, old_watchers);
}
void uv__platform_loop_delete(uv_loop_t* loop) {
if (loop->inotify_fd == -1) return;
uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN);
uv__close(loop->inotify_fd);
loop->inotify_fd = -1;
}
uint64_t uv__hrtime(uv_clocktype_t type) {
static clock_t fast_clock_id = -1;
struct timespec t;
clock_t clock_id;
/* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has
* millisecond granularity or better. CLOCK_MONOTONIC_COARSE is
* serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may
* decide to make a costly system call.
*/
/* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE
* when it has microsecond granularity or better (unlikely).
*/
clock_id = CLOCK_MONOTONIC;
if (type != UV_CLOCK_FAST)
goto done;
clock_id = uv__load_relaxed(&fast_clock_id);
if (clock_id != -1)
goto done;
clock_id = CLOCK_MONOTONIC;
if (0 == clock_getres(CLOCK_MONOTONIC_COARSE, &t))
if (t.tv_nsec <= 1 * 1000 * 1000)
clock_id = CLOCK_MONOTONIC_COARSE;
uv__store_relaxed(&fast_clock_id, clock_id);
done:
if (clock_gettime(clock_id, &t))
return 0; /* Not really possible. */
return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
}
int uv_resident_set_memory(size_t* rss) {
char buf[1024];
const char* s;
ssize_t n;
long val;
int fd;
int i;
do
fd = open("/proc/self/stat", O_RDONLY);
while (fd == -1 && errno == EINTR);
if (fd == -1)
return UV__ERR(errno);
do
n = read(fd, buf, sizeof(buf) - 1);
while (n == -1 && errno == EINTR);
uv__close(fd);
if (n == -1)
return UV__ERR(errno);
buf[n] = '\0';
s = strchr(buf, ' ');
if (s == NULL)
goto err;
s += 1;
if (*s != '(')
goto err;
s = strchr(s, ')');
if (s == NULL)
goto err;
for (i = 1; i <= 22; i++) {
s = strchr(s + 1, ' ');
if (s == NULL)
goto err;
}
errno = 0;
val = strtol(s, NULL, 10);
if (errno != 0)
goto err;
if (val < 0)
goto err;
*rss = val * getpagesize();
return 0;
err:
return UV_EINVAL;
}
int uv_uptime(double* uptime) {
static volatile int no_clock_boottime;
char buf[128];
struct timespec now;
int r;
/* Try /proc/uptime first, then fallback to clock_gettime(). */
if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
if (1 == sscanf(buf, "%lf", uptime))
return 0;
/* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
* (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
* is suspended.
*/
if (no_clock_boottime) {
retry_clock_gettime: r = clock_gettime(CLOCK_MONOTONIC, &now);
}
else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
no_clock_boottime = 1;
goto retry_clock_gettime;
}
if (r)
return UV__ERR(errno);
*uptime = now.tv_sec;
return 0;
}
static int uv__cpu_num(FILE* statfile_fp, unsigned int* numcpus) {
unsigned int num;
char buf[1024];
if (!fgets(buf, sizeof(buf), statfile_fp))
return UV_EIO;
num = 0;
while (fgets(buf, sizeof(buf), statfile_fp)) {
if (strncmp(buf, "cpu", 3))
break;
num++;
}
if (num == 0)
return UV_EIO;
*numcpus = num;
return 0;
}
int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
unsigned int numcpus;
uv_cpu_info_t* ci;
int err;
FILE* statfile_fp;
*cpu_infos = NULL;
*count = 0;
statfile_fp = uv__open_file("/proc/stat");
if (statfile_fp == NULL)
return UV__ERR(errno);
err = uv__cpu_num(statfile_fp, &numcpus);
if (err < 0)
goto out;
err = UV_ENOMEM;
ci = uv__calloc(numcpus, sizeof(*ci));
if (ci == NULL)
goto out;
err = read_models(numcpus, ci);
if (err == 0)
err = read_times(statfile_fp, numcpus, ci);
if (err) {
uv_free_cpu_info(ci, numcpus);
goto out;
}
/* read_models() on x86 also reads the CPU speed from /proc/cpuinfo.
* We don't check for errors here. Worst case, the field is left zero.
*/
if (ci[0].speed == 0)
read_speeds(numcpus, ci);
*cpu_infos = ci;
*count = numcpus;
err = 0;
out:
if (fclose(statfile_fp))
if (errno != EINTR && errno != EINPROGRESS)
abort();
return err;
}
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
unsigned int num;
for (num = 0; num < numcpus; num++)
ci[num].speed = read_cpufreq(num) / 1000;
}
/* Also reads the CPU frequency on ppc and x86. The other architectures only
* have a BogoMIPS field, which may not be very accurate.
*
* Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
*/
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
#if defined(__PPC__)
static const char model_marker[] = "cpu\t\t: ";
static const char speed_marker[] = "clock\t\t: ";
#else
static const char model_marker[] = "model name\t: ";
static const char speed_marker[] = "cpu MHz\t\t: ";
#endif
const char* inferred_model;
unsigned int model_idx;
unsigned int speed_idx;
unsigned int part_idx;
char buf[1024];
char* model;
FILE* fp;
int model_id;
/* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
(void) &model_marker;
(void) &speed_marker;
(void) &speed_idx;
(void) &part_idx;
(void) &model;
(void) &buf;
(void) &fp;
(void) &model_id;
model_idx = 0;
speed_idx = 0;
part_idx = 0;
#if defined(__arm__) || \
defined(__i386__) || \
defined(__mips__) || \
defined(__aarch64__) || \
defined(__PPC__) || \
defined(__x86_64__)
fp = uv__open_file("/proc/cpuinfo");
if (fp == NULL)
return UV__ERR(errno);
while (fgets(buf, sizeof(buf), fp)) {
if (model_idx < numcpus) {
if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
model = buf + sizeof(model_marker) - 1;
model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */
if (model == NULL) {
fclose(fp);
return UV_ENOMEM;
}
ci[model_idx++].model = model;
continue;
}
}
#if defined(__arm__) || defined(__mips__) || defined(__aarch64__)
if (model_idx < numcpus) {
#if defined(__arm__)
/* Fallback for pre-3.8 kernels. */
static const char model_marker[] = "Processor\t: ";
#elif defined(__aarch64__)
static const char part_marker[] = "CPU part\t: ";
/* Adapted from: https://github.com/karelzak/util-linux */
struct vendor_part {
const int id;
const char* name;
};
static const struct vendor_part arm_chips[] = {
{ 0x811, "ARM810" },
{ 0x920, "ARM920" },
{ 0x922, "ARM922" },
{ 0x926, "ARM926" },
{ 0x940, "ARM940" },
{ 0x946, "ARM946" },
{ 0x966, "ARM966" },
{ 0xa20, "ARM1020" },
{ 0xa22, "ARM1022" },
{ 0xa26, "ARM1026" },
{ 0xb02, "ARM11 MPCore" },
{ 0xb36, "ARM1136" },
{ 0xb56, "ARM1156" },
{ 0xb76, "ARM1176" },
{ 0xc05, "Cortex-A5" },
{ 0xc07, "Cortex-A7" },
{ 0xc08, "Cortex-A8" },
{ 0xc09, "Cortex-A9" },
{ 0xc0d, "Cortex-A17" }, /* Originally A12 */
{ 0xc0f, "Cortex-A15" },
{ 0xc0e, "Cortex-A17" },
{ 0xc14, "Cortex-R4" },
{ 0xc15, "Cortex-R5" },
{ 0xc17, "Cortex-R7" },
{ 0xc18, "Cortex-R8" },
{ 0xc20, "Cortex-M0" },
{ 0xc21, "Cortex-M1" },
{ 0xc23, "Cortex-M3" },
{ 0xc24, "Cortex-M4" },
{ 0xc27, "Cortex-M7" },
{ 0xc60, "Cortex-M0+" },
{ 0xd01, "Cortex-A32" },
{ 0xd03, "Cortex-A53" },
{ 0xd04, "Cortex-A35" },
{ 0xd05, "Cortex-A55" },
{ 0xd06, "Cortex-A65" },
{ 0xd07, "Cortex-A57" },
{ 0xd08, "Cortex-A72" },
{ 0xd09, "Cortex-A73" },
{ 0xd0a, "Cortex-A75" },
{ 0xd0b, "Cortex-A76" },
{ 0xd0c, "Neoverse-N1" },
{ 0xd0d, "Cortex-A77" },
{ 0xd0e, "Cortex-A76AE" },
{ 0xd13, "Cortex-R52" },
{ 0xd20, "Cortex-M23" },
{ 0xd21, "Cortex-M33" },
{ 0xd41, "Cortex-A78" },
{ 0xd42, "Cortex-A78AE" },
{ 0xd4a, "Neoverse-E1" },
{ 0xd4b, "Cortex-A78C" },
};
if (strncmp(buf, part_marker, sizeof(part_marker) - 1) == 0) {
model = buf + sizeof(part_marker) - 1;
errno = 0;
model_id = strtol(model, NULL, 16);
if ((errno != 0) || model_id < 0) {
fclose(fp);
return UV_EINVAL;
}
for (part_idx = 0; part_idx < ARRAY_SIZE(arm_chips); part_idx++) {
if (model_id == arm_chips[part_idx].id) {
model = uv__strdup(arm_chips[part_idx].name);
if (model == NULL) {
fclose(fp);
return UV_ENOMEM;
}
ci[model_idx++].model = model;
break;
}
}
}
#else /* defined(__mips__) */
static const char model_marker[] = "cpu model\t\t: ";
#endif
if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
model = buf + sizeof(model_marker) - 1;
model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */
if (model == NULL) {
fclose(fp);
return UV_ENOMEM;
}
ci[model_idx++].model = model;
continue;
}
}
#else /* !__arm__ && !__mips__ && !__aarch64__ */
if (speed_idx < numcpus) {
if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
continue;
}
}
#endif /* __arm__ || __mips__ || __aarch64__ */
}
fclose(fp);
#endif /* __arm__ || __i386__ || __mips__ || __PPC__ || __x86_64__ || __aarch__ */
/* Now we want to make sure that all the models contain *something* because
* it's not safe to leave them as null. Copy the last entry unless there
* isn't one, in that case we simply put "unknown" into everything.
*/
inferred_model = "unknown";
if (model_idx > 0)
inferred_model = ci[model_idx - 1].model;
while (model_idx < numcpus) {
model = uv__strndup(inferred_model, strlen(inferred_model));
if (model == NULL)
return UV_ENOMEM;
ci[model_idx++].model = model;
}
return 0;
}
static int read_times(FILE* statfile_fp,
unsigned int numcpus,
uv_cpu_info_t* ci) {
struct uv_cpu_times_s ts;
unsigned int ticks;
unsigned int multiplier;
uint64_t user;
uint64_t nice;
uint64_t sys;
uint64_t idle;
uint64_t dummy;
uint64_t irq;
uint64_t num;
uint64_t len;
char buf[1024];
ticks = (unsigned int)sysconf(_SC_CLK_TCK);
assert(ticks != (unsigned int) -1);
assert(ticks != 0);
multiplier = ((uint64_t)1000L / ticks);
rewind(statfile_fp);
if (!fgets(buf, sizeof(buf), statfile_fp))
abort();
num = 0;
while (fgets(buf, sizeof(buf), statfile_fp)) {
if (num >= numcpus)
break;
if (strncmp(buf, "cpu", 3))
break;
/* skip "cpu<num> " marker */
{
unsigned int n;
int r = sscanf(buf, "cpu%u ", &n);
assert(r == 1);
(void) r; /* silence build warning */
for (len = sizeof("cpu0"); n /= 10; len++);
}
/* Line contains user, nice, system, idle, iowait, irq, softirq, steal,
* guest, guest_nice but we're only interested in the first four + irq.
*
* Don't use %*s to skip fields or %ll to read straight into the uint64_t
* fields, they're not allowed in C89 mode.
*/
if (6 != sscanf(buf + len,
"%" PRIu64 " %" PRIu64 " %" PRIu64
"%" PRIu64 " %" PRIu64 " %" PRIu64,
&user,
&nice,
&sys,
&idle,
&dummy,
&irq))
abort();
ts.user = user * multiplier;
ts.nice = nice * multiplier;
ts.sys = sys * multiplier;
ts.idle = idle * multiplier;
ts.irq = irq * multiplier;
ci[num++].cpu_times = ts;
}
assert(num == numcpus);
return 0;
}
static uint64_t read_cpufreq(unsigned int cpunum) {
uint64_t val;
char buf[1024];
FILE* fp;
snprintf(buf,
sizeof(buf),
"/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq",
cpunum);
fp = uv__open_file(buf);
if (fp == NULL)
return 0;
if (fscanf(fp, "%" PRIu64, &val) != 1)
val = 0;
fclose(fp);
return val;
}
#ifdef HAVE_IFADDRS_H
static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
return 1;
if (ent->ifa_addr == NULL)
return 1;
/*
* On Linux getifaddrs returns information related to the raw underlying
* devices. We're not interested in this information yet.
*/
if (ent->ifa_addr->sa_family == PF_PACKET)
return exclude_type;
return !exclude_type;
}
#endif
int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
#ifndef HAVE_IFADDRS_H
*count = 0;
*addresses = NULL;
return UV_ENOSYS;
#else
struct ifaddrs *addrs, *ent;
uv_interface_address_t* address;
int i;
struct sockaddr_ll *sll;
*count = 0;
*addresses = NULL;
if (getifaddrs(&addrs))
return UV__ERR(errno);
/* Count the number of interfaces */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
continue;
(*count)++;
}
if (*count == 0) {
freeifaddrs(addrs);
return 0;
}
/* Make sure the memory is initiallized to zero using calloc() */
*addresses = uv__calloc(*count, sizeof(**addresses));
if (!(*addresses)) {
freeifaddrs(addrs);
return UV_ENOMEM;
}
address = *addresses;
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
continue;
address->name = uv__strdup(ent->ifa_name);
if (ent->ifa_addr->sa_family == AF_INET6) {
address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
} else {
address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
}
if (ent->ifa_netmask->sa_family == AF_INET6) {
address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
} else {
address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
}
address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
address++;
}
/* Fill in physical addresses for each interface */
for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
continue;
address = *addresses;
for (i = 0; i < (*count); i++) {
size_t namelen = strlen(ent->ifa_name);
/* Alias interface share the same physical address */
if (strncmp(address->name, ent->ifa_name, namelen) == 0 &&
(address->name[namelen] == 0 || address->name[namelen] == ':')) {
sll = (struct sockaddr_ll*)ent->ifa_addr;
memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr));
}
address++;
}
}
freeifaddrs(addrs);
return 0;
#endif
}
void uv_free_interface_addresses(uv_interface_address_t* addresses,
int count) {
int i;
for (i = 0; i < count; i++) {
uv__free(addresses[i].name);
}
uv__free(addresses);
}
void uv__set_process_title(const char* title) {
#if defined(PR_SET_NAME)
prctl(PR_SET_NAME, title); /* Only copies first 16 characters. */
#endif
}
static uint64_t uv__read_proc_meminfo(const char* what) {
uint64_t rc;
char* p;
char buf[4096]; /* Large enough to hold all of /proc/meminfo. */
if (uv__slurp("/proc/meminfo", buf, sizeof(buf)))
return 0;
p = strstr(buf, what);
if (p == NULL)
return 0;
p += strlen(what);
rc = 0;
sscanf(p, "%" PRIu64 " kB", &rc);
return rc * 1024;
}
uint64_t uv_get_free_memory(void) {
struct sysinfo info;
uint64_t rc;
rc = uv__read_proc_meminfo("MemAvailable:");
if (rc != 0)
return rc;
if (0 == sysinfo(&info))
return (uint64_t) info.freeram * info.mem_unit;
return 0;
}
uint64_t uv_get_total_memory(void) {
struct sysinfo info;
uint64_t rc;
rc = uv__read_proc_meminfo("MemTotal:");
if (rc != 0)
return rc;
if (0 == sysinfo(&info))
return (uint64_t) info.totalram * info.mem_unit;
return 0;
}
static uint64_t uv__read_cgroups_uint64(const char* cgroup, const char* param) {
char filename[256];
char buf[32]; /* Large enough to hold an encoded uint64_t. */
uint64_t rc;
rc = 0;
snprintf(filename, sizeof(filename), "/sys/fs/cgroup/%s/%s", cgroup, param);
if (0 == uv__slurp(filename, buf, sizeof(buf)))
sscanf(buf, "%" PRIu64, &rc);
return rc;
}
uint64_t uv_get_constrained_memory(void) {
/*
* This might return 0 if there was a problem getting the memory limit from
* cgroups. This is OK because a return value of 0 signifies that the memory
* limit is unknown.
*/
return uv__read_cgroups_uint64("memory", "memory.limit_in_bytes");
}
void uv_loadavg(double avg[3]) {
struct sysinfo info;
char buf[128]; /* Large enough to hold all of /proc/loadavg. */
if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf)))
if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]))
return;
if (sysinfo(&info) < 0)
return;
avg[0] = (double) info.loads[0] / 65536.0;
avg[1] = (double) info.loads[1] / 65536.0;
avg[2] = (double) info.loads[2] / 65536.0;
}
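
The deleted Linux file above (its platform code was consolidated elsewhere in libuv 1.45.0) includes a uv_loadavg() that prefers /proc/loadavg and falls back to sysinfo()'s fixed-point load values. A standalone, Linux-only sketch of that same fallback pattern (not libuv code):

#include <stdio.h>
#include <sys/sysinfo.h>

static int demo_loadavg(double avg[3]) {
  struct sysinfo info;
  FILE* fp = fopen("/proc/loadavg", "r");

  if (fp != NULL) {
    int n = fscanf(fp, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]);
    fclose(fp);
    if (n == 3)
      return 0;
  }

  if (sysinfo(&info) != 0)
    return -1;

  avg[0] = info.loads[0] / 65536.0;   /* loads are scaled by 1 << SI_LOAD_SHIFT */
  avg[1] = info.loads[1] / 65536.0;
  avg[2] = info.loads[2] / 65536.0;
  return 0;
}

int main(void) {
  double avg[3];
  if (demo_loadavg(avg) == 0)
    printf("%.2f %.2f %.2f\n", avg[0], avg[1], avg[2]);
  return 0;
}
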

View File

@ -1,327 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "uv/tree.h"
#include "internal.h"
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <sys/inotify.h>
#include <sys/types.h>
#include <unistd.h>
struct watcher_list {
RB_ENTRY(watcher_list) entry;
QUEUE watchers;
int iterating;
char* path;
int wd;
};
struct watcher_root {
struct watcher_list* rbh_root;
};
#define CAST(p) ((struct watcher_root*)(p))
static int compare_watchers(const struct watcher_list* a,
const struct watcher_list* b) {
if (a->wd < b->wd) return -1;
if (a->wd > b->wd) return 1;
return 0;
}
RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers)
static void uv__inotify_read(uv_loop_t* loop,
uv__io_t* w,
unsigned int revents);
static void maybe_free_watcher_list(struct watcher_list* w,
uv_loop_t* loop);
static int init_inotify(uv_loop_t* loop) {
int fd;
if (loop->inotify_fd != -1)
return 0;
fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
if (fd < 0)
return UV__ERR(errno);
loop->inotify_fd = fd;
uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd);
uv__io_start(loop, &loop->inotify_read_watcher, POLLIN);
return 0;
}
int uv__inotify_fork(uv_loop_t* loop, void* old_watchers) {
/* Open the inotify_fd, and re-arm all the inotify watchers. */
int err;
struct watcher_list* tmp_watcher_list_iter;
struct watcher_list* watcher_list;
struct watcher_list tmp_watcher_list;
QUEUE queue;
QUEUE* q;
uv_fs_event_t* handle;
char* tmp_path;
if (old_watchers != NULL) {
/* We must restore the old watcher list to be able to close items
* out of it.
*/
loop->inotify_watchers = old_watchers;
QUEUE_INIT(&tmp_watcher_list.watchers);
/* Note that the queue we use is shared with the start and stop()
* functions, making QUEUE_FOREACH unsafe to use. So we use the
* QUEUE_MOVE trick to safely iterate. Also don't free the watcher
* list until we're done iterating. c.f. uv__inotify_read.
*/
RB_FOREACH_SAFE(watcher_list, watcher_root,
CAST(&old_watchers), tmp_watcher_list_iter) {
watcher_list->iterating = 1;
QUEUE_MOVE(&watcher_list->watchers, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
/* It's critical to keep a copy of path here, because it
* will be set to NULL by stop() and then deallocated by
* maybe_free_watcher_list
*/
tmp_path = uv__strdup(handle->path);
assert(tmp_path != NULL);
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&watcher_list->watchers, q);
uv_fs_event_stop(handle);
QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers);
handle->path = tmp_path;
}
watcher_list->iterating = 0;
maybe_free_watcher_list(watcher_list, loop);
}
QUEUE_MOVE(&tmp_watcher_list.watchers, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
QUEUE_REMOVE(q);
handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
tmp_path = handle->path;
handle->path = NULL;
err = uv_fs_event_start(handle, handle->cb, tmp_path, 0);
uv__free(tmp_path);
if (err)
return err;
}
}
return 0;
}
static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
struct watcher_list w;
w.wd = wd;
return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w);
}
static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
/* if the watcher_list->watchers is being iterated over, we can't free it. */
if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) {
/* No watchers left for this path. Clean up. */
RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w);
inotify_rm_watch(loop->inotify_fd, w->wd);
uv__free(w);
}
}
static void uv__inotify_read(uv_loop_t* loop,
uv__io_t* dummy,
unsigned int events) {
const struct inotify_event* e;
struct watcher_list* w;
uv_fs_event_t* h;
QUEUE queue;
QUEUE* q;
const char* path;
ssize_t size;
const char *p;
/* needs to be large enough for sizeof(inotify_event) + strlen(path) */
char buf[4096];
for (;;) {
do
size = read(loop->inotify_fd, buf, sizeof(buf));
while (size == -1 && errno == EINTR);
if (size == -1) {
assert(errno == EAGAIN || errno == EWOULDBLOCK);
break;
}
assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */
/* Now we have one or more inotify_event structs. */
for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
e = (const struct inotify_event*) p;
events = 0;
if (e->mask & (IN_ATTRIB|IN_MODIFY))
events |= UV_CHANGE;
if (e->mask & ~(IN_ATTRIB|IN_MODIFY))
events |= UV_RENAME;
w = find_watcher(loop, e->wd);
if (w == NULL)
continue; /* Stale event, no watchers left. */
/* inotify does not return the filename when monitoring a single file
* for modifications. Repurpose the filename for API compatibility.
* I'm not convinced this is a good thing, maybe it should go.
*/
path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);
/* We're about to iterate over the queue and call user's callbacks.
* What can go wrong?
* A callback could call uv_fs_event_stop()
* and the queue can change under our feet.
* So, we use QUEUE_MOVE() trick to safely iterate over the queue.
* And we don't free the watcher_list until we're done iterating.
*
* First,
* tell uv_fs_event_stop() (that could be called from a user's callback)
* not to free watcher_list.
*/
w->iterating = 1;
QUEUE_MOVE(&w->watchers, &queue);
while (!QUEUE_EMPTY(&queue)) {
q = QUEUE_HEAD(&queue);
h = QUEUE_DATA(q, uv_fs_event_t, watchers);
QUEUE_REMOVE(q);
QUEUE_INSERT_TAIL(&w->watchers, q);
h->cb(h, path, events, 0);
}
/* done iterating, time to (maybe) free empty watcher_list */
w->iterating = 0;
maybe_free_watcher_list(w, loop);
}
}
}
int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
return 0;
}
int uv_fs_event_start(uv_fs_event_t* handle,
uv_fs_event_cb cb,
const char* path,
unsigned int flags) {
struct watcher_list* w;
size_t len;
int events;
int err;
int wd;
if (uv__is_active(handle))
return UV_EINVAL;
err = init_inotify(handle->loop);
if (err)
return err;
events = IN_ATTRIB
| IN_CREATE
| IN_MODIFY
| IN_DELETE
| IN_DELETE_SELF
| IN_MOVE_SELF
| IN_MOVED_FROM
| IN_MOVED_TO;
wd = inotify_add_watch(handle->loop->inotify_fd, path, events);
if (wd == -1)
return UV__ERR(errno);
w = find_watcher(handle->loop, wd);
if (w)
goto no_insert;
len = strlen(path) + 1;
w = uv__malloc(sizeof(*w) + len);
if (w == NULL)
return UV_ENOMEM;
w->wd = wd;
w->path = memcpy(w + 1, path, len);
QUEUE_INIT(&w->watchers);
w->iterating = 0;
RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w);
no_insert:
uv__handle_start(handle);
QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
handle->path = w->path;
handle->cb = cb;
handle->wd = wd;
return 0;
}
int uv_fs_event_stop(uv_fs_event_t* handle) {
struct watcher_list* w;
if (!uv__is_active(handle))
return 0;
w = find_watcher(handle->loop, handle->wd);
assert(w != NULL);
handle->wd = -1;
handle->path = NULL;
uv__handle_stop(handle);
QUEUE_REMOVE(&handle->watchers);
maybe_free_watcher_list(w, handle->loop);
return 0;
}
void uv__fs_event_close(uv_fs_event_t* handle) {
uv_fs_event_stop(handle);
}

View File

@ -1,264 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "linux-syscalls.h"
#include <unistd.h>
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <errno.h>
#if defined(__arm__)
# if defined(__thumb__) || defined(__ARM_EABI__)
# define UV_SYSCALL_BASE 0
# else
# define UV_SYSCALL_BASE 0x900000
# endif
#endif /* __arm__ */
#ifndef __NR_recvmmsg
# if defined(__x86_64__)
# define __NR_recvmmsg 299
# elif defined(__arm__)
# define __NR_recvmmsg (UV_SYSCALL_BASE + 365)
# endif
#endif /* __NR_recvmmsg */
#ifndef __NR_sendmmsg
# if defined(__x86_64__)
# define __NR_sendmmsg 307
# elif defined(__arm__)
# define __NR_sendmmsg (UV_SYSCALL_BASE + 374)
# endif
#endif /* __NR_sendmmsg */
#ifndef __NR_utimensat
# if defined(__x86_64__)
# define __NR_utimensat 280
# elif defined(__i386__)
# define __NR_utimensat 320
# elif defined(__arm__)
# define __NR_utimensat (UV_SYSCALL_BASE + 348)
# endif
#endif /* __NR_utimensat */
#ifndef __NR_preadv
# if defined(__x86_64__)
# define __NR_preadv 295
# elif defined(__i386__)
# define __NR_preadv 333
# elif defined(__arm__)
# define __NR_preadv (UV_SYSCALL_BASE + 361)
# endif
#endif /* __NR_preadv */
#ifndef __NR_pwritev
# if defined(__x86_64__)
# define __NR_pwritev 296
# elif defined(__i386__)
# define __NR_pwritev 334
# elif defined(__arm__)
# define __NR_pwritev (UV_SYSCALL_BASE + 362)
# endif
#endif /* __NR_pwritev */
#ifndef __NR_dup3
# if defined(__x86_64__)
# define __NR_dup3 292
# elif defined(__i386__)
# define __NR_dup3 330
# elif defined(__arm__)
# define __NR_dup3 (UV_SYSCALL_BASE + 358)
# endif
#endif /* __NR_dup3 */
#ifndef __NR_copy_file_range
# if defined(__x86_64__)
# define __NR_copy_file_range 326
# elif defined(__i386__)
# define __NR_copy_file_range 377
# elif defined(__s390__)
# define __NR_copy_file_range 375
# elif defined(__arm__)
# define __NR_copy_file_range (UV_SYSCALL_BASE + 391)
# elif defined(__aarch64__)
# define __NR_copy_file_range 285
# elif defined(__powerpc__)
# define __NR_copy_file_range 379
# elif defined(__arc__)
# define __NR_copy_file_range 285
# endif
#endif /* __NR_copy_file_range */
#ifndef __NR_statx
# if defined(__x86_64__)
# define __NR_statx 332
# elif defined(__i386__)
# define __NR_statx 383
# elif defined(__aarch64__)
# define __NR_statx 397
# elif defined(__arm__)
# define __NR_statx (UV_SYSCALL_BASE + 397)
# elif defined(__ppc__)
# define __NR_statx 383
# elif defined(__s390__)
# define __NR_statx 379
# endif
#endif /* __NR_statx */
#ifndef __NR_getrandom
# if defined(__x86_64__)
# define __NR_getrandom 318
# elif defined(__i386__)
# define __NR_getrandom 355
# elif defined(__aarch64__)
# define __NR_getrandom 384
# elif defined(__arm__)
# define __NR_getrandom (UV_SYSCALL_BASE + 384)
# elif defined(__ppc__)
# define __NR_getrandom 359
# elif defined(__s390__)
# define __NR_getrandom 349
# endif
#endif /* __NR_getrandom */
struct uv__mmsghdr;
int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if defined(__i386__)
unsigned long args[4];
int rc;
args[0] = (unsigned long) fd;
args[1] = (unsigned long) mmsg;
args[2] = (unsigned long) vlen;
args[3] = /* flags */ 0;
/* socketcall() raises EINVAL when SYS_SENDMMSG is not supported. */
rc = syscall(/* __NR_socketcall */ 102, 20 /* SYS_SENDMMSG */, args);
if (rc == -1)
if (errno == EINVAL)
errno = ENOSYS;
return rc;
#elif defined(__NR_sendmmsg)
return syscall(__NR_sendmmsg, fd, mmsg, vlen, /* flags */ 0);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
#if defined(__i386__)
unsigned long args[5];
int rc;
args[0] = (unsigned long) fd;
args[1] = (unsigned long) mmsg;
args[2] = (unsigned long) vlen;
args[3] = /* flags */ 0;
args[4] = /* timeout */ 0;
/* socketcall() raises EINVAL when SYS_RECVMMSG is not supported. */
rc = syscall(/* __NR_socketcall */ 102, 19 /* SYS_RECVMMSG */, args);
if (rc == -1)
if (errno == EINVAL)
errno = ENOSYS;
return rc;
#elif defined(__NR_recvmmsg)
return syscall(__NR_recvmmsg, fd, mmsg, vlen, /* flags */ 0, /* timeout */ 0);
#else
return errno = ENOSYS, -1;
#endif
}
ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
#if !defined(__NR_preadv) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
return errno = ENOSYS, -1;
#else
return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
#endif
}
ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
#if !defined(__NR_pwritev) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
return errno = ENOSYS, -1;
#else
return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
#endif
}
int uv__dup3(int oldfd, int newfd, int flags) {
#if !defined(__NR_dup3) || defined(__ANDROID_API__) && __ANDROID_API__ < 21
return errno = ENOSYS, -1;
#else
return syscall(__NR_dup3, oldfd, newfd, flags);
#endif
}
ssize_t
uv__fs_copy_file_range(int fd_in,
off_t* off_in,
int fd_out,
off_t* off_out,
size_t len,
unsigned int flags)
{
#ifdef __NR_copy_file_range
return syscall(__NR_copy_file_range,
fd_in,
off_in,
fd_out,
off_out,
len,
flags);
#else
return errno = ENOSYS, -1;
#endif
}
int uv__statx(int dirfd,
const char* path,
int flags,
unsigned int mask,
struct uv__statx* statxbuf) {
#if !defined(__NR_statx) || defined(__ANDROID_API__) && __ANDROID_API__ < 30
return errno = ENOSYS, -1;
#else
return syscall(__NR_statx, dirfd, path, flags, mask, statxbuf);
#endif
}
ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags) {
#if !defined(__NR_getrandom) || defined(__ANDROID_API__) && __ANDROID_API__ < 28
return errno = ENOSYS, -1;
#else
return syscall(__NR_getrandom, buf, buflen, flags);
#endif
}
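Every wrapper in this deleted file follows the same shape: issue the raw syscall when the target architecture defines a number for it, otherwise report ENOSYS so the caller can take a fallback path. A generic sketch of that pattern, using a hypothetical uv__example_getrandom() that is not part of libuv:

/* Illustrative only: syscall-or-ENOSYS wrapper pattern. */
#include <errno.h>
#include <sys/syscall.h>
#include <unistd.h>

static long uv__example_getrandom(void* buf, size_t buflen, unsigned flags) {
#if defined(__NR_getrandom)
  return syscall(__NR_getrandom, buf, buflen, flags);
#else
  return errno = ENOSYS, -1;   /* caller falls back to /dev/urandom */
#endif
}

The 1.45.0 update removes this file entirely, presumably because the newer kernel and libc baseline makes these hand-rolled shims unnecessary.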

View File

@ -1,78 +0,0 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef UV_LINUX_SYSCALL_H_
#define UV_LINUX_SYSCALL_H_
#include <stdint.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/socket.h>
struct uv__statx_timestamp {
int64_t tv_sec;
uint32_t tv_nsec;
int32_t unused0;
};
struct uv__statx {
uint32_t stx_mask;
uint32_t stx_blksize;
uint64_t stx_attributes;
uint32_t stx_nlink;
uint32_t stx_uid;
uint32_t stx_gid;
uint16_t stx_mode;
uint16_t unused0;
uint64_t stx_ino;
uint64_t stx_size;
uint64_t stx_blocks;
uint64_t stx_attributes_mask;
struct uv__statx_timestamp stx_atime;
struct uv__statx_timestamp stx_btime;
struct uv__statx_timestamp stx_ctime;
struct uv__statx_timestamp stx_mtime;
uint32_t stx_rdev_major;
uint32_t stx_rdev_minor;
uint32_t stx_dev_major;
uint32_t stx_dev_minor;
uint64_t unused1[14];
};
ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
int uv__dup3(int oldfd, int newfd, int flags);
ssize_t
uv__fs_copy_file_range(int fd_in,
off_t* off_in,
int fd_out,
off_t* off_out,
size_t len,
unsigned int flags);
int uv__statx(int dirfd,
const char* path,
int flags,
unsigned int mask,
struct uv__statx* statxbuf);
ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags);
#endif /* UV_LINUX_SYSCALL_H_ */
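For context, current glibc (2.28 and later) exposes a statx() wrapper directly, so a caller no longer needs the uv__statx shim declared above. The sketch below only illustrates what the deleted declaration provided; it does not claim that libuv 1.45.0 itself calls the libc wrapper rather than the raw syscall.

/* Sketch: query a file's mtime via glibc's statx(); assumes _GNU_SOURCE. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int print_mtime(const char* path) {
  struct statx sx;

  if (statx(AT_FDCWD, path, 0, STATX_BASIC_STATS, &sx))
    return -1;

  printf("mtime: %lld.%09u\n",
         (long long) sx.stx_mtime.tv_sec,
         sx.stx_mtime.tv_nsec);
  return 0;
}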

2341
deps/libuv/src/unix/linux.c vendored Normal file

File diff suppressed because it is too large

View File

@ -45,6 +45,9 @@ int uv_loop_init(uv_loop_t* loop) {
err = uv_mutex_init(&lfields->loop_metrics.lock); err = uv_mutex_init(&lfields->loop_metrics.lock);
if (err) if (err)
goto fail_metrics_mutex_init; goto fail_metrics_mutex_init;
memset(&lfields->loop_metrics.metrics,
0,
sizeof(lfields->loop_metrics.metrics));
heap_init((struct heap*) &loop->timer_heap); heap_init((struct heap*) &loop->timer_heap);
QUEUE_INIT(&loop->wq); QUEUE_INIT(&loop->wq);
@ -79,12 +82,9 @@ int uv_loop_init(uv_loop_t* loop) {
goto fail_platform_init; goto fail_platform_init;
uv__signal_global_once_init(); uv__signal_global_once_init();
err = uv_signal_init(loop, &loop->child_watcher); err = uv__process_init(loop);
if (err) if (err)
goto fail_signal_init; goto fail_signal_init;
uv__handle_unref(&loop->child_watcher);
loop->child_watcher.flags |= UV_HANDLE_INTERNAL;
QUEUE_INIT(&loop->process_handles); QUEUE_INIT(&loop->process_handles);
err = uv_rwlock_init(&loop->cloexec_lock); err = uv_rwlock_init(&loop->cloexec_lock);

View File

@ -103,7 +103,7 @@ uint64_t uv_get_free_memory(void) {
int which[] = {CTL_VM, VM_UVMEXP}; int which[] = {CTL_VM, VM_UVMEXP};
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0)) if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return UV__ERR(errno); return 0;
return (uint64_t) info.free * sysconf(_SC_PAGESIZE); return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
} }
@ -120,7 +120,7 @@ uint64_t uv_get_total_memory(void) {
size_t size = sizeof(info); size_t size = sizeof(info);
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0)) if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return UV__ERR(errno); return 0;
return (uint64_t) info; return (uint64_t) info;
} }
@ -131,6 +131,11 @@ uint64_t uv_get_constrained_memory(void) {
} }
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
int uv_resident_set_memory(size_t* rss) { int uv_resident_set_memory(size_t* rss) {
kvm_t *kd = NULL; kvm_t *kd = NULL;
struct kinfo_proc2 *kinfo = NULL; struct kinfo_proc2 *kinfo = NULL;

View File

@ -116,7 +116,7 @@ uint64_t uv_get_free_memory(void) {
int which[] = {CTL_VM, VM_UVMEXP}; int which[] = {CTL_VM, VM_UVMEXP};
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0)) if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return UV__ERR(errno); return 0;
return (uint64_t) info.free * sysconf(_SC_PAGESIZE); return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
} }
@ -128,7 +128,7 @@ uint64_t uv_get_total_memory(void) {
size_t size = sizeof(info); size_t size = sizeof(info);
if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0)) if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
return UV__ERR(errno); return 0;
return (uint64_t) info; return (uint64_t) info;
} }
@ -139,6 +139,11 @@ uint64_t uv_get_constrained_memory(void) {
} }
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
int uv_resident_set_memory(size_t* rss) { int uv_resident_set_memory(size_t* rss) {
struct kinfo_proc kinfo; struct kinfo_proc kinfo;
size_t page_size = getpagesize(); size_t page_size = getpagesize();

View File

@ -198,6 +198,11 @@ uint64_t uv_get_constrained_memory(void) {
} }
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
int uv_resident_set_memory(size_t* rss) { int uv_resident_set_memory(size_t* rss) {
char* ascb; char* ascb;
char* rax; char* rax;
@ -803,6 +808,7 @@ static int os390_message_queue_handler(uv__os390_epoll* ep) {
void uv__io_poll(uv_loop_t* loop, int timeout) { void uv__io_poll(uv_loop_t* loop, int timeout) {
static const int max_safe_timeout = 1789569; static const int max_safe_timeout = 1789569;
uv__loop_internal_fields_t* lfields;
struct epoll_event events[1024]; struct epoll_event events[1024];
struct epoll_event* pe; struct epoll_event* pe;
struct epoll_event e; struct epoll_event e;
@ -825,6 +831,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
return; return;
} }
lfields = uv__get_internal_fields(loop);
while (!QUEUE_EMPTY(&loop->watcher_queue)) { while (!QUEUE_EMPTY(&loop->watcher_queue)) {
uv_stream_t* stream; uv_stream_t* stream;
@ -872,7 +880,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
int nevents = 0; int nevents = 0;
have_signals = 0; have_signals = 0;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1; reset_timeout = 1;
user_timeout = timeout; user_timeout = timeout;
timeout = 0; timeout = 0;
@ -891,6 +899,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout) if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
timeout = max_safe_timeout; timeout = max_safe_timeout;
/* Store the current timeout in a location that's globally accessible so
* other locations like uv__work_done() can determine whether the queue
* of events in the callback were waiting when poll was called.
*/
lfields->current_timeout = timeout;
nfds = epoll_wait(loop->ep, events, nfds = epoll_wait(loop->ep, events,
ARRAY_SIZE(events), timeout); ARRAY_SIZE(events), timeout);
@ -998,9 +1012,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
} }
} }
uv__metrics_inc_events(loop, nevents);
if (reset_timeout != 0) { if (reset_timeout != 0) {
timeout = user_timeout; timeout = user_timeout;
reset_timeout = 0; reset_timeout = 0;
uv__metrics_inc_events_waiting(loop, nevents);
} }
if (have_signals != 0) { if (have_signals != 0) {

View File

@ -357,7 +357,7 @@ int uv_pipe_chmod(uv_pipe_t* handle, int mode) {
} }
/* stat must be used as fstat has a bug on Darwin */ /* stat must be used as fstat has a bug on Darwin */
if (stat(name_buffer, &pipe_stat) == -1) { if (uv__stat(name_buffer, &pipe_stat) == -1) {
uv__free(name_buffer); uv__free(name_buffer);
return -errno; return -errno;
} }

View File

@ -23,13 +23,14 @@
#include "internal.h" #include "internal.h"
#include <stdint.h> #include <stdint.h>
#include <stdlib.h>
#include <time.h> #include <time.h>
#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)
uint64_t uv__hrtime(uv_clocktype_t type) { uint64_t uv__hrtime(uv_clocktype_t type) {
struct timespec ts; struct timespec t;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec); if (clock_gettime(CLOCK_MONOTONIC, &t))
abort();
return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
} }

View File

@ -132,6 +132,7 @@ static void uv__pollfds_del(uv_loop_t* loop, int fd) {
void uv__io_poll(uv_loop_t* loop, int timeout) { void uv__io_poll(uv_loop_t* loop, int timeout) {
uv__loop_internal_fields_t* lfields;
sigset_t* pset; sigset_t* pset;
sigset_t set; sigset_t set;
uint64_t time_base; uint64_t time_base;
@ -152,6 +153,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
return; return;
} }
lfields = uv__get_internal_fields(loop);
/* Take queued watchers and add their fds to our poll fds array. */ /* Take queued watchers and add their fds to our poll fds array. */
while (!QUEUE_EMPTY(&loop->watcher_queue)) { while (!QUEUE_EMPTY(&loop->watcher_queue)) {
q = QUEUE_HEAD(&loop->watcher_queue); q = QUEUE_HEAD(&loop->watcher_queue);
@ -179,7 +182,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
assert(timeout >= -1); assert(timeout >= -1);
time_base = loop->time; time_base = loop->time;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1; reset_timeout = 1;
user_timeout = timeout; user_timeout = timeout;
timeout = 0; timeout = 0;
@ -198,6 +201,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (timeout != 0) if (timeout != 0)
uv__metrics_set_provider_entry_time(loop); uv__metrics_set_provider_entry_time(loop);
/* Store the current timeout in a location that's globally accessible so
* other locations like uv__work_done() can determine whether the queue
* of events in the callback were waiting when poll was called.
*/
lfields->current_timeout = timeout;
if (pset != NULL) if (pset != NULL)
if (pthread_sigmask(SIG_BLOCK, pset, NULL)) if (pthread_sigmask(SIG_BLOCK, pset, NULL))
abort(); abort();
@ -292,9 +301,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
} }
} }
uv__metrics_inc_events(loop, nevents);
if (reset_timeout != 0) { if (reset_timeout != 0) {
timeout = user_timeout; timeout = user_timeout;
reset_timeout = 0; reset_timeout = 0;
uv__metrics_inc_events_waiting(loop, nevents);
} }
if (have_signals != 0) { if (have_signals != 0) {

View File

@ -55,7 +55,7 @@
extern char **environ; extern char **environ;
#endif #endif
#if defined(__linux__) || defined(__GLIBC__) #if defined(__linux__)
# include <grp.h> # include <grp.h>
#endif #endif
@ -79,8 +79,28 @@ static void uv__chld(uv_signal_t* handle, int signum) {
assert(signum == SIGCHLD); assert(signum == SIGCHLD);
uv__wait_children(handle->loop); uv__wait_children(handle->loop);
} }
int uv__process_init(uv_loop_t* loop) {
int err;
err = uv_signal_init(loop, &loop->child_watcher);
if (err)
return err;
uv__handle_unref(&loop->child_watcher);
loop->child_watcher.flags |= UV_HANDLE_INTERNAL;
return 0;
}
#else
int uv__process_init(uv_loop_t* loop) {
memset(&loop->child_watcher, 0, sizeof(loop->child_watcher));
return 0;
}
#endif #endif
void uv__wait_children(uv_loop_t* loop) { void uv__wait_children(uv_loop_t* loop) {
uv_process_t* process; uv_process_t* process;
int exit_status; int exit_status;
@ -105,6 +125,7 @@ void uv__wait_children(uv_loop_t* loop) {
continue; continue;
options = 0; options = 0;
process->flags &= ~UV_HANDLE_REAP; process->flags &= ~UV_HANDLE_REAP;
loop->nfds--;
#else #else
options = WNOHANG; options = WNOHANG;
#endif #endif
@ -665,7 +686,7 @@ static int uv__spawn_resolve_and_spawn(const uv_process_options_t* options,
if (options->file == NULL) if (options->file == NULL)
return ENOENT; return ENOENT;
/* The environment for the child process is that of the parent unless overriden /* The environment for the child process is that of the parent unless overridden
* by options->env */ * by options->env */
char** env = environ; char** env = environ;
if (options->env != NULL) if (options->env != NULL)
@ -1012,6 +1033,10 @@ int uv_spawn(uv_loop_t* loop,
process->flags |= UV_HANDLE_REAP; process->flags |= UV_HANDLE_REAP;
loop->flags |= UV_LOOP_REAP_CHILDREN; loop->flags |= UV_LOOP_REAP_CHILDREN;
} }
/* This prevents uv__io_poll() from bailing out prematurely, being unaware
* that we added an event here for it to react to. We will decrement this
* again after the waitpid call succeeds. */
loop->nfds++;
#endif #endif
process->pid = pid; process->pid = pid;
@ -1080,6 +1105,8 @@ int uv_kill(int pid, int signum) {
void uv__process_close(uv_process_t* handle) { void uv__process_close(uv_process_t* handle) {
QUEUE_REMOVE(&handle->queue); QUEUE_REMOVE(&handle->queue);
uv__handle_stop(handle); uv__handle_stop(handle);
#ifdef UV_USE_SIGCHLD
if (QUEUE_EMPTY(&handle->loop->process_handles)) if (QUEUE_EMPTY(&handle->loop->process_handles))
uv_signal_stop(&handle->loop->child_watcher); uv_signal_stop(&handle->loop->child_watcher);
#endif
} }

View File

@ -1,58 +0,0 @@
/* Copyright (c) 2013, Sony Mobile Communications AB
* Copyright (c) 2012, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Android versions < 4.1 have a broken pthread_sigmask. */
#include "uv-common.h"
#include <errno.h>
#include <pthread.h>
#include <signal.h>
int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset) {
static int workaround;
int err;
if (uv__load_relaxed(&workaround)) {
return sigprocmask(how, set, oset);
} else {
err = pthread_sigmask(how, set, oset);
if (err) {
if (err == EINVAL && sigprocmask(how, set, oset) == 0) {
uv__store_relaxed(&workaround, 1);
return 0;
} else {
return -1;
}
}
}
return 0;
}

View File

@ -88,6 +88,11 @@ uint64_t uv_get_constrained_memory(void) {
} }
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
int uv_resident_set_memory(size_t* rss) { int uv_resident_set_memory(size_t* rss) {
int fd; int fd;
procfs_asinfo asinfo; procfs_asinfo asinfo;

View File

@ -40,7 +40,7 @@ int uv__random_readpath(const char* path, void* buf, size_t buflen) {
if (fd < 0) if (fd < 0)
return fd; return fd;
if (fstat(fd, &s)) { if (uv__fstat(fd, &s)) {
uv__close(fd); uv__close(fd);
return UV__ERR(errno); return UV__ERR(errno);
} }

View File

@ -24,8 +24,6 @@
#ifdef __linux__ #ifdef __linux__
#include "linux-syscalls.h"
#define uv__random_getrandom_init() 0 #define uv__random_getrandom_init() 0
#else /* !__linux__ */ #else /* !__linux__ */

View File

@ -279,6 +279,8 @@ static int uv__signal_loop_once_init(uv_loop_t* loop) {
int uv__signal_loop_fork(uv_loop_t* loop) { int uv__signal_loop_fork(uv_loop_t* loop) {
if (loop->signal_pipefd[0] == -1)
return 0;
uv__io_stop(loop, &loop->signal_io_watcher, POLLIN); uv__io_stop(loop, &loop->signal_io_watcher, POLLIN);
uv__close(loop->signal_pipefd[0]); uv__close(loop->signal_pipefd[0]);
uv__close(loop->signal_pipefd[1]); uv__close(loop->signal_pipefd[1]);

View File

@ -1,53 +0,0 @@
/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef UV_SPINLOCK_H_
#define UV_SPINLOCK_H_
#include "internal.h" /* ACCESS_ONCE, UV_UNUSED */
#include "atomic-ops.h"
#define UV_SPINLOCK_INITIALIZER { 0 }
typedef struct {
int lock;
} uv_spinlock_t;
UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock));
UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock));
UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock));
UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock));
UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)) {
ACCESS_ONCE(int, spinlock->lock) = 0;
}
UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)) {
while (!uv_spinlock_trylock(spinlock)) cpu_relax();
}
UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)) {
ACCESS_ONCE(int, spinlock->lock) = 0;
}
UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)) {
/* TODO(bnoordhuis) Maybe change to a ticket lock to guarantee fair queueing.
* Not really critical until we have locks that are (frequently) contended
* for by several threads.
*/
return 0 == cmpxchgi(&spinlock->lock, 0, 1);
}
#endif /* UV_SPINLOCK_H_ */

View File

@ -60,6 +60,16 @@ struct uv__stream_select_s {
}; };
#endif /* defined(__APPLE__) */ #endif /* defined(__APPLE__) */
union uv__cmsg {
struct cmsghdr hdr;
/* This cannot be larger because of the IBMi PASE limitation that
* the total size of control messages cannot exceed 256 bytes.
*/
char pad[256];
};
STATIC_ASSERT(256 == sizeof(union uv__cmsg));
static void uv__stream_connect(uv_stream_t*); static void uv__stream_connect(uv_stream_t*);
static void uv__write(uv_stream_t* stream); static void uv__write(uv_stream_t* stream);
static void uv__read(uv_stream_t* stream); static void uv__read(uv_stream_t* stream);
@ -495,76 +505,34 @@ static int uv__emfile_trick(uv_loop_t* loop, int accept_fd) {
} }
#if defined(UV_HAVE_KQUEUE)
# define UV_DEC_BACKLOG(w) w->rcount--;
#else
# define UV_DEC_BACKLOG(w) /* no-op */
#endif /* defined(UV_HAVE_KQUEUE) */
void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
uv_stream_t* stream; uv_stream_t* stream;
int err; int err;
int fd;
stream = container_of(w, uv_stream_t, io_watcher); stream = container_of(w, uv_stream_t, io_watcher);
assert(events & POLLIN); assert(events & POLLIN);
assert(stream->accepted_fd == -1); assert(stream->accepted_fd == -1);
assert(!(stream->flags & UV_HANDLE_CLOSING)); assert(!(stream->flags & UV_HANDLE_CLOSING));
uv__io_start(stream->loop, &stream->io_watcher, POLLIN); fd = uv__stream_fd(stream);
err = uv__accept(fd);
/* connection_cb can close the server socket while we're if (err == UV_EMFILE || err == UV_ENFILE)
* in the loop so check it on each iteration. err = uv__emfile_trick(loop, fd); /* Shed load. */
*/
while (uv__stream_fd(stream) != -1) {
assert(stream->accepted_fd == -1);
#if defined(UV_HAVE_KQUEUE) if (err < 0)
if (w->rcount <= 0) return;
return;
#endif /* defined(UV_HAVE_KQUEUE) */
err = uv__accept(uv__stream_fd(stream)); stream->accepted_fd = err;
if (err < 0) { stream->connection_cb(stream, 0);
if (err == UV_EAGAIN || err == UV__ERR(EWOULDBLOCK))
return; /* Not an error. */
if (err == UV_ECONNABORTED) if (stream->accepted_fd != -1)
continue; /* Ignore. Nothing we can do about that. */ /* The user hasn't yet called uv_accept() */
uv__io_stop(loop, &stream->io_watcher, POLLIN);
if (err == UV_EMFILE || err == UV_ENFILE) {
err = uv__emfile_trick(loop, uv__stream_fd(stream));
if (err == UV_EAGAIN || err == UV__ERR(EWOULDBLOCK))
break;
}
stream->connection_cb(stream, err);
continue;
}
UV_DEC_BACKLOG(w)
stream->accepted_fd = err;
stream->connection_cb(stream, 0);
if (stream->accepted_fd != -1) {
/* The user hasn't yet accepted called uv_accept() */
uv__io_stop(loop, &stream->io_watcher, POLLIN);
return;
}
if (stream->type == UV_TCP &&
(stream->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) {
/* Give other processes a chance to accept connections. */
struct timespec timeout = { 0, 1 };
nanosleep(&timeout, NULL);
}
}
} }
#undef UV_DEC_BACKLOG
int uv_accept(uv_stream_t* server, uv_stream_t* client) { int uv_accept(uv_stream_t* server, uv_stream_t* client) {
int err; int err;
@ -665,7 +633,7 @@ static void uv__drain(uv_stream_t* stream) {
uv__stream_osx_interrupt_select(stream); uv__stream_osx_interrupt_select(stream);
} }
if (!(stream->flags & UV_HANDLE_SHUTTING)) if (!uv__is_stream_shutting(stream))
return; return;
req = stream->shutdown_req; req = stream->shutdown_req;
@ -674,7 +642,6 @@ static void uv__drain(uv_stream_t* stream) {
if ((stream->flags & UV_HANDLE_CLOSING) || if ((stream->flags & UV_HANDLE_CLOSING) ||
!(stream->flags & UV_HANDLE_SHUT)) { !(stream->flags & UV_HANDLE_SHUT)) {
stream->shutdown_req = NULL; stream->shutdown_req = NULL;
stream->flags &= ~UV_HANDLE_SHUTTING;
uv__req_unregister(stream->loop, req); uv__req_unregister(stream->loop, req);
err = 0; err = 0;
@ -812,18 +779,14 @@ static int uv__try_write(uv_stream_t* stream,
if (send_handle != NULL) { if (send_handle != NULL) {
int fd_to_send; int fd_to_send;
struct msghdr msg; struct msghdr msg;
struct cmsghdr *cmsg; union uv__cmsg cmsg;
union {
char data[64];
struct cmsghdr alias;
} scratch;
if (uv__is_closing(send_handle)) if (uv__is_closing(send_handle))
return UV_EBADF; return UV_EBADF;
fd_to_send = uv__handle_fd((uv_handle_t*) send_handle); fd_to_send = uv__handle_fd((uv_handle_t*) send_handle);
memset(&scratch, 0, sizeof(scratch)); memset(&cmsg, 0, sizeof(cmsg));
assert(fd_to_send >= 0); assert(fd_to_send >= 0);
@ -833,20 +796,13 @@ static int uv__try_write(uv_stream_t* stream,
msg.msg_iovlen = iovcnt; msg.msg_iovlen = iovcnt;
msg.msg_flags = 0; msg.msg_flags = 0;
msg.msg_control = &scratch.alias; msg.msg_control = &cmsg.hdr;
msg.msg_controllen = CMSG_SPACE(sizeof(fd_to_send)); msg.msg_controllen = CMSG_SPACE(sizeof(fd_to_send));
cmsg = CMSG_FIRSTHDR(&msg); cmsg.hdr.cmsg_level = SOL_SOCKET;
cmsg->cmsg_level = SOL_SOCKET; cmsg.hdr.cmsg_type = SCM_RIGHTS;
cmsg->cmsg_type = SCM_RIGHTS; cmsg.hdr.cmsg_len = CMSG_LEN(sizeof(fd_to_send));
cmsg->cmsg_len = CMSG_LEN(sizeof(fd_to_send)); memcpy(CMSG_DATA(&cmsg.hdr), &fd_to_send, sizeof(fd_to_send));
/* silence aliasing warning */
{
void* pv = CMSG_DATA(cmsg);
int* pi = pv;
*pi = fd_to_send;
}
do do
n = sendmsg(uv__stream_fd(stream), &msg, 0); n = sendmsg(uv__stream_fd(stream), &msg, 0);
@ -884,9 +840,16 @@ static void uv__write(uv_stream_t* stream) {
QUEUE* q; QUEUE* q;
uv_write_t* req; uv_write_t* req;
ssize_t n; ssize_t n;
int count;
assert(uv__stream_fd(stream) >= 0); assert(uv__stream_fd(stream) >= 0);
/* Prevent loop starvation when the consumer of this stream read as fast as
* (or faster than) we can write it. This `count` mechanism does not need to
* change even if we switch to edge-triggered I/O.
*/
count = 32;
for (;;) { for (;;) {
if (QUEUE_EMPTY(&stream->write_queue)) if (QUEUE_EMPTY(&stream->write_queue))
return; return;
@ -905,10 +868,13 @@ static void uv__write(uv_stream_t* stream) {
req->send_handle = NULL; req->send_handle = NULL;
if (uv__write_req_update(stream, req, n)) { if (uv__write_req_update(stream, req, n)) {
uv__write_req_finish(req); uv__write_req_finish(req);
return; /* TODO(bnoordhuis) Start trying to write the next request. */ if (count-- > 0)
continue; /* Start trying to write the next request. */
return;
} }
} else if (n != UV_EAGAIN) } else if (n != UV_EAGAIN)
break; goto error;
/* If this is a blocking stream, try again. */ /* If this is a blocking stream, try again. */
if (stream->flags & UV_HANDLE_BLOCKING_WRITES) if (stream->flags & UV_HANDLE_BLOCKING_WRITES)
@ -923,6 +889,7 @@ static void uv__write(uv_stream_t* stream) {
return; return;
} }
error:
req->error = n; req->error = n;
uv__write_req_finish(req); uv__write_req_finish(req);
uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT); uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
@ -1010,57 +977,38 @@ static int uv__stream_queue_fd(uv_stream_t* stream, int fd) {
} }
#if defined(__PASE__)
/* on IBMi PASE the control message length can not exceed 256. */
# define UV__CMSG_FD_COUNT 60
#else
# define UV__CMSG_FD_COUNT 64
#endif
#define UV__CMSG_FD_SIZE (UV__CMSG_FD_COUNT * sizeof(int))
static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) { static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) {
struct cmsghdr* cmsg; struct cmsghdr* cmsg;
int fd;
int err;
size_t i;
size_t count;
for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) { for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
char* start;
char* end;
int err;
void* pv;
int* pi;
unsigned int i;
unsigned int count;
if (cmsg->cmsg_type != SCM_RIGHTS) { if (cmsg->cmsg_type != SCM_RIGHTS) {
fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n", fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n",
cmsg->cmsg_type); cmsg->cmsg_type);
continue; continue;
} }
/* silence aliasing warning */ assert(cmsg->cmsg_len >= CMSG_LEN(0));
pv = CMSG_DATA(cmsg); count = cmsg->cmsg_len - CMSG_LEN(0);
pi = pv; assert(count % sizeof(fd) == 0);
count /= sizeof(fd);
/* Count available fds */
start = (char*) cmsg;
end = (char*) cmsg + cmsg->cmsg_len;
count = 0;
while (start + CMSG_LEN(count * sizeof(*pi)) < end)
count++;
assert(start + CMSG_LEN(count * sizeof(*pi)) == end);
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
memcpy(&fd, (char*) CMSG_DATA(cmsg) + i * sizeof(fd), sizeof(fd));
/* Already has accepted fd, queue now */ /* Already has accepted fd, queue now */
if (stream->accepted_fd != -1) { if (stream->accepted_fd != -1) {
err = uv__stream_queue_fd(stream, pi[i]); err = uv__stream_queue_fd(stream, fd);
if (err != 0) { if (err != 0) {
/* Close rest */ /* Close rest */
for (; i < count; i++) for (; i < count; i++)
uv__close(pi[i]); uv__close(fd);
return err; return err;
} }
} else { } else {
stream->accepted_fd = pi[i]; stream->accepted_fd = fd;
} }
} }
} }
@ -1069,17 +1017,11 @@ static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) {
} }
#ifdef __clang__
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wgnu-folding-constant"
# pragma clang diagnostic ignored "-Wvla-extension"
#endif
static void uv__read(uv_stream_t* stream) { static void uv__read(uv_stream_t* stream) {
uv_buf_t buf; uv_buf_t buf;
ssize_t nread; ssize_t nread;
struct msghdr msg; struct msghdr msg;
char cmsg_space[CMSG_SPACE(UV__CMSG_FD_SIZE)]; union uv__cmsg cmsg;
int count; int count;
int err; int err;
int is_ipc; int is_ipc;
@ -1125,8 +1067,8 @@ static void uv__read(uv_stream_t* stream) {
msg.msg_name = NULL; msg.msg_name = NULL;
msg.msg_namelen = 0; msg.msg_namelen = 0;
/* Set up to receive a descriptor even if one isn't in the message */ /* Set up to receive a descriptor even if one isn't in the message */
msg.msg_controllen = sizeof(cmsg_space); msg.msg_controllen = sizeof(cmsg);
msg.msg_control = cmsg_space; msg.msg_control = &cmsg.hdr;
do { do {
nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0); nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0);
@ -1210,14 +1152,6 @@ static void uv__read(uv_stream_t* stream) {
} }
#ifdef __clang__
# pragma clang diagnostic pop
#endif
#undef UV__CMSG_FD_COUNT
#undef UV__CMSG_FD_SIZE
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) { int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
assert(stream->type == UV_TCP || assert(stream->type == UV_TCP ||
stream->type == UV_TTY || stream->type == UV_TTY ||
@ -1225,7 +1159,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
if (!(stream->flags & UV_HANDLE_WRITABLE) || if (!(stream->flags & UV_HANDLE_WRITABLE) ||
stream->flags & UV_HANDLE_SHUT || stream->flags & UV_HANDLE_SHUT ||
stream->flags & UV_HANDLE_SHUTTING || uv__is_stream_shutting(stream) ||
uv__is_closing(stream)) { uv__is_closing(stream)) {
return UV_ENOTCONN; return UV_ENOTCONN;
} }
@ -1238,7 +1172,6 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
req->handle = stream; req->handle = stream;
req->cb = cb; req->cb = cb;
stream->shutdown_req = req; stream->shutdown_req = req;
stream->flags |= UV_HANDLE_SHUTTING;
stream->flags &= ~UV_HANDLE_WRITABLE; stream->flags &= ~UV_HANDLE_WRITABLE;
if (QUEUE_EMPTY(&stream->write_queue)) if (QUEUE_EMPTY(&stream->write_queue))
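The rewritten uv__try_write() and uv__stream_recv_cmsg() above pass file descriptors with SCM_RIGHTS control messages, using a fixed 256-byte union to keep the buffer aligned as a struct cmsghdr. A standalone sketch of that send path (send_fd_over() is illustrative, not libuv API):

/* Sketch: send one fd over a Unix-domain socket with SCM_RIGHTS. */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

union example_cmsg {
  struct cmsghdr hdr;
  char pad[256];          /* IBMi PASE caps control data at 256 bytes */
};

static int send_fd_over(int sock, int fd_to_send, const void* byte, size_t len) {
  union example_cmsg cmsg;
  struct iovec iov = { (void*) byte, len };
  struct msghdr msg;

  memset(&cmsg, 0, sizeof(cmsg));
  memset(&msg, 0, sizeof(msg));
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  msg.msg_control = &cmsg.hdr;
  msg.msg_controllen = CMSG_SPACE(sizeof(fd_to_send));

  cmsg.hdr.cmsg_level = SOL_SOCKET;
  cmsg.hdr.cmsg_type = SCM_RIGHTS;
  cmsg.hdr.cmsg_len = CMSG_LEN(sizeof(fd_to_send));
  memcpy(CMSG_DATA(&cmsg.hdr), &fd_to_send, sizeof(fd_to_send));

  return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}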

View File

@ -320,9 +320,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
} }
uv__metrics_inc_events(loop, nevents);
if (reset_timeout != 0) { if (reset_timeout != 0) {
timeout = user_timeout; timeout = user_timeout;
reset_timeout = 0; reset_timeout = 0;
uv__metrics_inc_events_waiting(loop, nevents);
} }
if (have_signals != 0) { if (have_signals != 0) {
@ -415,6 +417,11 @@ uint64_t uv_get_constrained_memory(void) {
} }
uint64_t uv_get_available_memory(void) {
return uv_get_free_memory();
}
void uv_loadavg(double avg[3]) { void uv_loadavg(double avg[3]) {
(void) getloadavg(avg, 3); (void) getloadavg(avg, 3);
} }

View File

@ -28,16 +28,39 @@
#include <errno.h> #include <errno.h>
static int new_socket(uv_tcp_t* handle, int domain, unsigned long flags) { static int maybe_bind_socket(int fd) {
struct sockaddr_storage saddr; union uv__sockaddr s;
socklen_t slen; socklen_t slen;
slen = sizeof(s);
memset(&s, 0, sizeof(s));
if (getsockname(fd, &s.addr, &slen))
return UV__ERR(errno);
if (s.addr.sa_family == AF_INET)
if (s.in.sin_port != 0)
return 0; /* Already bound to a port. */
if (s.addr.sa_family == AF_INET6)
if (s.in6.sin6_port != 0)
return 0; /* Already bound to a port. */
/* Bind to an arbitrary port. */
if (bind(fd, &s.addr, slen))
return UV__ERR(errno);
return 0;
}
static int new_socket(uv_tcp_t* handle, int domain, unsigned int flags) {
int sockfd; int sockfd;
int err; int err;
err = uv__socket(domain, SOCK_STREAM, 0); sockfd = uv__socket(domain, SOCK_STREAM, 0);
if (err < 0) if (sockfd < 0)
return err; return sockfd;
sockfd = err;
err = uv__stream_open((uv_stream_t*) handle, sockfd, flags); err = uv__stream_open((uv_stream_t*) handle, sockfd, flags);
if (err) { if (err) {
@ -45,74 +68,44 @@ static int new_socket(uv_tcp_t* handle, int domain, unsigned long flags) {
return err; return err;
} }
if (flags & UV_HANDLE_BOUND) { if (flags & UV_HANDLE_BOUND)
/* Bind this new socket to an arbitrary port */ return maybe_bind_socket(sockfd);
slen = sizeof(saddr);
memset(&saddr, 0, sizeof(saddr));
if (getsockname(uv__stream_fd(handle), (struct sockaddr*) &saddr, &slen)) {
uv__close(sockfd);
return UV__ERR(errno);
}
if (bind(uv__stream_fd(handle), (struct sockaddr*) &saddr, slen)) {
uv__close(sockfd);
return UV__ERR(errno);
}
}
return 0; return 0;
} }
static int maybe_new_socket(uv_tcp_t* handle, int domain, unsigned long flags) { static int maybe_new_socket(uv_tcp_t* handle, int domain, unsigned int flags) {
struct sockaddr_storage saddr; int sockfd;
socklen_t slen; int err;
if (domain == AF_UNSPEC) { if (domain == AF_UNSPEC)
handle->flags |= flags; goto out;
return 0;
}
if (uv__stream_fd(handle) != -1) { sockfd = uv__stream_fd(handle);
if (sockfd == -1)
return new_socket(handle, domain, flags);
if (flags & UV_HANDLE_BOUND) { if (!(flags & UV_HANDLE_BOUND))
goto out;
if (handle->flags & UV_HANDLE_BOUND) { if (handle->flags & UV_HANDLE_BOUND)
/* It is already bound to a port. */ goto out; /* Already bound to a port. */
handle->flags |= flags;
return 0;
}
/* Query to see if tcp socket is bound. */ err = maybe_bind_socket(sockfd);
slen = sizeof(saddr); if (err)
memset(&saddr, 0, sizeof(saddr)); return err;
if (getsockname(uv__stream_fd(handle), (struct sockaddr*) &saddr, &slen))
return UV__ERR(errno);
if ((saddr.ss_family == AF_INET6 && out:
((struct sockaddr_in6*) &saddr)->sin6_port != 0) ||
(saddr.ss_family == AF_INET &&
((struct sockaddr_in*) &saddr)->sin_port != 0)) {
/* Handle is already bound to a port. */
handle->flags |= flags;
return 0;
}
/* Bind to arbitrary port */ handle->flags |= flags;
if (bind(uv__stream_fd(handle), (struct sockaddr*) &saddr, slen)) return 0;
return UV__ERR(errno);
}
handle->flags |= flags;
return 0;
}
return new_socket(handle, domain, flags);
} }
int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) { int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) {
int domain; int domain;
int err;
/* Use the lower 8 bits for the domain */ /* Use the lower 8 bits for the domain */
domain = flags & 0xFF; domain = flags & 0xFF;
@ -129,9 +122,12 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) {
*/ */
if (domain != AF_UNSPEC) { if (domain != AF_UNSPEC) {
int err = maybe_new_socket(tcp, domain, 0); err = new_socket(tcp, domain, 0);
if (err) { if (err) {
QUEUE_REMOVE(&tcp->handle_queue); QUEUE_REMOVE(&tcp->handle_queue);
if (tcp->io_watcher.fd != -1)
uv__close(tcp->io_watcher.fd);
tcp->io_watcher.fd = -1;
return err; return err;
} }
} }
@ -317,7 +313,7 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
struct linger l = { 1, 0 }; struct linger l = { 1, 0 };
/* Disallow setting SO_LINGER to zero due to some platform inconsistencies */ /* Disallow setting SO_LINGER to zero due to some platform inconsistencies */
if (handle->flags & UV_HANDLE_SHUTTING) if (uv__is_stream_shutting(handle))
return UV_EINVAL; return UV_EINVAL;
fd = uv__stream_fd(handle); fd = uv__stream_fd(handle);
@ -338,24 +334,12 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
int uv__tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) { int uv__tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
static int single_accept_cached = -1; unsigned int flags;
unsigned long flags;
int single_accept;
int err; int err;
if (tcp->delayed_error) if (tcp->delayed_error)
return tcp->delayed_error; return tcp->delayed_error;
single_accept = uv__load_relaxed(&single_accept_cached);
if (single_accept == -1) {
const char* val = getenv("UV_TCP_SINGLE_ACCEPT");
single_accept = (val != NULL && atoi(val) != 0); /* Off by default. */
uv__store_relaxed(&single_accept_cached, single_accept);
}
if (single_accept)
tcp->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
flags = 0; flags = 0;
#if defined(__MVS__) #if defined(__MVS__)
/* on zOS the listen call does not bind automatically /* on zOS the listen call does not bind automatically
@ -460,10 +444,6 @@ int uv_tcp_keepalive(uv_tcp_t* handle, int on, unsigned int delay) {
int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) { int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
if (enable)
handle->flags &= ~UV_HANDLE_TCP_SINGLE_ACCEPT;
else
handle->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
return 0; return 0;
} }
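The new maybe_bind_socket() helper above boils down to: ask getsockname() whether the socket already carries a port, and if not, bind it back with port 0 so the kernel assigns an ephemeral one. A standalone sketch of that check, assuming an AF_INET or AF_INET6 socket:

/* Sketch: bind a TCP socket to an ephemeral port only if it is unbound. */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int bind_if_unbound(int fd) {
  struct sockaddr_storage ss;
  socklen_t slen = sizeof(ss);

  memset(&ss, 0, sizeof(ss));
  if (getsockname(fd, (struct sockaddr*) &ss, &slen))
    return -1;

  if (ss.ss_family == AF_INET && ((struct sockaddr_in*) &ss)->sin_port != 0)
    return 0;   /* already bound */
  if (ss.ss_family == AF_INET6 && ((struct sockaddr_in6*) &ss)->sin6_port != 0)
    return 0;   /* already bound */

  return bind(fd, (struct sockaddr*) &ss, slen);  /* port 0: kernel picks one */
}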

View File

@ -41,127 +41,20 @@
#include <gnu/libc-version.h> /* gnu_get_libc_version() */ #include <gnu/libc-version.h> /* gnu_get_libc_version() */
#endif #endif
#if defined(__linux__)
# include <sched.h>
# define uv__cpu_set_t cpu_set_t
#elif defined(__FreeBSD__)
# include <sys/param.h>
# include <sys/cpuset.h>
# include <pthread_np.h>
# define uv__cpu_set_t cpuset_t
#endif
#undef NANOSEC #undef NANOSEC
#define NANOSEC ((uint64_t) 1e9) #define NANOSEC ((uint64_t) 1e9)
#if defined(PTHREAD_BARRIER_SERIAL_THREAD)
STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
#endif
/* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */
#if defined(_AIX) || \
defined(__OpenBSD__) || \
!defined(PTHREAD_BARRIER_SERIAL_THREAD)
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
struct _uv_barrier* b;
int rc;
if (barrier == NULL || count == 0)
return UV_EINVAL;
b = uv__malloc(sizeof(*b));
if (b == NULL)
return UV_ENOMEM;
b->in = 0;
b->out = 0;
b->threshold = count;
rc = uv_mutex_init(&b->mutex);
if (rc != 0)
goto error2;
rc = uv_cond_init(&b->cond);
if (rc != 0)
goto error;
barrier->b = b;
return 0;
error:
uv_mutex_destroy(&b->mutex);
error2:
uv__free(b);
return rc;
}
int uv_barrier_wait(uv_barrier_t* barrier) {
struct _uv_barrier* b;
int last;
if (barrier == NULL || barrier->b == NULL)
return UV_EINVAL;
b = barrier->b;
uv_mutex_lock(&b->mutex);
if (++b->in == b->threshold) {
b->in = 0;
b->out = b->threshold;
uv_cond_signal(&b->cond);
} else {
do
uv_cond_wait(&b->cond, &b->mutex);
while (b->in != 0);
}
last = (--b->out == 0);
uv_cond_signal(&b->cond);
uv_mutex_unlock(&b->mutex);
return last;
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
struct _uv_barrier* b;
b = barrier->b;
uv_mutex_lock(&b->mutex);
assert(b->in == 0);
while (b->out != 0)
uv_cond_wait(&b->cond, &b->mutex);
if (b->in != 0)
abort();
uv_mutex_unlock(&b->mutex);
uv_mutex_destroy(&b->mutex);
uv_cond_destroy(&b->cond);
uv__free(barrier->b);
barrier->b = NULL;
}
#else
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
return UV__ERR(pthread_barrier_init(barrier, NULL, count));
}
int uv_barrier_wait(uv_barrier_t* barrier) {
int rc;
rc = pthread_barrier_wait(barrier);
if (rc != 0)
if (rc != PTHREAD_BARRIER_SERIAL_THREAD)
abort();
return rc == PTHREAD_BARRIER_SERIAL_THREAD;
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
if (pthread_barrier_destroy(barrier))
abort();
}
#endif
/* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is /* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
* too small to safely receive signals on. * too small to safely receive signals on.
* *
@ -284,6 +177,106 @@ int uv_thread_create_ex(uv_thread_t* tid,
return UV__ERR(err); return UV__ERR(err);
} }
#if UV__CPU_AFFINITY_SUPPORTED
int uv_thread_setaffinity(uv_thread_t* tid,
char* cpumask,
char* oldmask,
size_t mask_size) {
int i;
int r;
uv__cpu_set_t cpuset;
int cpumasksize;
cpumasksize = uv_cpumask_size();
if (cpumasksize < 0)
return cpumasksize;
if (mask_size < (size_t)cpumasksize)
return UV_EINVAL;
if (oldmask != NULL) {
r = uv_thread_getaffinity(tid, oldmask, mask_size);
if (r < 0)
return r;
}
CPU_ZERO(&cpuset);
for (i = 0; i < cpumasksize; i++)
if (cpumask[i])
CPU_SET(i, &cpuset);
#if defined(__ANDROID__)
if (sched_setaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
r = errno;
else
r = 0;
#else
r = pthread_setaffinity_np(*tid, sizeof(cpuset), &cpuset);
#endif
return UV__ERR(r);
}
int uv_thread_getaffinity(uv_thread_t* tid,
char* cpumask,
size_t mask_size) {
int r;
int i;
uv__cpu_set_t cpuset;
int cpumasksize;
cpumasksize = uv_cpumask_size();
if (cpumasksize < 0)
return cpumasksize;
if (mask_size < (size_t)cpumasksize)
return UV_EINVAL;
CPU_ZERO(&cpuset);
#if defined(__ANDROID__)
if (sched_getaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset))
r = errno;
else
r = 0;
#else
r = pthread_getaffinity_np(*tid, sizeof(cpuset), &cpuset);
#endif
if (r)
return UV__ERR(r);
for (i = 0; i < cpumasksize; i++)
cpumask[i] = !!CPU_ISSET(i, &cpuset);
return 0;
}
#else
int uv_thread_setaffinity(uv_thread_t* tid,
char* cpumask,
char* oldmask,
size_t mask_size) {
return UV_ENOTSUP;
}
int uv_thread_getaffinity(uv_thread_t* tid,
char* cpumask,
size_t mask_size) {
return UV_ENOTSUP;
}
#endif /* defined(__linux__) || defined(UV_BSD_H) */
int uv_thread_getcpu(void) {
#if UV__CPU_AFFINITY_SUPPORTED
int cpu;
cpu = sched_getcpu();
if (cpu < 0)
return UV__ERR(errno);
return cpu;
#else
return UV_ENOTSUP;
#endif
}
uv_thread_t uv_thread_self(void) { uv_thread_t uv_thread_self(void) {
return pthread_self(); return pthread_self();
@ -585,7 +578,7 @@ static void uv__custom_sem_post(uv_sem_t* sem_) {
uv_mutex_lock(&sem->mutex); uv_mutex_lock(&sem->mutex);
sem->value++; sem->value++;
if (sem->value == 1) if (sem->value == 1)
uv_cond_signal(&sem->cond); uv_cond_signal(&sem->cond); /* Release one to replace us. */
uv_mutex_unlock(&sem->mutex); uv_mutex_unlock(&sem->mutex);
} }
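The affinity helpers added above are new public API in libuv 1.45.0. A usage sketch that pins the calling thread to CPU 0 (an arbitrary example policy):

/* Sketch: pin the current thread to CPU 0 via the new affinity API. */
#include <stdlib.h>
#include <uv.h>

static int pin_self_to_cpu0(void) {
  uv_thread_t self = uv_thread_self();
  int size = uv_cpumask_size();
  char* mask;
  int rc;

  if (size < 0)
    return size;                 /* UV_ENOTSUP on unsupported platforms */

  mask = calloc(size, 1);
  if (mask == NULL)
    return UV_ENOMEM;

  mask[0] = 1;                   /* CPU 0 only */
  rc = uv_thread_setaffinity(&self, mask, NULL, (size_t) size);
  free(mask);
  return rc;                     /* uv_thread_getcpu() should now report 0 */
}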

View File

@ -21,8 +21,8 @@
#include "uv.h" #include "uv.h"
#include "internal.h" #include "internal.h"
#include "spinlock.h"
#include <stdatomic.h>
#include <stdlib.h> #include <stdlib.h>
#include <assert.h> #include <assert.h>
#include <unistd.h> #include <unistd.h>
@ -64,7 +64,7 @@ static int isreallyatty(int file) {
static int orig_termios_fd = -1; static int orig_termios_fd = -1;
static struct termios orig_termios; static struct termios orig_termios;
static uv_spinlock_t termios_spinlock = UV_SPINLOCK_INITIALIZER; static _Atomic int termios_spinlock;
int uv__tcsetattr(int fd, int how, const struct termios *term) { int uv__tcsetattr(int fd, int how, const struct termios *term) {
int rc; int rc;
@ -81,7 +81,7 @@ int uv__tcsetattr(int fd, int how, const struct termios *term) {
static int uv__tty_is_slave(const int fd) { static int uv__tty_is_slave(const int fd) {
int result; int result;
#if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) #if defined(__linux__) || defined(__FreeBSD__)
int dummy; int dummy;
result = ioctl(fd, TIOCGPTN, &dummy) != 0; result = ioctl(fd, TIOCGPTN, &dummy) != 0;
@ -113,7 +113,7 @@ static int uv__tty_is_slave(const int fd) {
} }
/* Lookup stat structure behind the file descriptor. */ /* Lookup stat structure behind the file descriptor. */
if (fstat(fd, &sb) != 0) if (uv__fstat(fd, &sb) != 0)
abort(); abort();
/* Assert character device. */ /* Assert character device. */
@ -280,6 +280,7 @@ static void uv__tty_make_raw(struct termios* tio) {
int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) { int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
struct termios tmp; struct termios tmp;
int expected;
int fd; int fd;
int rc; int rc;
@ -296,12 +297,16 @@ int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
return UV__ERR(errno); return UV__ERR(errno);
/* This is used for uv_tty_reset_mode() */ /* This is used for uv_tty_reset_mode() */
uv_spinlock_lock(&termios_spinlock); do
expected = 0;
while (!atomic_compare_exchange_strong(&termios_spinlock, &expected, 1));
if (orig_termios_fd == -1) { if (orig_termios_fd == -1) {
orig_termios = tty->orig_termios; orig_termios = tty->orig_termios;
orig_termios_fd = fd; orig_termios_fd = fd;
} }
uv_spinlock_unlock(&termios_spinlock);
atomic_store(&termios_spinlock, 0);
} }
tmp = tty->orig_termios; tmp = tty->orig_termios;
@ -360,7 +365,7 @@ uv_handle_type uv_guess_handle(uv_file file) {
if (isatty(file)) if (isatty(file))
return UV_TTY; return UV_TTY;
if (fstat(file, &s)) { if (uv__fstat(file, &s)) {
#if defined(__PASE__) #if defined(__PASE__)
/* On ibmi receiving RST from TCP instead of FIN immediately puts fd into /* On ibmi receiving RST from TCP instead of FIN immediately puts fd into
* an error state. fstat will return EINVAL, getsockname will also return * an error state. fstat will return EINVAL, getsockname will also return
@ -445,14 +450,15 @@ int uv_tty_reset_mode(void) {
int err; int err;
saved_errno = errno; saved_errno = errno;
if (!uv_spinlock_trylock(&termios_spinlock))
if (atomic_exchange(&termios_spinlock, 1))
return UV_EBUSY; /* In uv_tty_set_mode(). */ return UV_EBUSY; /* In uv_tty_set_mode(). */
err = 0; err = 0;
if (orig_termios_fd != -1) if (orig_termios_fd != -1)
err = uv__tcsetattr(orig_termios_fd, TCSANOW, &orig_termios); err = uv__tcsetattr(orig_termios_fd, TCSANOW, &orig_termios);
uv_spinlock_unlock(&termios_spinlock); atomic_store(&termios_spinlock, 0);
errno = saved_errno; errno = saved_errno;
return err; return err;
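The tty code above replaces the removed spinlock.h with a plain C11 _Atomic int: uv_tty_set_mode() spins with atomic_compare_exchange_strong(), uv_tty_reset_mode() uses atomic_exchange() as a trylock, and both release with atomic_store(). The same pattern in isolation:

/* Sketch: the C11 atomic flag pattern used in place of the old spinlock. */
#include <stdatomic.h>

static _Atomic int lock_word;

static void lock_spin(void) {
  int expected;
  do
    expected = 0;               /* CAS failure rewrites expected, so reset it */
  while (!atomic_compare_exchange_strong(&lock_word, &expected, 1));
}

static int lock_try(void) {
  /* Nonzero return means somebody else already holds the lock. */
  return atomic_exchange(&lock_word, 1);
}

static void lock_release(void) {
  atomic_store(&lock_word, 0);
}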

View File

@ -40,12 +40,6 @@
# define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP # define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
#endif #endif
union uv__sockaddr {
struct sockaddr_in6 in6;
struct sockaddr_in in;
struct sockaddr addr;
};
static void uv__udp_run_completed(uv_udp_t* handle); static void uv__udp_run_completed(uv_udp_t* handle);
static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents); static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
static void uv__udp_recvmsg(uv_udp_t* handle); static void uv__udp_recvmsg(uv_udp_t* handle);
@ -54,36 +48,6 @@ static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
int domain, int domain,
unsigned int flags); unsigned int flags);
#if HAVE_MMSG
#define UV__MMSG_MAXWIDTH 20
static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf);
static void uv__udp_sendmmsg(uv_udp_t* handle);
static int uv__recvmmsg_avail;
static int uv__sendmmsg_avail;
static uv_once_t once = UV_ONCE_INIT;
static void uv__udp_mmsg_init(void) {
int ret;
int s;
s = uv__socket(AF_INET, SOCK_DGRAM, 0);
if (s < 0)
return;
ret = uv__sendmmsg(s, NULL, 0);
if (ret == 0 || errno != ENOSYS) {
uv__sendmmsg_avail = 1;
uv__recvmmsg_avail = 1;
} else {
ret = uv__recvmmsg(s, NULL, 0);
if (ret == 0 || errno != ENOSYS)
uv__recvmmsg_avail = 1;
}
uv__close(s);
}
#endif
void uv__udp_close(uv_udp_t* handle) { void uv__udp_close(uv_udp_t* handle) {
uv__io_close(handle->loop, &handle->io_watcher); uv__io_close(handle->loop, &handle->io_watcher);
@ -183,11 +147,11 @@ static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
} }
} }
#if HAVE_MMSG
static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) { static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
struct sockaddr_in6 peers[UV__MMSG_MAXWIDTH]; #if defined(__linux__) || defined(__FreeBSD__)
struct iovec iov[UV__MMSG_MAXWIDTH]; struct sockaddr_in6 peers[20];
struct uv__mmsghdr msgs[UV__MMSG_MAXWIDTH]; struct iovec iov[ARRAY_SIZE(peers)];
struct mmsghdr msgs[ARRAY_SIZE(peers)];
ssize_t nread; ssize_t nread;
uv_buf_t chunk_buf; uv_buf_t chunk_buf;
size_t chunks; size_t chunks;
@ -212,7 +176,7 @@ static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
} }
do do
nread = uv__recvmmsg(handle->io_watcher.fd, msgs, chunks); nread = recvmmsg(handle->io_watcher.fd, msgs, chunks, 0, NULL);
while (nread == -1 && errno == EINTR); while (nread == -1 && errno == EINTR);
if (nread < 1) { if (nread < 1) {
@ -240,8 +204,10 @@ static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
handle->recv_cb(handle, 0, buf, NULL, UV_UDP_MMSG_FREE); handle->recv_cb(handle, 0, buf, NULL, UV_UDP_MMSG_FREE);
} }
return nread; return nread;
#else /* __linux__ || __FreeBSD__ */
return UV_ENOSYS;
#endif /* __linux__ || __FreeBSD__ */
} }
#endif
static void uv__udp_recvmsg(uv_udp_t* handle) { static void uv__udp_recvmsg(uv_udp_t* handle) {
struct sockaddr_storage peer; struct sockaddr_storage peer;
@ -268,14 +234,12 @@ static void uv__udp_recvmsg(uv_udp_t* handle) {
} }
assert(buf.base != NULL); assert(buf.base != NULL);
#if HAVE_MMSG
if (uv_udp_using_recvmmsg(handle)) { if (uv_udp_using_recvmmsg(handle)) {
nread = uv__udp_recvmmsg(handle, &buf); nread = uv__udp_recvmmsg(handle, &buf);
if (nread > 0) if (nread > 0)
count -= nread; count -= nread;
continue; continue;
} }
#endif
memset(&h, 0, sizeof(h)); memset(&h, 0, sizeof(h));
memset(&peer, 0, sizeof(peer)); memset(&peer, 0, sizeof(peer));
@ -311,11 +275,11 @@ static void uv__udp_recvmsg(uv_udp_t* handle) {
&& handle->recv_cb != NULL); && handle->recv_cb != NULL);
} }
#if HAVE_MMSG static void uv__udp_sendmsg(uv_udp_t* handle) {
static void uv__udp_sendmmsg(uv_udp_t* handle) { #if defined(__linux__) || defined(__FreeBSD__)
uv_udp_send_t* req; uv_udp_send_t* req;
struct uv__mmsghdr h[UV__MMSG_MAXWIDTH]; struct mmsghdr h[20];
struct uv__mmsghdr *p; struct mmsghdr* p;
QUEUE* q; QUEUE* q;
ssize_t npkts; ssize_t npkts;
size_t pkts; size_t pkts;
@ -326,7 +290,7 @@ static void uv__udp_sendmmsg(uv_udp_t* handle) {
write_queue_drain: write_queue_drain:
for (pkts = 0, q = QUEUE_HEAD(&handle->write_queue); for (pkts = 0, q = QUEUE_HEAD(&handle->write_queue);
pkts < UV__MMSG_MAXWIDTH && q != &handle->write_queue; pkts < ARRAY_SIZE(h) && q != &handle->write_queue;
++pkts, q = QUEUE_HEAD(q)) { ++pkts, q = QUEUE_HEAD(q)) {
assert(q != NULL); assert(q != NULL);
req = QUEUE_DATA(q, uv_udp_send_t, queue); req = QUEUE_DATA(q, uv_udp_send_t, queue);
@ -355,7 +319,7 @@ write_queue_drain:
} }
do do
npkts = uv__sendmmsg(handle->io_watcher.fd, h, pkts); npkts = sendmmsg(handle->io_watcher.fd, h, pkts, 0);
while (npkts == -1 && errno == EINTR); while (npkts == -1 && errno == EINTR);
if (npkts < 1) { if (npkts < 1) {
@ -401,24 +365,12 @@ write_queue_drain:
if (!QUEUE_EMPTY(&handle->write_queue)) if (!QUEUE_EMPTY(&handle->write_queue))
goto write_queue_drain; goto write_queue_drain;
uv__io_feed(handle->loop, &handle->io_watcher); uv__io_feed(handle->loop, &handle->io_watcher);
return; #else /* __linux__ || __FreeBSD__ */
}
#endif
static void uv__udp_sendmsg(uv_udp_t* handle) {
uv_udp_send_t* req; uv_udp_send_t* req;
struct msghdr h; struct msghdr h;
QUEUE* q; QUEUE* q;
ssize_t size; ssize_t size;
#if HAVE_MMSG
uv_once(&once, uv__udp_mmsg_init);
if (uv__sendmmsg_avail) {
uv__udp_sendmmsg(handle);
return;
}
#endif
while (!QUEUE_EMPTY(&handle->write_queue)) { while (!QUEUE_EMPTY(&handle->write_queue)) {
q = QUEUE_HEAD(&handle->write_queue); q = QUEUE_HEAD(&handle->write_queue);
assert(q != NULL); assert(q != NULL);
@ -466,6 +418,7 @@ static void uv__udp_sendmsg(uv_udp_t* handle) {
QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
uv__io_feed(handle->loop, &handle->io_watcher); uv__io_feed(handle->loop, &handle->io_watcher);
} }
#endif /* __linux__ || __FreeBSD__ */
} }
/* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional /* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional
@ -495,7 +448,8 @@ static int uv__set_reuse(int fd) {
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes))) if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
return UV__ERR(errno); return UV__ERR(errno);
} }
#elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__) #elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__) && \
!defined(__sun__)
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes))) if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
return UV__ERR(errno); return UV__ERR(errno);
#else #else
@ -1061,11 +1015,9 @@ int uv__udp_init_ex(uv_loop_t* loop,
int uv_udp_using_recvmmsg(const uv_udp_t* handle) { int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
#if HAVE_MMSG #if defined(__linux__) || defined(__FreeBSD__)
if (handle->flags & UV_HANDLE_UDP_RECVMMSG) { if (handle->flags & UV_HANDLE_UDP_RECVMMSG)
uv_once(&once, uv__udp_mmsg_init); return 1;
return uv__recvmmsg_avail;
}
#endif #endif
return 0; return 0;
} }
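For context (not part of the patch itself): with the runtime sendmmsg/recvmmsg probe removed, uv_udp_using_recvmmsg() now simply reports whether the handle was initialized with UV_UDP_RECVMMSG on Linux or FreeBSD. A minimal sketch of opting a socket into that batching, with placeholder callback and function names, might look like this:

    /* Illustrative sketch, not part of the patch; names are placeholders. */
    #include <stdlib.h>
    #include <uv.h>

    static void on_alloc(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
      (void) handle;
      (void) suggested;
      /* recvmmsg slices one large buffer into datagrams, so hand back a
       * multiple of the 64 KiB UV__UDP_DGRAM_MAXSIZE used above. */
      buf->base = malloc(4 * 64 * 1024);
      buf->len = buf->base == NULL ? 0 : 4 * 64 * 1024;
    }

    int start_recv(uv_loop_t* loop, uv_udp_t* handle, uv_udp_recv_cb recv_cb) {
      int rc = uv_udp_init_ex(loop, handle, AF_INET | UV_UDP_RECVMMSG);
      if (rc != 0)
        return rc;
      /* After this change, uv_udp_using_recvmmsg() answers 1 here on Linux
       * and FreeBSD without probing the syscall first. */
      return uv_udp_recv_start(handle, on_alloc, recv_cb);
    }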


@ -128,6 +128,39 @@ int uv_replace_allocator(uv_malloc_func malloc_func,
return 0; return 0;
} }
void uv_os_free_passwd(uv_passwd_t* pwd) {
if (pwd == NULL)
return;
/* On unix, the memory for name, shell, and homedir are allocated in a single
* uv__malloc() call. The base of the pointer is stored in pwd->username, so
* that is the field that needs to be freed.
*/
uv__free(pwd->username);
#ifdef _WIN32
uv__free(pwd->homedir);
#endif
pwd->username = NULL;
pwd->shell = NULL;
pwd->homedir = NULL;
}
void uv_os_free_group(uv_group_t *grp) {
if (grp == NULL)
return;
/* The memory for is allocated in a single uv__malloc() call. The base of the
* pointer is stored in grp->members, so that is the only field that needs to
* be freed.
*/
uv__free(grp->members);
grp->members = NULL;
grp->groupname = NULL;
}
#define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t); #define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t);
size_t uv_handle_size(uv_handle_type type) { size_t uv_handle_size(uv_handle_type type) {
@ -650,14 +683,22 @@ static unsigned int* uv__get_nbufs(uv_fs_t* req) {
void uv__fs_scandir_cleanup(uv_fs_t* req) { void uv__fs_scandir_cleanup(uv_fs_t* req) {
uv__dirent_t** dents; uv__dirent_t** dents;
unsigned int* nbufs;
unsigned int i;
unsigned int n;
unsigned int* nbufs = uv__get_nbufs(req); if (req->result >= 0) {
dents = req->ptr;
nbufs = uv__get_nbufs(req);
dents = req->ptr; i = 0;
if (*nbufs > 0 && *nbufs != (unsigned int) req->result) if (*nbufs > 0)
(*nbufs)--; i = *nbufs - 1;
for (; *nbufs < (unsigned int) req->result; (*nbufs)++)
uv__fs_scandir_free(dents[*nbufs]); n = (unsigned int) req->result;
for (; i < n; i++)
uv__fs_scandir_free(dents[i]);
}
uv__fs_scandir_free(req->ptr); uv__fs_scandir_free(req->ptr);
req->ptr = NULL; req->ptr = NULL;
@ -879,12 +920,17 @@ void uv_os_free_environ(uv_env_item_t* envitems, int count) {
void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
#ifdef __linux__
(void) &count;
uv__free(cpu_infos);
#else
int i; int i;
for (i = 0; i < count; i++) for (i = 0; i < count; i++)
uv__free(cpu_infos[i].model); uv__free(cpu_infos[i].model);
uv__free(cpu_infos); uv__free(cpu_infos);
#endif /* __linux__ */
} }
@ -898,7 +944,7 @@ __attribute__((destructor))
void uv_library_shutdown(void) { void uv_library_shutdown(void) {
static int was_shutdown; static int was_shutdown;
if (uv__load_relaxed(&was_shutdown)) if (uv__exchange_int_relaxed(&was_shutdown, 1))
return; return;
uv__process_title_cleanup(); uv__process_title_cleanup();
@ -909,7 +955,6 @@ void uv_library_shutdown(void) {
#else #else
uv__threadpool_cleanup(); uv__threadpool_cleanup();
#endif #endif
uv__store_relaxed(&was_shutdown, 1);
} }
@ -955,6 +1000,15 @@ void uv__metrics_set_provider_entry_time(uv_loop_t* loop) {
} }
int uv_metrics_info(uv_loop_t* loop, uv_metrics_t* metrics) {
memcpy(metrics,
&uv__get_loop_metrics(loop)->metrics,
sizeof(*metrics));
return 0;
}
uint64_t uv_metrics_idle_time(uv_loop_t* loop) { uint64_t uv_metrics_idle_time(uv_loop_t* loop) {
uv__loop_metrics_t* loop_metrics; uv__loop_metrics_t* loop_metrics;
uint64_t entry_time; uint64_t entry_time;
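uv_metrics_info() is the new public entry point for the counters accumulated by the uv__metrics_inc_* macros added to uv-common.h below. A hedged sketch of sampling them from a prepare handle, assuming the uv_metrics_t field names from libuv 1.45:

    /* Illustrative sketch, not part of the patch. */
    #include <stdio.h>
    #include <uv.h>

    static void on_prepare(uv_prepare_t* handle) {
      uv_metrics_t m;
      if (uv_metrics_info(handle->loop, &m) == 0)
        printf("loop_count=%llu events=%llu events_waiting=%llu\n",
               (unsigned long long) m.loop_count,
               (unsigned long long) m.events,
               (unsigned long long) m.events_waiting);
    }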


@ -30,18 +30,17 @@
#include <assert.h> #include <assert.h>
#include <stdarg.h> #include <stdarg.h>
#include <stddef.h> #include <stddef.h>
#include <stdint.h>
#if defined(_MSC_VER) && _MSC_VER < 1600
# include "uv/stdint-msvc2008.h"
#else
# include <stdint.h>
#endif
#include "uv.h" #include "uv.h"
#include "uv/tree.h" #include "uv/tree.h"
#include "queue.h" #include "queue.h"
#include "strscpy.h" #include "strscpy.h"
#ifndef _MSC_VER
# include <stdatomic.h>
#endif
#if EDOM > 0 #if EDOM > 0
# define UV__ERR(x) (-(x)) # define UV__ERR(x) (-(x))
#else #else
@ -53,19 +52,25 @@ extern int snprintf(char*, size_t, const char*, ...);
#endif #endif
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define ARRAY_END(a) ((a) + ARRAY_SIZE(a))
#define container_of(ptr, type, member) \ #define container_of(ptr, type, member) \
((type *) ((char *) (ptr) - offsetof(type, member))) ((type *) ((char *) (ptr) - offsetof(type, member)))
/* C11 defines static_assert to be a macro which calls _Static_assert. */
#if defined(static_assert)
#define STATIC_ASSERT(expr) static_assert(expr, #expr)
#else
#define STATIC_ASSERT(expr) \ #define STATIC_ASSERT(expr) \
void uv__static_assert(int static_assert_failed[1 - 2 * !(expr)]) void uv__static_assert(int static_assert_failed[1 - 2 * !(expr)])
#endif
#if defined(__GNUC__) && (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 7) #ifdef _MSC_VER
#define uv__load_relaxed(p) __atomic_load_n(p, __ATOMIC_RELAXED) #define uv__exchange_int_relaxed(p, v) \
#define uv__store_relaxed(p, v) __atomic_store_n(p, v, __ATOMIC_RELAXED) InterlockedExchangeNoFence((LONG volatile*)(p), v)
#else #else
#define uv__load_relaxed(p) (*p) #define uv__exchange_int_relaxed(p, v) \
#define uv__store_relaxed(p, v) do *p = v; while (0) atomic_exchange_explicit((_Atomic int*)(p), v, memory_order_relaxed)
#endif #endif
#define UV__UDP_DGRAM_MAXSIZE (64 * 1024) #define UV__UDP_DGRAM_MAXSIZE (64 * 1024)
@ -83,7 +88,6 @@ enum {
/* Used by streams. */ /* Used by streams. */
UV_HANDLE_LISTENING = 0x00000040, UV_HANDLE_LISTENING = 0x00000040,
UV_HANDLE_CONNECTION = 0x00000080, UV_HANDLE_CONNECTION = 0x00000080,
UV_HANDLE_SHUTTING = 0x00000100,
UV_HANDLE_SHUT = 0x00000200, UV_HANDLE_SHUT = 0x00000200,
UV_HANDLE_READ_PARTIAL = 0x00000400, UV_HANDLE_READ_PARTIAL = 0x00000400,
UV_HANDLE_READ_EOF = 0x00000800, UV_HANDLE_READ_EOF = 0x00000800,
@ -263,6 +267,14 @@ void uv__threadpool_cleanup(void);
#define uv__is_closing(h) \ #define uv__is_closing(h) \
(((h)->flags & (UV_HANDLE_CLOSING | UV_HANDLE_CLOSED)) != 0) (((h)->flags & (UV_HANDLE_CLOSING | UV_HANDLE_CLOSED)) != 0)
#if defined(_WIN32)
# define uv__is_stream_shutting(h) \
(h->stream.conn.shutdown_req != NULL)
#else
# define uv__is_stream_shutting(h) \
(h->shutdown_req != NULL)
#endif
#define uv__handle_start(h) \ #define uv__handle_start(h) \
do { \ do { \
if (((h)->flags & UV_HANDLE_ACTIVE) != 0) break; \ if (((h)->flags & UV_HANDLE_ACTIVE) != 0) break; \
@ -347,6 +359,21 @@ void uv__threadpool_cleanup(void);
#define uv__get_loop_metrics(loop) \ #define uv__get_loop_metrics(loop) \
(&uv__get_internal_fields(loop)->loop_metrics) (&uv__get_internal_fields(loop)->loop_metrics)
#define uv__metrics_inc_loop_count(loop) \
do { \
uv__get_loop_metrics(loop)->metrics.loop_count++; \
} while (0)
#define uv__metrics_inc_events(loop, e) \
do { \
uv__get_loop_metrics(loop)->metrics.events += (e); \
} while (0)
#define uv__metrics_inc_events_waiting(loop, e) \
do { \
uv__get_loop_metrics(loop)->metrics.events_waiting += (e); \
} while (0)
/* Allocator prototypes */ /* Allocator prototypes */
void *uv__calloc(size_t count, size_t size); void *uv__calloc(size_t count, size_t size);
char *uv__strdup(const char* s); char *uv__strdup(const char* s);
@ -360,6 +387,7 @@ typedef struct uv__loop_metrics_s uv__loop_metrics_t;
typedef struct uv__loop_internal_fields_s uv__loop_internal_fields_t; typedef struct uv__loop_internal_fields_s uv__loop_internal_fields_t;
struct uv__loop_metrics_s { struct uv__loop_metrics_s {
uv_metrics_t metrics;
uint64_t provider_entry_time; uint64_t provider_entry_time;
uint64_t provider_idle_time; uint64_t provider_idle_time;
uv_mutex_t lock; uv_mutex_t lock;
@ -368,9 +396,37 @@ struct uv__loop_metrics_s {
void uv__metrics_update_idle_time(uv_loop_t* loop); void uv__metrics_update_idle_time(uv_loop_t* loop);
void uv__metrics_set_provider_entry_time(uv_loop_t* loop); void uv__metrics_set_provider_entry_time(uv_loop_t* loop);
#ifdef __linux__
struct uv__iou {
uint32_t* sqhead;
uint32_t* sqtail;
uint32_t* sqarray;
uint32_t sqmask;
uint32_t* sqflags;
uint32_t* cqhead;
uint32_t* cqtail;
uint32_t cqmask;
void* sq; /* pointer to munmap() on event loop teardown */
void* cqe; /* pointer to array of struct uv__io_uring_cqe */
void* sqe; /* pointer to array of struct uv__io_uring_sqe */
size_t sqlen;
size_t cqlen;
size_t maxlen;
size_t sqelen;
int ringfd;
uint32_t in_flight;
};
#endif /* __linux__ */
struct uv__loop_internal_fields_s { struct uv__loop_internal_fields_s {
unsigned int flags; unsigned int flags;
uv__loop_metrics_t loop_metrics; uv__loop_metrics_t loop_metrics;
int current_timeout;
#ifdef __linux__
struct uv__iou ctl;
struct uv__iou iou;
void* inv; /* used by uv__platform_invalidate_fd() */
#endif /* __linux__ */
}; };
#endif /* UV_COMMON_H_ */ #endif /* UV_COMMON_H_ */
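The uv__iou structure above describes a shared io_uring-style ring: kernel-visible head and tail indices plus a power-of-two mask. The fragment below is illustrative only; it is not code from linux.c and omits the acquire/release ordering the real implementation needs, but it shows how a submission slot in a ring laid out this way is typically claimed:

    /* Illustrative only - not from this commit. */
    #include <stdint.h>

    static int claim_slot(const uint32_t* sqhead,
                          uint32_t* sqtail,
                          uint32_t sqmask,
                          uint32_t* slot) {
      uint32_t head = *sqhead;     /* advanced by the kernel as it consumes */
      uint32_t tail = *sqtail;     /* advanced by the submitter */

      if (tail - head > sqmask)
        return -1;                 /* all sqmask + 1 entries are in flight */

      *slot = tail & sqmask;       /* index of the sqe to fill */
      *sqtail = tail + 1;          /* publish once the sqe is written */
      return 0;
    }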


@ -245,6 +245,9 @@ int uv_loop_init(uv_loop_t* loop) {
err = uv_mutex_init(&lfields->loop_metrics.lock); err = uv_mutex_init(&lfields->loop_metrics.lock);
if (err) if (err)
goto fail_metrics_mutex_init; goto fail_metrics_mutex_init;
memset(&lfields->loop_metrics.metrics,
0,
sizeof(lfields->loop_metrics.metrics));
/* To prevent uninitialized memory access, loop->time must be initialized /* To prevent uninitialized memory access, loop->time must be initialized
* to zero before calling uv_update_time for the first time. * to zero before calling uv_update_time for the first time.
@ -279,9 +282,6 @@ int uv_loop_init(uv_loop_t* loop) {
memset(&loop->poll_peer_sockets, 0, sizeof loop->poll_peer_sockets); memset(&loop->poll_peer_sockets, 0, sizeof loop->poll_peer_sockets);
loop->active_tcp_streams = 0;
loop->active_udp_streams = 0;
loop->timer_counter = 0; loop->timer_counter = 0;
loop->stop_flag = 0; loop->stop_flag = 0;
@ -424,6 +424,7 @@ int uv_backend_timeout(const uv_loop_t* loop) {
static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) { static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
uv__loop_internal_fields_t* lfields;
DWORD bytes; DWORD bytes;
ULONG_PTR key; ULONG_PTR key;
OVERLAPPED* overlapped; OVERLAPPED* overlapped;
@ -433,9 +434,10 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
uint64_t user_timeout; uint64_t user_timeout;
int reset_timeout; int reset_timeout;
lfields = uv__get_internal_fields(loop);
timeout_time = loop->time + timeout; timeout_time = loop->time + timeout;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1; reset_timeout = 1;
user_timeout = timeout; user_timeout = timeout;
timeout = 0; timeout = 0;
@ -450,6 +452,12 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
if (timeout != 0) if (timeout != 0)
uv__metrics_set_provider_entry_time(loop); uv__metrics_set_provider_entry_time(loop);
/* Store the current timeout in a location that's globally accessible so
* other locations like uv__work_done() can determine whether the queue
* of events in the callback were waiting when poll was called.
*/
lfields->current_timeout = timeout;
GetQueuedCompletionStatus(loop->iocp, GetQueuedCompletionStatus(loop->iocp,
&bytes, &bytes,
&key, &key,
@ -457,6 +465,8 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
timeout); timeout);
if (reset_timeout != 0) { if (reset_timeout != 0) {
if (overlapped && timeout == 0)
uv__metrics_inc_events_waiting(loop, 1);
timeout = user_timeout; timeout = user_timeout;
reset_timeout = 0; reset_timeout = 0;
} }
@ -469,6 +479,8 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
uv__metrics_update_idle_time(loop); uv__metrics_update_idle_time(loop);
if (overlapped) { if (overlapped) {
uv__metrics_inc_events(loop, 1);
/* Package was dequeued */ /* Package was dequeued */
req = uv__overlapped_to_req(overlapped); req = uv__overlapped_to_req(overlapped);
uv__insert_pending_req(loop, req); uv__insert_pending_req(loop, req);
@ -503,6 +515,7 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) {
static void uv__poll(uv_loop_t* loop, DWORD timeout) { static void uv__poll(uv_loop_t* loop, DWORD timeout) {
uv__loop_internal_fields_t* lfields;
BOOL success; BOOL success;
uv_req_t* req; uv_req_t* req;
OVERLAPPED_ENTRY overlappeds[128]; OVERLAPPED_ENTRY overlappeds[128];
@ -511,11 +524,13 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
int repeat; int repeat;
uint64_t timeout_time; uint64_t timeout_time;
uint64_t user_timeout; uint64_t user_timeout;
uint64_t actual_timeout;
int reset_timeout; int reset_timeout;
lfields = uv__get_internal_fields(loop);
timeout_time = loop->time + timeout; timeout_time = loop->time + timeout;
if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { if (lfields->flags & UV_METRICS_IDLE_TIME) {
reset_timeout = 1; reset_timeout = 1;
user_timeout = timeout; user_timeout = timeout;
timeout = 0; timeout = 0;
@ -524,12 +539,20 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
} }
for (repeat = 0; ; repeat++) { for (repeat = 0; ; repeat++) {
actual_timeout = timeout;
/* Only need to set the provider_entry_time if timeout != 0. The function /* Only need to set the provider_entry_time if timeout != 0. The function
* will return early if the loop isn't configured with UV_METRICS_IDLE_TIME. * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
*/ */
if (timeout != 0) if (timeout != 0)
uv__metrics_set_provider_entry_time(loop); uv__metrics_set_provider_entry_time(loop);
/* Store the current timeout in a location that's globally accessible so
* other locations like uv__work_done() can determine whether the queue
* of events in the callback were waiting when poll was called.
*/
lfields->current_timeout = timeout;
success = pGetQueuedCompletionStatusEx(loop->iocp, success = pGetQueuedCompletionStatusEx(loop->iocp,
overlappeds, overlappeds,
ARRAY_SIZE(overlappeds), ARRAY_SIZE(overlappeds),
@ -543,9 +566,9 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
} }
/* Placed here because on success the loop will break whether there is an /* Placed here because on success the loop will break whether there is an
* empty package or not, or if GetQueuedCompletionStatus returned early then * empty package or not, or if pGetQueuedCompletionStatusEx returned early
* the timeout will be updated and the loop will run again. In either case * then the timeout will be updated and the loop will run again. In either
* the idle time will need to be updated. * case the idle time will need to be updated.
*/ */
uv__metrics_update_idle_time(loop); uv__metrics_update_idle_time(loop);
@ -555,6 +578,10 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
* meant only to wake us up. * meant only to wake us up.
*/ */
if (overlappeds[i].lpOverlapped) { if (overlappeds[i].lpOverlapped) {
uv__metrics_inc_events(loop, 1);
if (actual_timeout == 0)
uv__metrics_inc_events_waiting(loop, 1);
req = uv__overlapped_to_req(overlappeds[i].lpOverlapped); req = uv__overlapped_to_req(overlappeds[i].lpOverlapped);
uv__insert_pending_req(loop, req); uv__insert_pending_req(loop, req);
} }
@ -598,10 +625,17 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
if (!r) if (!r)
uv_update_time(loop); uv_update_time(loop);
while (r != 0 && loop->stop_flag == 0) { /* Maintain backwards compatibility by processing timers before entering the
uv_update_time(loop); * while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed
* once, which should be done after polling in order to maintain proper
* execution order of the conceptual event loop. */
if (mode == UV_RUN_DEFAULT) {
if (r)
uv_update_time(loop);
uv__run_timers(loop); uv__run_timers(loop);
}
while (r != 0 && loop->stop_flag == 0) {
can_sleep = loop->pending_reqs_tail == NULL && loop->idle_handles == NULL; can_sleep = loop->pending_reqs_tail == NULL && loop->idle_handles == NULL;
uv__process_reqs(loop); uv__process_reqs(loop);
@ -612,6 +646,8 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT) if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
timeout = uv_backend_timeout(loop); timeout = uv_backend_timeout(loop);
uv__metrics_inc_loop_count(loop);
if (pGetQueuedCompletionStatusEx) if (pGetQueuedCompletionStatusEx)
uv__poll(loop, timeout); uv__poll(loop, timeout);
else else
@ -632,18 +668,8 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
uv__check_invoke(loop); uv__check_invoke(loop);
uv__process_endgames(loop); uv__process_endgames(loop);
if (mode == UV_RUN_ONCE) { uv_update_time(loop);
/* UV_RUN_ONCE implies forward progress: at least one callback must have uv__run_timers(loop);
* been invoked when it returns. uv__io_poll() can return without doing
* I/O (meaning: no callbacks) when its timeout expires - which means we
* have pending timers that satisfy the forward progress constraint.
*
* UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
* the check.
*/
uv_update_time(loop);
uv__run_timers(loop);
}
r = uv__loop_alive(loop); r = uv__loop_alive(loop);
if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT) if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
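Note that uv__poll() and uv__poll_wine() only collapse the timeout to zero when the loop was configured with UV_METRICS_IDLE_TIME; the same configuration is what makes the new events/events_waiting counters meaningful. A sketch of enabling it from an application (run_with_idle_time is a made-up helper name):

    /* Illustrative sketch, not part of the patch. */
    #include <stdio.h>
    #include <uv.h>

    int run_with_idle_time(uv_loop_t* loop) {
      int rc = uv_loop_configure(loop, UV_METRICS_IDLE_TIME);
      if (rc != 0)
        return rc;

      rc = uv_run(loop, UV_RUN_DEFAULT);

      /* Nanoseconds the loop spent blocked in GetQueuedCompletionStatus(Ex)
       * with nothing else to do. */
      printf("idle time: %llu ns\n",
             (unsigned long long) uv_metrics_idle_time(loop));
      return rc;
    }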


@ -36,6 +36,8 @@
#include "handle-inl.h" #include "handle-inl.h"
#include "fs-fd-hash-inl.h" #include "fs-fd-hash-inl.h"
#include <winioctl.h>
#define UV_FS_FREE_PATHS 0x0002 #define UV_FS_FREE_PATHS 0x0002
#define UV_FS_FREE_PTR 0x0008 #define UV_FS_FREE_PTR 0x0008
@ -1706,11 +1708,36 @@ void fs__closedir(uv_fs_t* req) {
INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf, INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf,
int do_lstat) { int do_lstat) {
FILE_FS_DEVICE_INFORMATION device_info;
FILE_ALL_INFORMATION file_info; FILE_ALL_INFORMATION file_info;
FILE_FS_VOLUME_INFORMATION volume_info; FILE_FS_VOLUME_INFORMATION volume_info;
NTSTATUS nt_status; NTSTATUS nt_status;
IO_STATUS_BLOCK io_status; IO_STATUS_BLOCK io_status;
nt_status = pNtQueryVolumeInformationFile(handle,
&io_status,
&device_info,
sizeof device_info,
FileFsDeviceInformation);
/* Buffer overflow (a warning status code) is expected here. */
if (NT_ERROR(nt_status)) {
SetLastError(pRtlNtStatusToDosError(nt_status));
return -1;
}
/* If it's NUL device set fields as reasonable as possible and return. */
if (device_info.DeviceType == FILE_DEVICE_NULL) {
memset(statbuf, 0, sizeof(uv_stat_t));
statbuf->st_mode = _S_IFCHR;
statbuf->st_mode |= (_S_IREAD | _S_IWRITE) | ((_S_IREAD | _S_IWRITE) >> 3) |
((_S_IREAD | _S_IWRITE) >> 6);
statbuf->st_nlink = 1;
statbuf->st_blksize = 4096;
statbuf->st_rdev = FILE_DEVICE_NULL << 16;
return 0;
}
nt_status = pNtQueryInformationFile(handle, nt_status = pNtQueryInformationFile(handle,
&io_status, &io_status,
&file_info, &file_info,
@ -1915,6 +1942,37 @@ INLINE static void fs__stat_impl(uv_fs_t* req, int do_lstat) {
} }
INLINE static int fs__fstat_handle(int fd, HANDLE handle, uv_stat_t* statbuf) {
DWORD file_type;
/* Each file type is processed differently. */
file_type = uv_guess_handle(fd);
switch (file_type) {
/* Disk files use the existing logic from fs__stat_handle. */
case UV_FILE:
return fs__stat_handle(handle, statbuf, 0);
/* Devices and pipes are processed identically. There is no more information
* for them from any API. Fields are set as reasonably as possible and the
* function returns. */
case UV_TTY:
case UV_NAMED_PIPE:
memset(statbuf, 0, sizeof(uv_stat_t));
statbuf->st_mode = file_type == UV_TTY ? _S_IFCHR : _S_IFIFO;
statbuf->st_nlink = 1;
statbuf->st_rdev = (file_type == UV_TTY ? FILE_DEVICE_CONSOLE : FILE_DEVICE_NAMED_PIPE) << 16;
statbuf->st_ino = (uint64_t) handle;
return 0;
/* If file type is unknown it is an error. */
case UV_UNKNOWN_HANDLE:
default:
SetLastError(ERROR_INVALID_HANDLE);
return -1;
}
}
static void fs__stat(uv_fs_t* req) { static void fs__stat(uv_fs_t* req) {
fs__stat_prepare_path(req->file.pathw); fs__stat_prepare_path(req->file.pathw);
fs__stat_impl(req, 0); fs__stat_impl(req, 0);
@ -1940,7 +1998,7 @@ static void fs__fstat(uv_fs_t* req) {
return; return;
} }
if (fs__stat_handle(handle, &req->statbuf, 0) != 0) { if (fs__fstat_handle(fd, handle, &req->statbuf) != 0) {
SET_REQ_WIN32_ERROR(req, GetLastError()); SET_REQ_WIN32_ERROR(req, GetLastError());
return; return;
} }
@ -2221,7 +2279,7 @@ static void fs__fchmod(uv_fs_t* req) {
SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(nt_status)); SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(nt_status));
goto fchmod_cleanup; goto fchmod_cleanup;
} }
/* Remeber to clear the flag later on */ /* Remember to clear the flag later on */
clear_archive_flag = 1; clear_archive_flag = 1;
} else { } else {
clear_archive_flag = 0; clear_archive_flag = 0;
@ -2604,7 +2662,10 @@ static void fs__readlink(uv_fs_t* req) {
} }
if (fs__readlink_handle(handle, (char**) &req->ptr, NULL) != 0) { if (fs__readlink_handle(handle, (char**) &req->ptr, NULL) != 0) {
SET_REQ_WIN32_ERROR(req, GetLastError()); DWORD error = GetLastError();
SET_REQ_WIN32_ERROR(req, error);
if (error == ERROR_NOT_A_REPARSE_POINT)
req->result = UV_EINVAL;
CloseHandle(handle); CloseHandle(handle);
return; return;
} }
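With fs__fstat_handle() dispatching on uv_guess_handle(), uv_fs_fstat() on Windows now fills a synthetic uv_stat_t for consoles and pipes instead of failing, and the NUL device gets similar treatment in fs__stat_handle(). A small synchronous sketch (illustrative, using fd 1):

    /* Illustrative sketch, not part of the patch. */
    #include <stdio.h>
    #include <uv.h>

    int stat_stdout(uv_loop_t* loop) {
      uv_fs_t req;
      int rc = uv_fs_fstat(loop, &req, 1, NULL);  /* NULL cb = synchronous */
      if (rc == 0)
        printf("st_mode=%o nlink=%llu rdev=%llu\n",
               (unsigned) req.statbuf.st_mode,
               (unsigned long long) req.statbuf.st_nlink,
               (unsigned long long) req.statbuf.st_rdev);
      uv_fs_req_cleanup(&req);
      return rc;
    }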


@ -267,7 +267,6 @@ void uv__util_init(void);
uint64_t uv__hrtime(unsigned int scale); uint64_t uv__hrtime(unsigned int scale);
__declspec(noreturn) void uv_fatal_error(const int errorno, const char* syscall); __declspec(noreturn) void uv_fatal_error(const int errorno, const char* syscall);
int uv__getpwuid_r(uv_passwd_t* pwd);
int uv__convert_utf16_to_utf8(const WCHAR* utf16, int utf16len, char** utf8); int uv__convert_utf16_to_utf8(const WCHAR* utf16, int utf16len, char** utf8);
int uv__convert_utf8_to_utf16(const char* utf8, int utf8len, WCHAR** utf16); int uv__convert_utf8_to_utf16(const char* utf8, int utf8len, WCHAR** utf16);


@ -792,15 +792,17 @@ static DWORD WINAPI pipe_connect_thread_proc(void* parameter) {
/* We're here because CreateFile on a pipe returned ERROR_PIPE_BUSY. We wait /* We're here because CreateFile on a pipe returned ERROR_PIPE_BUSY. We wait
* up to 30 seconds for the pipe to become available with WaitNamedPipe. */ * up to 30 seconds for the pipe to become available with WaitNamedPipe. */
while (WaitNamedPipeW(handle->name, 30000)) { while (WaitNamedPipeW(req->u.connect.name, 30000)) {
/* The pipe is now available, try to connect. */ /* The pipe is now available, try to connect. */
pipeHandle = open_named_pipe(handle->name, &duplex_flags); pipeHandle = open_named_pipe(req->u.connect.name, &duplex_flags);
if (pipeHandle != INVALID_HANDLE_VALUE) if (pipeHandle != INVALID_HANDLE_VALUE)
break; break;
SwitchToThread(); SwitchToThread();
} }
uv__free(req->u.connect.name);
req->u.connect.name = NULL;
if (pipeHandle != INVALID_HANDLE_VALUE) { if (pipeHandle != INVALID_HANDLE_VALUE) {
SET_REQ_SUCCESS(req); SET_REQ_SUCCESS(req);
req->u.connect.pipeHandle = pipeHandle; req->u.connect.pipeHandle = pipeHandle;
@ -828,6 +830,7 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
req->cb = cb; req->cb = cb;
req->u.connect.pipeHandle = INVALID_HANDLE_VALUE; req->u.connect.pipeHandle = INVALID_HANDLE_VALUE;
req->u.connect.duplex_flags = 0; req->u.connect.duplex_flags = 0;
req->u.connect.name = NULL;
if (handle->flags & UV_HANDLE_PIPESERVER) { if (handle->flags & UV_HANDLE_PIPESERVER) {
err = ERROR_INVALID_PARAMETER; err = ERROR_INVALID_PARAMETER;
@ -859,10 +862,19 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
pipeHandle = open_named_pipe(handle->name, &duplex_flags); pipeHandle = open_named_pipe(handle->name, &duplex_flags);
if (pipeHandle == INVALID_HANDLE_VALUE) { if (pipeHandle == INVALID_HANDLE_VALUE) {
if (GetLastError() == ERROR_PIPE_BUSY) { if (GetLastError() == ERROR_PIPE_BUSY) {
req->u.connect.name = uv__malloc(nameSize);
if (!req->u.connect.name) {
uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
}
memcpy(req->u.connect.name, handle->name, nameSize);
/* Wait for the server to make a pipe instance available. */ /* Wait for the server to make a pipe instance available. */
if (!QueueUserWorkItem(&pipe_connect_thread_proc, if (!QueueUserWorkItem(&pipe_connect_thread_proc,
req, req,
WT_EXECUTELONGFUNCTION)) { WT_EXECUTELONGFUNCTION)) {
uv__free(req->u.connect.name);
req->u.connect.name = NULL;
err = GetLastError(); err = GetLastError();
goto error; goto error;
} }
@ -1067,11 +1079,12 @@ int uv__pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
err = uv__tcp_xfer_import( err = uv__tcp_xfer_import(
(uv_tcp_t*) client, item->xfer_type, &item->xfer_info); (uv_tcp_t*) client, item->xfer_type, &item->xfer_info);
if (err != 0)
return err;
uv__free(item); uv__free(item);
if (err != 0)
return err;
} else { } else {
pipe_client = (uv_pipe_t*) client; pipe_client = (uv_pipe_t*) client;
uv__pipe_connection_init(pipe_client); uv__pipe_connection_init(pipe_client);
@ -1638,8 +1651,12 @@ static DWORD uv__pipe_get_ipc_remote_pid(uv_pipe_t* handle) {
/* If the both ends of the IPC pipe are owned by the same process, /* If the both ends of the IPC pipe are owned by the same process,
* the remote end pid may not yet be set. If so, do it here. * the remote end pid may not yet be set. If so, do it here.
* TODO: this is weird; it'd probably better to use a handshake. */ * TODO: this is weird; it'd probably better to use a handshake. */
if (*pid == 0) if (*pid == 0) {
*pid = GetCurrentProcessId(); GetNamedPipeClientProcessId(handle->handle, pid);
if (*pid == GetCurrentProcessId()) {
GetNamedPipeServerProcessId(handle->handle, pid);
}
}
return *pid; return *pid;
} }
@ -2069,9 +2086,9 @@ void uv__process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
uv__queue_non_overlapped_write(handle); uv__queue_non_overlapped_write(handle);
} }
if (handle->stream.conn.write_reqs_pending == 0) if (handle->stream.conn.write_reqs_pending == 0 &&
if (handle->flags & UV_HANDLE_SHUTTING) uv__is_stream_shutting(handle))
uv__pipe_shutdown(loop, handle, handle->stream.conn.shutdown_req); uv__pipe_shutdown(loop, handle, handle->stream.conn.shutdown_req);
DECREASE_PENDING_REQ_COUNT(handle); DECREASE_PENDING_REQ_COUNT(handle);
} }
@ -2126,7 +2143,10 @@ void uv__process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
if (REQ_SUCCESS(req)) { if (REQ_SUCCESS(req)) {
pipeHandle = req->u.connect.pipeHandle; pipeHandle = req->u.connect.pipeHandle;
duplex_flags = req->u.connect.duplex_flags; duplex_flags = req->u.connect.duplex_flags;
err = uv__set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags); if (handle->flags & UV_HANDLE_CLOSING)
err = UV_ECANCELED;
else
err = uv__set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags);
if (err) if (err)
CloseHandle(pipeHandle); CloseHandle(pipeHandle);
} else { } else {
@ -2149,7 +2169,6 @@ void uv__process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
/* Clear the shutdown_req field so we don't go here again. */ /* Clear the shutdown_req field so we don't go here again. */
handle->stream.conn.shutdown_req = NULL; handle->stream.conn.shutdown_req = NULL;
handle->flags &= ~UV_HANDLE_SHUTTING;
UNREGISTER_HANDLE_REQ(loop, handle, req); UNREGISTER_HANDLE_REQ(loop, handle, req);
if (handle->flags & UV_HANDLE_CLOSING) { if (handle->flags & UV_HANDLE_CLOSING) {
@ -2342,7 +2361,10 @@ int uv_pipe_open(uv_pipe_t* pipe, uv_file file) {
if (pipe->ipc) { if (pipe->ipc) {
assert(!(pipe->flags & UV_HANDLE_NON_OVERLAPPED_PIPE)); assert(!(pipe->flags & UV_HANDLE_NON_OVERLAPPED_PIPE));
pipe->pipe.conn.ipc_remote_pid = uv_os_getppid(); GetNamedPipeClientProcessId(os_handle, &pipe->pipe.conn.ipc_remote_pid);
if (pipe->pipe.conn.ipc_remote_pid == GetCurrentProcessId()) {
GetNamedPipeServerProcessId(os_handle, &pipe->pipe.conn.ipc_remote_pid);
}
assert(pipe->pipe.conn.ipc_remote_pid != (DWORD)(uv_pid_t) -1); assert(pipe->pipe.conn.ipc_remote_pid != (DWORD)(uv_pid_t) -1);
} }
return 0; return 0;
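The connect path now snapshots the pipe name into the request, so the ERROR_PIPE_BUSY retry thread never reads handle->name after the handle has moved on, and a handle that is already closing when the retry completes fails the request instead of adopting the pipe. A usage sketch (the pipe name here is an arbitrary example):

    /* Illustrative sketch, not part of the patch. */
    #include <uv.h>

    static void on_connect(uv_connect_t* req, int status) {
      /* status reports an error if the handle was closed before the busy
       * pipe became available. */
      (void) req;
      (void) status;
    }

    void connect_ipc_pipe(uv_loop_t* loop, uv_pipe_t* pipe, uv_connect_t* req) {
      uv_pipe_init(loop, pipe, 1 /* ipc */);
      uv_pipe_connect(req, pipe, "\\\\.\\pipe\\example-ipc", on_connect);
    }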


@ -425,9 +425,8 @@ int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
return uv_translate_sys_error(WSAGetLastError()); return uv_translate_sys_error(WSAGetLastError());
/* Try to obtain a base handle for the socket. This increases this chances that /* Try to obtain a base handle for the socket. This increases this chances that
* we find an AFD handle and are able to use the fast poll mechanism. This will * we find an AFD handle and are able to use the fast poll mechanism.
* always fail on windows XP/2k3, since they don't support the. SIO_BASE_HANDLE */
* ioctl. */
#ifndef NDEBUG #ifndef NDEBUG
base_socket = INVALID_SOCKET; base_socket = INVALID_SOCKET;
#endif #endif


@ -32,6 +32,9 @@
#include "internal.h" #include "internal.h"
#include "handle-inl.h" #include "handle-inl.h"
#include "req-inl.h" #include "req-inl.h"
#include <dbghelp.h>
#include <shlobj.h>
#include <psapi.h> /* GetModuleBaseNameW */
#define SIGKILL 9 #define SIGKILL 9
@ -144,7 +147,6 @@ static void uv__process_init(uv_loop_t* loop, uv_process_t* handle) {
handle->exit_signal = 0; handle->exit_signal = 0;
handle->wait_handle = INVALID_HANDLE_VALUE; handle->wait_handle = INVALID_HANDLE_VALUE;
handle->process_handle = INVALID_HANDLE_VALUE; handle->process_handle = INVALID_HANDLE_VALUE;
handle->child_stdio_buffer = NULL;
handle->exit_cb_pending = 0; handle->exit_cb_pending = 0;
UV_REQ_INIT(&handle->exit_req, UV_PROCESS_EXIT); UV_REQ_INIT(&handle->exit_req, UV_PROCESS_EXIT);
@ -947,9 +949,11 @@ int uv_spawn(uv_loop_t* loop,
STARTUPINFOW startup; STARTUPINFOW startup;
PROCESS_INFORMATION info; PROCESS_INFORMATION info;
DWORD process_flags; DWORD process_flags;
BYTE* child_stdio_buffer;
uv__process_init(loop, process); uv__process_init(loop, process);
process->exit_cb = options->exit_cb; process->exit_cb = options->exit_cb;
child_stdio_buffer = NULL;
if (options->flags & (UV_PROCESS_SETGID | UV_PROCESS_SETUID)) { if (options->flags & (UV_PROCESS_SETGID | UV_PROCESS_SETUID)) {
return UV_ENOTSUP; return UV_ENOTSUP;
@ -1040,7 +1044,7 @@ int uv_spawn(uv_loop_t* loop,
} }
} }
err = uv__stdio_create(loop, options, &process->child_stdio_buffer); err = uv__stdio_create(loop, options, &child_stdio_buffer);
if (err) if (err)
goto done; goto done;
@ -1059,12 +1063,12 @@ int uv_spawn(uv_loop_t* loop,
startup.lpTitle = NULL; startup.lpTitle = NULL;
startup.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW; startup.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW;
startup.cbReserved2 = uv__stdio_size(process->child_stdio_buffer); startup.cbReserved2 = uv__stdio_size(child_stdio_buffer);
startup.lpReserved2 = (BYTE*) process->child_stdio_buffer; startup.lpReserved2 = (BYTE*) child_stdio_buffer;
startup.hStdInput = uv__stdio_handle(process->child_stdio_buffer, 0); startup.hStdInput = uv__stdio_handle(child_stdio_buffer, 0);
startup.hStdOutput = uv__stdio_handle(process->child_stdio_buffer, 1); startup.hStdOutput = uv__stdio_handle(child_stdio_buffer, 1);
startup.hStdError = uv__stdio_handle(process->child_stdio_buffer, 2); startup.hStdError = uv__stdio_handle(child_stdio_buffer, 2);
process_flags = CREATE_UNICODE_ENVIRONMENT; process_flags = CREATE_UNICODE_ENVIRONMENT;
@ -1178,10 +1182,10 @@ int uv_spawn(uv_loop_t* loop,
uv__free(env); uv__free(env);
uv__free(alloc_path); uv__free(alloc_path);
if (process->child_stdio_buffer != NULL) { if (child_stdio_buffer != NULL) {
/* Clean up child stdio handles. */ /* Clean up child stdio handles. */
uv__stdio_destroy(process->child_stdio_buffer); uv__stdio_destroy(child_stdio_buffer);
process->child_stdio_buffer = NULL; child_stdio_buffer = NULL;
} }
return uv_translate_sys_error(err); return uv_translate_sys_error(err);
@ -1193,7 +1197,120 @@ static int uv__kill(HANDLE process_handle, int signum) {
return UV_EINVAL; return UV_EINVAL;
} }
/* Create a dump file for the targeted process, if the registry key
* `HKLM:Software\Microsoft\Windows\Windows Error Reporting\LocalDumps`
* exists. The location of the dumps can be influenced by the `DumpFolder`
* sub-key, which has a default value of `%LOCALAPPDATA%\CrashDumps`, see [0]
* for more detail. Note that if the dump folder does not exist, we attempt
* to create it, to match behavior with WER itself.
* [0]: https://learn.microsoft.com/en-us/windows/win32/wer/collecting-user-mode-dumps */
if (signum == SIGQUIT) {
HKEY registry_key;
DWORD pid, ret;
WCHAR basename[MAX_PATH];
/* Get target process name. */
GetModuleBaseNameW(process_handle, NULL, &basename[0], sizeof(basename));
/* Get PID of target process. */
pid = GetProcessId(process_handle);
/* Get LocalDumps directory path. */
ret = RegOpenKeyExW(
HKEY_LOCAL_MACHINE,
L"SOFTWARE\\Microsoft\\Windows\\Windows Error Reporting\\LocalDumps",
0,
KEY_QUERY_VALUE,
&registry_key);
if (ret == ERROR_SUCCESS) {
HANDLE hDumpFile = NULL;
WCHAR dump_folder[MAX_PATH], dump_name[MAX_PATH];
DWORD dump_folder_len = sizeof(dump_folder), key_type = 0;
ret = RegGetValueW(registry_key,
NULL,
L"DumpFolder",
RRF_RT_ANY,
&key_type,
(PVOID) dump_folder,
&dump_folder_len);
if (ret != ERROR_SUCCESS) {
/* Default value for `dump_folder` is `%LOCALAPPDATA%\CrashDumps`. */
WCHAR* localappdata;
SHGetKnownFolderPath(&FOLDERID_LocalAppData, 0, NULL, &localappdata);
_snwprintf_s(dump_folder,
sizeof(dump_folder),
_TRUNCATE,
L"%ls\\CrashDumps",
localappdata);
CoTaskMemFree(localappdata);
}
RegCloseKey(registry_key);
/* Create dump folder if it doesn't already exist. */
CreateDirectoryW(dump_folder, NULL);
/* Construct dump filename from process name and PID. */
_snwprintf_s(dump_name,
sizeof(dump_name),
_TRUNCATE,
L"%ls\\%ls.%d.dmp",
dump_folder,
basename,
pid);
hDumpFile = CreateFileW(dump_name,
GENERIC_WRITE,
0,
NULL,
CREATE_NEW,
FILE_ATTRIBUTE_NORMAL,
NULL);
if (hDumpFile != INVALID_HANDLE_VALUE) {
DWORD dump_options, sym_options;
FILE_DISPOSITION_INFO DeleteOnClose = { TRUE };
/* If something goes wrong while writing it out, delete the file. */
SetFileInformationByHandle(hDumpFile,
FileDispositionInfo,
&DeleteOnClose,
sizeof(DeleteOnClose));
/* Tell wine to dump ELF modules as well. */
sym_options = SymGetOptions();
SymSetOptions(sym_options | 0x40000000);
/* MiniDumpWithAvxXStateContext might be undef in server2012r2 or mingw < 12 */
#ifndef MiniDumpWithAvxXStateContext
#define MiniDumpWithAvxXStateContext 0x00200000
#endif
/* We default to a fairly complete dump. In the future, we may want to
* allow clients to customize what kind of dump to create. */
dump_options = MiniDumpWithFullMemory |
MiniDumpIgnoreInaccessibleMemory |
MiniDumpWithAvxXStateContext;
if (MiniDumpWriteDump(process_handle,
pid,
hDumpFile,
dump_options,
NULL,
NULL,
NULL)) {
/* Don't delete the file on close if we successfully wrote it out. */
FILE_DISPOSITION_INFO DontDeleteOnClose = { FALSE };
SetFileInformationByHandle(hDumpFile,
FileDispositionInfo,
&DontDeleteOnClose,
sizeof(DontDeleteOnClose));
}
SymSetOptions(sym_options);
CloseHandle(hDumpFile);
}
}
}
switch (signum) { switch (signum) {
case SIGQUIT:
case SIGTERM: case SIGTERM:
case SIGKILL: case SIGKILL:
case SIGINT: { case SIGINT: {
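Sending SIGQUIT through uv_kill()/uv_process_kill() now writes a minidump before the process is terminated, provided the WER LocalDumps registry key exists. A sketch of triggering it; this assumes SIGQUIT is defined for Windows elsewhere in the release, which is not shown in this excerpt:

    /* Illustrative sketch, not part of the patch. */
    #include <uv.h>

    void dump_then_kill(uv_process_t* child) {
      /* If HKLM\...\Windows Error Reporting\LocalDumps exists, this writes
       * <DumpFolder>\<image>.<pid>.dmp (default %LOCALAPPDATA%\CrashDumps)
       * and then falls through to the normal termination path. */
      uv_process_kill(child, SIGQUIT);
    }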


@ -204,7 +204,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
uv_loop_t* loop = handle->loop; uv_loop_t* loop = handle->loop;
if (!(handle->flags & UV_HANDLE_WRITABLE) || if (!(handle->flags & UV_HANDLE_WRITABLE) ||
handle->flags & UV_HANDLE_SHUTTING || uv__is_stream_shutting(handle) ||
uv__is_closing(handle)) { uv__is_closing(handle)) {
return UV_ENOTCONN; return UV_ENOTCONN;
} }
@ -214,7 +214,6 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
req->cb = cb; req->cb = cb;
handle->flags &= ~UV_HANDLE_WRITABLE; handle->flags &= ~UV_HANDLE_WRITABLE;
handle->flags |= UV_HANDLE_SHUTTING;
handle->stream.conn.shutdown_req = req; handle->stream.conn.shutdown_req = req;
handle->reqs_pending++; handle->reqs_pending++;
REGISTER_HANDLE_REQ(loop, handle, req); REGISTER_HANDLE_REQ(loop, handle, req);


@ -29,14 +29,6 @@
#include "req-inl.h" #include "req-inl.h"
/*
* Threshold of active tcp streams for which to preallocate tcp read buffers.
* (Due to node slab allocator performing poorly under this pattern,
* the optimization is temporarily disabled (threshold=0). This will be
* revisited once node allocator is improved.)
*/
const unsigned int uv_active_tcp_streams_threshold = 0;
/* /*
* Number of simultaneous pending AcceptEx calls. * Number of simultaneous pending AcceptEx calls.
*/ */
@ -214,7 +206,6 @@ void uv__process_tcp_shutdown_req(uv_loop_t* loop, uv_tcp_t* stream, uv_shutdown
assert(stream->flags & UV_HANDLE_CONNECTION); assert(stream->flags & UV_HANDLE_CONNECTION);
stream->stream.conn.shutdown_req = NULL; stream->stream.conn.shutdown_req = NULL;
stream->flags &= ~UV_HANDLE_SHUTTING;
UNREGISTER_HANDLE_REQ(loop, stream, req); UNREGISTER_HANDLE_REQ(loop, stream, req);
err = 0; err = 0;
@ -274,7 +265,6 @@ void uv__tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
} }
uv__handle_close(handle); uv__handle_close(handle);
loop->active_tcp_streams--;
} }
@ -484,26 +474,9 @@ static void uv__tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
req = &handle->read_req; req = &handle->read_req;
memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped)); memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
/* handle->flags |= UV_HANDLE_ZERO_READ;
* Preallocate a read buffer if the number of active streams is below buf.base = (char*) &uv_zero_;
* the threshold. buf.len = 0;
*/
if (loop->active_tcp_streams < uv_active_tcp_streams_threshold) {
handle->flags &= ~UV_HANDLE_ZERO_READ;
handle->tcp.conn.read_buffer = uv_buf_init(NULL, 0);
handle->alloc_cb((uv_handle_t*) handle, 65536, &handle->tcp.conn.read_buffer);
if (handle->tcp.conn.read_buffer.base == NULL ||
handle->tcp.conn.read_buffer.len == 0) {
handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &handle->tcp.conn.read_buffer);
return;
}
assert(handle->tcp.conn.read_buffer.base != NULL);
buf = handle->tcp.conn.read_buffer;
} else {
handle->flags |= UV_HANDLE_ZERO_READ;
buf.base = (char*) &uv_zero_;
buf.len = 0;
}
/* Prepare the overlapped structure. */ /* Prepare the overlapped structure. */
memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped)); memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
@ -550,7 +523,7 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
struct linger l = { 1, 0 }; struct linger l = { 1, 0 };
/* Disallow setting SO_LINGER to zero due to some platform inconsistencies */ /* Disallow setting SO_LINGER to zero due to some platform inconsistencies */
if (handle->flags & UV_HANDLE_SHUTTING) if (uv__is_stream_shutting(handle))
return UV_EINVAL; return UV_EINVAL;
if (0 != setsockopt(handle->socket, SOL_SOCKET, SO_LINGER, (const char*)&l, sizeof(l))) if (0 != setsockopt(handle->socket, SOL_SOCKET, SO_LINGER, (const char*)&l, sizeof(l)))
@ -654,7 +627,6 @@ int uv__tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
int uv__tcp_accept(uv_tcp_t* server, uv_tcp_t* client) { int uv__tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
uv_loop_t* loop = server->loop;
int err = 0; int err = 0;
int family; int family;
@ -716,8 +688,6 @@ int uv__tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
} }
} }
loop->active_tcp_streams++;
return err; return err;
} }
@ -1163,7 +1133,7 @@ void uv__process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
closesocket(handle->socket); closesocket(handle->socket);
handle->socket = INVALID_SOCKET; handle->socket = INVALID_SOCKET;
} }
if (handle->flags & UV_HANDLE_SHUTTING) if (uv__is_stream_shutting(handle))
uv__process_tcp_shutdown_req(loop, uv__process_tcp_shutdown_req(loop,
handle, handle,
handle->stream.conn.shutdown_req); handle->stream.conn.shutdown_req);
@ -1248,7 +1218,6 @@ void uv__process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
0) == 0) { 0) == 0) {
uv__connection_init((uv_stream_t*)handle); uv__connection_init((uv_stream_t*)handle);
handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE; handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
loop->active_tcp_streams++;
} else { } else {
err = WSAGetLastError(); err = WSAGetLastError();
} }
@ -1331,7 +1300,6 @@ int uv__tcp_xfer_import(uv_tcp_t* tcp,
tcp->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE; tcp->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
} }
tcp->loop->active_tcp_streams++;
return 0; return 0;
} }
@ -1432,7 +1400,7 @@ static void uv__tcp_try_cancel_reqs(uv_tcp_t* tcp) {
uv_tcp_non_ifs_lsp_ipv4; uv_tcp_non_ifs_lsp_ipv4;
/* If there are non-ifs LSPs then try to obtain a base handle for the socket. /* If there are non-ifs LSPs then try to obtain a base handle for the socket.
* This will always fail on Windows XP/3k. */ */
if (non_ifs_lsp) { if (non_ifs_lsp) {
DWORD bytes; DWORD bytes;
if (WSAIoctl(socket, if (WSAIoctl(socket,


@ -180,6 +180,81 @@ int uv_thread_create_ex(uv_thread_t* tid,
return UV_EIO; return UV_EIO;
} }
int uv_thread_setaffinity(uv_thread_t* tid,
char* cpumask,
char* oldmask,
size_t mask_size) {
int i;
HANDLE hproc;
DWORD_PTR procmask;
DWORD_PTR sysmask;
DWORD_PTR threadmask;
DWORD_PTR oldthreadmask;
int cpumasksize;
cpumasksize = uv_cpumask_size();
assert(cpumasksize > 0);
if (mask_size < (size_t)cpumasksize)
return UV_EINVAL;
hproc = GetCurrentProcess();
if (!GetProcessAffinityMask(hproc, &procmask, &sysmask))
return uv_translate_sys_error(GetLastError());
threadmask = 0;
for (i = 0; i < cpumasksize; i++) {
if (cpumask[i]) {
if (procmask & (1 << i))
threadmask |= 1 << i;
else
return UV_EINVAL;
}
}
oldthreadmask = SetThreadAffinityMask(*tid, threadmask);
if (oldthreadmask == 0)
return uv_translate_sys_error(GetLastError());
if (oldmask != NULL) {
for (i = 0; i < cpumasksize; i++)
oldmask[i] = (oldthreadmask >> i) & 1;
}
return 0;
}
int uv_thread_getaffinity(uv_thread_t* tid,
char* cpumask,
size_t mask_size) {
int i;
HANDLE hproc;
DWORD_PTR procmask;
DWORD_PTR sysmask;
DWORD_PTR threadmask;
int cpumasksize;
cpumasksize = uv_cpumask_size();
assert(cpumasksize > 0);
if (mask_size < (size_t)cpumasksize)
return UV_EINVAL;
hproc = GetCurrentProcess();
if (!GetProcessAffinityMask(hproc, &procmask, &sysmask))
return uv_translate_sys_error(GetLastError());
threadmask = SetThreadAffinityMask(*tid, procmask);
if (threadmask == 0 || SetThreadAffinityMask(*tid, threadmask) == 0)
return uv_translate_sys_error(GetLastError());
for (i = 0; i < cpumasksize; i++)
cpumask[i] = (threadmask >> i) & 1;
return 0;
}
int uv_thread_getcpu(void) {
return GetCurrentProcessorNumber();
}
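uv_thread_setaffinity(), uv_thread_getaffinity() and uv_thread_getcpu() are new public APIs; affinity masks are one byte per CPU and must be at least uv_cpumask_size() bytes long. A sketch of pinning the calling thread to CPU 0 (illustrative, not from the patch):

    /* Illustrative sketch, not part of the patch. */
    #include <stdlib.h>
    #include <uv.h>

    int pin_to_cpu0(void) {
      uv_thread_t self = uv_thread_self();
      int size = uv_cpumask_size();
      char* mask;
      int rc;

      if (size <= 0)
        return size;              /* e.g. UV_ENOTSUP where unsupported */

      mask = calloc((size_t) size, 1);
      if (mask == NULL)
        return UV_ENOMEM;

      mask[0] = 1;                /* allow CPU 0 only */
      rc = uv_thread_setaffinity(&self, mask, NULL, (size_t) size);
      free(mask);
      return rc;
    }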
uv_thread_t uv_thread_self(void) { uv_thread_t uv_thread_self(void) {
uv_thread_t key; uv_thread_t key;
@ -374,6 +449,7 @@ void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
abort(); abort();
} }
int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) { int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
if (SleepConditionVariableCS(&cond->cond_var, mutex, (DWORD)(timeout / 1e6))) if (SleepConditionVariableCS(&cond->cond_var, mutex, (DWORD)(timeout / 1e6)))
return 0; return 0;
@ -383,69 +459,6 @@ int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
} }
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
int err;
barrier->n = count;
barrier->count = 0;
err = uv_mutex_init(&barrier->mutex);
if (err)
return err;
err = uv_sem_init(&barrier->turnstile1, 0);
if (err)
goto error2;
err = uv_sem_init(&barrier->turnstile2, 1);
if (err)
goto error;
return 0;
error:
uv_sem_destroy(&barrier->turnstile1);
error2:
uv_mutex_destroy(&barrier->mutex);
return err;
}
void uv_barrier_destroy(uv_barrier_t* barrier) {
uv_sem_destroy(&barrier->turnstile2);
uv_sem_destroy(&barrier->turnstile1);
uv_mutex_destroy(&barrier->mutex);
}
int uv_barrier_wait(uv_barrier_t* barrier) {
int serial_thread;
uv_mutex_lock(&barrier->mutex);
if (++barrier->count == barrier->n) {
uv_sem_wait(&barrier->turnstile2);
uv_sem_post(&barrier->turnstile1);
}
uv_mutex_unlock(&barrier->mutex);
uv_sem_wait(&barrier->turnstile1);
uv_sem_post(&barrier->turnstile1);
uv_mutex_lock(&barrier->mutex);
serial_thread = (--barrier->count == 0);
if (serial_thread) {
uv_sem_wait(&barrier->turnstile1);
uv_sem_post(&barrier->turnstile2);
}
uv_mutex_unlock(&barrier->mutex);
uv_sem_wait(&barrier->turnstile2);
uv_sem_post(&barrier->turnstile2);
return serial_thread;
}
int uv_key_create(uv_key_t* key) { int uv_key_create(uv_key_t* key) {
key->tls_index = TlsAlloc(); key->tls_index = TlsAlloc();
if (key->tls_index == TLS_OUT_OF_INDEXES) if (key->tls_index == TLS_OUT_OF_INDEXES)


@ -23,12 +23,7 @@
#include <io.h> #include <io.h>
#include <string.h> #include <string.h>
#include <stdlib.h> #include <stdlib.h>
#include <stdint.h>
#if defined(_MSC_VER) && _MSC_VER < 1600
# include "uv/stdint-msvc2008.h"
#else
# include <stdint.h>
#endif
#ifndef COMMON_LVB_REVERSE_VIDEO #ifndef COMMON_LVB_REVERSE_VIDEO
# define COMMON_LVB_REVERSE_VIDEO 0x4000 # define COMMON_LVB_REVERSE_VIDEO 0x4000
@ -175,14 +170,14 @@ void uv__console_init(void) {
0); 0);
if (uv__tty_console_handle != INVALID_HANDLE_VALUE) { if (uv__tty_console_handle != INVALID_HANDLE_VALUE) {
CONSOLE_SCREEN_BUFFER_INFO sb_info; CONSOLE_SCREEN_BUFFER_INFO sb_info;
QueueUserWorkItem(uv__tty_console_resize_message_loop_thread,
NULL,
WT_EXECUTELONGFUNCTION);
uv_mutex_init(&uv__tty_console_resize_mutex); uv_mutex_init(&uv__tty_console_resize_mutex);
if (GetConsoleScreenBufferInfo(uv__tty_console_handle, &sb_info)) { if (GetConsoleScreenBufferInfo(uv__tty_console_handle, &sb_info)) {
uv__tty_console_width = sb_info.dwSize.X; uv__tty_console_width = sb_info.dwSize.X;
uv__tty_console_height = sb_info.srWindow.Bottom - sb_info.srWindow.Top + 1; uv__tty_console_height = sb_info.srWindow.Bottom - sb_info.srWindow.Top + 1;
} }
QueueUserWorkItem(uv__tty_console_resize_message_loop_thread,
NULL,
WT_EXECUTELONGFUNCTION);
} }
} }
@ -2239,11 +2234,11 @@ void uv__process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
handle->stream.conn.write_reqs_pending--; handle->stream.conn.write_reqs_pending--;
if (handle->stream.conn.write_reqs_pending == 0) if (handle->stream.conn.write_reqs_pending == 0 &&
if (handle->flags & UV_HANDLE_SHUTTING) uv__is_stream_shutting(handle))
uv__process_tty_shutdown_req(loop, uv__process_tty_shutdown_req(loop,
handle, handle,
handle->stream.conn.shutdown_req); handle->stream.conn.shutdown_req);
DECREASE_PENDING_REQ_COUNT(handle); DECREASE_PENDING_REQ_COUNT(handle);
} }
@ -2274,7 +2269,6 @@ void uv__process_tty_shutdown_req(uv_loop_t* loop, uv_tty_t* stream, uv_shutdown
assert(req); assert(req);
stream->stream.conn.shutdown_req = NULL; stream->stream.conn.shutdown_req = NULL;
stream->flags &= ~UV_HANDLE_SHUTTING;
UNREGISTER_HANDLE_REQ(loop, stream, req); UNREGISTER_HANDLE_REQ(loop, stream, req);
/* TTY shutdown is really just a no-op */ /* TTY shutdown is really just a no-op */
@ -2429,7 +2423,6 @@ static void uv__tty_console_signal_resize(void) {
height = sb_info.srWindow.Bottom - sb_info.srWindow.Top + 1; height = sb_info.srWindow.Bottom - sb_info.srWindow.Top + 1;
uv_mutex_lock(&uv__tty_console_resize_mutex); uv_mutex_lock(&uv__tty_console_resize_mutex);
assert(uv__tty_console_width != -1 && uv__tty_console_height != -1);
if (width != uv__tty_console_width || height != uv__tty_console_height) { if (width != uv__tty_console_width || height != uv__tty_console_height) {
uv__tty_console_width = width; uv__tty_console_width = width;
uv__tty_console_height = height; uv__tty_console_height = height;


@ -29,11 +29,6 @@
#include "req-inl.h" #include "req-inl.h"
/*
* Threshold of active udp streams for which to preallocate udp read buffers.
*/
const unsigned int uv_active_udp_streams_threshold = 0;
/* A zero-size buffer for use by uv_udp_read */ /* A zero-size buffer for use by uv_udp_read */
static char uv_zero_[] = ""; static char uv_zero_[] = "";
int uv_udp_getpeername(const uv_udp_t* handle, int uv_udp_getpeername(const uv_udp_t* handle,
@ -276,84 +271,35 @@ static void uv__udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
req = &handle->recv_req; req = &handle->recv_req;
memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped)); memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
/* handle->flags |= UV_HANDLE_ZERO_READ;
* Preallocate a read buffer if the number of active streams is below
* the threshold.
*/
if (loop->active_udp_streams < uv_active_udp_streams_threshold) {
handle->flags &= ~UV_HANDLE_ZERO_READ;
handle->recv_buffer = uv_buf_init(NULL, 0); buf.base = (char*) uv_zero_;
handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &handle->recv_buffer); buf.len = 0;
if (handle->recv_buffer.base == NULL || handle->recv_buffer.len == 0) { flags = MSG_PEEK;
handle->recv_cb(handle, UV_ENOBUFS, &handle->recv_buffer, NULL, 0);
return;
}
assert(handle->recv_buffer.base != NULL);
buf = handle->recv_buffer; result = handle->func_wsarecv(handle->socket,
memset(&handle->recv_from, 0, sizeof handle->recv_from); (WSABUF*) &buf,
handle->recv_from_len = sizeof handle->recv_from; 1,
flags = 0; &bytes,
&flags,
result = handle->func_wsarecvfrom(handle->socket, &req->u.io.overlapped,
(WSABUF*) &buf, NULL);
1,
&bytes,
&flags,
(struct sockaddr*) &handle->recv_from,
&handle->recv_from_len,
&req->u.io.overlapped,
NULL);
if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
/* Process the req without IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
req->u.io.overlapped.InternalHigh = bytes;
handle->reqs_pending++;
uv__insert_pending_req(loop, req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* The req will be processed with IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
handle->reqs_pending++;
} else {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv__insert_pending_req(loop, req);
handle->reqs_pending++;
}
if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
/* Process the req without IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
req->u.io.overlapped.InternalHigh = bytes;
handle->reqs_pending++;
uv__insert_pending_req(loop, req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* The req will be processed with IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
handle->reqs_pending++;
} else { } else {
handle->flags |= UV_HANDLE_ZERO_READ; /* Make this req pending reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
buf.base = (char*) uv_zero_; uv__insert_pending_req(loop, req);
buf.len = 0; handle->reqs_pending++;
flags = MSG_PEEK;
result = handle->func_wsarecv(handle->socket,
(WSABUF*) &buf,
1,
&bytes,
&flags,
&req->u.io.overlapped,
NULL);
if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
/* Process the req without IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
req->u.io.overlapped.InternalHigh = bytes;
handle->reqs_pending++;
uv__insert_pending_req(loop, req);
} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
/* The req will be processed with IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
handle->reqs_pending++;
} else {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
uv__insert_pending_req(loop, req);
handle->reqs_pending++;
}
} }
} }
@ -376,7 +322,6 @@ int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb,
handle->flags |= UV_HANDLE_READING; handle->flags |= UV_HANDLE_READING;
INCREASE_ACTIVE_COUNT(loop, handle); INCREASE_ACTIVE_COUNT(loop, handle);
loop->active_udp_streams++;
handle->recv_cb = recv_cb; handle->recv_cb = recv_cb;
handle->alloc_cb = alloc_cb; handle->alloc_cb = alloc_cb;
@ -393,7 +338,6 @@ int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb,
int uv__udp_recv_stop(uv_udp_t* handle) { int uv__udp_recv_stop(uv_udp_t* handle) {
if (handle->flags & UV_HANDLE_READING) { if (handle->flags & UV_HANDLE_READING) {
handle->flags &= ~UV_HANDLE_READING; handle->flags &= ~UV_HANDLE_READING;
handle->loop->active_udp_streams--;
DECREASE_ACTIVE_COUNT(loop, handle); DECREASE_ACTIVE_COUNT(loop, handle);
} }
@ -497,57 +441,68 @@ void uv__process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle,
DWORD bytes, err, flags; DWORD bytes, err, flags;
struct sockaddr_storage from; struct sockaddr_storage from;
int from_len; int from_len;
int count;
/* Do a nonblocking receive. /* Prevent loop starvation when the data comes in as fast as
* TODO: try to read multiple datagrams at once. FIONREAD maybe? */ * (or faster than) we can read it. */
buf = uv_buf_init(NULL, 0); count = 32;
handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
if (buf.base == NULL || buf.len == 0) {
handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
goto done;
}
assert(buf.base != NULL);
memset(&from, 0, sizeof from); do {
from_len = sizeof from; /* Do at most `count` nonblocking receive. */
buf = uv_buf_init(NULL, 0);
handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
if (buf.base == NULL || buf.len == 0) {
handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
goto done;
}
flags = 0; memset(&from, 0, sizeof from);
from_len = sizeof from;
if (WSARecvFrom(handle->socket, flags = 0;
(WSABUF*)&buf,
1,
&bytes,
&flags,
(struct sockaddr*) &from,
&from_len,
NULL,
NULL) != SOCKET_ERROR) {
/* Message received */ if (WSARecvFrom(handle->socket,
handle->recv_cb(handle, bytes, &buf, (const struct sockaddr*) &from, 0); (WSABUF*)&buf,
} else { 1,
err = WSAGetLastError(); &bytes,
if (err == WSAEMSGSIZE) { &flags,
/* Message truncated */ (struct sockaddr*) &from,
handle->recv_cb(handle, &from_len,
bytes, NULL,
&buf, NULL) != SOCKET_ERROR) {
(const struct sockaddr*) &from,
UV_UDP_PARTIAL); /* Message received */
} else if (err == WSAEWOULDBLOCK) { err = ERROR_SUCCESS;
/* Kernel buffer empty */ handle->recv_cb(handle, bytes, &buf, (const struct sockaddr*) &from, 0);
handle->recv_cb(handle, 0, &buf, NULL, 0);
} else if (err == WSAECONNRESET || err == WSAENETRESET) {
/* WSAECONNRESET/WSANETRESET is ignored because this just indicates
* that a previous sendto operation failed.
*/
handle->recv_cb(handle, 0, &buf, NULL, 0);
} else { } else {
/* Any other error that we want to report back to the user. */ err = WSAGetLastError();
uv_udp_recv_stop(handle); if (err == WSAEMSGSIZE) {
handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0); /* Message truncated */
handle->recv_cb(handle,
bytes,
&buf,
(const struct sockaddr*) &from,
UV_UDP_PARTIAL);
} else if (err == WSAEWOULDBLOCK) {
/* Kernel buffer empty */
handle->recv_cb(handle, 0, &buf, NULL, 0);
} else if (err == WSAECONNRESET || err == WSAENETRESET) {
/* WSAECONNRESET/WSANETRESET is ignored because this just indicates
* that a previous sendto operation failed.
*/
handle->recv_cb(handle, 0, &buf, NULL, 0);
} else {
/* Any other error that we want to report back to the user. */
uv_udp_recv_stop(handle);
handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0);
}
} }
} }
while (err == ERROR_SUCCESS &&
count-- > 0 &&
/* The recv_cb callback may decide to pause or close the handle. */
(handle->flags & UV_HANDLE_READING) &&
!(handle->flags & UV_HANDLE_READ_PENDING));
} }
done: done:
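uv__process_udp_recv_req() now drains up to 32 nonblocking reads per completion to avoid starving the rest of the loop, so a recv callback can fire repeatedly per wakeup; nread == 0 with a NULL address still just means the kernel buffer is empty. A sketch of a callback written with that in mind (handle_datagram is a placeholder for application code):

    /* Illustrative sketch, not part of the patch. */
    #include <stdlib.h>
    #include <uv.h>

    /* Application-provided; declared here only so the sketch compiles. */
    extern void handle_datagram(const char* data, size_t len,
                                const struct sockaddr* from, int partial);

    static void on_alloc(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
      (void) handle;
      buf->base = malloc(suggested);
      buf->len = buf->base == NULL ? 0 : (unsigned int) suggested;
    }

    static void on_recv(uv_udp_t* handle,
                        ssize_t nread,
                        const uv_buf_t* buf,
                        const struct sockaddr* addr,
                        unsigned flags) {
      (void) handle;
      if (nread > 0)   /* may now fire up to 32 times per loop wakeup */
        handle_datagram(buf->base, (size_t) nread, addr,
                        (flags & UV_UDP_PARTIAL) != 0);
      /* nread == 0 && addr == NULL: kernel buffer drained, nothing to do. */
      free(buf->base);
    }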

Some files were not shown because too many files have changed in this diff.