libuv 1.44.2
git-svn-id: https://www.unprompted.com/svn/projects/tildefriends/trunk@3934 ed5197a5-7fde-0310-b194-c3ffbd925b24
deps/libuv/src/win/core.c (vendored, 13 lines changed)
@@ -592,7 +592,7 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) {
 int uv_run(uv_loop_t *loop, uv_run_mode mode) {
   DWORD timeout;
   int r;
-  int ran_pending;
+  int can_sleep;
 
   r = uv__loop_alive(loop);
   if (!r)
@@ -602,12 +602,14 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
     uv_update_time(loop);
     uv__run_timers(loop);
 
-    ran_pending = uv__process_reqs(loop);
+    can_sleep = loop->pending_reqs_tail == NULL && loop->idle_handles == NULL;
+
+    uv__process_reqs(loop);
     uv__idle_invoke(loop);
     uv__prepare_invoke(loop);
 
     timeout = 0;
-    if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
+    if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
       timeout = uv_backend_timeout(loop);
 
     if (pGetQueuedCompletionStatusEx)
@@ -615,6 +617,11 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) {
     else
       uv__poll_wine(loop, timeout);
 
+    /* Process immediate callbacks (e.g. write_cb) a small fixed number of
+     * times to avoid loop starvation.*/
+    for (r = 0; r < 8 && loop->pending_reqs_tail != NULL; r++)
+      uv__process_reqs(loop);
+
     /* Run one final update on the provider_idle_time in case uv__poll*
      * returned because the timeout expired, but no events were received. This
     * call will be ignored if the provider_entry_time was either never set (if
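
Note: this hunk replaces the old "did we run pending reqs?" boolean with an explicit can_sleep test taken before callbacks run, and bounds post-poll request processing to eight passes so a single busy handle cannot starve timers and polling. A minimal standalone sketch of the bounded-drain idea (hypothetical queue type, not libuv's internals):

    #include <stddef.h>

    struct work_queue { void* tail; };
    void process_batch(struct work_queue* q);  /* assumed: pops and runs items */

    void drain_bounded(struct work_queue* q) {
      int i;
      /* At most 8 batches per loop turn; leftovers wait for the next turn. */
      for (i = 0; i < 8 && q->tail != NULL; i++)
        process_batch(q);
    }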
deps/libuv/src/win/error.c (vendored, 2 lines changed)
@@ -73,7 +73,6 @@ int uv_translate_sys_error(int sys_errno) {
     case WSAEACCES: return UV_EACCES;
     case ERROR_ELEVATION_REQUIRED: return UV_EACCES;
     case ERROR_CANT_ACCESS_FILE: return UV_EACCES;
-    case ERROR_ACCESS_DENIED: return UV_EACCES;
     case ERROR_ADDRESS_ALREADY_ASSOCIATED: return UV_EADDRINUSE;
     case WSAEADDRINUSE: return UV_EADDRINUSE;
     case WSAEADDRNOTAVAIL: return UV_EADDRNOTAVAIL;
@@ -155,6 +154,7 @@ int uv_translate_sys_error(int sys_errno) {
     case WSAENOTSOCK: return UV_ENOTSOCK;
     case ERROR_NOT_SUPPORTED: return UV_ENOTSUP;
+    case ERROR_ACCESS_DENIED: return UV_EPERM;
     case ERROR_PRIVILEGE_NOT_HELD: return UV_EPERM;
     case ERROR_BAD_PIPE: return UV_EPIPE;
     case ERROR_NO_DATA: return UV_EPIPE;
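
Note: ERROR_ACCESS_DENIED moves from the UV_EACCES group to UV_EPERM. Since uv_translate_sys_error() is public API, callers can observe the change directly (sketch; ERROR_ACCESS_DENIED is 5 on Windows):

    #include <uv.h>
    #include <assert.h>

    void check_mapping(void) {
      /* Previously UV_EACCES; UV_EPERM as of this update. */
      assert(uv_translate_sys_error(5) == UV_EPERM);
    }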
deps/libuv/src/win/internal.h (vendored, 9 lines changed)
@@ -88,6 +88,9 @@ void uv__process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
     uv_req_t* req);
 void uv__process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
     uv_connect_t* req);
+void uv__process_tcp_shutdown_req(uv_loop_t* loop,
+                                  uv_tcp_t* stream,
+                                  uv_shutdown_t* req);
 
 void uv__tcp_close(uv_loop_t* loop, uv_tcp_t* tcp);
 void uv__tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle);
@@ -130,6 +133,7 @@ int uv__pipe_write(uv_loop_t* loop,
                    size_t nbufs,
                    uv_stream_t* send_handle,
                    uv_write_cb cb);
+void uv__pipe_shutdown(uv_loop_t* loop, uv_pipe_t* handle, uv_shutdown_t* req);
 
 void uv__process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
     uv_req_t* req);
@@ -143,7 +147,6 @@ void uv__process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
     uv_shutdown_t* req);
 
 void uv__pipe_close(uv_loop_t* loop, uv_pipe_t* handle);
-void uv__pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle);
 void uv__pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle);
 
 
@@ -177,7 +180,9 @@ void uv__process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle,
  */
 void uv__process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle,
                                  uv_connect_t* req);
 
+void uv__process_tty_shutdown_req(uv_loop_t* loop,
+                                  uv_tty_t* stream,
+                                  uv_shutdown_t* req);
 void uv__tty_endgame(uv_loop_t* loop, uv_tty_t* handle);
deps/libuv/src/win/pipe.c (vendored, 464 lines changed)
@@ -121,14 +121,10 @@ int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
 
 
 static void uv__pipe_connection_init(uv_pipe_t* handle) {
+  assert(!(handle->flags & UV_HANDLE_PIPESERVER));
   uv__connection_init((uv_stream_t*) handle);
   handle->read_req.data = handle;
   handle->pipe.conn.eof_timer = NULL;
-  assert(!(handle->flags & UV_HANDLE_PIPESERVER));
-  if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE) {
-    handle->pipe.conn.readfile_thread_handle = NULL;
-    InitializeCriticalSection(&handle->pipe.conn.readfile_thread_lock);
-  }
 }
 
 
@@ -393,6 +389,8 @@ int uv__create_stdio_pipe_pair(uv_loop_t* loop,
   unsigned int client_flags;
   int err;
 
+  uv__pipe_connection_init(parent_pipe);
+
   server_pipe = INVALID_HANDLE_VALUE;
   client_pipe = INVALID_HANDLE_VALUE;
 
@@ -427,7 +425,6 @@ int uv__create_stdio_pipe_pair(uv_loop_t* loop,
     goto error;
   }
 
-  uv__pipe_connection_init(parent_pipe);
   parent_pipe->handle = server_pipe;
   *child_pipe_ptr = client_pipe;
 
@@ -462,7 +459,9 @@ static int uv__set_pipe_handle(uv_loop_t* loop,
   DWORD current_mode = 0;
   DWORD err = 0;
 
-  if (handle->flags & UV_HANDLE_PIPESERVER)
+  assert(handle->flags & UV_HANDLE_CONNECTION);
+  assert(!(handle->flags & UV_HANDLE_PIPESERVER));
+  if (handle->flags & UV_HANDLE_CLOSING)
     return UV_EINVAL;
   if (handle->handle != INVALID_HANDLE_VALUE)
     return UV_EBUSY;
@@ -478,18 +477,17 @@ static int uv__set_pipe_handle(uv_loop_t* loop,
      */
     if (!GetNamedPipeHandleState(pipeHandle, &current_mode, NULL, NULL,
                                  NULL, NULL, 0)) {
-      return -1;
+      return uv_translate_sys_error(GetLastError());
     } else if (current_mode & PIPE_NOWAIT) {
-      SetLastError(ERROR_ACCESS_DENIED);
-      return -1;
+      return UV_EACCES;
     }
   } else {
     /* If this returns ERROR_INVALID_PARAMETER we probably opened
      * something that is not a pipe. */
     if (err == ERROR_INVALID_PARAMETER) {
-      SetLastError(WSAENOTSOCK);
+      return UV_ENOTSOCK;
     }
-    return -1;
+    return uv_translate_sys_error(err);
   }
 }
@@ -500,13 +498,15 @@ static int uv__set_pipe_handle(uv_loop_t* loop,
                                         sizeof(mode_info),
                                         FileModeInformation);
   if (nt_status != STATUS_SUCCESS) {
-    return -1;
+    return uv_translate_sys_error(err);
   }
 
   if (mode_info.Mode & FILE_SYNCHRONOUS_IO_ALERT ||
       mode_info.Mode & FILE_SYNCHRONOUS_IO_NONALERT) {
     /* Non-overlapped pipe. */
     handle->flags |= UV_HANDLE_NON_OVERLAPPED_PIPE;
+    handle->pipe.conn.readfile_thread_handle = NULL;
+    InitializeCriticalSection(&handle->pipe.conn.readfile_thread_lock);
   } else {
     /* Overlapped pipe. Try to associate with IOCP. */
     if (CreateIoCompletionPort(pipeHandle,
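
Note: uv__set_pipe_handle's error convention changes from "-1 plus SetLastError()" to returning a translated libuv error code (0 on success). A sketch of the caller-side difference (hypothetical wrapper around the internal function):

    /* Before: if (uv__set_pipe_handle(...) == -1) err = GetLastError(); */
    /* After: the return value is already a UV_* code and can be propagated. */
    int attach_pipe_handle(uv_loop_t* loop, uv_pipe_t* pipe, HANDLE h) {
      int err = uv__set_pipe_handle(loop, pipe, h, -1, 0);
      if (err)
        return err;  /* no translation needed */
      return 0;
    }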
@@ -578,135 +578,109 @@ static DWORD WINAPI pipe_shutdown_thread_proc(void* parameter) {
 }
 
 
-void uv__pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
-  int err;
+void uv__pipe_shutdown(uv_loop_t* loop, uv_pipe_t* handle, uv_shutdown_t *req) {
   DWORD result;
-  uv_shutdown_t* req;
   NTSTATUS nt_status;
   IO_STATUS_BLOCK io_status;
   FILE_PIPE_LOCAL_INFORMATION pipe_info;
-  uv__ipc_xfer_queue_item_t* xfer_queue_item;
 
+  assert(handle->flags & UV_HANDLE_CONNECTION);
+  assert(req != NULL);
+  assert(handle->stream.conn.write_reqs_pending == 0);
+  SET_REQ_SUCCESS(req);
+
+  if (handle->flags & UV_HANDLE_CLOSING) {
+    uv__insert_pending_req(loop, (uv_req_t*) req);
+    return;
+  }
+
+  /* Try to avoid flushing the pipe buffer in the thread pool. */
+  nt_status = pNtQueryInformationFile(handle->handle,
+                                      &io_status,
+                                      &pipe_info,
+                                      sizeof pipe_info,
+                                      FilePipeLocalInformation);
+
+  if (nt_status != STATUS_SUCCESS) {
+    SET_REQ_ERROR(req, pRtlNtStatusToDosError(nt_status));
+    handle->flags |= UV_HANDLE_WRITABLE; /* Questionable. */
+    uv__insert_pending_req(loop, (uv_req_t*) req);
+    return;
+  }
+
+  if (pipe_info.OutboundQuota == pipe_info.WriteQuotaAvailable) {
+    /* Short-circuit, no need to call FlushFileBuffers:
+     * all writes have been read. */
+    uv__insert_pending_req(loop, (uv_req_t*) req);
+    return;
+  }
+
+  /* Run FlushFileBuffers in the thread pool. */
+  result = QueueUserWorkItem(pipe_shutdown_thread_proc,
+                             req,
+                             WT_EXECUTELONGFUNCTION);
+  if (!result) {
+    SET_REQ_ERROR(req, GetLastError());
+    handle->flags |= UV_HANDLE_WRITABLE; /* Questionable. */
+    uv__insert_pending_req(loop, (uv_req_t*) req);
+    return;
+  }
+}
+
+
+void uv__pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
+  uv__ipc_xfer_queue_item_t* xfer_queue_item;
 
-  if ((handle->flags & UV_HANDLE_CONNECTION) &&
-      handle->stream.conn.shutdown_req != NULL &&
-      handle->stream.conn.write_reqs_pending == 0) {
-    req = handle->stream.conn.shutdown_req;
+  assert(handle->reqs_pending == 0);
+  assert(handle->flags & UV_HANDLE_CLOSING);
+  assert(!(handle->flags & UV_HANDLE_CLOSED));
 
-    /* Clear the shutdown_req field so we don't go here again. */
-    handle->stream.conn.shutdown_req = NULL;
+  if (handle->flags & UV_HANDLE_CONNECTION) {
+    /* Free pending sockets */
+    while (!QUEUE_EMPTY(&handle->pipe.conn.ipc_xfer_queue)) {
+      QUEUE* q;
+      SOCKET socket;
 
-    if (handle->flags & UV_HANDLE_CLOSING) {
-      UNREGISTER_HANDLE_REQ(loop, handle, req);
+      q = QUEUE_HEAD(&handle->pipe.conn.ipc_xfer_queue);
+      QUEUE_REMOVE(q);
+      xfer_queue_item = QUEUE_DATA(q, uv__ipc_xfer_queue_item_t, member);
 
-      /* Already closing. Cancel the shutdown. */
-      if (req->cb) {
-        req->cb(req, UV_ECANCELED);
+      /* Materialize socket and close it */
+      socket = WSASocketW(FROM_PROTOCOL_INFO,
+                          FROM_PROTOCOL_INFO,
+                          FROM_PROTOCOL_INFO,
+                          &xfer_queue_item->xfer_info.socket_info,
+                          0,
+                          WSA_FLAG_OVERLAPPED);
+      uv__free(xfer_queue_item);
+
+      if (socket != INVALID_SOCKET)
+        closesocket(socket);
+    }
+    handle->pipe.conn.ipc_xfer_queue_length = 0;
+
+    if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+      if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
+        UnregisterWait(handle->read_req.wait_handle);
+        handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
       }
-
-      DECREASE_PENDING_REQ_COUNT(handle);
-      return;
-    }
-
-    /* Try to avoid flushing the pipe buffer in the thread pool. */
-    nt_status = pNtQueryInformationFile(handle->handle,
-                                        &io_status,
-                                        &pipe_info,
-                                        sizeof pipe_info,
-                                        FilePipeLocalInformation);
-
-    if (nt_status != STATUS_SUCCESS) {
-      /* Failure */
-      UNREGISTER_HANDLE_REQ(loop, handle, req);
-
-      handle->flags |= UV_HANDLE_WRITABLE; /* Questionable */
-      if (req->cb) {
-        err = pRtlNtStatusToDosError(nt_status);
-        req->cb(req, uv_translate_sys_error(err));
+      if (handle->read_req.event_handle != NULL) {
+        CloseHandle(handle->read_req.event_handle);
+        handle->read_req.event_handle = NULL;
       }
-
-      DECREASE_PENDING_REQ_COUNT(handle);
-      return;
     }
 
-    if (pipe_info.OutboundQuota == pipe_info.WriteQuotaAvailable) {
-      /* Short-circuit, no need to call FlushFileBuffers. */
-      uv__insert_pending_req(loop, (uv_req_t*) req);
-      return;
-    }
-
-    /* Run FlushFileBuffers in the thread pool. */
-    result = QueueUserWorkItem(pipe_shutdown_thread_proc,
-                               req,
-                               WT_EXECUTELONGFUNCTION);
-    if (result) {
-      return;
-
-    } else {
-      /* Failure. */
-      UNREGISTER_HANDLE_REQ(loop, handle, req);
-
-      handle->flags |= UV_HANDLE_WRITABLE; /* Questionable */
-      if (req->cb) {
-        err = GetLastError();
-        req->cb(req, uv_translate_sys_error(err));
-      }
-
-      DECREASE_PENDING_REQ_COUNT(handle);
-      return;
-    }
+    if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE)
+      DeleteCriticalSection(&handle->pipe.conn.readfile_thread_lock);
   }
 
-  if (handle->flags & UV_HANDLE_CLOSING &&
-      handle->reqs_pending == 0) {
-    assert(!(handle->flags & UV_HANDLE_CLOSED));
-
-    if (handle->flags & UV_HANDLE_CONNECTION) {
-      /* Free pending sockets */
-      while (!QUEUE_EMPTY(&handle->pipe.conn.ipc_xfer_queue)) {
-        QUEUE* q;
-        SOCKET socket;
-
-        q = QUEUE_HEAD(&handle->pipe.conn.ipc_xfer_queue);
-        QUEUE_REMOVE(q);
-        xfer_queue_item = QUEUE_DATA(q, uv__ipc_xfer_queue_item_t, member);
-
-        /* Materialize socket and close it */
-        socket = WSASocketW(FROM_PROTOCOL_INFO,
-                            FROM_PROTOCOL_INFO,
-                            FROM_PROTOCOL_INFO,
-                            &xfer_queue_item->xfer_info.socket_info,
-                            0,
-                            WSA_FLAG_OVERLAPPED);
-        uv__free(xfer_queue_item);
-
-        if (socket != INVALID_SOCKET)
-          closesocket(socket);
-      }
-      handle->pipe.conn.ipc_xfer_queue_length = 0;
-
-      if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
-        if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
-          UnregisterWait(handle->read_req.wait_handle);
-          handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
-        }
-        if (handle->read_req.event_handle != NULL) {
-          CloseHandle(handle->read_req.event_handle);
-          handle->read_req.event_handle = NULL;
-        }
-      }
-
-      if (handle->flags & UV_HANDLE_NON_OVERLAPPED_PIPE)
-        DeleteCriticalSection(&handle->pipe.conn.readfile_thread_lock);
-    }
-
-    if (handle->flags & UV_HANDLE_PIPESERVER) {
-      assert(handle->pipe.serv.accept_reqs);
-      uv__free(handle->pipe.serv.accept_reqs);
-      handle->pipe.serv.accept_reqs = NULL;
-    }
-
-    uv__handle_close(handle);
+  if (handle->flags & UV_HANDLE_PIPESERVER) {
+    assert(handle->pipe.serv.accept_reqs);
+    uv__free(handle->pipe.serv.accept_reqs);
+    handle->pipe.serv.accept_reqs = NULL;
   }
+
+  uv__handle_close(handle);
 }
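
Note: the old uv__pipe_endgame mixed graceful shutdown with final teardown; it is now split into uv__pipe_shutdown (flush logic, possibly on the thread pool) and a leaner uv__pipe_endgame that only frees resources under asserted preconditions. Paraphrased sequencing (the libuv names are real, the surrounding function is hypothetical):

    /* Runs when the last pending write completes: */
    void after_last_write(uv_loop_t* loop, uv_pipe_t* h) {
      if (h->stream.conn.write_reqs_pending == 0 &&
          (h->flags & UV_HANDLE_SHUTTING))
        uv__pipe_shutdown(loop, h, h->stream.conn.shutdown_req);
      /* uv__pipe_shutdown completes the request immediately (closing handle,
       * empty pipe buffer, or error) or queues FlushFileBuffers on the thread
       * pool; either way the request is later delivered to
       * uv__process_pipe_shutdown_req, which runs the user's callback. */
    }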
@@ -731,7 +705,9 @@ int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
   if (!name) {
     return UV_EINVAL;
   }
-
+  if (uv__is_closing(handle)) {
+    return UV_EINVAL;
+  }
   if (!(handle->flags & UV_HANDLE_PIPESERVER)) {
     handle->pipe.serv.pending_instances = default_pending_pipe_instances;
   }
@@ -815,7 +791,7 @@ static DWORD WINAPI pipe_connect_thread_proc(void* parameter) {
   assert(loop);
 
   /* We're here because CreateFile on a pipe returned ERROR_PIPE_BUSY. We wait
-   * for the pipe to become available with WaitNamedPipe. */
+   * up to 30 seconds for the pipe to become available with WaitNamedPipe. */
   while (WaitNamedPipeW(handle->name, 30000)) {
     /* The pipe is now available, try to connect. */
     pipeHandle = open_named_pipe(handle->name, &duplex_flags);
@@ -825,9 +801,10 @@ static DWORD WINAPI pipe_connect_thread_proc(void* parameter) {
     SwitchToThread();
   }
 
-  if (pipeHandle != INVALID_HANDLE_VALUE &&
-      !uv__set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags)) {
+  if (pipeHandle != INVALID_HANDLE_VALUE) {
     SET_REQ_SUCCESS(req);
+    req->u.connect.pipeHandle = pipeHandle;
+    req->u.connect.duplex_flags = duplex_flags;
   } else {
     SET_REQ_ERROR(req, GetLastError());
   }
@@ -849,6 +826,18 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
   UV_REQ_INIT(req, UV_CONNECT);
   req->handle = (uv_stream_t*) handle;
   req->cb = cb;
+  req->u.connect.pipeHandle = INVALID_HANDLE_VALUE;
+  req->u.connect.duplex_flags = 0;
+
+  if (handle->flags & UV_HANDLE_PIPESERVER) {
+    err = ERROR_INVALID_PARAMETER;
+    goto error;
+  }
+  if (handle->flags & UV_HANDLE_CONNECTION) {
+    err = ERROR_PIPE_BUSY;
+    goto error;
+  }
+  uv__pipe_connection_init(handle);
 
   /* Convert name to UTF16. */
   nameSize = MultiByteToWideChar(CP_UTF8, 0, name, -1, NULL, 0) * sizeof(WCHAR);
@@ -888,17 +877,8 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
     goto error;
   }
 
-  assert(pipeHandle != INVALID_HANDLE_VALUE);
-
-  if (uv__set_pipe_handle(loop,
-                          (uv_pipe_t*) req->handle,
-                          pipeHandle,
-                          -1,
-                          duplex_flags)) {
-    err = GetLastError();
-    goto error;
-  }
-
+  req->u.connect.pipeHandle = pipeHandle;
+  req->u.connect.duplex_flags = duplex_flags;
   SET_REQ_SUCCESS(req);
   uv__insert_pending_req(loop, (uv_req_t*) req);
   handle->reqs_pending++;
@@ -937,7 +917,7 @@ void uv__pipe_interrupt_read(uv_pipe_t* handle) {
     /* Cancel asynchronous read. */
     r = CancelIoEx(handle->handle, &handle->read_req.u.io.overlapped);
     assert(r || GetLastError() == ERROR_NOT_FOUND);
-
+    (void) r;
   } else {
     /* Cancel synchronous read (which is happening in the thread pool). */
     HANDLE thread;
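
Note: the connect path no longer calls uv__set_pipe_handle from whichever thread opened the pipe; it only records the results on the request, and the loop thread finishes the job (see the -2140 hunk below). Sketch of the hand-off:

    /* Worker or caller thread: record results only.
     *   req->u.connect.pipeHandle   = pipeHandle;
     *   req->u.connect.duplex_flags = duplex_flags;
     * Loop thread, in uv__process_pipe_connect_req:
     *   err = uv__set_pipe_handle(loop, handle, req->u.connect.pipeHandle,
     *                             -1, req->u.connect.duplex_flags);
     * This keeps all handle-state mutation on the loop thread. */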
@@ -973,17 +953,30 @@ void uv__pipe_interrupt_read(uv_pipe_t* handle) {
 void uv__pipe_read_stop(uv_pipe_t* handle) {
   handle->flags &= ~UV_HANDLE_READING;
   DECREASE_ACTIVE_COUNT(handle->loop, handle);
 
   uv__pipe_interrupt_read(handle);
 }
 
 
-/* Cleans up uv_pipe_t (server or connection) and all resources associated with
- * it. */
-void uv__pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle) {
+void uv__pipe_close(uv_loop_t* loop, uv_pipe_t* handle) {
   int i;
   HANDLE pipeHandle;
 
+  if (handle->flags & UV_HANDLE_READING) {
+    handle->flags &= ~UV_HANDLE_READING;
+    DECREASE_ACTIVE_COUNT(loop, handle);
+  }
+
+  if (handle->flags & UV_HANDLE_LISTENING) {
+    handle->flags &= ~UV_HANDLE_LISTENING;
+    DECREASE_ACTIVE_COUNT(loop, handle);
+  }
+
+  handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+
+  uv__handle_closing(handle);
+
   uv__pipe_interrupt_read(handle);
 
   if (handle->name) {
@@ -1003,35 +996,17 @@ void uv__pipe_cleanup(uv_loop_t* loop, uv_pipe_t* handle) {
   }
 
   if (handle->flags & UV_HANDLE_CONNECTION) {
-    handle->flags &= ~UV_HANDLE_WRITABLE;
     eof_timer_destroy(handle);
   }
 
   if ((handle->flags & UV_HANDLE_CONNECTION)
-      && handle->handle != INVALID_HANDLE_VALUE)
+      && handle->handle != INVALID_HANDLE_VALUE) {
+    /* This will eventually destroy the write queue for us too. */
     close_pipe(handle);
-}
-
-
-void uv__pipe_close(uv_loop_t* loop, uv_pipe_t* handle) {
-  if (handle->flags & UV_HANDLE_READING) {
-    handle->flags &= ~UV_HANDLE_READING;
-    DECREASE_ACTIVE_COUNT(loop, handle);
-  }
-
-  if (handle->flags & UV_HANDLE_LISTENING) {
-    handle->flags &= ~UV_HANDLE_LISTENING;
-    DECREASE_ACTIVE_COUNT(loop, handle);
-  }
-
-  uv__pipe_cleanup(loop, handle);
-
-  if (handle->reqs_pending == 0) {
-    uv__want_endgame(loop, (uv_handle_t*) handle);
   }
 
-  handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
-  uv__handle_closing(handle);
+  if (handle->reqs_pending == 0)
+    uv__want_endgame(loop, (uv_handle_t*) handle);
 }
 
 
@@ -1099,6 +1074,7 @@ int uv__pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
 
   } else {
     pipe_client = (uv_pipe_t*) client;
+    uv__pipe_connection_init(pipe_client);
 
     /* Find a connection instance that has been connected, but not yet
      * accepted. */
@@ -1110,7 +1086,6 @@ int uv__pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
     }
 
     /* Initialize the client handle and copy the pipeHandle to the client */
-    uv__pipe_connection_init(pipe_client);
    pipe_client->handle = req->pipeHandle;
    pipe_client->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
@@ -2094,10 +2069,9 @@ void uv__process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle,
       uv__queue_non_overlapped_write(handle);
   }
 
-  if (handle->stream.conn.shutdown_req != NULL &&
-      handle->stream.conn.write_reqs_pending == 0) {
-    uv__want_endgame(loop, (uv_handle_t*)handle);
-  }
+  if (handle->stream.conn.write_reqs_pending == 0)
+    if (handle->flags & UV_HANDLE_SHUTTING)
+      uv__pipe_shutdown(loop, handle, handle->stream.conn.shutdown_req);
 
   DECREASE_PENDING_REQ_COUNT(handle);
 }
@@ -2110,7 +2084,7 @@ void uv__process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
   assert(handle->type == UV_NAMED_PIPE);
 
   if (handle->flags & UV_HANDLE_CLOSING) {
-    /* The req->pipeHandle should be freed already in uv__pipe_cleanup(). */
+    /* The req->pipeHandle should be freed already in uv__pipe_close(). */
     assert(req->pipeHandle == INVALID_HANDLE_VALUE);
     DECREASE_PENDING_REQ_COUNT(handle);
     return;
@@ -2140,52 +2114,72 @@ void uv__process_pipe_accept_req(uv_loop_t* loop, uv_pipe_t* handle,
 
 void uv__process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle,
     uv_connect_t* req) {
+  HANDLE pipeHandle;
+  DWORD duplex_flags;
   int err;
 
   assert(handle->type == UV_NAMED_PIPE);
 
   UNREGISTER_HANDLE_REQ(loop, handle, req);
 
-  if (req->cb) {
-    err = 0;
-    if (REQ_SUCCESS(req)) {
-      uv__pipe_connection_init(handle);
-    } else {
-      err = GET_REQ_ERROR(req);
-    }
-    req->cb(req, uv_translate_sys_error(err));
+  err = 0;
+  if (REQ_SUCCESS(req)) {
+    pipeHandle = req->u.connect.pipeHandle;
+    duplex_flags = req->u.connect.duplex_flags;
+    err = uv__set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags);
+    if (err)
+      CloseHandle(pipeHandle);
+  } else {
+    err = uv_translate_sys_error(GET_REQ_ERROR(req));
   }
 
+  if (req->cb)
+    req->cb(req, err);
+
   DECREASE_PENDING_REQ_COUNT(handle);
 }
 
 
 void uv__process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle,
     uv_shutdown_t* req) {
+  int err;
+
   assert(handle->type == UV_NAMED_PIPE);
 
+  /* Clear the shutdown_req field so we don't go here again. */
+  handle->stream.conn.shutdown_req = NULL;
+  handle->flags &= ~UV_HANDLE_SHUTTING;
   UNREGISTER_HANDLE_REQ(loop, handle, req);
 
-  if (handle->flags & UV_HANDLE_READABLE) {
-    /* Initialize and optionally start the eof timer. Only do this if the pipe
-     * is readable and we haven't seen EOF come in ourselves. */
-    eof_timer_init(handle);
-
-    /* If reading start the timer right now. Otherwise uv__pipe_queue_read will
-     * start it. */
-    if (handle->flags & UV_HANDLE_READ_PENDING) {
-      eof_timer_start(handle);
-    }
-
+  if (handle->flags & UV_HANDLE_CLOSING) {
+    /* Already closing. Cancel the shutdown. */
+    err = UV_ECANCELED;
+  } else if (!REQ_SUCCESS(req)) {
+    /* An error occurred in trying to shutdown gracefully. */
+    err = uv_translate_sys_error(GET_REQ_ERROR(req));
   } else {
-    /* This pipe is not readable. We can just close it to let the other end
-     * know that we're done writing. */
-    close_pipe(handle);
+    if (handle->flags & UV_HANDLE_READABLE) {
+      /* Initialize and optionally start the eof timer. Only do this if the pipe
+       * is readable and we haven't seen EOF come in ourselves. */
+      eof_timer_init(handle);
+
+      /* If reading start the timer right now. Otherwise uv__pipe_queue_read will
+       * start it. */
+      if (handle->flags & UV_HANDLE_READ_PENDING) {
+        eof_timer_start(handle);
+      }
+
+    } else {
+      /* This pipe is not readable. We can just close it to let the other end
+       * know that we're done writing. */
+      close_pipe(handle);
    }
+    err = 0;
+  }
 
-  if (req->cb) {
-    req->cb(req, 0);
-  }
+  if (req->cb)
+    req->cb(req, err);
 
   DECREASE_PENDING_REQ_COUNT(handle);
 }
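
Note: pipe shutdown callbacks can now receive a real status, UV_ECANCELED if the handle was closed first or a translated system error if the flush failed, where previously they always got 0. User-level example (public libuv API):

    #include <stdio.h>
    #include <uv.h>

    static void on_shutdown(uv_shutdown_t* req, int status) {
      if (status < 0)
        fprintf(stderr, "shutdown failed: %s\n", uv_strerror(status));
    }

    /* Usage, given a connected uv_pipe_t* pipe:
     *   static uv_shutdown_t req;
     *   uv_shutdown(&req, (uv_stream_t*) pipe, on_shutdown);
     */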
@@ -2200,7 +2194,8 @@ static void eof_timer_init(uv_pipe_t* pipe) {
   pipe->pipe.conn.eof_timer = (uv_timer_t*) uv__malloc(sizeof *pipe->pipe.conn.eof_timer);
 
   r = uv_timer_init(pipe->loop, pipe->pipe.conn.eof_timer);
-  assert(r == 0);  /* timers can't fail */
+  assert(r == 0); /* timers can't fail */
+  (void) r;
   pipe->pipe.conn.eof_timer->data = pipe;
   uv_unref((uv_handle_t*) pipe->pipe.conn.eof_timer);
 }
@@ -2280,10 +2275,16 @@ int uv_pipe_open(uv_pipe_t* pipe, uv_file file) {
   IO_STATUS_BLOCK io_status;
   FILE_ACCESS_INFORMATION access;
   DWORD duplex_flags = 0;
+  int err;
 
   if (os_handle == INVALID_HANDLE_VALUE)
     return UV_EBADF;
+  if (pipe->flags & UV_HANDLE_PIPESERVER)
+    return UV_EINVAL;
+  if (pipe->flags & UV_HANDLE_CONNECTION)
+    return UV_EBUSY;
 
+  uv__pipe_connection_init(pipe);
   uv__once_init();
   /* In order to avoid closing a stdio file descriptor 0-2, duplicate the
    * underlying OS handle and forget about the original fd.
@@ -2300,6 +2301,7 @@ int uv_pipe_open(uv_pipe_t* pipe, uv_file file) {
                          FALSE,
                          DUPLICATE_SAME_ACCESS))
       return uv_translate_sys_error(GetLastError());
+    assert(os_handle != INVALID_HANDLE_VALUE);
     file = -1;
   }
 
@@ -2327,17 +2329,17 @@ int uv_pipe_open(uv_pipe_t* pipe, uv_file file) {
   if (access.AccessFlags & FILE_READ_DATA)
     duplex_flags |= UV_HANDLE_READABLE;
 
-  if (os_handle == INVALID_HANDLE_VALUE ||
-      uv__set_pipe_handle(pipe->loop,
-                          pipe,
-                          os_handle,
-                          file,
-                          duplex_flags) == -1) {
-    return UV_EINVAL;
+  err = uv__set_pipe_handle(pipe->loop,
+                            pipe,
+                            os_handle,
+                            file,
+                            duplex_flags);
+  if (err) {
+    if (file == -1)
+      CloseHandle(os_handle);
+    return err;
   }
 
-  uv__pipe_connection_init(pipe);
-
   if (pipe->ipc) {
     assert(!(pipe->flags & UV_HANDLE_NON_OVERLAPPED_PIPE));
     pipe->pipe.conn.ipc_remote_pid = uv_os_getppid();
@@ -2361,6 +2363,51 @@ static int uv__pipe_getname(const uv_pipe_t* handle, char* buffer, size_t* size)
   uv__once_init();
   name_info = NULL;
 
+  if (handle->name != NULL) {
+    /* The user might try to query the name before we are connected,
+     * and this is just easier to return the cached value if we have it. */
+    name_buf = handle->name;
+    name_len = wcslen(name_buf);
+
+    /* check how much space we need */
+    addrlen = WideCharToMultiByte(CP_UTF8,
+                                  0,
+                                  name_buf,
+                                  name_len,
+                                  NULL,
+                                  0,
+                                  NULL,
+                                  NULL);
+    if (!addrlen) {
+      *size = 0;
+      err = uv_translate_sys_error(GetLastError());
+      return err;
+    } else if (addrlen >= *size) {
+      *size = addrlen + 1;
+      err = UV_ENOBUFS;
+      goto error;
+    }
+
+    addrlen = WideCharToMultiByte(CP_UTF8,
+                                  0,
+                                  name_buf,
+                                  name_len,
+                                  buffer,
+                                  addrlen,
+                                  NULL,
+                                  NULL);
+    if (!addrlen) {
+      *size = 0;
+      err = uv_translate_sys_error(GetLastError());
+      return err;
+    }
+
+    *size = addrlen;
+    buffer[addrlen] = '\0';
+
+    return 0;
+  }
+
   if (handle->handle == INVALID_HANDLE_VALUE) {
     *size = 0;
     return UV_EINVAL;
@@ -2498,6 +2545,11 @@ int uv_pipe_getpeername(const uv_pipe_t* handle, char* buffer, size_t* size) {
   if (handle->handle != INVALID_HANDLE_VALUE)
     return uv__pipe_getname(handle, buffer, size);
 
+  if (handle->flags & UV_HANDLE_CONNECTION) {
+    if (handle->name != NULL)
+      return uv__pipe_getname(handle, buffer, size);
+  }
+
   return UV_EBADF;
 }
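
Note: the uv__pipe_getname addition uses the classic Win32 two-pass conversion: one WideCharToMultiByte call with a NULL buffer to measure, a second to convert. Standalone sketch of the pattern:

    #include <windows.h>
    #include <stdlib.h>

    /* UTF-16 to freshly allocated UTF-8; NULL on failure. */
    char* utf16_to_utf8(const wchar_t* w, int wlen) {
      char* s;
      int n = WideCharToMultiByte(CP_UTF8, 0, w, wlen, NULL, 0, NULL, NULL);
      if (n == 0)
        return NULL;
      s = (char*) malloc((size_t) n + 1);
      if (s == NULL)
        return NULL;
      if (WideCharToMultiByte(CP_UTF8, 0, w, wlen, s, n, NULL, NULL) == 0) {
        free(s);
        return NULL;
      }
      s[n] = '\0';
      return s;
    }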
deps/libuv/src/win/req-inl.h (vendored, 13 lines changed)
@@ -138,13 +138,13 @@ INLINE static void uv__insert_pending_req(uv_loop_t* loop, uv_req_t* req) {
 } while (0)
 
 
-INLINE static int uv__process_reqs(uv_loop_t* loop) {
+INLINE static void uv__process_reqs(uv_loop_t* loop) {
   uv_req_t* req;
   uv_req_t* first;
   uv_req_t* next;
 
   if (loop->pending_reqs_tail == NULL)
-    return 0;
+    return;
 
   first = loop->pending_reqs_tail->next_req;
   next = first;
@@ -172,12 +172,7 @@ INLINE static int uv__process_reqs(uv_loop_t* loop) {
       break;
 
     case UV_SHUTDOWN:
-      /* Tcp shutdown requests don't come here. */
-      assert(((uv_shutdown_t*) req)->handle->type == UV_NAMED_PIPE);
-      uv__process_pipe_shutdown_req(
-          loop,
-          (uv_pipe_t*) ((uv_shutdown_t*) req)->handle,
-          (uv_shutdown_t*) req);
+      DELEGATE_STREAM_REQ(loop, (uv_shutdown_t*) req, shutdown, handle);
       break;
 
     case UV_UDP_RECV:
@@ -214,8 +209,6 @@ INLINE static int uv__process_reqs(uv_loop_t* loop) {
       assert(0);
     }
   }
-
-  return 1;
 }
 
 #endif /* UV_WIN_REQ_INL_H_ */
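
Note: UV_SHUTDOWN requests now go through the same DELEGATE_STREAM_REQ dispatch as other stream requests, so TCP and TTY shutdowns are delivered here too, routed to the per-type handlers declared in internal.h. A paraphrase of what the macro does for shutdown (the real macro token-pastes the method name; this is not a verbatim expansion):

    static void dispatch_shutdown(uv_loop_t* loop, uv_shutdown_t* req) {
      uv_stream_t* handle = req->handle;
      switch (handle->type) {
        case UV_TCP:
          uv__process_tcp_shutdown_req(loop, (uv_tcp_t*) handle, req);
          break;
        case UV_NAMED_PIPE:
          uv__process_pipe_shutdown_req(loop, (uv_pipe_t*) handle, req);
          break;
        case UV_TTY:
          uv__process_tty_shutdown_req(loop, (uv_tty_t*) handle, req);
          break;
        default:
          assert(0);
      }
    }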
deps/libuv/src/win/stream.c (vendored, 11 lines changed)
@@ -29,7 +29,9 @@
 
 int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
   int err;
 
+  if (uv__is_closing(stream)) {
+    return UV_EINVAL;
+  }
   err = ERROR_INVALID_PARAMETER;
   switch (stream->type) {
     case UV_TCP:
@@ -217,7 +219,12 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
   handle->reqs_pending++;
   REGISTER_HANDLE_REQ(loop, handle, req);
 
-  uv__want_endgame(loop, (uv_handle_t*)handle);
+  if (handle->stream.conn.write_reqs_pending == 0) {
+    if (handle->type == UV_NAMED_PIPE)
+      uv__pipe_shutdown(loop, (uv_pipe_t*) handle, req);
+    else
+      uv__insert_pending_req(loop, (uv_req_t*) req);
+  }
 
   return 0;
 }
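
Note: uv_shutdown no longer waits for the endgame pass; when no writes are pending it starts the shutdown at once. Assumed life cycle, paraphrased from the hunks in this commit:

    /* uv_shutdown()                 sets UV_HANDLE_SHUTTING, stores req;
     *                               kicks off immediately if no writes pending
     * last write completes          write-req handler sees SHUTTING and calls
     *                               the type-specific shutdown
     * uv__process_*_shutdown_req()  clears SHUTTING and runs req->cb(status)
     */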
deps/libuv/src/win/tcp.c (vendored, 139 lines changed)
@@ -205,73 +205,76 @@ int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* handle) {
 }
 
 
-void uv__tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
+void uv__process_tcp_shutdown_req(uv_loop_t* loop, uv_tcp_t* stream, uv_shutdown_t *req) {
   int err;
+
+  assert(req);
+  assert(stream->stream.conn.write_reqs_pending == 0);
+  assert(!(stream->flags & UV_HANDLE_SHUT));
+  assert(stream->flags & UV_HANDLE_CONNECTION);
+
+  stream->stream.conn.shutdown_req = NULL;
+  stream->flags &= ~UV_HANDLE_SHUTTING;
+  UNREGISTER_HANDLE_REQ(loop, stream, req);
+
+  err = 0;
+  if (stream->flags & UV_HANDLE_CLOSING)
+   /* The user destroyed the stream before we got to do the shutdown. */
+    err = UV_ECANCELED;
+  else if (shutdown(stream->socket, SD_SEND) == SOCKET_ERROR)
+    err = uv_translate_sys_error(WSAGetLastError());
+  else /* Success. */
+    stream->flags |= UV_HANDLE_SHUT;
+
+  if (req->cb)
+    req->cb(req, err);
+
+  DECREASE_PENDING_REQ_COUNT(stream);
+}
+
+
+void uv__tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
   unsigned int i;
   uv_tcp_accept_t* req;
 
-  if (handle->flags & UV_HANDLE_CONNECTION &&
-      handle->stream.conn.shutdown_req != NULL &&
-      handle->stream.conn.write_reqs_pending == 0) {
-
-    UNREGISTER_HANDLE_REQ(loop, handle, handle->stream.conn.shutdown_req);
-
-    err = 0;
-    if (handle->flags & UV_HANDLE_CLOSING) {
-      err = ERROR_OPERATION_ABORTED;
-    } else if (shutdown(handle->socket, SD_SEND) == SOCKET_ERROR) {
-      err = WSAGetLastError();
-    }
-
-    if (handle->stream.conn.shutdown_req->cb) {
-      handle->stream.conn.shutdown_req->cb(handle->stream.conn.shutdown_req,
-                                           uv_translate_sys_error(err));
-    }
-
-    handle->stream.conn.shutdown_req = NULL;
-    DECREASE_PENDING_REQ_COUNT(handle);
-    return;
-  }
-
-  if (handle->flags & UV_HANDLE_CLOSING &&
-      handle->reqs_pending == 0) {
-    assert(!(handle->flags & UV_HANDLE_CLOSED));
-    assert(handle->socket == INVALID_SOCKET);
-
-    if (!(handle->flags & UV_HANDLE_CONNECTION) && handle->tcp.serv.accept_reqs) {
-      if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
-        for (i = 0; i < uv_simultaneous_server_accepts; i++) {
-          req = &handle->tcp.serv.accept_reqs[i];
-          if (req->wait_handle != INVALID_HANDLE_VALUE) {
-            UnregisterWait(req->wait_handle);
-            req->wait_handle = INVALID_HANDLE_VALUE;
-          }
-          if (req->event_handle != NULL) {
-            CloseHandle(req->event_handle);
-            req->event_handle = NULL;
-          }
-        }
-      }
-
-      uv__free(handle->tcp.serv.accept_reqs);
-      handle->tcp.serv.accept_reqs = NULL;
-    }
-
-    if (handle->flags & UV_HANDLE_CONNECTION &&
-        handle->flags & UV_HANDLE_EMULATE_IOCP) {
-      if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
-        UnregisterWait(handle->read_req.wait_handle);
-        handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
-      }
-      if (handle->read_req.event_handle != NULL) {
-        CloseHandle(handle->read_req.event_handle);
-        handle->read_req.event_handle = NULL;
-      }
-    }
-
-    uv__handle_close(handle);
-    loop->active_tcp_streams--;
-  }
+  assert(handle->flags & UV_HANDLE_CLOSING);
+  assert(handle->reqs_pending == 0);
+  assert(!(handle->flags & UV_HANDLE_CLOSED));
+  assert(handle->socket == INVALID_SOCKET);
+
+  if (!(handle->flags & UV_HANDLE_CONNECTION) && handle->tcp.serv.accept_reqs) {
+    if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+      for (i = 0; i < uv_simultaneous_server_accepts; i++) {
+        req = &handle->tcp.serv.accept_reqs[i];
+        if (req->wait_handle != INVALID_HANDLE_VALUE) {
+          UnregisterWait(req->wait_handle);
+          req->wait_handle = INVALID_HANDLE_VALUE;
+        }
+        if (req->event_handle != NULL) {
+          CloseHandle(req->event_handle);
+          req->event_handle = NULL;
+        }
+      }
+    }
+
+    uv__free(handle->tcp.serv.accept_reqs);
+    handle->tcp.serv.accept_reqs = NULL;
+  }
+
+  if (handle->flags & UV_HANDLE_CONNECTION &&
+      handle->flags & UV_HANDLE_EMULATE_IOCP) {
+    if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
+      UnregisterWait(handle->read_req.wait_handle);
+      handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
+    }
+    if (handle->read_req.event_handle != NULL) {
+      CloseHandle(handle->read_req.event_handle);
+      handle->read_req.event_handle = NULL;
+    }
+  }
+
+  uv__handle_close(handle);
+  loop->active_tcp_streams--;
 }
 
 
@@ -1160,9 +1163,10 @@ void uv__process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
       closesocket(handle->socket);
       handle->socket = INVALID_SOCKET;
     }
-    if (handle->stream.conn.shutdown_req != NULL) {
-      uv__want_endgame(loop, (uv_handle_t*)handle);
-    }
+    if (handle->flags & UV_HANDLE_SHUTTING)
+      uv__process_tcp_shutdown_req(loop,
+                                   handle,
+                                   handle->stream.conn.shutdown_req);
   }
 
   DECREASE_PENDING_REQ_COUNT(handle);
@@ -1411,7 +1415,7 @@ static void uv__tcp_try_cancel_reqs(uv_tcp_t* tcp) {
   int writing;
 
   socket = tcp->socket;
-  reading = tcp->flags & UV_HANDLE_READING;
+  reading = tcp->flags & UV_HANDLE_READ_PENDING;
   writing = tcp->stream.conn.write_reqs_pending > 0;
   if (!reading && !writing)
     return;
@@ -1458,10 +1462,10 @@ static void uv__tcp_try_cancel_reqs(uv_tcp_t* tcp) {
 
 void uv__tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
   if (tcp->flags & UV_HANDLE_CONNECTION) {
-    uv__tcp_try_cancel_reqs(tcp);
     if (tcp->flags & UV_HANDLE_READING) {
       uv_read_stop((uv_stream_t*) tcp);
     }
+    uv__tcp_try_cancel_reqs(tcp);
   } else {
     if (tcp->tcp.serv.accept_reqs != NULL) {
       /* First close the incoming sockets to cancel the accept operations before
@@ -1483,6 +1487,9 @@ void uv__tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
     DECREASE_ACTIVE_COUNT(loop, tcp);
   }
 
+  tcp->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+  uv__handle_closing(tcp);
+
   /* If any overlapped req failed to cancel, calling `closesocket` now would
    * cause Win32 to send an RST packet. Try to avoid that for writes, if
    * possibly applicable, by waiting to process the completion notifications
@@ -1494,12 +1501,8 @@ void uv__tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
     tcp->socket = INVALID_SOCKET;
   }
 
-  tcp->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
-  uv__handle_closing(tcp);
-
-  if (tcp->reqs_pending == 0) {
-    uv__want_endgame(tcp->loop, (uv_handle_t*)tcp);
-  }
+  if (tcp->reqs_pending == 0)
+    uv__want_endgame(loop, (uv_handle_t*) tcp);
 }
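
Note: the actual TCP half-close in uv__process_tcp_shutdown_req is Winsock's shutdown(socket, SD_SEND): sending stops while receiving keeps working. Standalone sketch, independent of libuv:

    #include <winsock2.h>

    /* Returns 0 on success or a WSA error code. */
    int half_close_send(SOCKET s) {
      if (shutdown(s, SD_SEND) == SOCKET_ERROR)
        return WSAGetLastError();
      return 0;
    }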
deps/libuv/src/win/tty.c (vendored, 68 lines changed)
@@ -2237,11 +2237,13 @@ void uv__process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle,
     req->cb(req, uv_translate_sys_error(err));
   }
 
   handle->stream.conn.write_reqs_pending--;
-  if (handle->stream.conn.shutdown_req != NULL &&
-      handle->stream.conn.write_reqs_pending == 0) {
-    uv__want_endgame(loop, (uv_handle_t*)handle);
-  }
+  if (handle->stream.conn.write_reqs_pending == 0)
+    if (handle->flags & UV_HANDLE_SHUTTING)
+      uv__process_tty_shutdown_req(loop,
+                                   handle,
+                                   handle->stream.conn.shutdown_req);
 
   DECREASE_PENDING_REQ_COUNT(handle);
 }
@@ -2262,43 +2264,43 @@ void uv__tty_close(uv_tty_t* handle) {
   handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
   uv__handle_closing(handle);
 
-  if (handle->reqs_pending == 0) {
+  if (handle->reqs_pending == 0)
     uv__want_endgame(handle->loop, (uv_handle_t*) handle);
-  }
 }
 
 
+void uv__process_tty_shutdown_req(uv_loop_t* loop, uv_tty_t* stream, uv_shutdown_t* req) {
+  assert(stream->stream.conn.write_reqs_pending == 0);
+  assert(req);
+
+  stream->stream.conn.shutdown_req = NULL;
+  stream->flags &= ~UV_HANDLE_SHUTTING;
+  UNREGISTER_HANDLE_REQ(loop, stream, req);
+
+  /* TTY shutdown is really just a no-op */
+  if (req->cb) {
+    if (stream->flags & UV_HANDLE_CLOSING) {
+      req->cb(req, UV_ECANCELED);
+    } else {
+      req->cb(req, 0);
+    }
+  }
+
+  DECREASE_PENDING_REQ_COUNT(stream);
+}
+
+
 void uv__tty_endgame(uv_loop_t* loop, uv_tty_t* handle) {
-  if (!(handle->flags & UV_HANDLE_TTY_READABLE) &&
-      handle->stream.conn.shutdown_req != NULL &&
-      handle->stream.conn.write_reqs_pending == 0) {
-    UNREGISTER_HANDLE_REQ(loop, handle, handle->stream.conn.shutdown_req);
-
-    /* TTY shutdown is really just a no-op */
-    if (handle->stream.conn.shutdown_req->cb) {
-      if (handle->flags & UV_HANDLE_CLOSING) {
-        handle->stream.conn.shutdown_req->cb(handle->stream.conn.shutdown_req, UV_ECANCELED);
-      } else {
-        handle->stream.conn.shutdown_req->cb(handle->stream.conn.shutdown_req, 0);
-      }
-    }
-
-    handle->stream.conn.shutdown_req = NULL;
-
-    DECREASE_PENDING_REQ_COUNT(handle);
-    return;
-  }
-
-  if (handle->flags & UV_HANDLE_CLOSING &&
-      handle->reqs_pending == 0) {
-    /* The wait handle used for raw reading should be unregistered when the
-     * wait callback runs. */
-    assert(!(handle->flags & UV_HANDLE_TTY_READABLE) ||
-           handle->tty.rd.read_raw_wait == NULL);
-
-    assert(!(handle->flags & UV_HANDLE_CLOSED));
-    uv__handle_close(handle);
-  }
+  assert(handle->flags & UV_HANDLE_CLOSING);
+  assert(handle->reqs_pending == 0);
+
+  /* The wait handle used for raw reading should be unregistered when the
+   * wait callback runs. */
+  assert(!(handle->flags & UV_HANDLE_TTY_READABLE) ||
+         handle->tty.rd.read_raw_wait == NULL);
+
+  assert(!(handle->flags & UV_HANDLE_CLOSED));
+  uv__handle_close(handle);
 }
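
Note: the TTY refactor follows the same shape as TCP and pipes, with the twist that TTY shutdown performs no OS-level work; the new uv__process_tty_shutdown_req is pure bookkeeping, reporting UV_ECANCELED only when the handle is already closing. The endgames now assert their preconditions instead of re-checking them:

    /* Before: if (closing && reqs_pending == 0) { ...teardown... }   */
    /* After:  assert(closing); assert(reqs_pending == 0); teardown.  */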
deps/libuv/src/win/udp.c (vendored, 5 lines changed)
@@ -1087,7 +1087,7 @@ int uv__udp_disconnect(uv_udp_t* handle) {
 
   memset(&addr, 0, sizeof(addr));
 
-  err = connect(handle->socket, &addr, sizeof(addr));
+  err = connect(handle->socket, (struct sockaddr*) &addr, sizeof(addr));
   if (err)
     return uv_translate_sys_error(WSAGetLastError());
 
@@ -1146,6 +1146,7 @@ int uv__udp_try_send(uv_udp_t* handle,
     err = uv__convert_to_localhost_if_unspecified(addr, &converted);
     if (err)
       return err;
+    addr = (const struct sockaddr*) &converted;
   }
 
   /* Already sending a message.*/
@@ -1169,7 +1170,7 @@ int uv__udp_try_send(uv_udp_t* handle,
                     nbufs,
                     &bytes,
                     0,
-                    (const struct sockaddr*) &converted,
+                    addr,
                     addrlen,
                     NULL,
                     NULL);