/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv-common.h"

#if !defined(_WIN32)
# include "unix/internal.h"
#endif

#include <stdlib.h>

#define MAX_THREADPOOL_SIZE 1024

static uv_once_t once = UV_ONCE_INIT;
static uv_cond_t cond;
static uv_mutex_t mutex;
static unsigned int idle_threads;
static unsigned int slow_io_work_running;
static unsigned int nthreads;
static uv_thread_t* threads;
static uv_thread_t default_threads[4];
static struct uv__queue exit_message;
static struct uv__queue wq;
static struct uv__queue run_slow_work_message;
static struct uv__queue slow_io_pending_wq;


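/* All of the state above is shared between the worker threads and guarded by
 * the global `mutex`. slow_work_thread_threshold() limits how many workers
 * may run slow I/O at the same time so that CPU-bound and fast-I/O requests
 * are never starved; with the default pool of 4 threads that works out to
 * (4 + 1) / 2 = 2 workers. */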
static unsigned int slow_work_thread_threshold(void) {
  return (nthreads + 1) / 2;
}


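/* uv__cancelled() never runs under normal operation: uv__work_cancel()
 * installs it as the `work` pointer of a cancelled request so that
 * uv__work_done() can recognize the request as cancelled. If a worker ever
 * calls it, that is a bug, hence the abort(). */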
static void uv__cancelled(struct uv__work* w) {
  abort();
}


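/* Each worker blocks until work is posted, runs the request with the global
 * `mutex` released, then hands the finished request back to its owning loop
 * via wq_async so that uv__work_done() can invoke the `done` callback on the
 * loop thread. */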
/* To avoid deadlock with uv_cancel() it's crucial that the worker
 * never holds the global mutex and the loop-local mutex at the same time.
 */
static void worker(void* arg) {
  struct uv__work* w;
  struct uv__queue* q;
  int is_slow_work;

  uv_sem_post((uv_sem_t*) arg);
  arg = NULL;

  uv_mutex_lock(&mutex);
  for (;;) {
    /* `mutex` should always be locked at this point. */

    /* Keep waiting while either no work is present or only slow I/O
       and we're at the threshold for that. */
    while (uv__queue_empty(&wq) ||
           (uv__queue_head(&wq) == &run_slow_work_message &&
            uv__queue_next(&run_slow_work_message) == &wq &&
            slow_io_work_running >= slow_work_thread_threshold())) {
      idle_threads += 1;
      uv_cond_wait(&cond, &mutex);
      idle_threads -= 1;
    }

    q = uv__queue_head(&wq);
    if (q == &exit_message) {
      uv_cond_signal(&cond);
      uv_mutex_unlock(&mutex);
      break;
    }

    uv__queue_remove(q);
    uv__queue_init(q);  /* Signal uv_cancel() that the work req is executing. */

    is_slow_work = 0;
    if (q == &run_slow_work_message) {
      /* If we're at the slow I/O threshold, re-schedule until after all
         other work in the queue is done. */
      if (slow_io_work_running >= slow_work_thread_threshold()) {
        uv__queue_insert_tail(&wq, q);
        continue;
      }

      /* If we encountered a request to run slow I/O work but there is none
         to run, that means it's cancelled => Start over. */
      if (uv__queue_empty(&slow_io_pending_wq))
        continue;

      is_slow_work = 1;
      slow_io_work_running++;

      q = uv__queue_head(&slow_io_pending_wq);
      uv__queue_remove(q);
      uv__queue_init(q);

      /* If there is more slow I/O work, schedule it to be run as well. */
      if (!uv__queue_empty(&slow_io_pending_wq)) {
        uv__queue_insert_tail(&wq, &run_slow_work_message);
        if (idle_threads > 0)
          uv_cond_signal(&cond);
      }
    }

    uv_mutex_unlock(&mutex);

    w = uv__queue_data(q, struct uv__work, wq);
    w->work(w);

    uv_mutex_lock(&w->loop->wq_mutex);
    w->work = NULL;  /* Signal uv_cancel() that the work req is done
                        executing. */
    uv__queue_insert_tail(&w->loop->wq, &w->wq);
    uv_async_send(&w->loop->wq_async);
    uv_mutex_unlock(&w->loop->wq_mutex);

    /* Lock `mutex` since that is expected at the start of the next
     * iteration. */
    uv_mutex_lock(&mutex);
    if (is_slow_work) {
      /* `slow_io_work_running` is protected by `mutex`. */
      slow_io_work_running--;
    }
  }
}


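/* post() is called with `mutex` unlocked. It appends `q` to the global work
 * queue (or, for UV__WORK_SLOW_IO, to the staging queue that is drained via
 * the run_slow_work_message sentinel) and wakes one idle worker, if any. */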
static void post(struct uv__queue* q, enum uv__work_kind kind) {
  uv_mutex_lock(&mutex);
  if (kind == UV__WORK_SLOW_IO) {
    /* Insert into a separate queue. */
    uv__queue_insert_tail(&slow_io_pending_wq, q);
    if (!uv__queue_empty(&run_slow_work_message)) {
      /* Running slow I/O tasks is already scheduled => Nothing to do here.
         The worker that runs said other task will schedule this one as well. */
      uv_mutex_unlock(&mutex);
      return;
    }
    q = &run_slow_work_message;
  }

  uv__queue_insert_tail(&wq, q);
  if (idle_threads > 0)
    uv_cond_signal(&cond);
  uv_mutex_unlock(&mutex);
}


#ifdef __MVS__
/* TODO(itodorov) - zos: revisit when Woz compiler is available. */
__attribute__((destructor))
#endif
void uv__threadpool_cleanup(void) {
  unsigned int i;

  if (nthreads == 0)
    return;

#ifndef __MVS__
  /* TODO(gabylb) - zos: revisit when Woz compiler is available. */
  post(&exit_message, UV__WORK_CPU);
#endif

  for (i = 0; i < nthreads; i++)
    if (uv_thread_join(threads + i))
      abort();

  if (threads != default_threads)
    uv__free(threads);

  uv_mutex_destroy(&mutex);
  uv_cond_destroy(&cond);

  threads = NULL;
  nthreads = 0;
}


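/* The pool size is taken from the UV_THREADPOOL_SIZE environment variable,
 * clamped to the range [1, MAX_THREADPOOL_SIZE] and defaulting to 4. It is
 * read only once, when the first piece of work is submitted, so it must be
 * set before that point. */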
static void init_threads(void) {
  uv_thread_options_t config;
  unsigned int i;
  const char* val;
  uv_sem_t sem;

  nthreads = ARRAY_SIZE(default_threads);
  val = getenv("UV_THREADPOOL_SIZE");
  if (val != NULL)
    nthreads = atoi(val);
  if (nthreads == 0)
    nthreads = 1;
  if (nthreads > MAX_THREADPOOL_SIZE)
    nthreads = MAX_THREADPOOL_SIZE;

  threads = default_threads;
  if (nthreads > ARRAY_SIZE(default_threads)) {
    threads = uv__malloc(nthreads * sizeof(threads[0]));
    if (threads == NULL) {
      nthreads = ARRAY_SIZE(default_threads);
      threads = default_threads;
    }
  }

  if (uv_cond_init(&cond))
    abort();

  if (uv_mutex_init(&mutex))
    abort();

  uv__queue_init(&wq);
  uv__queue_init(&slow_io_pending_wq);
  uv__queue_init(&run_slow_work_message);

  if (uv_sem_init(&sem, 0))
    abort();

  config.flags = UV_THREAD_HAS_STACK_SIZE;
  config.stack_size = 8u << 20;  /* 8 MB */

  for (i = 0; i < nthreads; i++)
    if (uv_thread_create_ex(threads + i, &config, worker, &sem))
      abort();

  for (i = 0; i < nthreads; i++)
    uv_sem_wait(&sem);

  uv_sem_destroy(&sem);
}


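/* Child-side pthread_atfork() handler: resetting `once` makes the next
 * uv__work_submit() in the forked child rebuild the thread pool from
 * scratch. */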
#ifndef _WIN32
static void reset_once(void) {
  uv_once_t child_once = UV_ONCE_INIT;
  memcpy(&once, &child_once, sizeof(child_once));
}
#endif


static void init_once(void) {
#ifndef _WIN32
  /* Re-initialize the threadpool after fork.
   * Note that this discards the global mutex and condition as well
   * as the work queue.
   */
  if (pthread_atfork(NULL, NULL, &reset_once))
    abort();
#endif
  init_threads();
}


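/* Internal entry point for all thread pool work; the request types handled
 * by uv_cancel() below (fs, getaddrinfo, getnameinfo, random, work) submit
 * their work through here. */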
void uv__work_submit(uv_loop_t* loop,
                     struct uv__work* w,
                     enum uv__work_kind kind,
                     void (*work)(struct uv__work* w),
                     void (*done)(struct uv__work* w, int status)) {
  uv_once(&once, init_once);
  w->loop = loop;
  w->work = work;
  w->done = done;
  post(&w->wq, kind);
}


/* TODO(bnoordhuis) teach libuv how to cancel file operations
 * that go through io_uring instead of the thread pool.
 */
static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
  int cancelled;

  uv_once(&once, init_once);  /* Ensure |mutex| is initialized. */
  uv_mutex_lock(&mutex);
  uv_mutex_lock(&w->loop->wq_mutex);

  cancelled = !uv__queue_empty(&w->wq) && w->work != NULL;
  if (cancelled)
    uv__queue_remove(&w->wq);

  uv_mutex_unlock(&w->loop->wq_mutex);
  uv_mutex_unlock(&mutex);

  if (!cancelled)
    return UV_EBUSY;

  w->work = uv__cancelled;
  uv_mutex_lock(&loop->wq_mutex);
  uv__queue_insert_tail(&loop->wq, &w->wq);
  uv_async_send(&loop->wq_async);
  uv_mutex_unlock(&loop->wq_mutex);

  return 0;
}


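/* Runs on the loop thread whenever wq_async is signalled: drains loop->wq
 * and calls each request's `done` callback, passing UV_ECANCELED for
 * requests that were cancelled before a worker picked them up. */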
void uv__work_done(uv_async_t* handle) {
  struct uv__work* w;
  uv_loop_t* loop;
  struct uv__queue* q;
  struct uv__queue wq;
  int err;
  int nevents;

  loop = container_of(handle, uv_loop_t, wq_async);
  uv_mutex_lock(&loop->wq_mutex);
  uv__queue_move(&loop->wq, &wq);
  uv_mutex_unlock(&loop->wq_mutex);

  nevents = 0;

  while (!uv__queue_empty(&wq)) {
    q = uv__queue_head(&wq);
    uv__queue_remove(q);

    w = container_of(q, struct uv__work, wq);
    err = (w->work == uv__cancelled) ? UV_ECANCELED : 0;
    w->done(w, err);
    nevents++;
  }

  /* This check accomplishes two things:
   * 1. Even if the queue was empty, the call to uv__work_done() should count
   *    as an event; that event will already have been added by the event
   *    loop when calling this callback.
   * 2. It prevents an accidental wrap-around of the events metric in case
   *    nevents == 0, since nevents - 1 would be negative.
   */
  if (nevents > 1) {
    /* Subtract 1 to counter the call to uv__work_done(). */
    uv__metrics_inc_events(loop, nevents - 1);
    if (uv__get_internal_fields(loop)->current_timeout == 0)
      uv__metrics_inc_events_waiting(loop, nevents - 1);
  }
}


static void uv__queue_work(struct uv__work* w) {
  uv_work_t* req = container_of(w, uv_work_t, work_req);

  req->work_cb(req);
}


static void uv__queue_done(struct uv__work* w, int err) {
  uv_work_t* req;

  req = container_of(w, uv_work_t, work_req);
  uv__req_unregister(req->loop, req);

  if (req->after_work_cb == NULL)
    return;

  req->after_work_cb(req, err);
}


int uv_queue_work(uv_loop_t* loop,
                  uv_work_t* req,
                  uv_work_cb work_cb,
                  uv_after_work_cb after_work_cb) {
  if (work_cb == NULL)
    return UV_EINVAL;

  uv__req_init(loop, req, UV_WORK);
  req->loop = loop;
  req->work_cb = work_cb;
  req->after_work_cb = after_work_cb;
  uv__work_submit(loop,
                  &req->work_req,
                  UV__WORK_CPU,
                  uv__queue_work,
                  uv__queue_done);
  return 0;
}


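/* Illustrative uv_queue_work() call site; the names are a sketch, not part
 * of the API:
 *
 *   static void crunch(uv_work_t* req) {
 *     // Runs on a thread pool thread; must not touch the loop.
 *   }
 *
 *   static void after_crunch(uv_work_t* req, int status) {
 *     // Runs on the loop thread; status is UV_ECANCELED if the request
 *     // was cancelled with uv_cancel() before it started, 0 otherwise.
 *   }
 *
 *   uv_work_t req;
 *   uv_queue_work(loop, &req, crunch, after_crunch);
 */

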
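/* Cancellation only succeeds while the request is still waiting in the
 * thread pool queue; once a worker has started it, uv__work_cancel() returns
 * UV_EBUSY. On success the request's done callback still runs, but with
 * UV_ECANCELED. */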
int uv_cancel(uv_req_t* req) {
  struct uv__work* wreq;
  uv_loop_t* loop;

  switch (req->type) {
  case UV_FS:
    loop = ((uv_fs_t*) req)->loop;
    wreq = &((uv_fs_t*) req)->work_req;
    break;
  case UV_GETADDRINFO:
    loop = ((uv_getaddrinfo_t*) req)->loop;
    wreq = &((uv_getaddrinfo_t*) req)->work_req;
    break;
  case UV_GETNAMEINFO:
    loop = ((uv_getnameinfo_t*) req)->loop;
    wreq = &((uv_getnameinfo_t*) req)->work_req;
    break;
  case UV_RANDOM:
    loop = ((uv_random_t*) req)->loop;
    wreq = &((uv_random_t*) req)->work_req;
    break;
  case UV_WORK:
    loop = ((uv_work_t*) req)->loop;
    wreq = &((uv_work_t*) req)->work_req;
    break;
  default:
    return UV_EINVAL;
  }

  return uv__work_cancel(loop, req, wreq);
}