libuv 1.45.0, #include cleanup, probably something else.

git-svn-id: https://www.unprompted.com/svn/projects/tildefriends/trunk@4308 ed5197a5-7fde-0310-b194-c3ffbd925b24
Date:   2023-05-21 21:36:51 +00:00
Parent: 1ccb9183b4
Commit: f421606e21
299 changed files with 7167 additions and 4918 deletions
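Most of the new test code in the diff below exercises uv_metrics_info(), added in libuv 1.45.0, which fills a uv_metrics_t with loop_count, events, and events_waiting counters. As a quick orientation, here is a minimal usage sketch; it is not part of the diff and assumes only libuv >= 1.45.0 and the default loop:

#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_metrics_t metrics;

  /* Spin the loop once so the counters have something to report. */
  uv_run(loop, UV_RUN_NOWAIT);

  /* uv_metrics_info() returns 0 on success and fills in the counters. */
  if (uv_metrics_info(loop, &metrics) == 0)
    printf("loop_count=%llu events=%llu events_waiting=%llu\n",
           (unsigned long long) metrics.loop_count,
           (unsigned long long) metrics.events,
           (unsigned long long) metrics.events_waiting);

  uv_loop_close(loop);
  return 0;
}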

@@ -25,6 +25,17 @@
 #define UV_NS_TO_MS 1000000
 
+typedef struct {
+  uv_fs_t open_req;
+  uv_fs_t write_req;
+  uv_fs_t close_req;
+} fs_reqs_t;
+
+static uint64_t last_events_count;
+static char test_buf[] = "test-buffer\n";
+static fs_reqs_t fs_reqs;
+static int pool_events_counter;
+
 static void timer_spin_cb(uv_timer_t* handle) {
   uint64_t t;
@@ -37,6 +48,9 @@ static void timer_spin_cb(uv_timer_t* handle) {
 TEST_IMPL(metrics_idle_time) {
+#if defined(__OpenBSD__)
+  RETURN_SKIP("Test does not currently work in OpenBSD");
+#endif
   const uint64_t timeout = 1000;
   uv_timer_t timer;
   uint64_t idle_time;
@@ -55,10 +69,10 @@ TEST_IMPL(metrics_idle_time) {
   idle_time = uv_metrics_idle_time(uv_default_loop());
   /* Permissive check that the idle time matches within the timeout ±500 ms. */
-  ASSERT((idle_time <= (timeout + 500) * UV_NS_TO_MS) &&
-         (idle_time >= (timeout - 500) * UV_NS_TO_MS));
+  ASSERT_LE(idle_time, (timeout + 500) * UV_NS_TO_MS);
+  ASSERT_GE(idle_time, (timeout - 500) * UV_NS_TO_MS);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
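The assertions above read uv_metrics_idle_time(), which only reports a nonzero value when the loop was configured with UV_METRICS_IDLE_TIME before running. A minimal sketch of that pattern outside the test harness; illustrative only, not part of the diff:

#include <stdint.h>
#include <uv.h>

static void report_idle_time(void) {
  uv_loop_t* loop = uv_default_loop();
  uint64_t idle_ns;

  /* Opt in to idle-time tracking; this must happen before uv_run(). */
  uv_loop_configure(loop, UV_METRICS_IDLE_TIME);
  uv_run(loop, UV_RUN_DEFAULT);

  /* Nanoseconds the loop spent blocked waiting for events. */
  idle_ns = uv_metrics_idle_time(loop);
  (void) idle_ns;
}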
@@ -116,6 +130,7 @@ static void timer_noop_cb(uv_timer_t* handle) {
 TEST_IMPL(metrics_idle_time_zero) {
+  uv_metrics_t metrics;
   uv_timer_t timer;
   int cntr;
@@ -130,6 +145,248 @@ TEST_IMPL(metrics_idle_time_zero) {
   ASSERT_GT(cntr, 0);
   ASSERT_EQ(0, uv_metrics_idle_time(uv_default_loop()));
-  MAKE_VALGRIND_HAPPY();
+  ASSERT_EQ(0, uv_metrics_info(uv_default_loop(), &metrics));
+  ASSERT_UINT64_EQ(cntr, metrics.loop_count);
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
+
+static void close_cb(uv_fs_t* req) {
+  uv_metrics_t metrics;
+  ASSERT_EQ(0, uv_metrics_info(uv_default_loop(), &metrics));
+  ASSERT_UINT64_EQ(3, metrics.loop_count);
+  ASSERT_UINT64_GT(metrics.events, last_events_count);
+  uv_fs_req_cleanup(req);
+  last_events_count = metrics.events;
+}
+
+static void write_cb(uv_fs_t* req) {
+  uv_metrics_t metrics;
+  ASSERT_EQ(0, uv_metrics_info(uv_default_loop(), &metrics));
+  ASSERT_UINT64_EQ(2, metrics.loop_count);
+  ASSERT_UINT64_GT(metrics.events, last_events_count);
+  ASSERT_EQ(req->result, sizeof(test_buf));
+  uv_fs_req_cleanup(req);
+  last_events_count = metrics.events;
+  ASSERT_EQ(0, uv_fs_close(uv_default_loop(),
+                           &fs_reqs.close_req,
+                           fs_reqs.open_req.result,
+                           close_cb));
+}
+
+static void create_cb(uv_fs_t* req) {
+  uv_metrics_t metrics;
+  ASSERT_EQ(0, uv_metrics_info(uv_default_loop(), &metrics));
+  /* Event count here is still 0 so not going to check. */
+  ASSERT_UINT64_EQ(1, metrics.loop_count);
+  ASSERT_GE(req->result, 0);
+  uv_fs_req_cleanup(req);
+  last_events_count = metrics.events;
+  uv_buf_t iov = uv_buf_init(test_buf, sizeof(test_buf));
+  ASSERT_EQ(0, uv_fs_write(uv_default_loop(),
+                           &fs_reqs.write_req,
+                           req->result,
+                           &iov,
+                           1,
+                           0,
+                           write_cb));
+}
+
+static void prepare_cb(uv_prepare_t* handle) {
+  uv_metrics_t metrics;
+  uv_prepare_stop(handle);
+  ASSERT_EQ(0, uv_metrics_info(uv_default_loop(), &metrics));
+  ASSERT_UINT64_EQ(0, metrics.loop_count);
+  ASSERT_UINT64_EQ(0, metrics.events);
+  ASSERT_EQ(0, uv_fs_open(uv_default_loop(),
+                          &fs_reqs.open_req,
+                          "test_file",
+                          O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR,
+                          create_cb));
+}
+
+TEST_IMPL(metrics_info_check) {
+  uv_fs_t unlink_req;
+  uv_prepare_t prepare;
+  uv_fs_unlink(NULL, &unlink_req, "test_file", NULL);
+  uv_fs_req_cleanup(&unlink_req);
+  ASSERT_EQ(0, uv_prepare_init(uv_default_loop(), &prepare));
+  ASSERT_EQ(0, uv_prepare_start(&prepare, prepare_cb));
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+  uv_fs_unlink(NULL, &unlink_req, "test_file", NULL);
+  uv_fs_req_cleanup(&unlink_req);
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
+  return 0;
+}
+
+static void fs_prepare_cb(uv_prepare_t* handle) {
+  uv_metrics_t metrics;
+  ASSERT_OK(uv_metrics_info(uv_default_loop(), &metrics));
+  if (pool_events_counter == 1)
+    ASSERT_EQ(metrics.events, metrics.events_waiting);
+  if (pool_events_counter < 7)
+    return;
+  uv_prepare_stop(handle);
+  pool_events_counter = -42;
+}
+
+static void fs_stat_cb(uv_fs_t* req) {
+  uv_fs_req_cleanup(req);
+  pool_events_counter++;
+}
+
+static void fs_work_cb(uv_work_t* req) {
+}
+
+static void fs_after_work_cb(uv_work_t* req, int status) {
+  free(req);
+  pool_events_counter++;
+}
+
+static void fs_write_cb(uv_fs_t* req) {
+  uv_work_t* work1 = malloc(sizeof(*work1));
+  uv_work_t* work2 = malloc(sizeof(*work2));
+  pool_events_counter++;
+  uv_fs_req_cleanup(req);
+  ASSERT_OK(uv_queue_work(uv_default_loop(),
+                          work1,
+                          fs_work_cb,
+                          fs_after_work_cb));
+  ASSERT_OK(uv_queue_work(uv_default_loop(),
+                          work2,
+                          fs_work_cb,
+                          fs_after_work_cb));
+}
+
+static void fs_random_cb(uv_random_t* req, int status, void* buf, size_t len) {
+  pool_events_counter++;
+}
+
+static void fs_addrinfo_cb(uv_getaddrinfo_t* req,
+                           int status,
+                           struct addrinfo* res) {
+  uv_freeaddrinfo(req->addrinfo);
+  pool_events_counter++;
+}
+
+TEST_IMPL(metrics_pool_events) {
+  uv_buf_t iov;
+  uv_fs_t open_req;
+  uv_fs_t stat1_req;
+  uv_fs_t stat2_req;
+  uv_fs_t unlink_req;
+  uv_fs_t write_req;
+  uv_getaddrinfo_t addrinfo_req;
+  uv_metrics_t metrics;
+  uv_prepare_t prepare;
+  uv_random_t random_req;
+  int fd;
+  char rdata;
+  ASSERT_OK(uv_loop_configure(uv_default_loop(), UV_METRICS_IDLE_TIME));
+  uv_fs_unlink(NULL, &unlink_req, "test_file", NULL);
+  uv_fs_req_cleanup(&unlink_req);
+  ASSERT_OK(uv_prepare_init(uv_default_loop(), &prepare));
+  ASSERT_OK(uv_prepare_start(&prepare, fs_prepare_cb));
+  pool_events_counter = 0;
+  fd = uv_fs_open(NULL,
+                  &open_req,
+                  "test_file",
+                  O_WRONLY | O_CREAT,
+                  S_IRUSR | S_IWUSR,
+                  NULL);
+  ASSERT_GT(fd, 0);
+  uv_fs_req_cleanup(&open_req);
+  iov = uv_buf_init(test_buf, sizeof(test_buf));
+  ASSERT_OK(uv_fs_write(uv_default_loop(),
+                        &write_req,
+                        fd,
+                        &iov,
+                        1,
+                        0,
+                        fs_write_cb));
+  ASSERT_OK(uv_fs_stat(uv_default_loop(),
+                       &stat1_req,
+                       "test_file",
+                       fs_stat_cb));
+  ASSERT_OK(uv_fs_stat(uv_default_loop(),
+                       &stat2_req,
+                       "test_file",
+                       fs_stat_cb));
+  ASSERT_OK(uv_random(uv_default_loop(),
+                      &random_req,
+                      &rdata,
+                      1,
+                      0,
+                      fs_random_cb));
+  ASSERT_OK(uv_getaddrinfo(uv_default_loop(),
+                           &addrinfo_req,
+                           fs_addrinfo_cb,
+                           "example.invalid",
+                           NULL,
+                           NULL));
+  /* Sleep for a moment to hopefully force the events to complete before
+   * entering the event loop. */
+  uv_sleep(100);
+  ASSERT_OK(uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+  ASSERT_OK(uv_metrics_info(uv_default_loop(), &metrics));
+  /* It's possible for uv__work_done() to execute one extra time even though
+   * the QUEUE has already been cleared out. This has to do with the way we
+   * use a uv_async to tell the event loop thread to process the worker pool
+   * QUEUE. */
+  ASSERT_GE(metrics.events, 7);
+  /* It's possible that one of the other events also got stuck in the event
+   * queue, so check GE instead of EQ. The threshold is 4 rather than 5 because
+   * the call to uv_getaddrinfo() is racy and slow, so it isn't guaranteed to
+   * finish before the sleep completes. */
+  ASSERT_GE(metrics.events_waiting, 4);
+  ASSERT_EQ(pool_events_counter, -42);
+  uv_fs_unlink(NULL, &unlink_req, "test_file", NULL);
+  uv_fs_req_cleanup(&unlink_req);
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
+  return 0;
+}
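
MAKE_VALGRIND_HAPPY() now takes the loop to tear down as an explicit argument. The macro comes from libuv's test harness; roughly speaking it closes whatever handles are still attached to the loop, asserts that uv_loop_close() then succeeds, and shuts the library down so valgrind sees no leaks. An illustrative sketch of that teardown pattern; the helper names here are made up, only the libuv calls are real:

static void walk_and_close_cb(uv_handle_t* handle, void* arg) {
  /* Ask every handle that is still open to close; no per-handle callback. */
  if (!uv_is_closing(handle))
    uv_close(handle, NULL);
}

static void teardown_loop(uv_loop_t* loop) {
  uv_walk(loop, walk_and_close_cb, NULL);  /* queue close for leftover handles */
  uv_run(loop, UV_RUN_DEFAULT);            /* let the close callbacks run */
  ASSERT_EQ(0, uv_loop_close(loop));       /* the loop must be empty now */
  uv_library_shutdown();                   /* release libuv's global state */
}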