libuv 1.46.0.
git-svn-id: https://www.unprompted.com/svn/projects/tildefriends/trunk@4336 ed5197a5-7fde-0310-b194-c3ffbd925b24

deps/libuv/src/queue.h (vendored) | 132
							| @@ -18,91 +18,73 @@ | ||||
|  | ||||
| #include <stddef.h> | ||||
|  | ||||
| typedef void *QUEUE[2]; | ||||
| #define uv__queue_data(pointer, type, field)                                  \ | ||||
|   ((type*) ((char*) (pointer) - offsetof(type, field))) | ||||
|  | ||||
| /* Private macros. */ | ||||
| #define QUEUE_NEXT(q)       (*(QUEUE **) &((*(q))[0])) | ||||
| #define QUEUE_PREV(q)       (*(QUEUE **) &((*(q))[1])) | ||||
| #define QUEUE_PREV_NEXT(q)  (QUEUE_NEXT(QUEUE_PREV(q))) | ||||
| #define QUEUE_NEXT_PREV(q)  (QUEUE_PREV(QUEUE_NEXT(q))) | ||||
| #define uv__queue_foreach(q, h)                                               \ | ||||
|   for ((q) = (h)->next; (q) != (h); (q) = (q)->next) | ||||
|  | ||||
| /* Public macros. */ | ||||
| #define QUEUE_DATA(ptr, type, field)                                          \ | ||||
|   ((type *) ((char *) (ptr) - offsetof(type, field))) | ||||
| static inline void uv__queue_init(struct uv__queue* q) { | ||||
|   q->next = q; | ||||
|   q->prev = q; | ||||
| } | ||||
|  | ||||
| /* Important note: mutating the list while QUEUE_FOREACH is | ||||
|  * iterating over its elements results in undefined behavior. | ||||
|  */ | ||||
| #define QUEUE_FOREACH(q, h)                                                   \ | ||||
|   for ((q) = QUEUE_NEXT(h); (q) != (h); (q) = QUEUE_NEXT(q)) | ||||
| static inline int uv__queue_empty(const struct uv__queue* q) { | ||||
|   return q == q->next; | ||||
| } | ||||
|  | ||||
| #define QUEUE_EMPTY(q)                                                        \ | ||||
|   ((const QUEUE *) (q) == (const QUEUE *) QUEUE_NEXT(q)) | ||||
| static inline struct uv__queue* uv__queue_head(const struct uv__queue* q) { | ||||
|   return q->next; | ||||
| } | ||||
|  | ||||
| #define QUEUE_HEAD(q)                                                         \ | ||||
|   (QUEUE_NEXT(q)) | ||||
| static inline struct uv__queue* uv__queue_next(const struct uv__queue* q) { | ||||
|   return q->next; | ||||
| } | ||||
|  | ||||
| #define QUEUE_INIT(q)                                                         \ | ||||
|   do {                                                                        \ | ||||
|     QUEUE_NEXT(q) = (q);                                                      \ | ||||
|     QUEUE_PREV(q) = (q);                                                      \ | ||||
|   }                                                                           \ | ||||
|   while (0) | ||||
| static inline void uv__queue_add(struct uv__queue* h, struct uv__queue* n) { | ||||
|   h->prev->next = n->next; | ||||
|   n->next->prev = h->prev; | ||||
|   h->prev = n->prev; | ||||
|   h->prev->next = h; | ||||
| } | ||||
|  | ||||
| #define QUEUE_ADD(h, n)                                                       \ | ||||
|   do {                                                                        \ | ||||
|     QUEUE_PREV_NEXT(h) = QUEUE_NEXT(n);                                       \ | ||||
|     QUEUE_NEXT_PREV(n) = QUEUE_PREV(h);                                       \ | ||||
|     QUEUE_PREV(h) = QUEUE_PREV(n);                                            \ | ||||
|     QUEUE_PREV_NEXT(h) = (h);                                                 \ | ||||
|   }                                                                           \ | ||||
|   while (0) | ||||
| static inline void uv__queue_split(struct uv__queue* h, | ||||
|                                    struct uv__queue* q, | ||||
|                                    struct uv__queue* n) { | ||||
|   n->prev = h->prev; | ||||
|   n->prev->next = n; | ||||
|   n->next = q; | ||||
|   h->prev = q->prev; | ||||
|   h->prev->next = h; | ||||
|   q->prev = n; | ||||
| } | ||||
|  | ||||
| #define QUEUE_SPLIT(h, q, n)                                                  \ | ||||
|   do {                                                                        \ | ||||
|     QUEUE_PREV(n) = QUEUE_PREV(h);                                            \ | ||||
|     QUEUE_PREV_NEXT(n) = (n);                                                 \ | ||||
|     QUEUE_NEXT(n) = (q);                                                      \ | ||||
|     QUEUE_PREV(h) = QUEUE_PREV(q);                                            \ | ||||
|     QUEUE_PREV_NEXT(h) = (h);                                                 \ | ||||
|     QUEUE_PREV(q) = (n);                                                      \ | ||||
|   }                                                                           \ | ||||
|   while (0) | ||||
| static inline void uv__queue_move(struct uv__queue* h, struct uv__queue* n) { | ||||
|   if (uv__queue_empty(h)) | ||||
|     uv__queue_init(n); | ||||
|   else | ||||
|     uv__queue_split(h, h->next, n); | ||||
| } | ||||
|  | ||||
| #define QUEUE_MOVE(h, n)                                                      \ | ||||
|   do {                                                                        \ | ||||
|     if (QUEUE_EMPTY(h))                                                       \ | ||||
|       QUEUE_INIT(n);                                                          \ | ||||
|     else {                                                                    \ | ||||
|       QUEUE* q = QUEUE_HEAD(h);                                               \ | ||||
|       QUEUE_SPLIT(h, q, n);                                                   \ | ||||
|     }                                                                         \ | ||||
|   }                                                                           \ | ||||
|   while (0) | ||||
| static inline void uv__queue_insert_head(struct uv__queue* h, | ||||
|                                          struct uv__queue* q) { | ||||
|   q->next = h->next; | ||||
|   q->prev = h; | ||||
|   q->next->prev = q; | ||||
|   h->next = q; | ||||
| } | ||||
|  | ||||
| #define QUEUE_INSERT_HEAD(h, q)                                               \ | ||||
|   do {                                                                        \ | ||||
|     QUEUE_NEXT(q) = QUEUE_NEXT(h);                                            \ | ||||
|     QUEUE_PREV(q) = (h);                                                      \ | ||||
|     QUEUE_NEXT_PREV(q) = (q);                                                 \ | ||||
|     QUEUE_NEXT(h) = (q);                                                      \ | ||||
|   }                                                                           \ | ||||
|   while (0) | ||||
| static inline void uv__queue_insert_tail(struct uv__queue* h, | ||||
|                                          struct uv__queue* q) { | ||||
|   q->next = h; | ||||
|   q->prev = h->prev; | ||||
|   q->prev->next = q; | ||||
|   h->prev = q; | ||||
| } | ||||
|  | ||||
| #define QUEUE_INSERT_TAIL(h, q)                                               \ | ||||
|   do {                                                                        \ | ||||
|     QUEUE_NEXT(q) = (h);                                                      \ | ||||
|     QUEUE_PREV(q) = QUEUE_PREV(h);                                            \ | ||||
|     QUEUE_PREV_NEXT(q) = (q);                                                 \ | ||||
|     QUEUE_PREV(h) = (q);                                                      \ | ||||
|   }                                                                           \ | ||||
|   while (0) | ||||
|  | ||||
| #define QUEUE_REMOVE(q)                                                       \ | ||||
|   do {                                                                        \ | ||||
|     QUEUE_PREV_NEXT(q) = QUEUE_NEXT(q);                                       \ | ||||
|     QUEUE_NEXT_PREV(q) = QUEUE_PREV(q);                                       \ | ||||
|   }                                                                           \ | ||||
|   while (0) | ||||
| static inline void uv__queue_remove(struct uv__queue* q) { | ||||
|   q->prev->next = q->next; | ||||
|   q->next->prev = q->prev; | ||||
| } | ||||
|  | ||||
| #endif /* QUEUE_H_ */ | ||||
|   | ||||
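The queue.h rewrite above is the backbone of this release: the old `typedef void *QUEUE[2]` macro pack becomes a plain `struct uv__queue` with inline functions, giving the compiler and debugger real types to work with. Below is a minimal sketch of how the intrusive list is consumed; the queue declarations mirror the new header, while `work_item` and `main` are made up for illustration.

```c
#include <stddef.h>
#include <stdio.h>

struct uv__queue {
  struct uv__queue* next;
  struct uv__queue* prev;
};

/* Recover the struct that embeds a given link (mirrors the new header). */
#define uv__queue_data(pointer, type, field)                                  \
  ((type*) ((char*) (pointer) - offsetof(type, field)))

#define uv__queue_foreach(q, h)                                               \
  for ((q) = (h)->next; (q) != (h); (q) = (q)->next)

static void uv__queue_init(struct uv__queue* q) {
  q->next = q;
  q->prev = q;
}

static void uv__queue_insert_tail(struct uv__queue* h, struct uv__queue* q) {
  q->next = h;
  q->prev = h->prev;
  q->prev->next = q;
  h->prev = q;
}

struct work_item {
  int id;
  struct uv__queue wq;  /* Intrusive link; no separate node allocation. */
};

int main(void) {
  struct uv__queue head;
  struct uv__queue* q;
  struct work_item a = {1, {0, 0}};
  struct work_item b = {2, {0, 0}};

  uv__queue_init(&head);
  uv__queue_insert_tail(&head, &a.wq);
  uv__queue_insert_tail(&head, &b.wq);

  /* Walk the links, mapping each back to its containing work_item. */
  uv__queue_foreach(q, &head)
    printf("%d\n", uv__queue_data(q, struct work_item, wq)->id);
  return 0;
}
```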
							
								
								
									
deps/libuv/src/threadpool.c (vendored) | 72
							| @@ -37,10 +37,10 @@ static unsigned int slow_io_work_running; | ||||
| static unsigned int nthreads; | ||||
| static uv_thread_t* threads; | ||||
| static uv_thread_t default_threads[4]; | ||||
| static QUEUE exit_message; | ||||
| static QUEUE wq; | ||||
| static QUEUE run_slow_work_message; | ||||
| static QUEUE slow_io_pending_wq; | ||||
| static struct uv__queue exit_message; | ||||
| static struct uv__queue wq; | ||||
| static struct uv__queue run_slow_work_message; | ||||
| static struct uv__queue slow_io_pending_wq; | ||||
|  | ||||
| static unsigned int slow_work_thread_threshold(void) { | ||||
|   return (nthreads + 1) / 2; | ||||
| @@ -56,7 +56,7 @@ static void uv__cancelled(struct uv__work* w) { | ||||
|  */ | ||||
| static void worker(void* arg) { | ||||
|   struct uv__work* w; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   int is_slow_work; | ||||
|  | ||||
|   uv_sem_post((uv_sem_t*) arg); | ||||
| @@ -68,49 +68,49 @@ static void worker(void* arg) { | ||||
|  | ||||
|     /* Keep waiting while either no work is present or only slow I/O | ||||
|        and we're at the threshold for that. */ | ||||
|     while (QUEUE_EMPTY(&wq) || | ||||
|            (QUEUE_HEAD(&wq) == &run_slow_work_message && | ||||
|             QUEUE_NEXT(&run_slow_work_message) == &wq && | ||||
|     while (uv__queue_empty(&wq) || | ||||
|            (uv__queue_head(&wq) == &run_slow_work_message && | ||||
|             uv__queue_next(&run_slow_work_message) == &wq && | ||||
|             slow_io_work_running >= slow_work_thread_threshold())) { | ||||
|       idle_threads += 1; | ||||
|       uv_cond_wait(&cond, &mutex); | ||||
|       idle_threads -= 1; | ||||
|     } | ||||
|  | ||||
|     q = QUEUE_HEAD(&wq); | ||||
|     q = uv__queue_head(&wq); | ||||
|     if (q == &exit_message) { | ||||
|       uv_cond_signal(&cond); | ||||
|       uv_mutex_unlock(&mutex); | ||||
|       break; | ||||
|     } | ||||
|  | ||||
|     QUEUE_REMOVE(q); | ||||
|     QUEUE_INIT(q);  /* Signal uv_cancel() that the work req is executing. */ | ||||
|     uv__queue_remove(q); | ||||
|     uv__queue_init(q);  /* Signal uv_cancel() that the work req is executing. */ | ||||
|  | ||||
|     is_slow_work = 0; | ||||
|     if (q == &run_slow_work_message) { | ||||
|       /* If we're at the slow I/O threshold, re-schedule until after all | ||||
|          other work in the queue is done. */ | ||||
|       if (slow_io_work_running >= slow_work_thread_threshold()) { | ||||
|         QUEUE_INSERT_TAIL(&wq, q); | ||||
|         uv__queue_insert_tail(&wq, q); | ||||
|         continue; | ||||
|       } | ||||
|  | ||||
|       /* If we encountered a request to run slow I/O work but there is none | ||||
|          to run, that means it's cancelled => Start over. */ | ||||
|       if (QUEUE_EMPTY(&slow_io_pending_wq)) | ||||
|       if (uv__queue_empty(&slow_io_pending_wq)) | ||||
|         continue; | ||||
|  | ||||
|       is_slow_work = 1; | ||||
|       slow_io_work_running++; | ||||
|  | ||||
|       q = QUEUE_HEAD(&slow_io_pending_wq); | ||||
|       QUEUE_REMOVE(q); | ||||
|       QUEUE_INIT(q); | ||||
|       q = uv__queue_head(&slow_io_pending_wq); | ||||
|       uv__queue_remove(q); | ||||
|       uv__queue_init(q); | ||||
|  | ||||
|       /* If there is more slow I/O work, schedule it to be run as well. */ | ||||
|       if (!QUEUE_EMPTY(&slow_io_pending_wq)) { | ||||
|         QUEUE_INSERT_TAIL(&wq, &run_slow_work_message); | ||||
|       if (!uv__queue_empty(&slow_io_pending_wq)) { | ||||
|         uv__queue_insert_tail(&wq, &run_slow_work_message); | ||||
|         if (idle_threads > 0) | ||||
|           uv_cond_signal(&cond); | ||||
|       } | ||||
| @@ -118,13 +118,13 @@ static void worker(void* arg) { | ||||
|  | ||||
|     uv_mutex_unlock(&mutex); | ||||
|  | ||||
|     w = QUEUE_DATA(q, struct uv__work, wq); | ||||
|     w = uv__queue_data(q, struct uv__work, wq); | ||||
|     w->work(w); | ||||
|  | ||||
|     uv_mutex_lock(&w->loop->wq_mutex); | ||||
|     w->work = NULL;  /* Signal uv_cancel() that the work req is done | ||||
|                         executing. */ | ||||
|     QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq); | ||||
|     uv__queue_insert_tail(&w->loop->wq, &w->wq); | ||||
|     uv_async_send(&w->loop->wq_async); | ||||
|     uv_mutex_unlock(&w->loop->wq_mutex); | ||||
|  | ||||
| @@ -139,12 +139,12 @@ static void worker(void* arg) { | ||||
| } | ||||
|  | ||||
|  | ||||
| static void post(QUEUE* q, enum uv__work_kind kind) { | ||||
| static void post(struct uv__queue* q, enum uv__work_kind kind) { | ||||
|   uv_mutex_lock(&mutex); | ||||
|   if (kind == UV__WORK_SLOW_IO) { | ||||
|     /* Insert into a separate queue. */ | ||||
|     QUEUE_INSERT_TAIL(&slow_io_pending_wq, q); | ||||
|     if (!QUEUE_EMPTY(&run_slow_work_message)) { | ||||
|     uv__queue_insert_tail(&slow_io_pending_wq, q); | ||||
|     if (!uv__queue_empty(&run_slow_work_message)) { | ||||
|       /* Running slow I/O tasks is already scheduled => Nothing to do here. | ||||
|          The worker that runs said other task will schedule this one as well. */ | ||||
|       uv_mutex_unlock(&mutex); | ||||
| @@ -153,7 +153,7 @@ static void post(QUEUE* q, enum uv__work_kind kind) { | ||||
|     q = &run_slow_work_message; | ||||
|   } | ||||
|  | ||||
|   QUEUE_INSERT_TAIL(&wq, q); | ||||
|   uv__queue_insert_tail(&wq, q); | ||||
|   if (idle_threads > 0) | ||||
|     uv_cond_signal(&cond); | ||||
|   uv_mutex_unlock(&mutex); | ||||
| @@ -220,9 +220,9 @@ static void init_threads(void) { | ||||
|   if (uv_mutex_init(&mutex)) | ||||
|     abort(); | ||||
|  | ||||
|   QUEUE_INIT(&wq); | ||||
|   QUEUE_INIT(&slow_io_pending_wq); | ||||
|   QUEUE_INIT(&run_slow_work_message); | ||||
|   uv__queue_init(&wq); | ||||
|   uv__queue_init(&slow_io_pending_wq); | ||||
|   uv__queue_init(&run_slow_work_message); | ||||
|  | ||||
|   if (uv_sem_init(&sem, 0)) | ||||
|     abort(); | ||||
| @@ -285,9 +285,9 @@ static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) { | ||||
|   uv_mutex_lock(&mutex); | ||||
|   uv_mutex_lock(&w->loop->wq_mutex); | ||||
|  | ||||
|   cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL; | ||||
|   cancelled = !uv__queue_empty(&w->wq) && w->work != NULL; | ||||
|   if (cancelled) | ||||
|     QUEUE_REMOVE(&w->wq); | ||||
|     uv__queue_remove(&w->wq); | ||||
|  | ||||
|   uv_mutex_unlock(&w->loop->wq_mutex); | ||||
|   uv_mutex_unlock(&mutex); | ||||
| @@ -297,7 +297,7 @@ static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) { | ||||
|  | ||||
|   w->work = uv__cancelled; | ||||
|   uv_mutex_lock(&loop->wq_mutex); | ||||
|   QUEUE_INSERT_TAIL(&loop->wq, &w->wq); | ||||
|   uv__queue_insert_tail(&loop->wq, &w->wq); | ||||
|   uv_async_send(&loop->wq_async); | ||||
|   uv_mutex_unlock(&loop->wq_mutex); | ||||
|  | ||||
| @@ -308,21 +308,21 @@ static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) { | ||||
| void uv__work_done(uv_async_t* handle) { | ||||
|   struct uv__work* w; | ||||
|   uv_loop_t* loop; | ||||
|   QUEUE* q; | ||||
|   QUEUE wq; | ||||
|   struct uv__queue* q; | ||||
|   struct uv__queue wq; | ||||
|   int err; | ||||
|   int nevents; | ||||
|  | ||||
|   loop = container_of(handle, uv_loop_t, wq_async); | ||||
|   uv_mutex_lock(&loop->wq_mutex); | ||||
|   QUEUE_MOVE(&loop->wq, &wq); | ||||
|   uv__queue_move(&loop->wq, &wq); | ||||
|   uv_mutex_unlock(&loop->wq_mutex); | ||||
|  | ||||
|   nevents = 0; | ||||
|  | ||||
|   while (!QUEUE_EMPTY(&wq)) { | ||||
|     q = QUEUE_HEAD(&wq); | ||||
|     QUEUE_REMOVE(q); | ||||
|   while (!uv__queue_empty(&wq)) { | ||||
|     q = uv__queue_head(&wq); | ||||
|     uv__queue_remove(q); | ||||
|  | ||||
|     w = container_of(q, struct uv__work, wq); | ||||
|     err = (w->work == uv__cancelled) ? UV_ECANCELED : 0; | ||||
|   | ||||
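uv__work_done() above also shows the idiom these helpers serve: detach the whole completion list in O(1) with uv__queue_move() while holding the lock, then drain it unlocked, so worker threads posting new completions never block on user callbacks. A compressed sketch of that pattern follows, using simplified equivalents of the queue helpers and a stand-in process() callback; none of it is libuv API.

```c
#include <pthread.h>

struct uv__queue { struct uv__queue *next, *prev; };

static void uv__queue_init(struct uv__queue* q) { q->next = q->prev = q; }
static int uv__queue_empty(const struct uv__queue* q) { return q == q->next; }

/* Detach all of h's entries into n in O(1); h ends up empty. */
static void uv__queue_move(struct uv__queue* h, struct uv__queue* n) {
  if (uv__queue_empty(h)) {
    uv__queue_init(n);
  } else {
    n->prev = h->prev;
    n->next = h->next;
    n->prev->next = n;
    n->next->prev = n;
    uv__queue_init(h);
  }
}

static void uv__queue_remove(struct uv__queue* q) {
  q->prev->next = q->next;
  q->next->prev = q->prev;
}

static pthread_mutex_t wq_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct uv__queue wq = { &wq, &wq };

/* Stand-in for the completion callback a real consumer would run. */
static void process(struct uv__queue* q) { (void) q; }

void drain(void) {
  struct uv__queue tmp;
  struct uv__queue* q;

  pthread_mutex_lock(&wq_mutex);
  uv__queue_move(&wq, &tmp);  /* O(1); producers can enqueue again at once. */
  pthread_mutex_unlock(&wq_mutex);

  while (!uv__queue_empty(&tmp)) {
    q = tmp.next;
    uv__queue_remove(q);
    process(q);               /* Runs without holding the mutex. */
  }
}
```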
							
								
								
									
deps/libuv/src/unix/aix.c (vendored) | 14
							| @@ -136,7 +136,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||||
|   struct pollfd pqry; | ||||
|   struct pollfd* pe; | ||||
|   struct poll_ctl pc; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   uv__io_t* w; | ||||
|   uint64_t base; | ||||
|   uint64_t diff; | ||||
| @@ -151,18 +151,18 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||||
|   int reset_timeout; | ||||
|  | ||||
|   if (loop->nfds == 0) { | ||||
|     assert(QUEUE_EMPTY(&loop->watcher_queue)); | ||||
|     assert(uv__queue_empty(&loop->watcher_queue)); | ||||
|     return; | ||||
|   } | ||||
|  | ||||
|   lfields = uv__get_internal_fields(loop); | ||||
|  | ||||
|   while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||||
|     q = QUEUE_HEAD(&loop->watcher_queue); | ||||
|     QUEUE_REMOVE(q); | ||||
|     QUEUE_INIT(q); | ||||
|   while (!uv__queue_empty(&loop->watcher_queue)) { | ||||
|     q = uv__queue_head(&loop->watcher_queue); | ||||
|     uv__queue_remove(q); | ||||
|     uv__queue_init(q); | ||||
|  | ||||
|     w = QUEUE_DATA(q, uv__io_t, watcher_queue); | ||||
|     w = uv__queue_data(q, uv__io_t, watcher_queue); | ||||
|     assert(w->pevents != 0); | ||||
|     assert(w->fd >= 0); | ||||
|     assert(w->fd < (int) loop->nwatchers); | ||||
|   | ||||
							
								
								
									
deps/libuv/src/unix/async.c (vendored) | 52
							| @@ -55,7 +55,7 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) { | ||||
|   handle->pending = 0; | ||||
|   handle->u.fd = 0; /* This will be used as a busy flag. */ | ||||
|  | ||||
|   QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue); | ||||
|   uv__queue_insert_tail(&loop->async_handles, &handle->queue); | ||||
|   uv__handle_start(handle); | ||||
|  | ||||
|   return 0; | ||||
| @@ -124,7 +124,7 @@ static void uv__async_spin(uv_async_t* handle) { | ||||
|  | ||||
| void uv__async_close(uv_async_t* handle) { | ||||
|   uv__async_spin(handle); | ||||
|   QUEUE_REMOVE(&handle->queue); | ||||
|   uv__queue_remove(&handle->queue); | ||||
|   uv__handle_stop(handle); | ||||
| } | ||||
|  | ||||
| @@ -132,8 +132,8 @@ void uv__async_close(uv_async_t* handle) { | ||||
| static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { | ||||
|   char buf[1024]; | ||||
|   ssize_t r; | ||||
|   QUEUE queue; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue queue; | ||||
|   struct uv__queue* q; | ||||
|   uv_async_t* h; | ||||
|   _Atomic int *pending; | ||||
|  | ||||
| @@ -157,13 +157,13 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { | ||||
|     abort(); | ||||
|   } | ||||
|  | ||||
|   QUEUE_MOVE(&loop->async_handles, &queue); | ||||
|   while (!QUEUE_EMPTY(&queue)) { | ||||
|     q = QUEUE_HEAD(&queue); | ||||
|     h = QUEUE_DATA(q, uv_async_t, queue); | ||||
|   uv__queue_move(&loop->async_handles, &queue); | ||||
|   while (!uv__queue_empty(&queue)) { | ||||
|     q = uv__queue_head(&queue); | ||||
|     h = uv__queue_data(q, uv_async_t, queue); | ||||
|  | ||||
|     QUEUE_REMOVE(q); | ||||
|     QUEUE_INSERT_TAIL(&loop->async_handles, q); | ||||
|     uv__queue_remove(q); | ||||
|     uv__queue_insert_tail(&loop->async_handles, q); | ||||
|  | ||||
|     /* Atomically fetch and clear pending flag */ | ||||
|     pending = (_Atomic int*) &h->pending; | ||||
| @@ -241,8 +241,8 @@ static int uv__async_start(uv_loop_t* loop) { | ||||
|  | ||||
|  | ||||
| void uv__async_stop(uv_loop_t* loop) { | ||||
|   QUEUE queue; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue queue; | ||||
|   struct uv__queue* q; | ||||
|   uv_async_t* h; | ||||
|  | ||||
|   if (loop->async_io_watcher.fd == -1) | ||||
| @@ -251,13 +251,13 @@ void uv__async_stop(uv_loop_t* loop) { | ||||
|   /* Make sure no other thread is accessing the async handle fd after the loop | ||||
|    * cleanup. | ||||
|    */ | ||||
|   QUEUE_MOVE(&loop->async_handles, &queue); | ||||
|   while (!QUEUE_EMPTY(&queue)) { | ||||
|     q = QUEUE_HEAD(&queue); | ||||
|     h = QUEUE_DATA(q, uv_async_t, queue); | ||||
|   uv__queue_move(&loop->async_handles, &queue); | ||||
|   while (!uv__queue_empty(&queue)) { | ||||
|     q = uv__queue_head(&queue); | ||||
|     h = uv__queue_data(q, uv_async_t, queue); | ||||
|  | ||||
|     QUEUE_REMOVE(q); | ||||
|     QUEUE_INSERT_TAIL(&loop->async_handles, q); | ||||
|     uv__queue_remove(q); | ||||
|     uv__queue_insert_tail(&loop->async_handles, q); | ||||
|  | ||||
|     uv__async_spin(h); | ||||
|   } | ||||
| @@ -275,20 +275,20 @@ void uv__async_stop(uv_loop_t* loop) { | ||||
|  | ||||
|  | ||||
| int uv__async_fork(uv_loop_t* loop) { | ||||
|   QUEUE queue; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue queue; | ||||
|   struct uv__queue* q; | ||||
|   uv_async_t* h; | ||||
|  | ||||
|   if (loop->async_io_watcher.fd == -1) /* never started */ | ||||
|     return 0; | ||||
|  | ||||
|   QUEUE_MOVE(&loop->async_handles, &queue); | ||||
|   while (!QUEUE_EMPTY(&queue)) { | ||||
|     q = QUEUE_HEAD(&queue); | ||||
|     h = QUEUE_DATA(q, uv_async_t, queue); | ||||
|   uv__queue_move(&loop->async_handles, &queue); | ||||
|   while (!uv__queue_empty(&queue)) { | ||||
|     q = uv__queue_head(&queue); | ||||
|     h = uv__queue_data(q, uv_async_t, queue); | ||||
|  | ||||
|     QUEUE_REMOVE(q); | ||||
|     QUEUE_INSERT_TAIL(&loop->async_handles, q); | ||||
|     uv__queue_remove(q); | ||||
|     uv__queue_insert_tail(&loop->async_handles, q); | ||||
|  | ||||
|     /* The state of any thread that set pending is now likely corrupt in this | ||||
|      * child because the user called fork, so just clear these flags and move | ||||
|   | ||||
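The wakeup handling above hinges on atomically swapping each handle's pending flag so that a uv_async_send() racing the callback is never lost. Here is a small sketch of that fetch-and-clear idiom with C11 atomics; `fake_async`, `send_wakeup()` and `dispatch()` are illustrative names, not libuv API.

```c
#include <stdatomic.h>

struct fake_async {
  _Atomic int pending;
  void (*cb)(struct fake_async*);
};

/* Producer side: only the 0 -> 1 transition needs to wake the loop. */
void send_wakeup(struct fake_async* h) {
  if (atomic_exchange(&h->pending, 1) == 0) {
    /* ... write to the wakeup fd / signal the event loop here ... */
  }
}

/* Loop side: swap the flag to 0 and run the callback only if it was set.
 * A send_wakeup() landing after the exchange sets the flag again, so the
 * next iteration still observes it: no wakeup is dropped. */
void dispatch(struct fake_async* h) {
  if (atomic_exchange(&h->pending, 0) != 0)
    h->cb(h);
}
```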
							
								
								
									
deps/libuv/src/unix/core.c (vendored) | 67
							| @@ -344,7 +344,7 @@ static void uv__finish_close(uv_handle_t* handle) { | ||||
|   } | ||||
|  | ||||
|   uv__handle_unref(handle); | ||||
|   QUEUE_REMOVE(&handle->handle_queue); | ||||
|   uv__queue_remove(&handle->handle_queue); | ||||
|  | ||||
|   if (handle->close_cb) { | ||||
|     handle->close_cb(handle); | ||||
| @@ -380,7 +380,7 @@ int uv_backend_fd(const uv_loop_t* loop) { | ||||
| static int uv__loop_alive(const uv_loop_t* loop) { | ||||
|   return uv__has_active_handles(loop) || | ||||
|          uv__has_active_reqs(loop) || | ||||
|          !QUEUE_EMPTY(&loop->pending_queue) || | ||||
|          !uv__queue_empty(&loop->pending_queue) || | ||||
|          loop->closing_handles != NULL; | ||||
| } | ||||
|  | ||||
| @@ -389,8 +389,8 @@ static int uv__backend_timeout(const uv_loop_t* loop) { | ||||
|   if (loop->stop_flag == 0 && | ||||
|       /* uv__loop_alive(loop) && */ | ||||
|       (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) && | ||||
|       QUEUE_EMPTY(&loop->pending_queue) && | ||||
|       QUEUE_EMPTY(&loop->idle_handles) && | ||||
|       uv__queue_empty(&loop->pending_queue) && | ||||
|       uv__queue_empty(&loop->idle_handles) && | ||||
|       (loop->flags & UV_LOOP_REAP_CHILDREN) == 0 && | ||||
|       loop->closing_handles == NULL) | ||||
|     return uv__next_timeout(loop); | ||||
| @@ -399,7 +399,7 @@ static int uv__backend_timeout(const uv_loop_t* loop) { | ||||
|  | ||||
|  | ||||
| int uv_backend_timeout(const uv_loop_t* loop) { | ||||
|   if (QUEUE_EMPTY(&loop->watcher_queue)) | ||||
|   if (uv__queue_empty(&loop->watcher_queue)) | ||||
|     return uv__backend_timeout(loop); | ||||
|   /* Need to call uv_run to update the backend fd state. */ | ||||
|   return 0; | ||||
| @@ -424,15 +424,15 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) { | ||||
|    * while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed | ||||
|    * once, which should be done after polling in order to maintain proper | ||||
|    * execution order of the conceptual event loop. */ | ||||
|   if (mode == UV_RUN_DEFAULT) { | ||||
|     if (r) | ||||
|       uv__update_time(loop); | ||||
|   if (mode == UV_RUN_DEFAULT && r != 0 && loop->stop_flag == 0) { | ||||
|     uv__update_time(loop); | ||||
|     uv__run_timers(loop); | ||||
|   } | ||||
|  | ||||
|   while (r != 0 && loop->stop_flag == 0) { | ||||
|     can_sleep = | ||||
|         QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->idle_handles); | ||||
|         uv__queue_empty(&loop->pending_queue) && | ||||
|         uv__queue_empty(&loop->idle_handles); | ||||
|  | ||||
|     uv__run_pending(loop); | ||||
|     uv__run_idle(loop); | ||||
| @@ -448,7 +448,7 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) { | ||||
|  | ||||
|     /* Process immediate callbacks (e.g. write_cb) a small fixed number of | ||||
|      * times to avoid loop starvation.*/ | ||||
|     for (r = 0; r < 8 && !QUEUE_EMPTY(&loop->pending_queue); r++) | ||||
|     for (r = 0; r < 8 && !uv__queue_empty(&loop->pending_queue); r++) | ||||
|       uv__run_pending(loop); | ||||
|  | ||||
|     /* Run one final update on the provider_idle_time in case uv__io_poll | ||||
| @@ -827,17 +827,17 @@ int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) { | ||||
|  | ||||
|  | ||||
| static void uv__run_pending(uv_loop_t* loop) { | ||||
|   QUEUE* q; | ||||
|   QUEUE pq; | ||||
|   struct uv__queue* q; | ||||
|   struct uv__queue pq; | ||||
|   uv__io_t* w; | ||||
|  | ||||
|   QUEUE_MOVE(&loop->pending_queue, &pq); | ||||
|   uv__queue_move(&loop->pending_queue, &pq); | ||||
|  | ||||
|   while (!QUEUE_EMPTY(&pq)) { | ||||
|     q = QUEUE_HEAD(&pq); | ||||
|     QUEUE_REMOVE(q); | ||||
|     QUEUE_INIT(q); | ||||
|     w = QUEUE_DATA(q, uv__io_t, pending_queue); | ||||
|   while (!uv__queue_empty(&pq)) { | ||||
|     q = uv__queue_head(&pq); | ||||
|     uv__queue_remove(q); | ||||
|     uv__queue_init(q); | ||||
|     w = uv__queue_data(q, uv__io_t, pending_queue); | ||||
|     w->cb(loop, w, POLLOUT); | ||||
|   } | ||||
| } | ||||
| @@ -892,8 +892,8 @@ static void maybe_resize(uv_loop_t* loop, unsigned int len) { | ||||
| void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) { | ||||
|   assert(cb != NULL); | ||||
|   assert(fd >= -1); | ||||
|   QUEUE_INIT(&w->pending_queue); | ||||
|   QUEUE_INIT(&w->watcher_queue); | ||||
|   uv__queue_init(&w->pending_queue); | ||||
|   uv__queue_init(&w->watcher_queue); | ||||
|   w->cb = cb; | ||||
|   w->fd = fd; | ||||
|   w->events = 0; | ||||
| @@ -919,8 +919,8 @@ void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) { | ||||
|     return; | ||||
| #endif | ||||
|  | ||||
|   if (QUEUE_EMPTY(&w->watcher_queue)) | ||||
|     QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); | ||||
|   if (uv__queue_empty(&w->watcher_queue)) | ||||
|     uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue); | ||||
|  | ||||
|   if (loop->watchers[w->fd] == NULL) { | ||||
|     loop->watchers[w->fd] = w; | ||||
| @@ -945,8 +945,8 @@ void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) { | ||||
|   w->pevents &= ~events; | ||||
|  | ||||
|   if (w->pevents == 0) { | ||||
|     QUEUE_REMOVE(&w->watcher_queue); | ||||
|     QUEUE_INIT(&w->watcher_queue); | ||||
|     uv__queue_remove(&w->watcher_queue); | ||||
|     uv__queue_init(&w->watcher_queue); | ||||
|     w->events = 0; | ||||
|  | ||||
|     if (w == loop->watchers[w->fd]) { | ||||
| @@ -955,14 +955,14 @@ void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) { | ||||
|       loop->nfds--; | ||||
|     } | ||||
|   } | ||||
|   else if (QUEUE_EMPTY(&w->watcher_queue)) | ||||
|     QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); | ||||
|   else if (uv__queue_empty(&w->watcher_queue)) | ||||
|     uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue); | ||||
| } | ||||
|  | ||||
|  | ||||
| void uv__io_close(uv_loop_t* loop, uv__io_t* w) { | ||||
|   uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI); | ||||
|   QUEUE_REMOVE(&w->pending_queue); | ||||
|   uv__queue_remove(&w->pending_queue); | ||||
|  | ||||
|   /* Remove stale events for this file descriptor */ | ||||
|   if (w->fd != -1) | ||||
| @@ -971,8 +971,8 @@ void uv__io_close(uv_loop_t* loop, uv__io_t* w) { | ||||
|  | ||||
|  | ||||
| void uv__io_feed(uv_loop_t* loop, uv__io_t* w) { | ||||
|   if (QUEUE_EMPTY(&w->pending_queue)) | ||||
|     QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue); | ||||
|   if (uv__queue_empty(&w->pending_queue)) | ||||
|     uv__queue_insert_tail(&loop->pending_queue, &w->pending_queue); | ||||
| } | ||||
|  | ||||
|  | ||||
| @@ -1020,8 +1020,8 @@ int uv_getrusage(uv_rusage_t* rusage) { | ||||
|   /* Most platforms report ru_maxrss in kilobytes; macOS and Solaris are | ||||
|    * the outliers because of course they are. | ||||
|    */ | ||||
| #if defined(__APPLE__) && !TARGET_OS_IPHONE | ||||
|   rusage->ru_maxrss /= 1024;                  /* macOS reports bytes. */ | ||||
| #if defined(__APPLE__) | ||||
|   rusage->ru_maxrss /= 1024;                  /* macOS and iOS report bytes. */ | ||||
| #elif defined(__sun) | ||||
|   rusage->ru_maxrss /= getpagesize() / 1024;  /* Solaris reports pages. */ | ||||
| #endif | ||||
| @@ -1271,6 +1271,10 @@ static int uv__getpwuid_r(uv_passwd_t *pwd, uid_t uid) { | ||||
|  | ||||
|  | ||||
| int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) { | ||||
| #if defined(__ANDROID__) && __ANDROID_API__ < 24 | ||||
|   /* This function getgrgid_r() was added in Android N (level 24) */ | ||||
|   return UV_ENOSYS; | ||||
| #else | ||||
|   struct group gp; | ||||
|   struct group* result; | ||||
|   char* buf; | ||||
| @@ -1347,6 +1351,7 @@ int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) { | ||||
|   uv__free(buf); | ||||
|  | ||||
|   return 0; | ||||
| #endif | ||||
| } | ||||
|  | ||||
|  | ||||
|   | ||||
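One small fix above widens the Apple branch of uv_getrusage(): iOS, like macOS, reports ru_maxrss in bytes rather than kilobytes. A hedged standalone sketch of the same normalization ladder, mirroring the #if arithmetic in core.c above; peak_rss_kb() is not a libuv function.

```c
#include <sys/resource.h>
#include <unistd.h>

/* Peak RSS normalized to kilobytes: kilobytes almost everywhere,
 * bytes on Apple systems, pages on Solaris. */
long peak_rss_kb(void) {
  struct rusage ru;

  if (getrusage(RUSAGE_SELF, &ru) != 0)
    return -1;

#if defined(__APPLE__)
  ru.ru_maxrss /= 1024;                  /* macOS and iOS report bytes. */
#elif defined(__sun)
  ru.ru_maxrss /= getpagesize() / 1024;  /* Solaris reports pages. */
#endif
  return ru.ru_maxrss;
}
```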
							
								
								
									
deps/libuv/src/unix/fs.c (vendored) | 64
							| @@ -55,9 +55,13 @@ | ||||
| # define HAVE_PREADV 0 | ||||
| #endif | ||||
|  | ||||
| /* preadv() and pwritev() were added in Android N (level 24) */ | ||||
| #if defined(__linux__) && !(defined(__ANDROID__) && __ANDROID_API__ < 24) | ||||
| # define TRY_PREADV 1 | ||||
| #endif | ||||
|  | ||||
| #if defined(__linux__) | ||||
| # include <sys/sendfile.h> | ||||
| # include <sys/utsname.h> | ||||
| #endif | ||||
|  | ||||
| #if defined(__sun) | ||||
| @@ -457,7 +461,7 @@ static ssize_t uv__fs_preadv(uv_file fd, | ||||
|  | ||||
|  | ||||
| static ssize_t uv__fs_read(uv_fs_t* req) { | ||||
| #if defined(__linux__) | ||||
| #if TRY_PREADV | ||||
|   static _Atomic int no_preadv; | ||||
| #endif | ||||
|   unsigned int iovmax; | ||||
| @@ -481,13 +485,13 @@ static ssize_t uv__fs_read(uv_fs_t* req) { | ||||
| #if HAVE_PREADV | ||||
|     result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off); | ||||
| #else | ||||
| # if defined(__linux__) | ||||
| # if TRY_PREADV | ||||
|     if (atomic_load_explicit(&no_preadv, memory_order_relaxed)) retry: | ||||
| # endif | ||||
|     { | ||||
|       result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off); | ||||
|     } | ||||
| # if defined(__linux__) | ||||
| # if TRY_PREADV | ||||
|     else { | ||||
|       result = preadv(req->file, | ||||
|                       (struct iovec*) req->bufs, | ||||
| @@ -899,31 +903,6 @@ out: | ||||
|  | ||||
|  | ||||
| #ifdef __linux__ | ||||
| static unsigned uv__kernel_version(void) { | ||||
|   static _Atomic unsigned cached_version; | ||||
|   struct utsname u; | ||||
|   unsigned version; | ||||
|   unsigned major; | ||||
|   unsigned minor; | ||||
|   unsigned patch; | ||||
|  | ||||
|   version = atomic_load_explicit(&cached_version, memory_order_relaxed); | ||||
|   if (version != 0) | ||||
|     return version; | ||||
|  | ||||
|   if (-1 == uname(&u)) | ||||
|     return 0; | ||||
|  | ||||
|   if (3 != sscanf(u.release, "%u.%u.%u", &major, &minor, &patch)) | ||||
|     return 0; | ||||
|  | ||||
|   version = major * 65536 + minor * 256 + patch; | ||||
|   atomic_store_explicit(&cached_version, version, memory_order_relaxed); | ||||
|  | ||||
|   return version; | ||||
| } | ||||
|  | ||||
|  | ||||
| /* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command | ||||
|  * in copy_file_range() when it shouldn't. There is no workaround except to | ||||
|  * fall back to a regular copy. | ||||
| @@ -1182,8 +1161,8 @@ static ssize_t uv__fs_lutime(uv_fs_t* req) { | ||||
|  | ||||
|  | ||||
| static ssize_t uv__fs_write(uv_fs_t* req) { | ||||
| #if defined(__linux__) | ||||
|   static int no_pwritev; | ||||
| #if TRY_PREADV | ||||
|   static _Atomic int no_pwritev; | ||||
| #endif | ||||
|   ssize_t r; | ||||
|  | ||||
| @@ -1211,20 +1190,20 @@ static ssize_t uv__fs_write(uv_fs_t* req) { | ||||
| #if HAVE_PREADV | ||||
|     r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off); | ||||
| #else | ||||
| # if defined(__linux__) | ||||
|     if (no_pwritev) retry: | ||||
| # if TRY_PREADV | ||||
|     if (atomic_load_explicit(&no_pwritev, memory_order_relaxed)) retry: | ||||
| # endif | ||||
|     { | ||||
|       r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off); | ||||
|     } | ||||
| # if defined(__linux__) | ||||
| # if TRY_PREADV | ||||
|     else { | ||||
|       r = pwritev(req->file, | ||||
|                   (struct iovec*) req->bufs, | ||||
|                   req->nbufs, | ||||
|                   req->off); | ||||
|       if (r == -1 && errno == ENOSYS) { | ||||
|         no_pwritev = 1; | ||||
|         atomic_store_explicit(&no_pwritev, 1, memory_order_relaxed); | ||||
|         goto retry; | ||||
|       } | ||||
|     } | ||||
| @@ -1926,6 +1905,9 @@ int uv_fs_link(uv_loop_t* loop, | ||||
|                uv_fs_cb cb) { | ||||
|   INIT(LINK); | ||||
|   PATH2; | ||||
|   if (cb != NULL) | ||||
|     if (uv__iou_fs_link(loop, req)) | ||||
|       return 0; | ||||
|   POST; | ||||
| } | ||||
|  | ||||
| @@ -1938,6 +1920,9 @@ int uv_fs_mkdir(uv_loop_t* loop, | ||||
|   INIT(MKDIR); | ||||
|   PATH; | ||||
|   req->mode = mode; | ||||
|   if (cb != NULL) | ||||
|     if (uv__iou_fs_mkdir(loop, req)) | ||||
|       return 0; | ||||
|   POST; | ||||
| } | ||||
|  | ||||
| @@ -2089,6 +2074,9 @@ int uv_fs_rename(uv_loop_t* loop, | ||||
|                  uv_fs_cb cb) { | ||||
|   INIT(RENAME); | ||||
|   PATH2; | ||||
|   if (cb != NULL) | ||||
|     if (uv__iou_fs_rename(loop, req)) | ||||
|       return 0; | ||||
|   POST; | ||||
| } | ||||
|  | ||||
| @@ -2135,6 +2123,9 @@ int uv_fs_symlink(uv_loop_t* loop, | ||||
|   INIT(SYMLINK); | ||||
|   PATH2; | ||||
|   req->flags = flags; | ||||
|   if (cb != NULL) | ||||
|     if (uv__iou_fs_symlink(loop, req)) | ||||
|       return 0; | ||||
|   POST; | ||||
| } | ||||
|  | ||||
| @@ -2142,6 +2133,9 @@ int uv_fs_symlink(uv_loop_t* loop, | ||||
| int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { | ||||
|   INIT(UNLINK); | ||||
|   PATH; | ||||
|   if (cb != NULL) | ||||
|     if (uv__iou_fs_unlink(loop, req)) | ||||
|       return 0; | ||||
|   POST; | ||||
| } | ||||
|  | ||||
|   | ||||
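The TRY_PREADV changes above follow a recurring libuv pattern: probe the newer syscall, and on ENOSYS set a process-wide atomic flag so every later call goes straight to the fallback without repeating the failed probe. A condensed sketch under that assumption; read_bufs() and its single-buffer fallback are illustrative, not libuv API.

```c
#include <errno.h>
#include <stdatomic.h>
#include <sys/uio.h>
#include <unistd.h>

static _Atomic int no_preadv;  /* Sticky: set once, checked ever after. */

ssize_t read_bufs(int fd, struct iovec* iov, int niov, off_t off) {
  ssize_t r;

  if (!atomic_load_explicit(&no_preadv, memory_order_relaxed)) {
    r = preadv(fd, iov, niov, off);
    if (r != -1 || errno != ENOSYS)
      return r;
    /* Kernel lacks preadv(); remember and fall through. Relaxed ordering
     * suffices: the flag is a monotonic hint, not a synchronization point. */
    atomic_store_explicit(&no_preadv, 1, memory_order_relaxed);
  }

  /* Fallback: a plain pread() on the first buffer. */
  return pread(fd, iov[0].iov_base, iov[0].iov_len, off);
}
```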
							
								
								
									
deps/libuv/src/unix/fsevents.c (vendored) | 84
							| @@ -80,13 +80,13 @@ enum uv__cf_loop_signal_type_e { | ||||
| typedef enum uv__cf_loop_signal_type_e uv__cf_loop_signal_type_t; | ||||
|  | ||||
| struct uv__cf_loop_signal_s { | ||||
|   QUEUE member; | ||||
|   struct uv__queue member; | ||||
|   uv_fs_event_t* handle; | ||||
|   uv__cf_loop_signal_type_t type; | ||||
| }; | ||||
|  | ||||
| struct uv__fsevents_event_s { | ||||
|   QUEUE member; | ||||
|   struct uv__queue member; | ||||
|   int events; | ||||
|   char path[1]; | ||||
| }; | ||||
| @@ -98,7 +98,7 @@ struct uv__cf_loop_state_s { | ||||
|   FSEventStreamRef fsevent_stream; | ||||
|   uv_sem_t fsevent_sem; | ||||
|   uv_mutex_t fsevent_mutex; | ||||
|   void* fsevent_handles[2]; | ||||
|   struct uv__queue fsevent_handles; | ||||
|   unsigned int fsevent_handle_count; | ||||
| }; | ||||
|  | ||||
| @@ -150,22 +150,22 @@ static void (*pFSEventStreamStop)(FSEventStreamRef); | ||||
|  | ||||
| #define UV__FSEVENTS_PROCESS(handle, block)                                   \ | ||||
|     do {                                                                      \ | ||||
|       QUEUE events;                                                           \ | ||||
|       QUEUE* q;                                                               \ | ||||
|       struct uv__queue events;                                                \ | ||||
|       struct uv__queue* q;                                                    \ | ||||
|       uv__fsevents_event_t* event;                                            \ | ||||
|       int err;                                                                \ | ||||
|       uv_mutex_lock(&(handle)->cf_mutex);                                     \ | ||||
|       /* Split-off all events and empty original queue */                     \ | ||||
|       QUEUE_MOVE(&(handle)->cf_events, &events);                              \ | ||||
|       uv__queue_move(&(handle)->cf_events, &events);                          \ | ||||
|       /* Get error (if any) and zero original one */                          \ | ||||
|       err = (handle)->cf_error;                                               \ | ||||
|       (handle)->cf_error = 0;                                                 \ | ||||
|       uv_mutex_unlock(&(handle)->cf_mutex);                                   \ | ||||
|       /* Loop through events, deallocating each after processing */           \ | ||||
|       while (!QUEUE_EMPTY(&events)) {                                         \ | ||||
|         q = QUEUE_HEAD(&events);                                              \ | ||||
|         event = QUEUE_DATA(q, uv__fsevents_event_t, member);                  \ | ||||
|         QUEUE_REMOVE(q);                                                      \ | ||||
|       while (!uv__queue_empty(&events)) {                                     \ | ||||
|         q = uv__queue_head(&events);                                          \ | ||||
|         event = uv__queue_data(q, uv__fsevents_event_t, member);              \ | ||||
|         uv__queue_remove(q);                                                  \ | ||||
|         /* NOTE: Checking uv__is_active() is required here, because handle    \ | ||||
|          * callback may close handle and invoking it after it will lead to    \ | ||||
|          * incorrect behaviour */                                             \ | ||||
| @@ -193,14 +193,14 @@ static void uv__fsevents_cb(uv_async_t* cb) { | ||||
|  | ||||
| /* Runs in CF thread, pushed event into handle's event list */ | ||||
| static void uv__fsevents_push_event(uv_fs_event_t* handle, | ||||
|                                     QUEUE* events, | ||||
|                                     struct uv__queue* events, | ||||
|                                     int err) { | ||||
|   assert(events != NULL || err != 0); | ||||
|   uv_mutex_lock(&handle->cf_mutex); | ||||
|  | ||||
|   /* Concatenate two queues */ | ||||
|   if (events != NULL) | ||||
|     QUEUE_ADD(&handle->cf_events, events); | ||||
|     uv__queue_add(&handle->cf_events, events); | ||||
|  | ||||
|   /* Propagate error */ | ||||
|   if (err != 0) | ||||
| @@ -224,12 +224,12 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef, | ||||
|   char* path; | ||||
|   char* pos; | ||||
|   uv_fs_event_t* handle; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   uv_loop_t* loop; | ||||
|   uv__cf_loop_state_t* state; | ||||
|   uv__fsevents_event_t* event; | ||||
|   FSEventStreamEventFlags flags; | ||||
|   QUEUE head; | ||||
|   struct uv__queue head; | ||||
|  | ||||
|   loop = info; | ||||
|   state = loop->cf_state; | ||||
| @@ -238,9 +238,9 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef, | ||||
|  | ||||
|   /* For each handle */ | ||||
|   uv_mutex_lock(&state->fsevent_mutex); | ||||
|   QUEUE_FOREACH(q, &state->fsevent_handles) { | ||||
|     handle = QUEUE_DATA(q, uv_fs_event_t, cf_member); | ||||
|     QUEUE_INIT(&head); | ||||
|   uv__queue_foreach(q, &state->fsevent_handles) { | ||||
|     handle = uv__queue_data(q, uv_fs_event_t, cf_member); | ||||
|     uv__queue_init(&head); | ||||
|  | ||||
|     /* Process and filter out events */ | ||||
|     for (i = 0; i < numEvents; i++) { | ||||
| @@ -318,10 +318,10 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef, | ||||
|           event->events = UV_CHANGE; | ||||
|       } | ||||
|  | ||||
|       QUEUE_INSERT_TAIL(&head, &event->member); | ||||
|       uv__queue_insert_tail(&head, &event->member); | ||||
|     } | ||||
|  | ||||
|     if (!QUEUE_EMPTY(&head)) | ||||
|     if (!uv__queue_empty(&head)) | ||||
|       uv__fsevents_push_event(handle, &head, 0); | ||||
|   } | ||||
|   uv_mutex_unlock(&state->fsevent_mutex); | ||||
| @@ -403,7 +403,7 @@ static void uv__fsevents_destroy_stream(uv__cf_loop_state_t* state) { | ||||
| static void uv__fsevents_reschedule(uv__cf_loop_state_t* state, | ||||
|                                     uv_loop_t* loop, | ||||
|                                     uv__cf_loop_signal_type_t type) { | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   uv_fs_event_t* curr; | ||||
|   CFArrayRef cf_paths; | ||||
|   CFStringRef* paths; | ||||
| @@ -446,9 +446,9 @@ static void uv__fsevents_reschedule(uv__cf_loop_state_t* state, | ||||
|  | ||||
|     q = &state->fsevent_handles; | ||||
|     for (; i < path_count; i++) { | ||||
|       q = QUEUE_NEXT(q); | ||||
|       q = uv__queue_next(q); | ||||
|       assert(q != &state->fsevent_handles); | ||||
|       curr = QUEUE_DATA(q, uv_fs_event_t, cf_member); | ||||
|       curr = uv__queue_data(q, uv_fs_event_t, cf_member); | ||||
|  | ||||
|       assert(curr->realpath != NULL); | ||||
|       paths[i] = | ||||
| @@ -486,8 +486,8 @@ final: | ||||
|  | ||||
|     /* Broadcast error to all handles */ | ||||
|     uv_mutex_lock(&state->fsevent_mutex); | ||||
|     QUEUE_FOREACH(q, &state->fsevent_handles) { | ||||
|       curr = QUEUE_DATA(q, uv_fs_event_t, cf_member); | ||||
|     uv__queue_foreach(q, &state->fsevent_handles) { | ||||
|       curr = uv__queue_data(q, uv_fs_event_t, cf_member); | ||||
|       uv__fsevents_push_event(curr, NULL, err); | ||||
|     } | ||||
|     uv_mutex_unlock(&state->fsevent_mutex); | ||||
| @@ -606,7 +606,7 @@ static int uv__fsevents_loop_init(uv_loop_t* loop) { | ||||
|   if (err) | ||||
|     goto fail_sem_init; | ||||
|  | ||||
|   QUEUE_INIT(&loop->cf_signals); | ||||
|   uv__queue_init(&loop->cf_signals); | ||||
|  | ||||
|   err = uv_sem_init(&state->fsevent_sem, 0); | ||||
|   if (err) | ||||
| @@ -616,7 +616,7 @@ static int uv__fsevents_loop_init(uv_loop_t* loop) { | ||||
|   if (err) | ||||
|     goto fail_fsevent_mutex_init; | ||||
|  | ||||
|   QUEUE_INIT(&state->fsevent_handles); | ||||
|   uv__queue_init(&state->fsevent_handles); | ||||
|   state->fsevent_need_reschedule = 0; | ||||
|   state->fsevent_handle_count = 0; | ||||
|  | ||||
| @@ -675,7 +675,7 @@ fail_mutex_init: | ||||
| void uv__fsevents_loop_delete(uv_loop_t* loop) { | ||||
|   uv__cf_loop_signal_t* s; | ||||
|   uv__cf_loop_state_t* state; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|  | ||||
|   if (loop->cf_state == NULL) | ||||
|     return; | ||||
| @@ -688,10 +688,10 @@ void uv__fsevents_loop_delete(uv_loop_t* loop) { | ||||
|   uv_mutex_destroy(&loop->cf_mutex); | ||||
|  | ||||
|   /* Free any remaining data */ | ||||
|   while (!QUEUE_EMPTY(&loop->cf_signals)) { | ||||
|     q = QUEUE_HEAD(&loop->cf_signals); | ||||
|     s = QUEUE_DATA(q, uv__cf_loop_signal_t, member); | ||||
|     QUEUE_REMOVE(q); | ||||
|   while (!uv__queue_empty(&loop->cf_signals)) { | ||||
|     q = uv__queue_head(&loop->cf_signals); | ||||
|     s = uv__queue_data(q, uv__cf_loop_signal_t, member); | ||||
|     uv__queue_remove(q); | ||||
|     uv__free(s); | ||||
|   } | ||||
|  | ||||
| @@ -735,22 +735,22 @@ static void* uv__cf_loop_runner(void* arg) { | ||||
| static void uv__cf_loop_cb(void* arg) { | ||||
|   uv_loop_t* loop; | ||||
|   uv__cf_loop_state_t* state; | ||||
|   QUEUE* item; | ||||
|   QUEUE split_head; | ||||
|   struct uv__queue* item; | ||||
|   struct uv__queue split_head; | ||||
|   uv__cf_loop_signal_t* s; | ||||
|  | ||||
|   loop = arg; | ||||
|   state = loop->cf_state; | ||||
|  | ||||
|   uv_mutex_lock(&loop->cf_mutex); | ||||
|   QUEUE_MOVE(&loop->cf_signals, &split_head); | ||||
|   uv__queue_move(&loop->cf_signals, &split_head); | ||||
|   uv_mutex_unlock(&loop->cf_mutex); | ||||
|  | ||||
|   while (!QUEUE_EMPTY(&split_head)) { | ||||
|     item = QUEUE_HEAD(&split_head); | ||||
|     QUEUE_REMOVE(item); | ||||
|   while (!uv__queue_empty(&split_head)) { | ||||
|     item = uv__queue_head(&split_head); | ||||
|     uv__queue_remove(item); | ||||
|  | ||||
|     s = QUEUE_DATA(item, uv__cf_loop_signal_t, member); | ||||
|     s = uv__queue_data(item, uv__cf_loop_signal_t, member); | ||||
|  | ||||
|     /* This was a termination signal */ | ||||
|     if (s->handle == NULL) | ||||
| @@ -778,7 +778,7 @@ int uv__cf_loop_signal(uv_loop_t* loop, | ||||
|   item->type = type; | ||||
|  | ||||
|   uv_mutex_lock(&loop->cf_mutex); | ||||
|   QUEUE_INSERT_TAIL(&loop->cf_signals, &item->member); | ||||
|   uv__queue_insert_tail(&loop->cf_signals, &item->member); | ||||
|  | ||||
|   state = loop->cf_state; | ||||
|   assert(state != NULL); | ||||
| @@ -807,7 +807,7 @@ int uv__fsevents_init(uv_fs_event_t* handle) { | ||||
|   handle->realpath_len = strlen(handle->realpath); | ||||
|  | ||||
|   /* Initialize event queue */ | ||||
|   QUEUE_INIT(&handle->cf_events); | ||||
|   uv__queue_init(&handle->cf_events); | ||||
|   handle->cf_error = 0; | ||||
|  | ||||
|   /* | ||||
| @@ -832,7 +832,7 @@ int uv__fsevents_init(uv_fs_event_t* handle) { | ||||
|   /* Insert handle into the list */ | ||||
|   state = handle->loop->cf_state; | ||||
|   uv_mutex_lock(&state->fsevent_mutex); | ||||
|   QUEUE_INSERT_TAIL(&state->fsevent_handles, &handle->cf_member); | ||||
|   uv__queue_insert_tail(&state->fsevent_handles, &handle->cf_member); | ||||
|   state->fsevent_handle_count++; | ||||
|   state->fsevent_need_reschedule = 1; | ||||
|   uv_mutex_unlock(&state->fsevent_mutex); | ||||
| @@ -872,7 +872,7 @@ int uv__fsevents_close(uv_fs_event_t* handle) { | ||||
|   /* Remove handle from  the list */ | ||||
|   state = handle->loop->cf_state; | ||||
|   uv_mutex_lock(&state->fsevent_mutex); | ||||
|   QUEUE_REMOVE(&handle->cf_member); | ||||
|   uv__queue_remove(&handle->cf_member); | ||||
|   state->fsevent_handle_count--; | ||||
|   state->fsevent_need_reschedule = 1; | ||||
|   uv_mutex_unlock(&state->fsevent_mutex); | ||||
|   | ||||
							
								
								
									
deps/libuv/src/unix/internal.h (vendored) | 11
							| @@ -335,20 +335,30 @@ int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req); | ||||
| int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop, | ||||
|                                   uv_fs_t* req, | ||||
|                                   uint32_t fsync_flags); | ||||
| int uv__iou_fs_link(uv_loop_t* loop, uv_fs_t* req); | ||||
| int uv__iou_fs_mkdir(uv_loop_t* loop, uv_fs_t* req); | ||||
| int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req); | ||||
| int uv__iou_fs_read_or_write(uv_loop_t* loop, | ||||
|                              uv_fs_t* req, | ||||
|                              int is_read); | ||||
| int uv__iou_fs_rename(uv_loop_t* loop, uv_fs_t* req); | ||||
| int uv__iou_fs_statx(uv_loop_t* loop, | ||||
|                      uv_fs_t* req, | ||||
|                      int is_fstat, | ||||
|                      int is_lstat); | ||||
| int uv__iou_fs_symlink(uv_loop_t* loop, uv_fs_t* req); | ||||
| int uv__iou_fs_unlink(uv_loop_t* loop, uv_fs_t* req); | ||||
| #else | ||||
| #define uv__iou_fs_close(loop, req) 0 | ||||
| #define uv__iou_fs_fsync_or_fdatasync(loop, req, fsync_flags) 0 | ||||
| #define uv__iou_fs_link(loop, req) 0 | ||||
| #define uv__iou_fs_mkdir(loop, req) 0 | ||||
| #define uv__iou_fs_open(loop, req) 0 | ||||
| #define uv__iou_fs_read_or_write(loop, req, is_read) 0 | ||||
| #define uv__iou_fs_rename(loop, req) 0 | ||||
| #define uv__iou_fs_statx(loop, req, is_fstat, is_lstat) 0 | ||||
| #define uv__iou_fs_symlink(loop, req) 0 | ||||
| #define uv__iou_fs_unlink(loop, req) 0 | ||||
| #endif | ||||
|  | ||||
| #if defined(__APPLE__) | ||||
| @@ -429,6 +439,7 @@ int uv__statx(int dirfd, | ||||
|               struct uv__statx* statxbuf); | ||||
| void uv__statx_to_stat(const struct uv__statx* statxbuf, uv_stat_t* buf); | ||||
| ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags); | ||||
| unsigned uv__kernel_version(void); | ||||
| #endif | ||||
|  | ||||
| typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*); | ||||
|   | ||||
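internal.h now exports uv__kernel_version() (its definition moves into linux.c further down), which packs major.minor.patch as major*65536 + minor*256 + patch so version gates reduce to integer comparisons, e.g. the linux.c diff below tests `< 0x050F5A` for 5.15.90. A tiny self-contained check of that encoding:

```c
#include <assert.h>

/* Pack a kernel version the way linux.c below does: one field per
 * component, so numeric order matches version order as long as minor
 * and patch stay below 256. */
static unsigned pack_version(unsigned major, unsigned minor, unsigned patch) {
  return major * 65536 + minor * 256 + patch;
}

int main(void) {
  assert(pack_version(5, 15, 0)  == 0x050F00);  /* io_uring mkdirat/symlinkat/linkat gate */
  assert(pack_version(5, 15, 90) == 0x050F5A);  /* close-vs-execve workaround cutoff */
  assert(pack_version(5, 15, 90) > pack_version(5, 15, 0));
  return 0;
}
```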
							
								
								
									
deps/libuv/src/unix/kqueue.c (vendored) | 18
							| @@ -133,7 +133,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||||
|   struct timespec spec; | ||||
|   unsigned int nevents; | ||||
|   unsigned int revents; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   uv__io_t* w; | ||||
|   uv_process_t* process; | ||||
|   sigset_t* pset; | ||||
| @@ -152,19 +152,19 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||||
|   int reset_timeout; | ||||
|  | ||||
|   if (loop->nfds == 0) { | ||||
|     assert(QUEUE_EMPTY(&loop->watcher_queue)); | ||||
|     assert(uv__queue_empty(&loop->watcher_queue)); | ||||
|     return; | ||||
|   } | ||||
|  | ||||
|   lfields = uv__get_internal_fields(loop); | ||||
|   nevents = 0; | ||||
|  | ||||
|   while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||||
|     q = QUEUE_HEAD(&loop->watcher_queue); | ||||
|     QUEUE_REMOVE(q); | ||||
|     QUEUE_INIT(q); | ||||
|   while (!uv__queue_empty(&loop->watcher_queue)) { | ||||
|     q = uv__queue_head(&loop->watcher_queue); | ||||
|     uv__queue_remove(q); | ||||
|     uv__queue_init(q); | ||||
|  | ||||
|     w = QUEUE_DATA(q, uv__io_t, watcher_queue); | ||||
|     w = uv__queue_data(q, uv__io_t, watcher_queue); | ||||
|     assert(w->pevents != 0); | ||||
|     assert(w->fd >= 0); | ||||
|     assert(w->fd < (int) loop->nwatchers); | ||||
| @@ -307,8 +307,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||||
|  | ||||
|       /* Handle kevent NOTE_EXIT results */ | ||||
|       if (ev->filter == EVFILT_PROC) { | ||||
|         QUEUE_FOREACH(q, &loop->process_handles) { | ||||
|           process = QUEUE_DATA(q, uv_process_t, queue); | ||||
|         uv__queue_foreach(q, &loop->process_handles) { | ||||
|           process = uv__queue_data(q, uv_process_t, queue); | ||||
|           if (process->pid == fd) { | ||||
|             process->flags |= UV_HANDLE_REAP; | ||||
|             loop->flags |= UV_LOOP_REAP_CHILDREN; | ||||
|   | ||||
							
								
								
									
deps/libuv/src/unix/linux.c (vendored) | 259
							| @@ -48,6 +48,7 @@ | ||||
| #include <sys/sysinfo.h> | ||||
| #include <sys/sysmacros.h> | ||||
| #include <sys/types.h> | ||||
| #include <sys/utsname.h> | ||||
| #include <time.h> | ||||
| #include <unistd.h> | ||||
|  | ||||
| @@ -150,6 +151,11 @@ enum { | ||||
|   UV__IORING_OP_CLOSE = 19, | ||||
|   UV__IORING_OP_STATX = 21, | ||||
|   UV__IORING_OP_EPOLL_CTL = 29, | ||||
|   UV__IORING_OP_RENAMEAT = 35, | ||||
|   UV__IORING_OP_UNLINKAT = 36, | ||||
|   UV__IORING_OP_MKDIRAT = 37, | ||||
|   UV__IORING_OP_SYMLINKAT = 38, | ||||
|   UV__IORING_OP_LINKAT = 39, | ||||
| }; | ||||
|  | ||||
| enum { | ||||
| @@ -162,6 +168,10 @@ enum { | ||||
|   UV__IORING_SQ_CQ_OVERFLOW = 2u, | ||||
| }; | ||||
|  | ||||
| enum { | ||||
|   UV__MKDIRAT_SYMLINKAT_LINKAT = 1u, | ||||
| }; | ||||
|  | ||||
| struct uv__io_cqring_offsets { | ||||
|   uint32_t head; | ||||
|   uint32_t tail; | ||||
| @@ -257,7 +267,7 @@ STATIC_ASSERT(EPOLL_CTL_MOD < 4); | ||||
|  | ||||
| struct watcher_list { | ||||
|   RB_ENTRY(watcher_list) entry; | ||||
|   QUEUE watchers; | ||||
|   struct uv__queue watchers; | ||||
|   int iterating; | ||||
|   char* path; | ||||
|   int wd; | ||||
| @@ -300,6 +310,31 @@ static struct watcher_root* uv__inotify_watchers(uv_loop_t* loop) { | ||||
| } | ||||
|  | ||||
|  | ||||
| unsigned uv__kernel_version(void) { | ||||
|   static _Atomic unsigned cached_version; | ||||
|   struct utsname u; | ||||
|   unsigned version; | ||||
|   unsigned major; | ||||
|   unsigned minor; | ||||
|   unsigned patch; | ||||
|  | ||||
|   version = atomic_load_explicit(&cached_version, memory_order_relaxed); | ||||
|   if (version != 0) | ||||
|     return version; | ||||
|  | ||||
|   if (-1 == uname(&u)) | ||||
|     return 0; | ||||
|  | ||||
|   if (3 != sscanf(u.release, "%u.%u.%u", &major, &minor, &patch)) | ||||
|     return 0; | ||||
|  | ||||
|   version = major * 65536 + minor * 256 + patch; | ||||
|   atomic_store_explicit(&cached_version, version, memory_order_relaxed); | ||||
|  | ||||
|   return version; | ||||
| } | ||||
|  | ||||
|  | ||||
| ssize_t | ||||
| uv__fs_copy_file_range(int fd_in, | ||||
|                        off_t* off_in, | ||||
| @@ -385,6 +420,9 @@ int uv__io_uring_register(int fd, unsigned opcode, void* arg, unsigned nargs) { | ||||
|  | ||||
|  | ||||
| static int uv__use_io_uring(void) { | ||||
| #if defined(__ANDROID_API__) | ||||
|   return 0;  /* Possibly available but blocked by seccomp. */ | ||||
| #else | ||||
|   /* Ternary: unknown=0, yes=1, no=-1 */ | ||||
|   static _Atomic int use_io_uring; | ||||
|   char* val; | ||||
| @@ -399,6 +437,7 @@ static int uv__use_io_uring(void) { | ||||
|   } | ||||
|  | ||||
|   return use > 0; | ||||
| #endif | ||||
| } | ||||
|  | ||||
|  | ||||
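The body elided from this hunk consults an environment override once and caches the verdict in the tristate atomic. A minimal sketch of that pattern, assuming a UV_USE_IO_URING-style variable (the exact name and default are not visible in this hunk):

    int use = atomic_load_explicit(&use_io_uring, memory_order_relaxed);
    if (use == 0) {
      char* val = getenv("UV_USE_IO_URING");      /* assumed variable name */
      use = (val == NULL || atoi(val)) ? 1 : -1;  /* assumed default: on */
      atomic_store_explicit(&use_io_uring, use, memory_order_relaxed);
    }
    return use > 0;
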
| @@ -503,6 +542,10 @@ static void uv__iou_init(int epollfd, | ||||
|   iou->sqelen = sqelen; | ||||
|   iou->ringfd = ringfd; | ||||
|   iou->in_flight = 0; | ||||
|   iou->flags = 0; | ||||
|  | ||||
|   if (uv__kernel_version() >= /* 5.15.0 */ 0x050F00) | ||||
|     iou->flags |= UV__MKDIRAT_SYMLINKAT_LINKAT; | ||||
|  | ||||
|   for (i = 0; i <= iou->sqmask; i++) | ||||
|     iou->sqarray[i] = i;  /* Slot -> sqe identity mapping. */ | ||||
| @@ -684,7 +727,7 @@ static struct uv__io_uring_sqe* uv__iou_get_sqe(struct uv__iou* iou, | ||||
|   req->work_req.loop = loop; | ||||
|   req->work_req.work = NULL; | ||||
|   req->work_req.done = NULL; | ||||
|   QUEUE_INIT(&req->work_req.wq); | ||||
|   uv__queue_init(&req->work_req.wq); | ||||
|  | ||||
|   uv__req_register(loop, req); | ||||
|   iou->in_flight++; | ||||
| @@ -714,6 +757,17 @@ int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req) { | ||||
|   struct uv__io_uring_sqe* sqe; | ||||
|   struct uv__iou* iou; | ||||
|  | ||||
|   /* Work around a poorly understood bug in older kernels where closing a file | ||||
|    * descriptor pointing to /foo/bar results in ETXTBSY errors when trying to | ||||
|    * execve("/foo/bar") later on. The bug seems to have been fixed somewhere | ||||
|    * between 5.15.85 and 5.15.90. I couldn't pinpoint the responsible commit | ||||
|    * but good candidates are the several data race fixes. Interestingly, it | ||||
|    * seems to manifest only when running under Docker so the possibility of | ||||
|    * a Docker bug can't be completely ruled out either. Yay, computers. | ||||
|    */ | ||||
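|   /* 0x050F5A is the uv__kernel_version() encoding of 5.15.90, | ||||
|    * i.e. (5 << 16) | (15 << 8) | 90. | ||||
|    */ | ||||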
|   if (uv__kernel_version() < /* 5.15.90 */ 0x050F5A) | ||||
|     return 0; | ||||
|  | ||||
|   iou = &uv__get_internal_fields(loop)->iou; | ||||
|  | ||||
|   sqe = uv__iou_get_sqe(iou, loop, req); | ||||
| @@ -754,6 +808,55 @@ int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop, | ||||
| } | ||||
|  | ||||
|  | ||||
| int uv__iou_fs_link(uv_loop_t* loop, uv_fs_t* req) { | ||||
|   struct uv__io_uring_sqe* sqe; | ||||
|   struct uv__iou* iou; | ||||
|  | ||||
|   iou = &uv__get_internal_fields(loop)->iou; | ||||
|  | ||||
|   if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT)) | ||||
|     return 0; | ||||
|  | ||||
|   sqe = uv__iou_get_sqe(iou, loop, req); | ||||
|   if (sqe == NULL) | ||||
|     return 0; | ||||
|  | ||||
|   sqe->addr = (uintptr_t) req->path; | ||||
|   sqe->fd = AT_FDCWD; | ||||
|   sqe->addr2 = (uintptr_t) req->new_path; | ||||
|   sqe->len = AT_FDCWD; | ||||
|   sqe->opcode = UV__IORING_OP_LINKAT; | ||||
|  | ||||
|   uv__iou_submit(iou); | ||||
|  | ||||
|   return 1; | ||||
| } | ||||
|  | ||||
|  | ||||
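io_uring has no dedicated SQE fields for the *at() arguments, so the generic slots are reused: addr and addr2 carry the old and new path pointers while fd and len carry the two directory fds. The submission above is therefore roughly the asynchronous form of the plain syscall (a sketch; flags are left at zero):

    linkat(AT_FDCWD, req->path, AT_FDCWD, req->new_path, 0);

The renameat, mkdirat, symlinkat and unlinkat submissions below reuse the same field mapping with fewer arguments.
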
| int uv__iou_fs_mkdir(uv_loop_t* loop, uv_fs_t* req) { | ||||
|   struct uv__io_uring_sqe* sqe; | ||||
|   struct uv__iou* iou; | ||||
|  | ||||
|   iou = &uv__get_internal_fields(loop)->iou; | ||||
|  | ||||
|   if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT)) | ||||
|     return 0; | ||||
|  | ||||
|   sqe = uv__iou_get_sqe(iou, loop, req); | ||||
|   if (sqe == NULL) | ||||
|     return 0; | ||||
|  | ||||
|   sqe->addr = (uintptr_t) req->path; | ||||
|   sqe->fd = AT_FDCWD; | ||||
|   sqe->len = req->mode; | ||||
|   sqe->opcode = UV__IORING_OP_MKDIRAT; | ||||
|  | ||||
|   uv__iou_submit(iou); | ||||
|  | ||||
|   return 1; | ||||
| } | ||||
|  | ||||
|  | ||||
| int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req) { | ||||
|   struct uv__io_uring_sqe* sqe; | ||||
|   struct uv__iou* iou; | ||||
| @@ -776,16 +879,86 @@ int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req) { | ||||
| } | ||||
|  | ||||
|  | ||||
| int uv__iou_fs_rename(uv_loop_t* loop, uv_fs_t* req) { | ||||
|   struct uv__io_uring_sqe* sqe; | ||||
|   struct uv__iou* iou; | ||||
|  | ||||
|   iou = &uv__get_internal_fields(loop)->iou; | ||||
|  | ||||
|   sqe = uv__iou_get_sqe(iou, loop, req); | ||||
|   if (sqe == NULL) | ||||
|     return 0; | ||||
|  | ||||
|   sqe->addr = (uintptr_t) req->path; | ||||
|   sqe->fd = AT_FDCWD; | ||||
|   sqe->addr2 = (uintptr_t) req->new_path; | ||||
|   sqe->len = AT_FDCWD; | ||||
|   sqe->opcode = UV__IORING_OP_RENAMEAT; | ||||
|  | ||||
|   uv__iou_submit(iou); | ||||
|  | ||||
|   return 1; | ||||
| } | ||||
|  | ||||
|  | ||||
| int uv__iou_fs_symlink(uv_loop_t* loop, uv_fs_t* req) { | ||||
|   struct uv__io_uring_sqe* sqe; | ||||
|   struct uv__iou* iou; | ||||
|  | ||||
|   iou = &uv__get_internal_fields(loop)->iou; | ||||
|  | ||||
|   if (!(iou->flags & UV__MKDIRAT_SYMLINKAT_LINKAT)) | ||||
|     return 0; | ||||
|  | ||||
|   sqe = uv__iou_get_sqe(iou, loop, req); | ||||
|   if (sqe == NULL) | ||||
|     return 0; | ||||
|  | ||||
|   sqe->addr = (uintptr_t) req->path; | ||||
|   sqe->fd = AT_FDCWD; | ||||
|   sqe->addr2 = (uintptr_t) req->new_path; | ||||
|   sqe->opcode = UV__IORING_OP_SYMLINKAT; | ||||
|  | ||||
|   uv__iou_submit(iou); | ||||
|  | ||||
|   return 1; | ||||
| } | ||||
|  | ||||
|  | ||||
| int uv__iou_fs_unlink(uv_loop_t* loop, uv_fs_t* req) { | ||||
|   struct uv__io_uring_sqe* sqe; | ||||
|   struct uv__iou* iou; | ||||
|  | ||||
|   iou = &uv__get_internal_fields(loop)->iou; | ||||
|  | ||||
|   sqe = uv__iou_get_sqe(iou, loop, req); | ||||
|   if (sqe == NULL) | ||||
|     return 0; | ||||
|  | ||||
|   sqe->addr = (uintptr_t) req->path; | ||||
|   sqe->fd = AT_FDCWD; | ||||
|   sqe->opcode = UV__IORING_OP_UNLINKAT; | ||||
|  | ||||
|   uv__iou_submit(iou); | ||||
|  | ||||
|   return 1; | ||||
| } | ||||
|  | ||||
|  | ||||
| int uv__iou_fs_read_or_write(uv_loop_t* loop, | ||||
|                              uv_fs_t* req, | ||||
|                              int is_read) { | ||||
|   struct uv__io_uring_sqe* sqe; | ||||
|   struct uv__iou* iou; | ||||
|  | ||||
|   /* For the moment, if iovcnt is greater than IOV_MAX, fallback to the | ||||
|    * threadpool. In the future we might take advantage of IOSQE_IO_LINK. */ | ||||
|   if (req->nbufs > IOV_MAX) | ||||
|     return 0; | ||||
|   /* If iovcnt is greater than IOV_MAX, cap it to IOV_MAX on reads (a short | ||||
|    * read is always valid) and fall back to the threadpool on writes. */ | ||||
|   if (req->nbufs > IOV_MAX) { | ||||
|     if (is_read) | ||||
|       req->nbufs = IOV_MAX; | ||||
|     else | ||||
|       return 0; | ||||
|   } | ||||
|  | ||||
|   iou = &uv__get_internal_fields(loop)->iou; | ||||
|  | ||||
| @@ -1092,7 +1265,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||||
|   struct uv__iou* ctl; | ||||
|   struct uv__iou* iou; | ||||
|   int real_timeout; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   uv__io_t* w; | ||||
|   sigset_t* sigmask; | ||||
|   sigset_t sigset; | ||||
| @@ -1138,11 +1311,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||||
|  | ||||
|   memset(&e, 0, sizeof(e)); | ||||
|  | ||||
|   while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||||
|     q = QUEUE_HEAD(&loop->watcher_queue); | ||||
|     w = QUEUE_DATA(q, uv__io_t, watcher_queue); | ||||
|     QUEUE_REMOVE(q); | ||||
|     QUEUE_INIT(q); | ||||
|   while (!uv__queue_empty(&loop->watcher_queue)) { | ||||
|     q = uv__queue_head(&loop->watcher_queue); | ||||
|     w = uv__queue_data(q, uv__io_t, watcher_queue); | ||||
|     uv__queue_remove(q); | ||||
|     uv__queue_init(q); | ||||
|  | ||||
|     op = EPOLL_CTL_MOD; | ||||
|     if (w->events == 0) | ||||
| @@ -1479,6 +1652,8 @@ int uv_cpu_info(uv_cpu_info_t** ci, int* count) { | ||||
|   static const char model_marker[] = "CPU part\t: "; | ||||
| #elif defined(__mips__) | ||||
|   static const char model_marker[] = "cpu model\t\t: "; | ||||
| #elif defined(__loongarch__) | ||||
|   static const char model_marker[] = "cpu family\t\t: "; | ||||
| #else | ||||
|   static const char model_marker[] = "model name\t: "; | ||||
| #endif | ||||
| @@ -2097,8 +2272,8 @@ static int uv__inotify_fork(uv_loop_t* loop, struct watcher_list* root) { | ||||
|   struct watcher_list* tmp_watcher_list_iter; | ||||
|   struct watcher_list* watcher_list; | ||||
|   struct watcher_list tmp_watcher_list; | ||||
|   QUEUE queue; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue queue; | ||||
|   struct uv__queue* q; | ||||
|   uv_fs_event_t* handle; | ||||
|   char* tmp_path; | ||||
|  | ||||
| @@ -2110,41 +2285,41 @@ static int uv__inotify_fork(uv_loop_t* loop, struct watcher_list* root) { | ||||
|    */ | ||||
|   loop->inotify_watchers = root; | ||||
|  | ||||
|   QUEUE_INIT(&tmp_watcher_list.watchers); | ||||
|   uv__queue_init(&tmp_watcher_list.watchers); | ||||
|   /* Note that the queue we use is shared with the start() and stop() | ||||
|    * functions, making QUEUE_FOREACH unsafe to use. So we use the | ||||
|    * QUEUE_MOVE trick to safely iterate. Also don't free the watcher | ||||
|    * functions, making uv__queue_foreach unsafe to use. So we use the | ||||
|    * uv__queue_move trick to safely iterate. Also don't free the watcher | ||||
|    * list until we're done iterating. Cf. uv__inotify_read. | ||||
|    */ | ||||
|   RB_FOREACH_SAFE(watcher_list, watcher_root, | ||||
|                   uv__inotify_watchers(loop), tmp_watcher_list_iter) { | ||||
|     watcher_list->iterating = 1; | ||||
|     QUEUE_MOVE(&watcher_list->watchers, &queue); | ||||
|     while (!QUEUE_EMPTY(&queue)) { | ||||
|       q = QUEUE_HEAD(&queue); | ||||
|       handle = QUEUE_DATA(q, uv_fs_event_t, watchers); | ||||
|     uv__queue_move(&watcher_list->watchers, &queue); | ||||
|     while (!uv__queue_empty(&queue)) { | ||||
|       q = uv__queue_head(&queue); | ||||
|       handle = uv__queue_data(q, uv_fs_event_t, watchers); | ||||
|       /* It's critical to keep a copy of path here, because it | ||||
|        * will be set to NULL by stop() and then deallocated by | ||||
|        * maybe_free_watcher_list. | ||||
|        */ | ||||
|       tmp_path = uv__strdup(handle->path); | ||||
|       assert(tmp_path != NULL); | ||||
|       QUEUE_REMOVE(q); | ||||
|       QUEUE_INSERT_TAIL(&watcher_list->watchers, q); | ||||
|       uv__queue_remove(q); | ||||
|       uv__queue_insert_tail(&watcher_list->watchers, q); | ||||
|       uv_fs_event_stop(handle); | ||||
|  | ||||
|       QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers); | ||||
|       uv__queue_insert_tail(&tmp_watcher_list.watchers, &handle->watchers); | ||||
|       handle->path = tmp_path; | ||||
|     } | ||||
|     watcher_list->iterating = 0; | ||||
|     maybe_free_watcher_list(watcher_list, loop); | ||||
|   } | ||||
|  | ||||
|   QUEUE_MOVE(&tmp_watcher_list.watchers, &queue); | ||||
|   while (!QUEUE_EMPTY(&queue)) { | ||||
|       q = QUEUE_HEAD(&queue); | ||||
|       QUEUE_REMOVE(q); | ||||
|       handle = QUEUE_DATA(q, uv_fs_event_t, watchers); | ||||
|   uv__queue_move(&tmp_watcher_list.watchers, &queue); | ||||
|   while (!uv__queue_empty(&queue)) { | ||||
|       q = uv__queue_head(&queue); | ||||
|       uv__queue_remove(q); | ||||
|       handle = uv__queue_data(q, uv_fs_event_t, watchers); | ||||
|       tmp_path = handle->path; | ||||
|       handle->path = NULL; | ||||
|       err = uv_fs_event_start(handle, handle->cb, tmp_path, 0); | ||||
| @@ -2166,7 +2341,7 @@ static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) { | ||||
|  | ||||
| static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) { | ||||
|   /* if the watcher_list->watchers is being iterated over, we can't free it. */ | ||||
|   if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) { | ||||
|   if ((!w->iterating) && uv__queue_empty(&w->watchers)) { | ||||
|     /* No watchers left for this path. Clean up. */ | ||||
|     RB_REMOVE(watcher_root, uv__inotify_watchers(loop), w); | ||||
|     inotify_rm_watch(loop->inotify_fd, w->wd); | ||||
| @@ -2181,8 +2356,8 @@ static void uv__inotify_read(uv_loop_t* loop, | ||||
|   const struct inotify_event* e; | ||||
|   struct watcher_list* w; | ||||
|   uv_fs_event_t* h; | ||||
|   QUEUE queue; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue queue; | ||||
|   struct uv__queue* q; | ||||
|   const char* path; | ||||
|   ssize_t size; | ||||
|   const char *p; | ||||
| @@ -2225,7 +2400,7 @@ static void uv__inotify_read(uv_loop_t* loop, | ||||
|        * What can go wrong? | ||||
|        * A callback could call uv_fs_event_stop() | ||||
|        * and the queue can change under our feet. | ||||
|        * So, we use QUEUE_MOVE() trick to safely iterate over the queue. | ||||
|        * So, we use uv__queue_move() trick to safely iterate over the queue. | ||||
|        * And we don't free the watcher_list until we're done iterating. | ||||
|        * | ||||
|        * First, | ||||
| @@ -2233,13 +2408,13 @@ static void uv__inotify_read(uv_loop_t* loop, | ||||
|        * not to free watcher_list. | ||||
|        */ | ||||
|       w->iterating = 1; | ||||
|       QUEUE_MOVE(&w->watchers, &queue); | ||||
|       while (!QUEUE_EMPTY(&queue)) { | ||||
|         q = QUEUE_HEAD(&queue); | ||||
|         h = QUEUE_DATA(q, uv_fs_event_t, watchers); | ||||
|       uv__queue_move(&w->watchers, &queue); | ||||
|       while (!uv__queue_empty(&queue)) { | ||||
|         q = uv__queue_head(&queue); | ||||
|         h = uv__queue_data(q, uv_fs_event_t, watchers); | ||||
|  | ||||
|         QUEUE_REMOVE(q); | ||||
|         QUEUE_INSERT_TAIL(&w->watchers, q); | ||||
|         uv__queue_remove(q); | ||||
|         uv__queue_insert_tail(&w->watchers, q); | ||||
|  | ||||
|         h->cb(h, path, events, 0); | ||||
|       } | ||||
| @@ -2301,13 +2476,13 @@ int uv_fs_event_start(uv_fs_event_t* handle, | ||||
|  | ||||
|   w->wd = wd; | ||||
|   w->path = memcpy(w + 1, path, len); | ||||
|   QUEUE_INIT(&w->watchers); | ||||
|   uv__queue_init(&w->watchers); | ||||
|   w->iterating = 0; | ||||
|   RB_INSERT(watcher_root, uv__inotify_watchers(loop), w); | ||||
|  | ||||
| no_insert: | ||||
|   uv__handle_start(handle); | ||||
|   QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers); | ||||
|   uv__queue_insert_tail(&w->watchers, &handle->watchers); | ||||
|   handle->path = w->path; | ||||
|   handle->cb = cb; | ||||
|   handle->wd = wd; | ||||
| @@ -2328,7 +2503,7 @@ int uv_fs_event_stop(uv_fs_event_t* handle) { | ||||
|   handle->wd = -1; | ||||
|   handle->path = NULL; | ||||
|   uv__handle_stop(handle); | ||||
|   QUEUE_REMOVE(&handle->watchers); | ||||
|   uv__queue_remove(&handle->watchers); | ||||
|  | ||||
|   maybe_free_watcher_list(w, handle->loop); | ||||
|  | ||||
|   | ||||
deps/libuv/src/unix/loop-watcher.c (vendored) | 20
| @@ -32,7 +32,7 @@ | ||||
|   int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) {           \ | ||||
|     if (uv__is_active(handle)) return 0;                                      \ | ||||
|     if (cb == NULL) return UV_EINVAL;                                         \ | ||||
|     QUEUE_INSERT_HEAD(&handle->loop->name##_handles, &handle->queue);         \ | ||||
|     uv__queue_insert_head(&handle->loop->name##_handles, &handle->queue);     \ | ||||
|     handle->name##_cb = cb;                                                   \ | ||||
|     uv__handle_start(handle);                                                 \ | ||||
|     return 0;                                                                 \ | ||||
| @@ -40,21 +40,21 @@ | ||||
|                                                                               \ | ||||
|   int uv_##name##_stop(uv_##name##_t* handle) {                               \ | ||||
|     if (!uv__is_active(handle)) return 0;                                     \ | ||||
|     QUEUE_REMOVE(&handle->queue);                                             \ | ||||
|     uv__queue_remove(&handle->queue);                                         \ | ||||
|     uv__handle_stop(handle);                                                  \ | ||||
|     return 0;                                                                 \ | ||||
|   }                                                                           \ | ||||
|                                                                               \ | ||||
|   void uv__run_##name(uv_loop_t* loop) {                                      \ | ||||
|     uv_##name##_t* h;                                                         \ | ||||
|     QUEUE queue;                                                              \ | ||||
|     QUEUE* q;                                                                 \ | ||||
|     QUEUE_MOVE(&loop->name##_handles, &queue);                                \ | ||||
|     while (!QUEUE_EMPTY(&queue)) {                                            \ | ||||
|       q = QUEUE_HEAD(&queue);                                                 \ | ||||
|       h = QUEUE_DATA(q, uv_##name##_t, queue);                                \ | ||||
|       QUEUE_REMOVE(q);                                                        \ | ||||
|       QUEUE_INSERT_TAIL(&loop->name##_handles, q);                            \ | ||||
|     struct uv__queue queue;                                                   \ | ||||
|     struct uv__queue* q;                                                      \ | ||||
|     uv__queue_move(&loop->name##_handles, &queue);                            \ | ||||
|     while (!uv__queue_empty(&queue)) {                                        \ | ||||
|       q = uv__queue_head(&queue);                                             \ | ||||
|       h = uv__queue_data(q, uv_##name##_t, queue);                            \ | ||||
|       uv__queue_remove(q);                                                    \ | ||||
|       uv__queue_insert_tail(&loop->name##_handles, q);                        \ | ||||
|       h->name##_cb(h);                                                        \ | ||||
|     }                                                                         \ | ||||
|   }                                                                           \ | ||||
|   | ||||
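The move-then-reinsert loop in uv__run_##name above is the standard idiom in this codebase for walking a queue whose callbacks may start or stop handles mid-iteration. A standalone sketch of the idiom ("head" stands in for the real list, e.g. loop->idle_handles):

    struct uv__queue queue;
    struct uv__queue* q;
    uv__queue_move(&head, &queue);      /* detach every node onto a local list */
    while (!uv__queue_empty(&queue)) {
      q = uv__queue_head(&queue);
      uv__queue_remove(q);              /* pop from the local list... */
      uv__queue_insert_tail(&head, q);  /* ...and rejoin the original */
      /* run the callback; it may freely mutate head now */
    }

A callback that stops a handle simply unlinks its node from whichever list currently holds it, so the walk never follows stale links, and a handle started during iteration lands on the original list without being visited in the current round.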
deps/libuv/src/unix/loop.c (vendored) | 28
| @@ -50,20 +50,20 @@ int uv_loop_init(uv_loop_t* loop) { | ||||
|          sizeof(lfields->loop_metrics.metrics)); | ||||
|  | ||||
|   heap_init((struct heap*) &loop->timer_heap); | ||||
|   QUEUE_INIT(&loop->wq); | ||||
|   QUEUE_INIT(&loop->idle_handles); | ||||
|   QUEUE_INIT(&loop->async_handles); | ||||
|   QUEUE_INIT(&loop->check_handles); | ||||
|   QUEUE_INIT(&loop->prepare_handles); | ||||
|   QUEUE_INIT(&loop->handle_queue); | ||||
|   uv__queue_init(&loop->wq); | ||||
|   uv__queue_init(&loop->idle_handles); | ||||
|   uv__queue_init(&loop->async_handles); | ||||
|   uv__queue_init(&loop->check_handles); | ||||
|   uv__queue_init(&loop->prepare_handles); | ||||
|   uv__queue_init(&loop->handle_queue); | ||||
|  | ||||
|   loop->active_handles = 0; | ||||
|   loop->active_reqs.count = 0; | ||||
|   loop->nfds = 0; | ||||
|   loop->watchers = NULL; | ||||
|   loop->nwatchers = 0; | ||||
|   QUEUE_INIT(&loop->pending_queue); | ||||
|   QUEUE_INIT(&loop->watcher_queue); | ||||
|   uv__queue_init(&loop->pending_queue); | ||||
|   uv__queue_init(&loop->watcher_queue); | ||||
|  | ||||
|   loop->closing_handles = NULL; | ||||
|   uv__update_time(loop); | ||||
| @@ -85,7 +85,7 @@ int uv_loop_init(uv_loop_t* loop) { | ||||
|   err = uv__process_init(loop); | ||||
|   if (err) | ||||
|     goto fail_signal_init; | ||||
|   QUEUE_INIT(&loop->process_handles); | ||||
|   uv__queue_init(&loop->process_handles); | ||||
|  | ||||
|   err = uv_rwlock_init(&loop->cloexec_lock); | ||||
|   if (err) | ||||
| @@ -152,9 +152,9 @@ int uv_loop_fork(uv_loop_t* loop) { | ||||
|     if (w == NULL) | ||||
|       continue; | ||||
|  | ||||
|     if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue)) { | ||||
|     if (w->pevents != 0 && uv__queue_empty(&w->watcher_queue)) { | ||||
|       w->events = 0; /* Force re-registration in uv__io_poll. */ | ||||
|       QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); | ||||
|       uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue); | ||||
|     } | ||||
|   } | ||||
|  | ||||
| @@ -180,7 +180,7 @@ void uv__loop_close(uv_loop_t* loop) { | ||||
|   } | ||||
|  | ||||
|   uv_mutex_lock(&loop->wq_mutex); | ||||
|   assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!"); | ||||
|   assert(uv__queue_empty(&loop->wq) && "thread pool work queue not empty!"); | ||||
|   assert(!uv__has_active_reqs(loop)); | ||||
|   uv_mutex_unlock(&loop->wq_mutex); | ||||
|   uv_mutex_destroy(&loop->wq_mutex); | ||||
| @@ -192,8 +192,8 @@ void uv__loop_close(uv_loop_t* loop) { | ||||
|   uv_rwlock_destroy(&loop->cloexec_lock); | ||||
|  | ||||
| #if 0 | ||||
|   assert(QUEUE_EMPTY(&loop->pending_queue)); | ||||
|   assert(QUEUE_EMPTY(&loop->watcher_queue)); | ||||
|   assert(uv__queue_empty(&loop->pending_queue)); | ||||
|   assert(uv__queue_empty(&loop->watcher_queue)); | ||||
|   assert(loop->nfds == 0); | ||||
| #endif | ||||
|  | ||||
|   | ||||
deps/libuv/src/unix/os390-syscalls.c (vendored) | 24
| @@ -27,7 +27,7 @@ | ||||
| #include <termios.h> | ||||
| #include <sys/msg.h> | ||||
|  | ||||
| static QUEUE global_epoll_queue; | ||||
| static struct uv__queue global_epoll_queue; | ||||
| static uv_mutex_t global_epoll_lock; | ||||
| static uv_once_t once = UV_ONCE_INIT; | ||||
|  | ||||
| @@ -178,18 +178,18 @@ static void after_fork(void) { | ||||
|  | ||||
|  | ||||
| static void child_fork(void) { | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   uv_once_t child_once = UV_ONCE_INIT; | ||||
|  | ||||
|   /* reset once */ | ||||
|   memcpy(&once, &child_once, sizeof(child_once)); | ||||
|  | ||||
|   /* reset epoll list */ | ||||
|   while (!QUEUE_EMPTY(&global_epoll_queue)) { | ||||
|   while (!uv__queue_empty(&global_epoll_queue)) { | ||||
|     uv__os390_epoll* lst; | ||||
|     q = QUEUE_HEAD(&global_epoll_queue); | ||||
|     QUEUE_REMOVE(q); | ||||
|     lst = QUEUE_DATA(q, uv__os390_epoll, member); | ||||
|     q = uv__queue_head(&global_epoll_queue); | ||||
|     uv__queue_remove(q); | ||||
|     lst = uv__queue_data(q, uv__os390_epoll, member); | ||||
|     uv__free(lst->items); | ||||
|     lst->items = NULL; | ||||
|     lst->size = 0; | ||||
| @@ -201,7 +201,7 @@ static void child_fork(void) { | ||||
|  | ||||
|  | ||||
| static void epoll_init(void) { | ||||
|   QUEUE_INIT(&global_epoll_queue); | ||||
|   uv__queue_init(&global_epoll_queue); | ||||
|   if (uv_mutex_init(&global_epoll_lock)) | ||||
|     abort(); | ||||
|  | ||||
| @@ -225,7 +225,7 @@ uv__os390_epoll* epoll_create1(int flags) { | ||||
|     lst->items[lst->size - 1].revents = 0; | ||||
|     uv_once(&once, epoll_init); | ||||
|     uv_mutex_lock(&global_epoll_lock); | ||||
|     QUEUE_INSERT_TAIL(&global_epoll_queue, &lst->member); | ||||
|     uv__queue_insert_tail(&global_epoll_queue, &lst->member); | ||||
|     uv_mutex_unlock(&global_epoll_lock); | ||||
|   } | ||||
|  | ||||
| @@ -352,14 +352,14 @@ int epoll_wait(uv__os390_epoll* lst, struct epoll_event* events, | ||||
|  | ||||
|  | ||||
| int epoll_file_close(int fd) { | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|  | ||||
|   uv_once(&once, epoll_init); | ||||
|   uv_mutex_lock(&global_epoll_lock); | ||||
|   QUEUE_FOREACH(q, &global_epoll_queue) { | ||||
|   uv__queue_foreach(q, &global_epoll_queue) { | ||||
|     uv__os390_epoll* lst; | ||||
|  | ||||
|     lst = QUEUE_DATA(q, uv__os390_epoll, member); | ||||
|     lst = uv__queue_data(q, uv__os390_epoll, member); | ||||
|     if (fd < lst->size && lst->items != NULL && lst->items[fd].fd != -1) | ||||
|       lst->items[fd].fd = -1; | ||||
|   } | ||||
| @@ -371,7 +371,7 @@ int epoll_file_close(int fd) { | ||||
| void epoll_queue_close(uv__os390_epoll* lst) { | ||||
|   /* Remove epoll instance from global queue */ | ||||
|   uv_mutex_lock(&global_epoll_lock); | ||||
|   QUEUE_REMOVE(&lst->member); | ||||
|   uv__queue_remove(&lst->member); | ||||
|   uv_mutex_unlock(&global_epoll_lock); | ||||
|  | ||||
|   /* Free resources */ | ||||
|   | ||||
deps/libuv/src/unix/os390-syscalls.h (vendored) | 2
| @@ -45,7 +45,7 @@ struct epoll_event { | ||||
| }; | ||||
|  | ||||
| typedef struct { | ||||
|   QUEUE member; | ||||
|   struct uv__queue member; | ||||
|   struct pollfd* items; | ||||
|   unsigned long size; | ||||
|   int msg_queue; | ||||
|   | ||||
deps/libuv/src/unix/os390.c (vendored) | 14
| @@ -815,7 +815,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||||
|   uv__os390_epoll* ep; | ||||
|   int have_signals; | ||||
|   int real_timeout; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   uv__io_t* w; | ||||
|   uint64_t base; | ||||
|   int count; | ||||
| @@ -827,19 +827,19 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||||
|   int reset_timeout; | ||||
|  | ||||
|   if (loop->nfds == 0) { | ||||
|     assert(QUEUE_EMPTY(&loop->watcher_queue)); | ||||
|     assert(uv__queue_empty(&loop->watcher_queue)); | ||||
|     return; | ||||
|   } | ||||
|  | ||||
|   lfields = uv__get_internal_fields(loop); | ||||
|  | ||||
|   while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||||
|   while (!uv__queue_empty(&loop->watcher_queue)) { | ||||
|     uv_stream_t* stream; | ||||
|  | ||||
|     q = QUEUE_HEAD(&loop->watcher_queue); | ||||
|     QUEUE_REMOVE(q); | ||||
|     QUEUE_INIT(q); | ||||
|     w = QUEUE_DATA(q, uv__io_t, watcher_queue); | ||||
|     q = uv__queue_head(&loop->watcher_queue); | ||||
|     uv__queue_remove(q); | ||||
|     uv__queue_init(q); | ||||
|     w = uv__queue_data(q, uv__io_t, watcher_queue); | ||||
|  | ||||
|     assert(w->pevents != 0); | ||||
|     assert(w->fd >= 0); | ||||
|   | ||||
deps/libuv/src/unix/pipe.c (vendored) | 98
| @@ -41,26 +41,60 @@ int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) { | ||||
|  | ||||
|  | ||||
| int uv_pipe_bind(uv_pipe_t* handle, const char* name) { | ||||
|   return uv_pipe_bind2(handle, name, strlen(name), 0); | ||||
| } | ||||
|  | ||||
|  | ||||
| int uv_pipe_bind2(uv_pipe_t* handle, | ||||
|                   const char* name, | ||||
|                   size_t namelen, | ||||
|                   unsigned int flags) { | ||||
|   struct sockaddr_un saddr; | ||||
|   const char* pipe_fname; | ||||
|   char* pipe_fname; | ||||
|   int sockfd; | ||||
|   int err; | ||||
|  | ||||
|   pipe_fname = NULL; | ||||
|  | ||||
|   if (flags & ~UV_PIPE_NO_TRUNCATE) | ||||
|     return UV_EINVAL; | ||||
|  | ||||
|   if (name == NULL) | ||||
|     return UV_EINVAL; | ||||
|  | ||||
|   if (namelen == 0) | ||||
|     return UV_EINVAL; | ||||
|  | ||||
| #ifndef __linux__ | ||||
|   /* Abstract socket namespace only works on Linux. */ | ||||
|   if (*name == '\0') | ||||
|     return UV_EINVAL; | ||||
| #endif | ||||
|  | ||||
|   if (flags & UV_PIPE_NO_TRUNCATE) | ||||
|     if (namelen > sizeof(saddr.sun_path)) | ||||
|       return UV_EINVAL; | ||||
|  | ||||
|   /* Truncate long paths. Documented behavior. */ | ||||
|   if (namelen > sizeof(saddr.sun_path)) | ||||
|     namelen = sizeof(saddr.sun_path); | ||||
|  | ||||
|   /* Already bound? */ | ||||
|   if (uv__stream_fd(handle) >= 0) | ||||
|     return UV_EINVAL; | ||||
|   if (uv__is_closing(handle)) { | ||||
|     return UV_EINVAL; | ||||
|   } | ||||
|   /* Make a copy of the file name; it must outlive this function's scope. */ | ||||
|   pipe_fname = uv__strdup(name); | ||||
|   if (pipe_fname == NULL) | ||||
|     return UV_ENOMEM; | ||||
|  | ||||
|   /* We've got a copy, don't touch the original any more. */ | ||||
|   name = NULL; | ||||
|   if (uv__is_closing(handle)) | ||||
|     return UV_EINVAL; | ||||
|  | ||||
|   /* Make a copy of the file path unless it is an abstract socket. | ||||
|    * We unlink the file later but abstract sockets disappear | ||||
|    * automatically since they're not real file system entities. | ||||
|    */ | ||||
|   if (*name != '\0') { | ||||
|     pipe_fname = uv__strdup(name); | ||||
|     if (pipe_fname == NULL) | ||||
|       return UV_ENOMEM; | ||||
|   } | ||||
|  | ||||
|   err = uv__socket(AF_UNIX, SOCK_STREAM, 0); | ||||
|   if (err < 0) | ||||
| @@ -68,7 +102,7 @@ int uv_pipe_bind(uv_pipe_t* handle, const char* name) { | ||||
|   sockfd = err; | ||||
|  | ||||
|   memset(&saddr, 0, sizeof saddr); | ||||
|   uv__strscpy(saddr.sun_path, pipe_fname, sizeof(saddr.sun_path)); | ||||
|   memcpy(&saddr.sun_path, name, namelen); | ||||
|   saddr.sun_family = AF_UNIX; | ||||
|  | ||||
|   if (bind(sockfd, (struct sockaddr*)&saddr, sizeof saddr)) { | ||||
| @@ -83,12 +117,12 @@ int uv_pipe_bind(uv_pipe_t* handle, const char* name) { | ||||
|  | ||||
|   /* Success. */ | ||||
|   handle->flags |= UV_HANDLE_BOUND; | ||||
|   handle->pipe_fname = pipe_fname; /* Is a strdup'ed copy. */ | ||||
|   handle->pipe_fname = pipe_fname; /* NULL or a strdup'ed copy. */ | ||||
|   handle->io_watcher.fd = sockfd; | ||||
|   return 0; | ||||
|  | ||||
| err_socket: | ||||
|   uv__free((void*)pipe_fname); | ||||
|   uv__free(pipe_fname); | ||||
|   return err; | ||||
| } | ||||
|  | ||||
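With UV_PIPE_NO_TRUNCATE the caller opts out of the documented truncation and gets a hard error instead. A short usage sketch (the path and error handling are illustrative, not from the source):

    uv_pipe_t server;
    uv_pipe_init(loop, &server, 0);
    const char* name = "/tmp/example.sock";  /* hypothetical path */
    int r = uv_pipe_bind2(&server, name, strlen(name), UV_PIPE_NO_TRUNCATE);
    if (r == UV_EINVAL) {
      /* Name is too long for sun_path (or otherwise invalid); with flags==0
       * an overlong name would have been silently truncated instead. */
    }

uv_pipe_connect2() below applies the same flag and validation semantics to the connect path.
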
| @@ -176,11 +210,44 @@ void uv_pipe_connect(uv_connect_t* req, | ||||
|                     uv_pipe_t* handle, | ||||
|                     const char* name, | ||||
|                     uv_connect_cb cb) { | ||||
|   uv_pipe_connect2(req, handle, name, strlen(name), 0, cb); | ||||
| } | ||||
|  | ||||
|  | ||||
| int uv_pipe_connect2(uv_connect_t* req, | ||||
|                      uv_pipe_t* handle, | ||||
|                      const char* name, | ||||
|                      size_t namelen, | ||||
|                      unsigned int flags, | ||||
|                      uv_connect_cb cb) { | ||||
|   struct sockaddr_un saddr; | ||||
|   int new_sock; | ||||
|   int err; | ||||
|   int r; | ||||
|  | ||||
|   if (flags & ~UV_PIPE_NO_TRUNCATE) | ||||
|     return UV_EINVAL; | ||||
|  | ||||
|   if (name == NULL) | ||||
|     return UV_EINVAL; | ||||
|  | ||||
|   if (namelen == 0) | ||||
|     return UV_EINVAL; | ||||
|  | ||||
| #ifndef __linux__ | ||||
|   /* Abstract socket namespace only works on Linux. */ | ||||
|   if (*name == '\0') | ||||
|     return UV_EINVAL; | ||||
| #endif | ||||
|  | ||||
|   if (flags & UV_PIPE_NO_TRUNCATE) | ||||
|     if (namelen > sizeof(saddr.sun_path)) | ||||
|       return UV_EINVAL; | ||||
|  | ||||
|   /* Truncate long paths. Documented behavior. */ | ||||
|   if (namelen > sizeof(saddr.sun_path)) | ||||
|     namelen = sizeof(saddr.sun_path); | ||||
|  | ||||
|   new_sock = (uv__stream_fd(handle) == -1); | ||||
|  | ||||
|   if (new_sock) { | ||||
| @@ -191,7 +258,7 @@ void uv_pipe_connect(uv_connect_t* req, | ||||
|   } | ||||
|  | ||||
|   memset(&saddr, 0, sizeof saddr); | ||||
|   uv__strscpy(saddr.sun_path, name, sizeof(saddr.sun_path)); | ||||
|   memcpy(&saddr.sun_path, name, namelen); | ||||
|   saddr.sun_family = AF_UNIX; | ||||
|  | ||||
|   do { | ||||
| @@ -230,12 +297,13 @@ out: | ||||
|   uv__req_init(handle->loop, req, UV_CONNECT); | ||||
|   req->handle = (uv_stream_t*)handle; | ||||
|   req->cb = cb; | ||||
|   QUEUE_INIT(&req->queue); | ||||
|   uv__queue_init(&req->queue); | ||||
|  | ||||
|   /* Force callback to run on next tick in case of error. */ | ||||
|   if (err) | ||||
|     uv__io_feed(handle->loop, &handle->io_watcher); | ||||
|  | ||||
|   return 0; | ||||
| } | ||||
|  | ||||
|  | ||||
|   | ||||
deps/libuv/src/unix/posix-poll.c (vendored) | 14
| @@ -137,7 +137,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||||
|   sigset_t set; | ||||
|   uint64_t time_base; | ||||
|   uint64_t time_diff; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   uv__io_t* w; | ||||
|   size_t i; | ||||
|   unsigned int nevents; | ||||
| @@ -149,19 +149,19 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||||
|   int reset_timeout; | ||||
|  | ||||
|   if (loop->nfds == 0) { | ||||
|     assert(QUEUE_EMPTY(&loop->watcher_queue)); | ||||
|     assert(uv__queue_empty(&loop->watcher_queue)); | ||||
|     return; | ||||
|   } | ||||
|  | ||||
|   lfields = uv__get_internal_fields(loop); | ||||
|  | ||||
|   /* Take queued watchers and add their fds to our poll fds array.  */ | ||||
|   while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||||
|     q = QUEUE_HEAD(&loop->watcher_queue); | ||||
|     QUEUE_REMOVE(q); | ||||
|     QUEUE_INIT(q); | ||||
|   while (!uv__queue_empty(&loop->watcher_queue)) { | ||||
|     q = uv__queue_head(&loop->watcher_queue); | ||||
|     uv__queue_remove(q); | ||||
|     uv__queue_init(q); | ||||
|  | ||||
|     w = QUEUE_DATA(q, uv__io_t, watcher_queue); | ||||
|     w = uv__queue_data(q, uv__io_t, watcher_queue); | ||||
|     assert(w->pevents != 0); | ||||
|     assert(w->fd >= 0); | ||||
|     assert(w->fd < (int) loop->nwatchers); | ||||
|   | ||||
deps/libuv/src/unix/process.c (vendored) | 50
| @@ -108,17 +108,17 @@ void uv__wait_children(uv_loop_t* loop) { | ||||
|   int status; | ||||
|   int options; | ||||
|   pid_t pid; | ||||
|   QUEUE pending; | ||||
|   QUEUE* q; | ||||
|   QUEUE* h; | ||||
|   struct uv__queue pending; | ||||
|   struct uv__queue* q; | ||||
|   struct uv__queue* h; | ||||
|  | ||||
|   QUEUE_INIT(&pending); | ||||
|   uv__queue_init(&pending); | ||||
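|   /* Two-phase reap: exited children are first collected onto the local | ||||
|    * pending queue and their exit callbacks run from there afterwards, so | ||||
|    * a callback may safely spawn or close other process handles while we | ||||
|    * iterate. | ||||
|    */ | ||||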
|  | ||||
|   h = &loop->process_handles; | ||||
|   q = QUEUE_HEAD(h); | ||||
|   q = uv__queue_head(h); | ||||
|   while (q != h) { | ||||
|     process = QUEUE_DATA(q, uv_process_t, queue); | ||||
|     q = QUEUE_NEXT(q); | ||||
|     process = uv__queue_data(q, uv_process_t, queue); | ||||
|     q = uv__queue_next(q); | ||||
|  | ||||
| #ifndef UV_USE_SIGCHLD | ||||
|     if ((process->flags & UV_HANDLE_REAP) == 0) | ||||
| @@ -149,18 +149,18 @@ void uv__wait_children(uv_loop_t* loop) { | ||||
|  | ||||
|     assert(pid == process->pid); | ||||
|     process->status = status; | ||||
|     QUEUE_REMOVE(&process->queue); | ||||
|     QUEUE_INSERT_TAIL(&pending, &process->queue); | ||||
|     uv__queue_remove(&process->queue); | ||||
|     uv__queue_insert_tail(&pending, &process->queue); | ||||
|   } | ||||
|  | ||||
|   h = &pending; | ||||
|   q = QUEUE_HEAD(h); | ||||
|   q = uv__queue_head(h); | ||||
|   while (q != h) { | ||||
|     process = QUEUE_DATA(q, uv_process_t, queue); | ||||
|     q = QUEUE_NEXT(q); | ||||
|     process = uv__queue_data(q, uv_process_t, queue); | ||||
|     q = uv__queue_next(q); | ||||
|  | ||||
|     QUEUE_REMOVE(&process->queue); | ||||
|     QUEUE_INIT(&process->queue); | ||||
|     uv__queue_remove(&process->queue); | ||||
|     uv__queue_init(&process->queue); | ||||
|     uv__handle_stop(process); | ||||
|  | ||||
|     if (process->exit_cb == NULL) | ||||
| @@ -176,13 +176,18 @@ void uv__wait_children(uv_loop_t* loop) { | ||||
|  | ||||
|     process->exit_cb(process, exit_status, term_signal); | ||||
|   } | ||||
|   assert(QUEUE_EMPTY(&pending)); | ||||
|   assert(uv__queue_empty(&pending)); | ||||
| } | ||||
|  | ||||
| /* | ||||
|  * Used for initializing stdio streams like options.stdin_stream. Returns | ||||
|  * zero on success. See also the cleanup section in uv_spawn(). | ||||
|  */ | ||||
| #if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)) | ||||
| /* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be | ||||
|  * avoided. Since this isn't called on those targets, the function | ||||
|  * doesn't even need to be defined for them. | ||||
|  */ | ||||
| static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) { | ||||
|   int mask; | ||||
|   int fd; | ||||
| @@ -269,11 +274,6 @@ static void uv__write_errno(int error_fd) { | ||||
| } | ||||
|  | ||||
|  | ||||
| #if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)) | ||||
| /* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be | ||||
|  * avoided. Since this isn't called on those targets, the function | ||||
|  * doesn't even need to be defined for them. | ||||
|  */ | ||||
| static void uv__process_child_init(const uv_process_options_t* options, | ||||
|                                    int stdio_count, | ||||
|                                    int (*pipes)[2], | ||||
| @@ -405,7 +405,6 @@ static void uv__process_child_init(const uv_process_options_t* options, | ||||
|  | ||||
|   uv__write_errno(error_fd); | ||||
| } | ||||
| #endif | ||||
|  | ||||
|  | ||||
| #if defined(__APPLE__) | ||||
| @@ -952,6 +951,7 @@ static int uv__spawn_and_init_child( | ||||
|  | ||||
|   return err; | ||||
| } | ||||
| #endif  /* !(TARGET_OS_TV || TARGET_OS_WATCH) */ | ||||
|  | ||||
| int uv_spawn(uv_loop_t* loop, | ||||
|              uv_process_t* process, | ||||
| @@ -978,7 +978,7 @@ int uv_spawn(uv_loop_t* loop, | ||||
|                               UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS))); | ||||
|  | ||||
|   uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS); | ||||
|   QUEUE_INIT(&process->queue); | ||||
|   uv__queue_init(&process->queue); | ||||
|   process->status = 0; | ||||
|  | ||||
|   stdio_count = options->stdio_count; | ||||
| @@ -1041,7 +1041,7 @@ int uv_spawn(uv_loop_t* loop, | ||||
|  | ||||
|     process->pid = pid; | ||||
|     process->exit_cb = options->exit_cb; | ||||
|     QUEUE_INSERT_TAIL(&loop->process_handles, &process->queue); | ||||
|     uv__queue_insert_tail(&loop->process_handles, &process->queue); | ||||
|     uv__handle_start(process); | ||||
|   } | ||||
|  | ||||
| @@ -1103,10 +1103,10 @@ int uv_kill(int pid, int signum) { | ||||
|  | ||||
|  | ||||
| void uv__process_close(uv_process_t* handle) { | ||||
|   QUEUE_REMOVE(&handle->queue); | ||||
|   uv__queue_remove(&handle->queue); | ||||
|   uv__handle_stop(handle); | ||||
| #ifdef UV_USE_SIGCHLD | ||||
|   if (QUEUE_EMPTY(&handle->loop->process_handles)) | ||||
|   if (uv__queue_empty(&handle->loop->process_handles)) | ||||
|     uv_signal_stop(&handle->loop->child_watcher); | ||||
| #endif | ||||
| } | ||||
|   | ||||
deps/libuv/src/unix/signal.c (vendored) | 8
| @@ -291,16 +291,16 @@ int uv__signal_loop_fork(uv_loop_t* loop) { | ||||
|  | ||||
|  | ||||
| void uv__signal_loop_cleanup(uv_loop_t* loop) { | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|  | ||||
|   /* Stop all the signal watchers that are still attached to this loop. This | ||||
|    * ensures that the (shared) signal tree doesn't contain any invalid | ||||
|    * entries, and that signal handlers are removed when appropriate. | ||||
|    * It's safe to use QUEUE_FOREACH here because the handles and the handle | ||||
|    * It's safe to use uv__queue_foreach here because the handles and the handle | ||||
|    * queue are not modified by uv__signal_stop(). | ||||
|    */ | ||||
|   QUEUE_FOREACH(q, &loop->handle_queue) { | ||||
|     uv_handle_t* handle = QUEUE_DATA(q, uv_handle_t, handle_queue); | ||||
|   uv__queue_foreach(q, &loop->handle_queue) { | ||||
|     uv_handle_t* handle = uv__queue_data(q, uv_handle_t, handle_queue); | ||||
|  | ||||
|     if (handle->type == UV_SIGNAL) | ||||
|       uv__signal_stop((uv_signal_t*) handle); | ||||
|   | ||||
deps/libuv/src/unix/stream.c (vendored) | 56
| @@ -94,8 +94,8 @@ void uv__stream_init(uv_loop_t* loop, | ||||
|   stream->accepted_fd = -1; | ||||
|   stream->queued_fds = NULL; | ||||
|   stream->delayed_error = 0; | ||||
|   QUEUE_INIT(&stream->write_queue); | ||||
|   QUEUE_INIT(&stream->write_completed_queue); | ||||
|   uv__queue_init(&stream->write_queue); | ||||
|   uv__queue_init(&stream->write_completed_queue); | ||||
|   stream->write_queue_size = 0; | ||||
|  | ||||
|   if (loop->emfile_fd == -1) { | ||||
| @@ -439,15 +439,15 @@ int uv__stream_open(uv_stream_t* stream, int fd, int flags) { | ||||
|  | ||||
| void uv__stream_flush_write_queue(uv_stream_t* stream, int error) { | ||||
|   uv_write_t* req; | ||||
|   QUEUE* q; | ||||
|   while (!QUEUE_EMPTY(&stream->write_queue)) { | ||||
|     q = QUEUE_HEAD(&stream->write_queue); | ||||
|     QUEUE_REMOVE(q); | ||||
|   struct uv__queue* q; | ||||
|   while (!uv__queue_empty(&stream->write_queue)) { | ||||
|     q = uv__queue_head(&stream->write_queue); | ||||
|     uv__queue_remove(q); | ||||
|  | ||||
|     req = QUEUE_DATA(q, uv_write_t, queue); | ||||
|     req = uv__queue_data(q, uv_write_t, queue); | ||||
|     req->error = error; | ||||
|  | ||||
|     QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue); | ||||
|     uv__queue_insert_tail(&stream->write_completed_queue, &req->queue); | ||||
|   } | ||||
| } | ||||
|  | ||||
| @@ -627,7 +627,7 @@ static void uv__drain(uv_stream_t* stream) { | ||||
|   uv_shutdown_t* req; | ||||
|   int err; | ||||
|  | ||||
|   assert(QUEUE_EMPTY(&stream->write_queue)); | ||||
|   assert(uv__queue_empty(&stream->write_queue)); | ||||
|   if (!(stream->flags & UV_HANDLE_CLOSING)) { | ||||
|     uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT); | ||||
|     uv__stream_osx_interrupt_select(stream); | ||||
| @@ -714,7 +714,7 @@ static void uv__write_req_finish(uv_write_t* req) { | ||||
|   uv_stream_t* stream = req->handle; | ||||
|  | ||||
|   /* Pop the req off tcp->write_queue. */ | ||||
|   QUEUE_REMOVE(&req->queue); | ||||
|   uv__queue_remove(&req->queue); | ||||
|  | ||||
|   /* Only free when there was no error. On error, we touch up write_queue_size | ||||
|    * right before making the callback. The reason we don't do that right away | ||||
| @@ -731,7 +731,7 @@ static void uv__write_req_finish(uv_write_t* req) { | ||||
|   /* Add it to the write_completed_queue where it will have its | ||||
|    * callback called in the near future. | ||||
|    */ | ||||
|   QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue); | ||||
|   uv__queue_insert_tail(&stream->write_completed_queue, &req->queue); | ||||
|   uv__io_feed(stream->loop, &stream->io_watcher); | ||||
| } | ||||
|  | ||||
| @@ -837,7 +837,7 @@ static int uv__try_write(uv_stream_t* stream, | ||||
| } | ||||
|  | ||||
| static void uv__write(uv_stream_t* stream) { | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   uv_write_t* req; | ||||
|   ssize_t n; | ||||
|   int count; | ||||
| @@ -851,11 +851,11 @@ static void uv__write(uv_stream_t* stream) { | ||||
|   count = 32; | ||||
|  | ||||
|   for (;;) { | ||||
|     if (QUEUE_EMPTY(&stream->write_queue)) | ||||
|     if (uv__queue_empty(&stream->write_queue)) | ||||
|       return; | ||||
|  | ||||
|     q = QUEUE_HEAD(&stream->write_queue); | ||||
|     req = QUEUE_DATA(q, uv_write_t, queue); | ||||
|     q = uv__queue_head(&stream->write_queue); | ||||
|     req = uv__queue_data(q, uv_write_t, queue); | ||||
|     assert(req->handle == stream); | ||||
|  | ||||
|     n = uv__try_write(stream, | ||||
| @@ -899,19 +899,19 @@ error: | ||||
|  | ||||
| static void uv__write_callbacks(uv_stream_t* stream) { | ||||
|   uv_write_t* req; | ||||
|   QUEUE* q; | ||||
|   QUEUE pq; | ||||
|   struct uv__queue* q; | ||||
|   struct uv__queue pq; | ||||
|  | ||||
|   if (QUEUE_EMPTY(&stream->write_completed_queue)) | ||||
|   if (uv__queue_empty(&stream->write_completed_queue)) | ||||
|     return; | ||||
|  | ||||
|   QUEUE_MOVE(&stream->write_completed_queue, &pq); | ||||
|   uv__queue_move(&stream->write_completed_queue, &pq); | ||||
|  | ||||
|   while (!QUEUE_EMPTY(&pq)) { | ||||
|   while (!uv__queue_empty(&pq)) { | ||||
|     /* Pop a req off write_completed_queue. */ | ||||
|     q = QUEUE_HEAD(&pq); | ||||
|     req = QUEUE_DATA(q, uv_write_t, queue); | ||||
|     QUEUE_REMOVE(q); | ||||
|     q = uv__queue_head(&pq); | ||||
|     req = uv__queue_data(q, uv_write_t, queue); | ||||
|     uv__queue_remove(q); | ||||
|     uv__req_unregister(stream->loop, req); | ||||
|  | ||||
|     if (req->bufs != NULL) { | ||||
| @@ -1174,7 +1174,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) { | ||||
|   stream->shutdown_req = req; | ||||
|   stream->flags &= ~UV_HANDLE_WRITABLE; | ||||
|  | ||||
|   if (QUEUE_EMPTY(&stream->write_queue)) | ||||
|   if (uv__queue_empty(&stream->write_queue)) | ||||
|     uv__io_feed(stream->loop, &stream->io_watcher); | ||||
|  | ||||
|   return 0; | ||||
| @@ -1227,7 +1227,7 @@ static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { | ||||
|     uv__write_callbacks(stream); | ||||
|  | ||||
|     /* Write queue drained. */ | ||||
|     if (QUEUE_EMPTY(&stream->write_queue)) | ||||
|     if (uv__queue_empty(&stream->write_queue)) | ||||
|       uv__drain(stream); | ||||
|   } | ||||
| } | ||||
| @@ -1270,7 +1270,7 @@ static void uv__stream_connect(uv_stream_t* stream) { | ||||
|   stream->connect_req = NULL; | ||||
|   uv__req_unregister(stream->loop, req); | ||||
|  | ||||
|   if (error < 0 || QUEUE_EMPTY(&stream->write_queue)) { | ||||
|   if (error < 0 || uv__queue_empty(&stream->write_queue)) { | ||||
|     uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT); | ||||
|   } | ||||
|  | ||||
| @@ -1352,7 +1352,7 @@ int uv_write2(uv_write_t* req, | ||||
|   req->handle = stream; | ||||
|   req->error = 0; | ||||
|   req->send_handle = send_handle; | ||||
|   QUEUE_INIT(&req->queue); | ||||
|   uv__queue_init(&req->queue); | ||||
|  | ||||
|   req->bufs = req->bufsml; | ||||
|   if (nbufs > ARRAY_SIZE(req->bufsml)) | ||||
| @@ -1367,7 +1367,7 @@ int uv_write2(uv_write_t* req, | ||||
|   stream->write_queue_size += uv__count_bufs(bufs, nbufs); | ||||
|  | ||||
|   /* Append the request to write_queue. */ | ||||
|   QUEUE_INSERT_TAIL(&stream->write_queue, &req->queue); | ||||
|   uv__queue_insert_tail(&stream->write_queue, &req->queue); | ||||
|  | ||||
|   /* If the queue was empty when this function began, we should attempt to | ||||
|    * do the write immediately. Otherwise start the write_watcher and wait | ||||
|   | ||||
deps/libuv/src/unix/sunos.c (vendored) | 18
| @@ -148,7 +148,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||||
|   struct port_event events[1024]; | ||||
|   struct port_event* pe; | ||||
|   struct timespec spec; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   uv__io_t* w; | ||||
|   sigset_t* pset; | ||||
|   sigset_t set; | ||||
| @@ -166,16 +166,16 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||||
|   int reset_timeout; | ||||
|  | ||||
|   if (loop->nfds == 0) { | ||||
|     assert(QUEUE_EMPTY(&loop->watcher_queue)); | ||||
|     assert(uv__queue_empty(&loop->watcher_queue)); | ||||
|     return; | ||||
|   } | ||||
|  | ||||
|   while (!QUEUE_EMPTY(&loop->watcher_queue)) { | ||||
|     q = QUEUE_HEAD(&loop->watcher_queue); | ||||
|     QUEUE_REMOVE(q); | ||||
|     QUEUE_INIT(q); | ||||
|   while (!uv__queue_empty(&loop->watcher_queue)) { | ||||
|     q = uv__queue_head(&loop->watcher_queue); | ||||
|     uv__queue_remove(q); | ||||
|     uv__queue_init(q); | ||||
|  | ||||
|     w = QUEUE_DATA(q, uv__io_t, watcher_queue); | ||||
|     w = uv__queue_data(q, uv__io_t, watcher_queue); | ||||
|     assert(w->pevents != 0); | ||||
|  | ||||
|     if (port_associate(loop->backend_fd, | ||||
| @@ -316,8 +316,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { | ||||
|         continue;  /* Disabled by callback. */ | ||||
|  | ||||
|       /* Event ports operate in oneshot mode; rearm the watcher on the next run. */ | ||||
|       if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue)) | ||||
|         QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); | ||||
|       if (w->pevents != 0 && uv__queue_empty(&w->watcher_queue)) | ||||
|         uv__queue_insert_tail(&loop->watcher_queue, &w->watcher_queue); | ||||
|     } | ||||
|  | ||||
|     uv__metrics_inc_events(loop, nevents); | ||||
|   | ||||
deps/libuv/src/unix/tcp.c (vendored) | 4
| @@ -124,7 +124,7 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) { | ||||
|   if (domain != AF_UNSPEC) { | ||||
|     err = new_socket(tcp, domain, 0); | ||||
|     if (err) { | ||||
|       QUEUE_REMOVE(&tcp->handle_queue); | ||||
|       uv__queue_remove(&tcp->handle_queue); | ||||
|       if (tcp->io_watcher.fd != -1) | ||||
|         uv__close(tcp->io_watcher.fd); | ||||
|       tcp->io_watcher.fd = -1; | ||||
| @@ -252,7 +252,7 @@ out: | ||||
|   uv__req_init(handle->loop, req, UV_CONNECT); | ||||
|   req->cb = cb; | ||||
|   req->handle = (uv_stream_t*) handle; | ||||
|   QUEUE_INIT(&req->queue); | ||||
|   uv__queue_init(&req->queue); | ||||
|   handle->connect_req = req; | ||||
|  | ||||
|   uv__io_start(handle->loop, &handle->io_watcher, POLLOUT); | ||||
|   | ||||
deps/libuv/src/unix/tty.c (vendored) | 2
| @@ -222,7 +222,7 @@ skip: | ||||
|     int rc = r; | ||||
|     if (newfd != -1) | ||||
|       uv__close(newfd); | ||||
|     QUEUE_REMOVE(&tty->handle_queue); | ||||
|     uv__queue_remove(&tty->handle_queue); | ||||
|     do | ||||
|       r = fcntl(fd, F_SETFL, saved_flags); | ||||
|     while (r == -1 && errno == EINTR); | ||||
|   | ||||
deps/libuv/src/unix/udp.c (vendored) | 76
| @@ -62,18 +62,18 @@ void uv__udp_close(uv_udp_t* handle) { | ||||
|  | ||||
| void uv__udp_finish_close(uv_udp_t* handle) { | ||||
|   uv_udp_send_t* req; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|  | ||||
|   assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT)); | ||||
|   assert(handle->io_watcher.fd == -1); | ||||
|  | ||||
|   while (!QUEUE_EMPTY(&handle->write_queue)) { | ||||
|     q = QUEUE_HEAD(&handle->write_queue); | ||||
|     QUEUE_REMOVE(q); | ||||
|   while (!uv__queue_empty(&handle->write_queue)) { | ||||
|     q = uv__queue_head(&handle->write_queue); | ||||
|     uv__queue_remove(q); | ||||
|  | ||||
|     req = QUEUE_DATA(q, uv_udp_send_t, queue); | ||||
|     req = uv__queue_data(q, uv_udp_send_t, queue); | ||||
|     req->status = UV_ECANCELED; | ||||
|     QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); | ||||
|     uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); | ||||
|   } | ||||
|  | ||||
|   uv__udp_run_completed(handle); | ||||
| @@ -90,16 +90,16 @@ void uv__udp_finish_close(uv_udp_t* handle) { | ||||
|  | ||||
| static void uv__udp_run_completed(uv_udp_t* handle) { | ||||
|   uv_udp_send_t* req; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|  | ||||
|   assert(!(handle->flags & UV_HANDLE_UDP_PROCESSING)); | ||||
|   handle->flags |= UV_HANDLE_UDP_PROCESSING; | ||||
|  | ||||
|   while (!QUEUE_EMPTY(&handle->write_completed_queue)) { | ||||
|     q = QUEUE_HEAD(&handle->write_completed_queue); | ||||
|     QUEUE_REMOVE(q); | ||||
|   while (!uv__queue_empty(&handle->write_completed_queue)) { | ||||
|     q = uv__queue_head(&handle->write_completed_queue); | ||||
|     uv__queue_remove(q); | ||||
|  | ||||
|     req = QUEUE_DATA(q, uv_udp_send_t, queue); | ||||
|     req = uv__queue_data(q, uv_udp_send_t, queue); | ||||
|     uv__req_unregister(handle->loop, req); | ||||
|  | ||||
|     handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs); | ||||
| @@ -121,7 +121,7 @@ static void uv__udp_run_completed(uv_udp_t* handle) { | ||||
|       req->send_cb(req, req->status); | ||||
|   } | ||||
|  | ||||
|   if (QUEUE_EMPTY(&handle->write_queue)) { | ||||
|   if (uv__queue_empty(&handle->write_queue)) { | ||||
|     /* Pending queue and completion queue empty, stop watcher. */ | ||||
|     uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT); | ||||
|     if (!uv__io_active(&handle->io_watcher, POLLIN)) | ||||
| @@ -280,20 +280,20 @@ static void uv__udp_sendmsg(uv_udp_t* handle) { | ||||
|   uv_udp_send_t* req; | ||||
|   struct mmsghdr h[20]; | ||||
|   struct mmsghdr* p; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   ssize_t npkts; | ||||
|   size_t pkts; | ||||
|   size_t i; | ||||
|  | ||||
|   if (QUEUE_EMPTY(&handle->write_queue)) | ||||
|   if (uv__queue_empty(&handle->write_queue)) | ||||
|     return; | ||||
|  | ||||
| write_queue_drain: | ||||
|   for (pkts = 0, q = QUEUE_HEAD(&handle->write_queue); | ||||
|   for (pkts = 0, q = uv__queue_head(&handle->write_queue); | ||||
|        pkts < ARRAY_SIZE(h) && q != &handle->write_queue; | ||||
|        ++pkts, q = QUEUE_HEAD(q)) { | ||||
|        ++pkts, q = uv__queue_head(q)) { | ||||
|     assert(q != NULL); | ||||
|     req = QUEUE_DATA(q, uv_udp_send_t, queue); | ||||
|     req = uv__queue_data(q, uv_udp_send_t, queue); | ||||
|     assert(req != NULL); | ||||
|  | ||||
|     p = &h[pkts]; | ||||
| @@ -325,16 +325,16 @@ write_queue_drain: | ||||
|   if (npkts < 1) { | ||||
|     if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS) | ||||
|       return; | ||||
|     for (i = 0, q = QUEUE_HEAD(&handle->write_queue); | ||||
|     for (i = 0, q = uv__queue_head(&handle->write_queue); | ||||
|          i < pkts && q != &handle->write_queue; | ||||
|          ++i, q = QUEUE_HEAD(&handle->write_queue)) { | ||||
|          ++i, q = uv__queue_head(&handle->write_queue)) { | ||||
|       assert(q != NULL); | ||||
|       req = QUEUE_DATA(q, uv_udp_send_t, queue); | ||||
|       req = uv__queue_data(q, uv_udp_send_t, queue); | ||||
|       assert(req != NULL); | ||||
|  | ||||
|       req->status = UV__ERR(errno); | ||||
|       QUEUE_REMOVE(&req->queue); | ||||
|       QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); | ||||
|       uv__queue_remove(&req->queue); | ||||
|       uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); | ||||
|     } | ||||
|     uv__io_feed(handle->loop, &handle->io_watcher); | ||||
|     return; | ||||
| @@ -343,11 +343,11 @@ write_queue_drain: | ||||
|   /* Safety: npkts known to be >0 below. Hence cast from ssize_t | ||||
|    * to size_t safe. | ||||
|    */ | ||||
|   for (i = 0, q = QUEUE_HEAD(&handle->write_queue); | ||||
|   for (i = 0, q = uv__queue_head(&handle->write_queue); | ||||
|        i < (size_t)npkts && q != &handle->write_queue; | ||||
|        ++i, q = QUEUE_HEAD(&handle->write_queue)) { | ||||
|        ++i, q = uv__queue_head(&handle->write_queue)) { | ||||
|     assert(q != NULL); | ||||
|     req = QUEUE_DATA(q, uv_udp_send_t, queue); | ||||
|     req = uv__queue_data(q, uv_udp_send_t, queue); | ||||
|     assert(req != NULL); | ||||
|  | ||||
|     req->status = req->bufs[0].len; | ||||
| @@ -357,25 +357,25 @@ write_queue_drain: | ||||
|      * why we don't handle partial writes. Just pop the request | ||||
|      * off the write queue and onto the completed queue, done. | ||||
|      */ | ||||
|     QUEUE_REMOVE(&req->queue); | ||||
|     QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); | ||||
|     uv__queue_remove(&req->queue); | ||||
|     uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); | ||||
|   } | ||||
|  | ||||
|   /* couldn't batch everything, continue sending (jump to avoid stack growth) */ | ||||
|   if (!QUEUE_EMPTY(&handle->write_queue)) | ||||
|   if (!uv__queue_empty(&handle->write_queue)) | ||||
|     goto write_queue_drain; | ||||
|   uv__io_feed(handle->loop, &handle->io_watcher); | ||||
| #else  /* __linux__ || __FreeBSD__ */ | ||||
|   uv_udp_send_t* req; | ||||
|   struct msghdr h; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   ssize_t size; | ||||
|  | ||||
|   while (!QUEUE_EMPTY(&handle->write_queue)) { | ||||
|     q = QUEUE_HEAD(&handle->write_queue); | ||||
|   while (!uv__queue_empty(&handle->write_queue)) { | ||||
|     q = uv__queue_head(&handle->write_queue); | ||||
|     assert(q != NULL); | ||||
|  | ||||
|     req = QUEUE_DATA(q, uv_udp_send_t, queue); | ||||
|     req = uv__queue_data(q, uv_udp_send_t, queue); | ||||
|     assert(req != NULL); | ||||
|  | ||||
|     memset(&h, 0, sizeof h); | ||||
| @@ -414,8 +414,8 @@ write_queue_drain: | ||||
|      * why we don't handle partial writes. Just pop the request | ||||
|      * off the write queue and onto the completed queue, done. | ||||
|      */ | ||||
|     QUEUE_REMOVE(&req->queue); | ||||
|     QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); | ||||
|     uv__queue_remove(&req->queue); | ||||
|     uv__queue_insert_tail(&handle->write_completed_queue, &req->queue); | ||||
|     uv__io_feed(handle->loop, &handle->io_watcher); | ||||
|   } | ||||
| #endif  /* __linux__ || __FreeBSD__ */ | ||||
| @@ -729,7 +729,7 @@ int uv__udp_send(uv_udp_send_t* req, | ||||
|   memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0])); | ||||
|   handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs); | ||||
|   handle->send_queue_count++; | ||||
|   QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue); | ||||
|   uv__queue_insert_tail(&handle->write_queue, &req->queue); | ||||
|   uv__handle_start(handle); | ||||
|  | ||||
|   if (empty_queue && !(handle->flags & UV_HANDLE_UDP_PROCESSING)) { | ||||
| @@ -739,7 +739,7 @@ int uv__udp_send(uv_udp_send_t* req, | ||||
|      * away. In such cases the `io_watcher` has to be queued for asynchronous | ||||
|      * write. | ||||
|      */ | ||||
|     if (!QUEUE_EMPTY(&handle->write_queue)) | ||||
|     if (!uv__queue_empty(&handle->write_queue)) | ||||
|       uv__io_start(handle->loop, &handle->io_watcher, POLLOUT); | ||||
|   } else { | ||||
|     uv__io_start(handle->loop, &handle->io_watcher, POLLOUT); | ||||
| @@ -1007,8 +1007,8 @@ int uv__udp_init_ex(uv_loop_t* loop, | ||||
|   handle->send_queue_size = 0; | ||||
|   handle->send_queue_count = 0; | ||||
|   uv__io_init(&handle->io_watcher, uv__udp_io, fd); | ||||
|   QUEUE_INIT(&handle->write_queue); | ||||
|   QUEUE_INIT(&handle->write_completed_queue); | ||||
|   uv__queue_init(&handle->write_queue); | ||||
|   uv__queue_init(&handle->write_completed_queue); | ||||
|  | ||||
|   return 0; | ||||
| } | ||||
|   | ||||
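Every hunk in this file repeats the same drain pattern the QUEUE macros used to spell out: check for emptiness, take the head, unlink it, then recover the enclosing request with uv__queue_data's offsetof arithmetic. A self-contained sketch of that intrusive-list technique, with illustrative names rather than libuv's internals:

    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node* next; struct node* prev; };

    struct item {
      int value;
      struct node link;  /* the list node lives inside the element */
    };

    /* Same offsetof trick as uv__queue_data. */
    #define node_data(ptr, type, field) \
      ((type*) ((char*) (ptr) - offsetof(type, field)))

    static void list_init(struct node* h) { h->next = h; h->prev = h; }

    static void insert_tail(struct node* h, struct node* n) {
      n->next = h;
      n->prev = h->prev;
      h->prev->next = n;
      h->prev = n;
    }

    int main(void) {
      struct node head;
      struct item a = {1, {NULL, NULL}}, b = {2, {NULL, NULL}};

      list_init(&head);
      insert_tail(&head, &a.link);
      insert_tail(&head, &b.link);

      while (head.next != &head) {             /* cf. uv__queue_empty */
        struct node* q = head.next;            /* cf. uv__queue_head */
        q->prev->next = q->next;               /* cf. uv__queue_remove */
        q->next->prev = q->prev;
        struct item* it = node_data(q, struct item, link);
        printf("%d\n", it->value);             /* prints 1, then 2 */
      }
      return 0;
    }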
							
								
								
									
deps/libuv/src/uv-common.c (vendored) | 28
							| @@ -533,17 +533,17 @@ int uv_udp_recv_stop(uv_udp_t* handle) { | ||||
|  | ||||
|  | ||||
| void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) { | ||||
|   QUEUE queue; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue queue; | ||||
|   struct uv__queue* q; | ||||
|   uv_handle_t* h; | ||||
|  | ||||
|   QUEUE_MOVE(&loop->handle_queue, &queue); | ||||
|   while (!QUEUE_EMPTY(&queue)) { | ||||
|     q = QUEUE_HEAD(&queue); | ||||
|     h = QUEUE_DATA(q, uv_handle_t, handle_queue); | ||||
|   uv__queue_move(&loop->handle_queue, &queue); | ||||
|   while (!uv__queue_empty(&queue)) { | ||||
|     q = uv__queue_head(&queue); | ||||
|     h = uv__queue_data(q, uv_handle_t, handle_queue); | ||||
|  | ||||
|     QUEUE_REMOVE(q); | ||||
|     QUEUE_INSERT_TAIL(&loop->handle_queue, q); | ||||
|     uv__queue_remove(q); | ||||
|     uv__queue_insert_tail(&loop->handle_queue, q); | ||||
|  | ||||
|     if (h->flags & UV_HANDLE_INTERNAL) continue; | ||||
|     walk_cb(h, arg); | ||||
| @@ -553,14 +553,14 @@ void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) { | ||||
|  | ||||
| static void uv__print_handles(uv_loop_t* loop, int only_active, FILE* stream) { | ||||
|   const char* type; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   uv_handle_t* h; | ||||
|  | ||||
|   if (loop == NULL) | ||||
|     loop = uv_default_loop(); | ||||
|  | ||||
|   QUEUE_FOREACH(q, &loop->handle_queue) { | ||||
|     h = QUEUE_DATA(q, uv_handle_t, handle_queue); | ||||
|   uv__queue_foreach(q, &loop->handle_queue) { | ||||
|     h = uv__queue_data(q, uv_handle_t, handle_queue); | ||||
|  | ||||
|     if (only_active && !uv__is_active(h)) | ||||
|       continue; | ||||
| @@ -846,7 +846,7 @@ uv_loop_t* uv_loop_new(void) { | ||||
|  | ||||
|  | ||||
| int uv_loop_close(uv_loop_t* loop) { | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   uv_handle_t* h; | ||||
| #ifndef NDEBUG | ||||
|   void* saved_data; | ||||
| @@ -855,8 +855,8 @@ int uv_loop_close(uv_loop_t* loop) { | ||||
|   if (uv__has_active_reqs(loop)) | ||||
|     return UV_EBUSY; | ||||
|  | ||||
|   QUEUE_FOREACH(q, &loop->handle_queue) { | ||||
|     h = QUEUE_DATA(q, uv_handle_t, handle_queue); | ||||
|   uv__queue_foreach(q, &loop->handle_queue) { | ||||
|     h = uv__queue_data(q, uv_handle_t, handle_queue); | ||||
|     if (!(h->flags & UV_HANDLE_INTERNAL)) | ||||
|       return UV_EBUSY; | ||||
|   } | ||||
|   | ||||
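uv__queue_move shifts the whole handle list onto a stack-local head first, so uv_walk can re-insert each handle at the loop's tail without visiting it twice, even though the callback may close (and thus re-link) handles. Caller-visible behavior is unchanged; a typical use of the public API, shutting down every remaining handle:

    #include <uv.h>

    /* Close every handle the loop still tracks; uv_walk visits each
     * registered handle exactly once. */
    static void close_cb(uv_handle_t* handle, void* arg) {
      (void) arg;
      if (!uv_is_closing(handle))
        uv_close(handle, NULL);
    }

    int main(void) {
      uv_loop_t* loop = uv_default_loop();
      uv_walk(loop, close_cb, NULL);
      uv_run(loop, UV_RUN_DEFAULT);  /* let the close callbacks fire */
      return uv_loop_close(loop);
    }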
							
								
								
									
deps/libuv/src/uv-common.h (vendored) | 3
							| @@ -323,7 +323,7 @@ void uv__threadpool_cleanup(void); | ||||
|     (h)->loop = (loop_);                                                      \ | ||||
|     (h)->type = (type_);                                                      \ | ||||
|     (h)->flags = UV_HANDLE_REF;  /* Ref the loop when active. */              \ | ||||
|     QUEUE_INSERT_TAIL(&(loop_)->handle_queue, &(h)->handle_queue);            \ | ||||
|     uv__queue_insert_tail(&(loop_)->handle_queue, &(h)->handle_queue);        \ | ||||
|     uv__handle_platform_init(h);                                              \ | ||||
|   }                                                                           \ | ||||
|   while (0) | ||||
| @@ -415,6 +415,7 @@ struct uv__iou { | ||||
|   size_t sqelen; | ||||
|   int ringfd; | ||||
|   uint32_t in_flight; | ||||
|   uint32_t flags; | ||||
| }; | ||||
| #endif  /* __linux__ */ | ||||
|  | ||||
|   | ||||
							
								
								
									
deps/libuv/src/win/core.c (vendored) | 11
							| @@ -255,8 +255,8 @@ int uv_loop_init(uv_loop_t* loop) { | ||||
|   loop->time = 0; | ||||
|   uv_update_time(loop); | ||||
|  | ||||
|   QUEUE_INIT(&loop->wq); | ||||
|   QUEUE_INIT(&loop->handle_queue); | ||||
|   uv__queue_init(&loop->wq); | ||||
|   uv__queue_init(&loop->handle_queue); | ||||
|   loop->active_reqs.count = 0; | ||||
|   loop->active_handles = 0; | ||||
|  | ||||
| @@ -358,7 +358,7 @@ void uv__loop_close(uv_loop_t* loop) { | ||||
|   } | ||||
|  | ||||
|   uv_mutex_lock(&loop->wq_mutex); | ||||
|   assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!"); | ||||
|   assert(uv__queue_empty(&loop->wq) && "thread pool work queue not empty!"); | ||||
|   assert(!uv__has_active_reqs(loop)); | ||||
|   uv_mutex_unlock(&loop->wq_mutex); | ||||
|   uv_mutex_destroy(&loop->wq_mutex); | ||||
| @@ -629,9 +629,8 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) { | ||||
|    * while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed | ||||
|    * once, which should be done after polling in order to maintain proper | ||||
|    * execution order of the conceptual event loop. */ | ||||
|   if (mode == UV_RUN_DEFAULT) { | ||||
|     if (r) | ||||
|       uv_update_time(loop); | ||||
|   if (mode == UV_RUN_DEFAULT && r != 0 && loop->stop_flag == 0) { | ||||
|     uv_update_time(loop); | ||||
|     uv__run_timers(loop); | ||||
|   } | ||||
|  | ||||
|   | ||||
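The uv_run() change folds the stop_flag check into the post-poll timer pass: previously a uv_stop() issued from a callback could still let due timers fire once more before uv_run() returned. A minimal sketch of the interaction this guards, using only standard public API (nothing specific to this diff):

    #include <uv.h>

    static void on_timer(uv_timer_t* t) {
      uv_stop(t->loop);  /* sets loop->stop_flag; timers should not
                          * fire again in this uv_run() invocation */
    }

    int main(void) {
      uv_loop_t loop;
      uv_timer_t timer;

      uv_loop_init(&loop);
      uv_timer_init(&loop, &timer);
      uv_timer_start(&timer, on_timer, 10 /* ms */, 10 /* repeat */);
      uv_run(&loop, UV_RUN_DEFAULT);   /* returns after on_timer stops it */

      uv_close((uv_handle_t*) &timer, NULL);
      uv_run(&loop, UV_RUN_DEFAULT);   /* drain the close callback */
      return uv_loop_close(&loop);
    }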
							
								
								
									
deps/libuv/src/win/fs.c (vendored) | 315
							| @@ -144,26 +144,97 @@ void uv__fs_init(void) { | ||||
| } | ||||
|  | ||||
|  | ||||
| static int32_t fs__decode_wtf8_char(const char** input) { | ||||
|   uint32_t code_point; | ||||
|   uint8_t b1; | ||||
|   uint8_t b2; | ||||
|   uint8_t b3; | ||||
|   uint8_t b4; | ||||
|  | ||||
|   b1 = **input; | ||||
|   if (b1 <= 0x7F) | ||||
|     return b1; /* ASCII code point */ | ||||
|   if (b1 < 0xC2) | ||||
|     return -1; /* invalid: continuation byte */ | ||||
|   code_point = b1; | ||||
|  | ||||
|   b2 = *++*input; | ||||
|   if ((b2 & 0xC0) != 0x80) | ||||
|     return -1; /* invalid: not a continuation byte */ | ||||
|   code_point = (code_point << 6) | (b2 & 0x3F); | ||||
|   if (b1 <= 0xDF) | ||||
|     return 0x7FF & code_point; /* two-byte character */ | ||||
|  | ||||
|   b3 = *++*input; | ||||
|   if ((b3 & 0xC0) != 0x80) | ||||
|     return -1; /* invalid: not a continuation byte */ | ||||
|   code_point = (code_point << 6) | (b3 & 0x3F); | ||||
|   if (b1 <= 0xEF) | ||||
|     return 0xFFFF & code_point; /* three-byte character */ | ||||
|  | ||||
|   b4 = *++*input; | ||||
|   if ((b4 & 0xC0) != 0x80) | ||||
|     return -1; /* invalid: not a continuation byte */ | ||||
|   code_point = (code_point << 6) | (b4 & 0x3F); | ||||
|   if (b1 <= 0xF4) | ||||
|     if (code_point <= 0x10FFFF) | ||||
|       return code_point; /* four-byte character */ | ||||
|  | ||||
|   /* code point too large */ | ||||
|   return -1; | ||||
| } | ||||
|  | ||||
|  | ||||
| static ssize_t fs__get_length_wtf8(const char* source_ptr) { | ||||
|   size_t w_target_len = 0; | ||||
|   int32_t code_point; | ||||
|  | ||||
|   do { | ||||
|     code_point = fs__decode_wtf8_char(&source_ptr); | ||||
|     if (code_point < 0) | ||||
|       return -1; | ||||
|     if (code_point > 0xFFFF) | ||||
|       w_target_len++; | ||||
|     w_target_len++; | ||||
|   } while (*source_ptr++); | ||||
|   return w_target_len; | ||||
| } | ||||
|  | ||||
|  | ||||
| static void fs__wtf8_to_wide(const char* source_ptr, WCHAR* w_target) { | ||||
|   int32_t code_point; | ||||
|  | ||||
|   do { | ||||
|     code_point = fs__decode_wtf8_char(&source_ptr); | ||||
|     /* fs__get_length_wtf8 should have been called and checked first. */ | ||||
|     assert(code_point >= 0); | ||||
|     if (code_point > 0xFFFF) { | ||||
|       assert(code_point <= 0x10FFFF); | ||||
|       *w_target++ = (((code_point - 0x10000) >> 10) + 0xD800); | ||||
|       *w_target++ = ((code_point - 0x10000) & 0x3FF) + 0xDC00; | ||||
|     } else { | ||||
|       *w_target++ = code_point; | ||||
|     } | ||||
|   } while (*source_ptr++); | ||||
| } | ||||
|  | ||||
|  | ||||
| INLINE static int fs__capture_path(uv_fs_t* req, const char* path, | ||||
|     const char* new_path, const int copy_path) { | ||||
|   char* buf; | ||||
|   char* pos; | ||||
|   ssize_t buf_sz = 0, path_len = 0, pathw_len = 0, new_pathw_len = 0; | ||||
|   WCHAR* buf; | ||||
|   WCHAR* pos; | ||||
|   size_t buf_sz = 0; | ||||
|   size_t path_len = 0; | ||||
|   ssize_t pathw_len = 0; | ||||
|   ssize_t new_pathw_len = 0; | ||||
|  | ||||
|   /* new_path can only be set if path is also set. */ | ||||
|   assert(new_path == NULL || path != NULL); | ||||
|  | ||||
|   if (path != NULL) { | ||||
|     pathw_len = MultiByteToWideChar(CP_UTF8, | ||||
|                                     0, | ||||
|                                     path, | ||||
|                                     -1, | ||||
|                                     NULL, | ||||
|                                     0); | ||||
|     if (pathw_len == 0) { | ||||
|       return GetLastError(); | ||||
|     } | ||||
|  | ||||
|     pathw_len = fs__get_length_wtf8(path); | ||||
|     if (pathw_len < 0) | ||||
|       return ERROR_INVALID_NAME; | ||||
|     buf_sz += pathw_len * sizeof(WCHAR); | ||||
|   } | ||||
|  | ||||
| @@ -173,16 +244,9 @@ INLINE static int fs__capture_path(uv_fs_t* req, const char* path, | ||||
|   } | ||||
|  | ||||
|   if (new_path != NULL) { | ||||
|     new_pathw_len = MultiByteToWideChar(CP_UTF8, | ||||
|                                         0, | ||||
|                                         new_path, | ||||
|                                         -1, | ||||
|                                         NULL, | ||||
|                                         0); | ||||
|     if (new_pathw_len == 0) { | ||||
|       return GetLastError(); | ||||
|     } | ||||
|  | ||||
|     new_pathw_len = fs__get_length_wtf8(new_path); | ||||
|     if (new_pathw_len < 0) | ||||
|       return ERROR_INVALID_NAME; | ||||
|     buf_sz += new_pathw_len * sizeof(WCHAR); | ||||
|   } | ||||
|  | ||||
| @@ -194,7 +258,7 @@ INLINE static int fs__capture_path(uv_fs_t* req, const char* path, | ||||
|     return 0; | ||||
|   } | ||||
|  | ||||
|   buf = (char*) uv__malloc(buf_sz); | ||||
|   buf = uv__malloc(buf_sz); | ||||
|   if (buf == NULL) { | ||||
|     return ERROR_OUTOFMEMORY; | ||||
|   } | ||||
| @@ -202,29 +266,17 @@ INLINE static int fs__capture_path(uv_fs_t* req, const char* path, | ||||
|   pos = buf; | ||||
|  | ||||
|   if (path != NULL) { | ||||
|     DWORD r = MultiByteToWideChar(CP_UTF8, | ||||
|                                   0, | ||||
|                                   path, | ||||
|                                   -1, | ||||
|                                   (WCHAR*) pos, | ||||
|                                   pathw_len); | ||||
|     assert(r == (DWORD) pathw_len); | ||||
|     req->file.pathw = (WCHAR*) pos; | ||||
|     pos += r * sizeof(WCHAR); | ||||
|     fs__wtf8_to_wide(path, pos); | ||||
|     req->file.pathw = pos; | ||||
|     pos += pathw_len; | ||||
|   } else { | ||||
|     req->file.pathw = NULL; | ||||
|   } | ||||
|  | ||||
|   if (new_path != NULL) { | ||||
|     DWORD r = MultiByteToWideChar(CP_UTF8, | ||||
|                                   0, | ||||
|                                   new_path, | ||||
|                                   -1, | ||||
|                                   (WCHAR*) pos, | ||||
|                                   new_pathw_len); | ||||
|     assert(r == (DWORD) new_pathw_len); | ||||
|     req->fs.info.new_pathw = (WCHAR*) pos; | ||||
|     pos += r * sizeof(WCHAR); | ||||
|     fs__wtf8_to_wide(new_path, pos); | ||||
|     req->fs.info.new_pathw = pos; | ||||
|     pos += new_pathw_len; | ||||
|   } else { | ||||
|     req->fs.info.new_pathw = NULL; | ||||
|   } | ||||
| @@ -232,8 +284,8 @@ INLINE static int fs__capture_path(uv_fs_t* req, const char* path, | ||||
|   req->path = path; | ||||
|   if (path != NULL && copy_path) { | ||||
|     memcpy(pos, path, path_len); | ||||
|     assert(path_len == buf_sz - (pos - buf)); | ||||
|     req->path = pos; | ||||
|     assert(path_len == buf_sz - (pos - buf) * sizeof(WCHAR)); | ||||
|     req->path = (char*) pos; | ||||
|   } | ||||
|  | ||||
|   req->flags |= UV_FS_FREE_PATHS; | ||||
| @@ -259,57 +311,115 @@ INLINE static void uv__fs_req_init(uv_loop_t* loop, uv_fs_t* req, | ||||
| } | ||||
|  | ||||
|  | ||||
| static int fs__wide_to_utf8(WCHAR* w_source_ptr, | ||||
|                                DWORD w_source_len, | ||||
|                                char** target_ptr, | ||||
|                                uint64_t* target_len_ptr) { | ||||
|   int r; | ||||
|   int target_len; | ||||
| static int32_t fs__get_surrogate_value(const WCHAR* w_source_ptr, | ||||
|                                        size_t w_source_len) { | ||||
|   WCHAR u; | ||||
|   WCHAR next; | ||||
|  | ||||
|   u = w_source_ptr[0]; | ||||
|   if (u >= 0xD800 && u <= 0xDBFF && w_source_len > 1) { | ||||
|     next = w_source_ptr[1]; | ||||
|     if (next >= 0xDC00 && next <= 0xDFFF) | ||||
|       return 0x10000 + ((u - 0xD800) << 10) + (next - 0xDC00); | ||||
|   } | ||||
|   return u; | ||||
| } | ||||
|  | ||||
|  | ||||
| static size_t fs__get_length_wide(const WCHAR* w_source_ptr, | ||||
|                                   size_t w_source_len) { | ||||
|   size_t target_len; | ||||
|   int32_t code_point; | ||||
|  | ||||
|   target_len = 0; | ||||
|   for (; w_source_len; w_source_len--, w_source_ptr++) { | ||||
|     code_point = fs__get_surrogate_value(w_source_ptr, w_source_len); | ||||
|     /* Can be invalid UTF-8 but must be valid WTF-8. */ | ||||
|     assert(code_point >= 0); | ||||
|     if (code_point < 0x80) | ||||
|       target_len += 1; | ||||
|     else if (code_point < 0x800) | ||||
|       target_len += 2; | ||||
|     else if (code_point < 0x10000) | ||||
|       target_len += 3; | ||||
|     else { | ||||
|       target_len += 4; | ||||
|       w_source_ptr++; | ||||
|       w_source_len--; | ||||
|     } | ||||
|   } | ||||
|   return target_len; | ||||
| } | ||||
|  | ||||
|  | ||||
| static int fs__wide_to_wtf8(WCHAR* w_source_ptr, | ||||
|                             size_t w_source_len, | ||||
|                             char** target_ptr, | ||||
|                             size_t* target_len_ptr) { | ||||
|   size_t target_len; | ||||
|   char* target; | ||||
|   target_len = WideCharToMultiByte(CP_UTF8, | ||||
|                                    0, | ||||
|                                    w_source_ptr, | ||||
|                                    w_source_len, | ||||
|                                    NULL, | ||||
|                                    0, | ||||
|                                    NULL, | ||||
|                                    NULL); | ||||
|   int32_t code_point; | ||||
|  | ||||
|   if (target_len == 0) { | ||||
|     return -1; | ||||
|   /* If *target_ptr is non-NULL, then *target_len_ptr must hold its length | ||||
|    * (excluding space for the NUL terminator). Otherwise the required length | ||||
|    * is computed and stored in *target_len_ptr, and a new buffer is | ||||
|    * allocated and returned in *target_ptr when target_ptr is non-NULL. */ | ||||
|   if (target_ptr == NULL || *target_ptr == NULL) { | ||||
|     target_len = fs__get_length_wide(w_source_ptr, w_source_len); | ||||
|     if (target_len_ptr != NULL) | ||||
|       *target_len_ptr = target_len; | ||||
|   } else { | ||||
|     target_len = *target_len_ptr; | ||||
|   } | ||||
|  | ||||
|   if (target_len_ptr != NULL) { | ||||
|     *target_len_ptr = target_len; | ||||
|   } | ||||
|  | ||||
|   if (target_ptr == NULL) { | ||||
|   if (target_ptr == NULL) | ||||
|     return 0; | ||||
|  | ||||
|   if (*target_ptr == NULL) { | ||||
|     target = uv__malloc(target_len + 1); | ||||
|     if (target == NULL) { | ||||
|       SetLastError(ERROR_OUTOFMEMORY); | ||||
|       return -1; | ||||
|     } | ||||
|     *target_ptr = target; | ||||
|   } else { | ||||
|     target = *target_ptr; | ||||
|   } | ||||
|  | ||||
|   target = uv__malloc(target_len + 1); | ||||
|   if (target == NULL) { | ||||
|     SetLastError(ERROR_OUTOFMEMORY); | ||||
|     return -1; | ||||
|   } | ||||
|   for (; w_source_len; w_source_len--, w_source_ptr++) { | ||||
|     code_point = fs__get_surrogate_value(w_source_ptr, w_source_len); | ||||
|     /* Can be invalid UTF-8 but must be valid WTF-8. */ | ||||
|     assert(code_point >= 0); | ||||
|  | ||||
|     if (code_point < 0x80) { | ||||
|       *target++ = code_point; | ||||
|     } else if (code_point < 0x800) { | ||||
|       *target++ = 0xC0 | (code_point >> 6); | ||||
|       *target++ = 0x80 | (code_point & 0x3F); | ||||
|     } else if (code_point < 0x10000) { | ||||
|       *target++ = 0xE0 | (code_point >> 12); | ||||
|       *target++ = 0x80 | ((code_point >> 6) & 0x3F); | ||||
|       *target++ = 0x80 | (code_point & 0x3F); | ||||
|     } else { | ||||
|       *target++ = 0xF0 | (code_point >> 18); | ||||
|       *target++ = 0x80 | ((code_point >> 12) & 0x3F); | ||||
|       *target++ = 0x80 | ((code_point >> 6) & 0x3F); | ||||
|       *target++ = 0x80 | (code_point & 0x3F); | ||||
|       w_source_ptr++; | ||||
|       w_source_len--; | ||||
|     } | ||||
|   } | ||||
|   assert((size_t) (target - *target_ptr) == target_len); | ||||
|  | ||||
|   *target++ = '\0'; | ||||
|  | ||||
|   r = WideCharToMultiByte(CP_UTF8, | ||||
|                           0, | ||||
|                           w_source_ptr, | ||||
|                           w_source_len, | ||||
|                           target, | ||||
|                           target_len, | ||||
|                           NULL, | ||||
|                           NULL); | ||||
|   assert(r == target_len); | ||||
|   target[target_len] = '\0'; | ||||
|   *target_ptr = target; | ||||
|   return 0; | ||||
| } | ||||
|  | ||||
|  | ||||
| INLINE static int fs__readlink_handle(HANDLE handle, char** target_ptr, | ||||
|     uint64_t* target_len_ptr) { | ||||
| INLINE static int fs__readlink_handle(HANDLE handle, | ||||
|                                       char** target_ptr, | ||||
|                                       size_t* target_len_ptr) { | ||||
|   char buffer[MAXIMUM_REPARSE_DATA_BUFFER_SIZE]; | ||||
|   REPARSE_DATA_BUFFER* reparse_data = (REPARSE_DATA_BUFFER*) buffer; | ||||
|   WCHAR* w_target; | ||||
| @@ -439,7 +549,8 @@ INLINE static int fs__readlink_handle(HANDLE handle, char** target_ptr, | ||||
|     return -1; | ||||
|   } | ||||
|  | ||||
|   return fs__wide_to_utf8(w_target, w_target_len, target_ptr, target_len_ptr); | ||||
|   assert(target_ptr == NULL || *target_ptr == NULL); | ||||
|   return fs__wide_to_wtf8(w_target, w_target_len, target_ptr, target_len_ptr); | ||||
| } | ||||
|  | ||||
|  | ||||
| @@ -1429,7 +1540,8 @@ void fs__scandir(uv_fs_t* req) { | ||||
|       uv__dirent_t* dirent; | ||||
|  | ||||
|       size_t wchar_len; | ||||
|       size_t utf8_len; | ||||
|       size_t wtf8_len; | ||||
|       char* wtf8; | ||||
|  | ||||
|       /* Obtain a pointer to the current directory entry. */ | ||||
|       position += next_entry_offset; | ||||
| @@ -1456,11 +1568,8 @@ void fs__scandir(uv_fs_t* req) { | ||||
|           info->FileName[1] == L'.') | ||||
|         continue; | ||||
|  | ||||
|       /* Compute the space required to store the filename as UTF-8. */ | ||||
|       utf8_len = WideCharToMultiByte( | ||||
|           CP_UTF8, 0, &info->FileName[0], wchar_len, NULL, 0, NULL, NULL); | ||||
|       if (utf8_len == 0) | ||||
|         goto win32_error; | ||||
|       /* Compute the space required to store the filename as WTF-8. */ | ||||
|       wtf8_len = fs__get_length_wide(&info->FileName[0], wchar_len); | ||||
|  | ||||
|       /* Resize the dirent array if needed. */ | ||||
|       if (dirents_used >= dirents_size) { | ||||
| @@ -1480,26 +1589,17 @@ void fs__scandir(uv_fs_t* req) { | ||||
|        * includes room for the first character of the filename, but `wtf8_len` | ||||
|        * doesn't count the NULL terminator at this point. | ||||
|        */ | ||||
|       dirent = uv__malloc(sizeof *dirent + utf8_len); | ||||
|       dirent = uv__malloc(sizeof *dirent + wtf8_len); | ||||
|       if (dirent == NULL) | ||||
|         goto out_of_memory_error; | ||||
|  | ||||
|       dirents[dirents_used++] = dirent; | ||||
|  | ||||
|       /* Convert file name to UTF-8. */ | ||||
|       if (WideCharToMultiByte(CP_UTF8, | ||||
|                               0, | ||||
|                               &info->FileName[0], | ||||
|                               wchar_len, | ||||
|                               &dirent->d_name[0], | ||||
|                               utf8_len, | ||||
|                               NULL, | ||||
|                               NULL) == 0) | ||||
|       wtf8 = &dirent->d_name[0]; | ||||
|       if (fs__wide_to_wtf8(&info->FileName[0], wchar_len, &wtf8, &wtf8_len) == -1) | ||||
|         goto win32_error; | ||||
|  | ||||
|       /* Add a null terminator to the filename. */ | ||||
|       dirent->d_name[utf8_len] = '\0'; | ||||
|  | ||||
|       /* Fill out the type field. */ | ||||
|       if (info->FileAttributes & FILE_ATTRIBUTE_DEVICE) | ||||
|         dirent->d_type = UV__DT_CHAR; | ||||
| @@ -1708,6 +1808,7 @@ void fs__closedir(uv_fs_t* req) { | ||||
|  | ||||
| INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf, | ||||
|     int do_lstat) { | ||||
|   size_t target_length = 0; | ||||
|   FILE_FS_DEVICE_INFORMATION device_info; | ||||
|   FILE_ALL_INFORMATION file_info; | ||||
|   FILE_FS_VOLUME_INFORMATION volume_info; | ||||
| @@ -1803,9 +1904,10 @@ INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf, | ||||
|      * to be treated as a regular file. The higher level lstat function will | ||||
|      * detect this failure and retry without do_lstat if appropriate. | ||||
|      */ | ||||
|     if (fs__readlink_handle(handle, NULL, &statbuf->st_size) != 0) | ||||
|     if (fs__readlink_handle(handle, NULL, &target_length) != 0) | ||||
|       return -1; | ||||
|     statbuf->st_mode |= S_IFLNK; | ||||
|     statbuf->st_size = target_length; | ||||
|   } | ||||
|  | ||||
|   if (statbuf->st_mode == 0) { | ||||
| @@ -1961,7 +2063,7 @@ INLINE static int fs__fstat_handle(int fd, HANDLE handle, uv_stat_t* statbuf) { | ||||
|     statbuf->st_mode = file_type == UV_TTY ? _S_IFCHR : _S_IFIFO; | ||||
|     statbuf->st_nlink = 1; | ||||
|     statbuf->st_rdev = (file_type == UV_TTY ? FILE_DEVICE_CONSOLE : FILE_DEVICE_NAMED_PIPE) << 16; | ||||
|     statbuf->st_ino = (uint64_t) handle; | ||||
|     statbuf->st_ino = (uintptr_t) handle; | ||||
|     return 0; | ||||
|  | ||||
|   /* If file type is unknown it is an error. */ | ||||
| @@ -2661,6 +2763,7 @@ static void fs__readlink(uv_fs_t* req) { | ||||
|     return; | ||||
|   } | ||||
|  | ||||
|   assert(req->ptr == NULL); | ||||
|   if (fs__readlink_handle(handle, (char**) &req->ptr, NULL) != 0) { | ||||
|     DWORD error = GetLastError(); | ||||
|     SET_REQ_WIN32_ERROR(req, error); | ||||
| @@ -2720,7 +2823,8 @@ static ssize_t fs__realpath_handle(HANDLE handle, char** realpath_ptr) { | ||||
|     return -1; | ||||
|   } | ||||
|  | ||||
|   r = fs__wide_to_utf8(w_realpath_ptr, w_realpath_len, realpath_ptr, NULL); | ||||
|   assert(*realpath_ptr == NULL); | ||||
|   r = fs__wide_to_wtf8(w_realpath_ptr, w_realpath_len, realpath_ptr, NULL); | ||||
|   uv__free(w_realpath_buf); | ||||
|   return r; | ||||
| } | ||||
| @@ -2740,6 +2844,7 @@ static void fs__realpath(uv_fs_t* req) { | ||||
|     return; | ||||
|   } | ||||
|  | ||||
|   assert(req->ptr == NULL); | ||||
|   if (fs__realpath_handle(handle, (char**) &req->ptr) == -1) { | ||||
|     CloseHandle(handle); | ||||
|     SET_REQ_WIN32_ERROR(req, GetLastError()); | ||||
|   | ||||
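The point of the WTF-8 rework above: Windows file names are arbitrary sequences of 16-bit units, so they can contain unpaired surrogates that strict UTF-8 cannot represent, while WTF-8 encodes them like any other code point in the U+0800..U+FFFF range. A worked example mirroring the encoder's branch structure (illustrative helper, not the diff's code): a lone high surrogate U+D800 becomes the three bytes ED A0 80, valid WTF-8 but invalid UTF-8.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Encode one code point with the same branches as fs__wide_to_wtf8;
     * lone surrogates (D800-DFFF) deliberately take the 3-byte path. */
    static int wtf8_encode(int32_t cp, uint8_t out[4]) {
      if (cp < 0x80) {
        out[0] = (uint8_t) cp;
        return 1;
      } else if (cp < 0x800) {
        out[0] = 0xC0 | (cp >> 6);
        out[1] = 0x80 | (cp & 0x3F);
        return 2;
      } else if (cp < 0x10000) {
        out[0] = 0xE0 | (cp >> 12);
        out[1] = 0x80 | ((cp >> 6) & 0x3F);
        out[2] = 0x80 | (cp & 0x3F);
        return 3;
      } else {
        assert(cp <= 0x10FFFF);
        out[0] = 0xF0 | (cp >> 18);
        out[1] = 0x80 | ((cp >> 12) & 0x3F);
        out[2] = 0x80 | ((cp >> 6) & 0x3F);
        out[3] = 0x80 | (cp & 0x3F);
        return 4;
      }
    }

    int main(void) {
      uint8_t b[4];
      int i, n = wtf8_encode(0xD800, b);  /* lone high surrogate */
      for (i = 0; i < n; i++)
        printf("%02X ", b[i]);            /* prints: ED A0 80 */
      printf("\n");
      return 0;
    }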
							
								
								
									
deps/libuv/src/win/handle-inl.h (vendored) | 2
							| @@ -75,7 +75,7 @@ | ||||
|  | ||||
| #define uv__handle_close(handle)                                        \ | ||||
|   do {                                                                  \ | ||||
|     QUEUE_REMOVE(&(handle)->handle_queue);                              \ | ||||
|     uv__queue_remove(&(handle)->handle_queue);                          \ | ||||
|     uv__active_handle_rm((uv_handle_t*) (handle));                      \ | ||||
|                                                                         \ | ||||
|     (handle)->flags |= UV_HANDLE_CLOSED;                                \ | ||||
|   | ||||
							
								
								
									
deps/libuv/src/win/internal.h (vendored) | 14
							| @@ -168,18 +168,8 @@ void uv__process_tty_read_req(uv_loop_t* loop, uv_tty_t* handle, | ||||
|     uv_req_t* req); | ||||
| void uv__process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle, | ||||
|     uv_write_t* req); | ||||
| /* | ||||
|  * uv__process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working | ||||
|  * TODO: find a way to remove it | ||||
|  */ | ||||
| void uv__process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle, | ||||
|     uv_req_t* raw_req); | ||||
| /* | ||||
|  * uv__process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working | ||||
|  * TODO: find a way to remove it | ||||
|  */ | ||||
| void uv__process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle, | ||||
|     uv_connect_t* req); | ||||
| #define uv__process_tty_accept_req(loop, handle, req) abort() | ||||
| #define uv__process_tty_connect_req(loop, handle, req) abort() | ||||
| void uv__process_tty_shutdown_req(uv_loop_t* loop, | ||||
|                                   uv_tty_t* stream, | ||||
|                                   uv_shutdown_t* req); | ||||
|   | ||||
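The deleted declarations existed only so the generic stream-request dispatcher had a symbol to reference for handle-type/operation pairs that can never occur (a TTY neither accepts nor connects). Replacing them with abort() macros drops two dead exported functions while still failing loudly if the impossible path is ever taken. A loose sketch of the idea, with made-up names rather than libuv's actual DELEGATE_STREAM_REQ machinery:

    #include <stdlib.h>

    /* The dispatcher must name a handler for every handle type, even
     * ones that cannot reach this operation at run time. */
    #define process_tty_accept_req(loop, handle, req) abort()

    static void process_tcp_accept_req(void* loop, void* h, void* req) {
      (void) loop; (void) h; (void) req;  /* real TCP accept work here */
    }

    static void delegate_accept(int is_tty, void* loop, void* h, void* req) {
      if (is_tty)
        process_tty_accept_req(loop, h, req);  /* expands to abort() */
      else
        process_tcp_accept_req(loop, h, req);
    }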
							
								
								
									
deps/libuv/src/win/pipe.c (vendored) | 104
							| @@ -55,7 +55,7 @@ static const int pipe_prefix_len = sizeof(pipe_prefix) - 1; | ||||
| typedef struct { | ||||
|   uv__ipc_socket_xfer_type_t xfer_type; | ||||
|   uv__ipc_socket_xfer_info_t xfer_info; | ||||
|   QUEUE member; | ||||
|   struct uv__queue member; | ||||
| } uv__ipc_xfer_queue_item_t; | ||||
|  | ||||
| /* IPC frame header flags. */ | ||||
| @@ -111,7 +111,7 @@ int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) { | ||||
|   handle->name = NULL; | ||||
|   handle->pipe.conn.ipc_remote_pid = 0; | ||||
|   handle->pipe.conn.ipc_data_frame.payload_remaining = 0; | ||||
|   QUEUE_INIT(&handle->pipe.conn.ipc_xfer_queue); | ||||
|   uv__queue_init(&handle->pipe.conn.ipc_xfer_queue); | ||||
|   handle->pipe.conn.ipc_xfer_queue_length = 0; | ||||
|   handle->ipc = ipc; | ||||
|   handle->pipe.conn.non_overlapped_writes_tail = NULL; | ||||
| @@ -637,13 +637,13 @@ void uv__pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) { | ||||
|  | ||||
|   if (handle->flags & UV_HANDLE_CONNECTION) { | ||||
|     /* Free pending sockets */ | ||||
|     while (!QUEUE_EMPTY(&handle->pipe.conn.ipc_xfer_queue)) { | ||||
|       QUEUE* q; | ||||
|     while (!uv__queue_empty(&handle->pipe.conn.ipc_xfer_queue)) { | ||||
|       struct uv__queue* q; | ||||
|       SOCKET socket; | ||||
|  | ||||
|       q = QUEUE_HEAD(&handle->pipe.conn.ipc_xfer_queue); | ||||
|       QUEUE_REMOVE(q); | ||||
|       xfer_queue_item = QUEUE_DATA(q, uv__ipc_xfer_queue_item_t, member); | ||||
|       q = uv__queue_head(&handle->pipe.conn.ipc_xfer_queue); | ||||
|       uv__queue_remove(q); | ||||
|       xfer_queue_item = uv__queue_data(q, uv__ipc_xfer_queue_item_t, member); | ||||
|  | ||||
|       /* Materialize socket and close it */ | ||||
|       socket = WSASocketW(FROM_PROTOCOL_INFO, | ||||
| @@ -694,20 +694,48 @@ void uv_pipe_pending_instances(uv_pipe_t* handle, int count) { | ||||
|  | ||||
| /* Creates a pipe server. */ | ||||
| int uv_pipe_bind(uv_pipe_t* handle, const char* name) { | ||||
|   return uv_pipe_bind2(handle, name, strlen(name), 0); | ||||
| } | ||||
|  | ||||
|  | ||||
| int uv_pipe_bind2(uv_pipe_t* handle, | ||||
|                   const char* name, | ||||
|                   size_t namelen, | ||||
|                   unsigned int flags) { | ||||
|   uv_loop_t* loop = handle->loop; | ||||
|   int i, err, nameSize; | ||||
|   uv_pipe_accept_t* req; | ||||
|  | ||||
|   if (flags & ~UV_PIPE_NO_TRUNCATE) { | ||||
|     return UV_EINVAL; | ||||
|   } | ||||
|  | ||||
|   if (name == NULL) { | ||||
|     return UV_EINVAL; | ||||
|   } | ||||
|  | ||||
|   if (namelen == 0) { | ||||
|     return UV_EINVAL; | ||||
|   } | ||||
|  | ||||
|   if (*name == '\0') { | ||||
|     return UV_EINVAL; | ||||
|   } | ||||
|  | ||||
|   if (flags & UV_PIPE_NO_TRUNCATE) { | ||||
|     if (namelen > 256) { | ||||
|       return UV_EINVAL; | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   if (handle->flags & UV_HANDLE_BOUND) { | ||||
|     return UV_EINVAL; | ||||
|   } | ||||
|  | ||||
|   if (!name) { | ||||
|     return UV_EINVAL; | ||||
|   } | ||||
|   if (uv__is_closing(handle)) { | ||||
|     return UV_EINVAL; | ||||
|   } | ||||
|  | ||||
|   if (!(handle->flags & UV_HANDLE_PIPESERVER)) { | ||||
|     handle->pipe.serv.pending_instances = default_pending_pipe_instances; | ||||
|   } | ||||
| @@ -818,13 +846,47 @@ static DWORD WINAPI pipe_connect_thread_proc(void* parameter) { | ||||
| } | ||||
|  | ||||
|  | ||||
| void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle, | ||||
|     const char* name, uv_connect_cb cb) { | ||||
| void uv_pipe_connect(uv_connect_t* req, | ||||
|                      uv_pipe_t* handle, | ||||
|                      const char* name, | ||||
|                      uv_connect_cb cb) { | ||||
|   uv_pipe_connect2(req, handle, name, strlen(name), 0, cb); | ||||
| } | ||||
|  | ||||
|  | ||||
| int uv_pipe_connect2(uv_connect_t* req, | ||||
|                      uv_pipe_t* handle, | ||||
|                      const char* name, | ||||
|                      size_t namelen, | ||||
|                      unsigned int flags, | ||||
|                      uv_connect_cb cb) { | ||||
|   uv_loop_t* loop = handle->loop; | ||||
|   int err, nameSize; | ||||
|   HANDLE pipeHandle = INVALID_HANDLE_VALUE; | ||||
|   DWORD duplex_flags; | ||||
|  | ||||
|   if (flags & ~UV_PIPE_NO_TRUNCATE) { | ||||
|     return UV_EINVAL; | ||||
|   } | ||||
|  | ||||
|   if (name == NULL) { | ||||
|     return UV_EINVAL; | ||||
|   } | ||||
|  | ||||
|   if (namelen == 0) { | ||||
|     return UV_EINVAL; | ||||
|   } | ||||
|  | ||||
|   if (*name == '\0') { | ||||
|     return UV_EINVAL; | ||||
|   } | ||||
|  | ||||
|   if (flags & UV_PIPE_NO_TRUNCATE) { | ||||
|     if (namelen > 256) { | ||||
|       return UV_EINVAL; | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   UV_REQ_INIT(req, UV_CONNECT); | ||||
|   req->handle = (uv_stream_t*) handle; | ||||
|   req->cb = cb; | ||||
| @@ -882,7 +944,7 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle, | ||||
|       REGISTER_HANDLE_REQ(loop, handle, req); | ||||
|       handle->reqs_pending++; | ||||
|  | ||||
|       return; | ||||
|       return 0; | ||||
|     } | ||||
|  | ||||
|     err = GetLastError(); | ||||
| @@ -895,7 +957,7 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle, | ||||
|   uv__insert_pending_req(loop, (uv_req_t*) req); | ||||
|   handle->reqs_pending++; | ||||
|   REGISTER_HANDLE_REQ(loop, handle, req); | ||||
|   return; | ||||
|   return 0; | ||||
|  | ||||
| error: | ||||
|   if (handle->name) { | ||||
| @@ -911,7 +973,7 @@ error: | ||||
|   uv__insert_pending_req(loop, (uv_req_t*) req); | ||||
|   handle->reqs_pending++; | ||||
|   REGISTER_HANDLE_REQ(loop, handle, req); | ||||
|   return; | ||||
|   return 0; | ||||
| } | ||||
|  | ||||
|  | ||||
| @@ -1062,20 +1124,20 @@ int uv__pipe_accept(uv_pipe_t* server, uv_stream_t* client) { | ||||
|   uv_loop_t* loop = server->loop; | ||||
|   uv_pipe_t* pipe_client; | ||||
|   uv_pipe_accept_t* req; | ||||
|   QUEUE* q; | ||||
|   struct uv__queue* q; | ||||
|   uv__ipc_xfer_queue_item_t* item; | ||||
|   int err; | ||||
|  | ||||
|   if (server->ipc) { | ||||
|     if (QUEUE_EMPTY(&server->pipe.conn.ipc_xfer_queue)) { | ||||
|     if (uv__queue_empty(&server->pipe.conn.ipc_xfer_queue)) { | ||||
|       /* No valid pending sockets. */ | ||||
|       return WSAEWOULDBLOCK; | ||||
|     } | ||||
|  | ||||
|     q = QUEUE_HEAD(&server->pipe.conn.ipc_xfer_queue); | ||||
|     QUEUE_REMOVE(q); | ||||
|     q = uv__queue_head(&server->pipe.conn.ipc_xfer_queue); | ||||
|     uv__queue_remove(q); | ||||
|     server->pipe.conn.ipc_xfer_queue_length--; | ||||
|     item = QUEUE_DATA(q, uv__ipc_xfer_queue_item_t, member); | ||||
|     item = uv__queue_data(q, uv__ipc_xfer_queue_item_t, member); | ||||
|  | ||||
|     err = uv__tcp_xfer_import( | ||||
|         (uv_tcp_t*) client, item->xfer_type, &item->xfer_info); | ||||
| @@ -1829,7 +1891,7 @@ static void uv__pipe_queue_ipc_xfer_info( | ||||
|   item->xfer_type = xfer_type; | ||||
|   item->xfer_info = *xfer_info; | ||||
|  | ||||
|   QUEUE_INSERT_TAIL(&handle->pipe.conn.ipc_xfer_queue, &item->member); | ||||
|   uv__queue_insert_tail(&handle->pipe.conn.ipc_xfer_queue, &item->member); | ||||
|   handle->pipe.conn.ipc_xfer_queue_length++; | ||||
| } | ||||
|  | ||||
|   | ||||
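uv_pipe_bind2() and uv_pipe_connect2() are the new explicit-length variants: they accept names that are not NUL-terminated and, with UV_PIPE_NO_TRUNCATE, return UV_EINVAL instead of silently truncating an over-long name. A minimal connect sketch using a hypothetical pipe name (error handling trimmed):

    #include <uv.h>

    static void on_connect(uv_connect_t* req, int status) {
      (void) req;
      (void) status;  /* status < 0 carries a libuv error code */
    }

    int main(void) {
      uv_loop_t* loop = uv_default_loop();
      uv_pipe_t client;
      uv_connect_t req;
      static const char name[] = "\\\\.\\pipe\\demo";  /* hypothetical */

      uv_pipe_init(loop, &client, 0);
      if (uv_pipe_connect2(&req, &client, name, sizeof(name) - 1,
                           UV_PIPE_NO_TRUNCATE, on_connect) != 0)
        return 1;
      return uv_run(loop, UV_RUN_DEFAULT);
    }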
							
								
								
									
deps/libuv/src/win/tcp.c (vendored) | 4
							| @@ -175,14 +175,14 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* handle, unsigned int flags) { | ||||
|     sock = socket(domain, SOCK_STREAM, 0); | ||||
|     if (sock == INVALID_SOCKET) { | ||||
|       err = WSAGetLastError(); | ||||
|       QUEUE_REMOVE(&handle->handle_queue); | ||||
|       uv__queue_remove(&handle->handle_queue); | ||||
|       return uv_translate_sys_error(err); | ||||
|     } | ||||
|  | ||||
|     err = uv__tcp_set_socket(handle->loop, handle, sock, domain, 0); | ||||
|     if (err) { | ||||
|       closesocket(sock); | ||||
|       QUEUE_REMOVE(&handle->handle_queue); | ||||
|       uv__queue_remove(&handle->handle_queue); | ||||
|       return uv_translate_sys_error(err); | ||||
|     } | ||||
|  | ||||
|   | ||||
							
								
								
									
deps/libuv/src/win/tty.c (vendored) | 20
							| @@ -2298,26 +2298,6 @@ void uv__tty_endgame(uv_loop_t* loop, uv_tty_t* handle) { | ||||
| } | ||||
|  | ||||
|  | ||||
| /* | ||||
|  * uv__process_tty_accept_req() is a stub to keep DELEGATE_STREAM_REQ working | ||||
|  * TODO: find a way to remove it | ||||
|  */ | ||||
| void uv__process_tty_accept_req(uv_loop_t* loop, uv_tty_t* handle, | ||||
|     uv_req_t* raw_req) { | ||||
|   abort(); | ||||
| } | ||||
|  | ||||
|  | ||||
| /* | ||||
|  * uv__process_tty_connect_req() is a stub to keep DELEGATE_STREAM_REQ working | ||||
|  * TODO: find a way to remove it | ||||
|  */ | ||||
| void uv__process_tty_connect_req(uv_loop_t* loop, uv_tty_t* handle, | ||||
|     uv_connect_t* req) { | ||||
|   abort(); | ||||
| } | ||||
|  | ||||
|  | ||||
| int uv_tty_reset_mode(void) { | ||||
|   /* Not necessary to do anything. */ | ||||
|   return 0; | ||||
|   | ||||
							
								
								
									
deps/libuv/src/win/udp.c (vendored) | 4
							| @@ -146,14 +146,14 @@ int uv__udp_init_ex(uv_loop_t* loop, | ||||
|     sock = socket(domain, SOCK_DGRAM, 0); | ||||
|     if (sock == INVALID_SOCKET) { | ||||
|       err = WSAGetLastError(); | ||||
|       QUEUE_REMOVE(&handle->handle_queue); | ||||
|       uv__queue_remove(&handle->handle_queue); | ||||
|       return uv_translate_sys_error(err); | ||||
|     } | ||||
|  | ||||
|     err = uv__udp_set_socket(handle->loop, handle, sock, domain); | ||||
|     if (err) { | ||||
|       closesocket(sock); | ||||
|       QUEUE_REMOVE(&handle->handle_queue); | ||||
|       uv__queue_remove(&handle->handle_queue); | ||||
|       return uv_translate_sys_error(err); | ||||
|     } | ||||
|   } | ||||
|   | ||||