ssb: Fix a crash on Windows caused by calling uv_async_send on a handle that had already been closed. Various other cleanup and improvements along the way.

2025-01-02 12:35:58 -05:00
parent fd40596ce7
commit cd2fe9f8d9
5 changed files with 38 additions and 37 deletions
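For context, the pattern applied throughout this diff: uv_async_send must not be called once teardown of the owning handle has begun, or libuv can crash on Windows. Each send on scheduled_async is therefore guarded by the connection's is_closing flag. Below is a minimal sketch of that guard, using a simplified hypothetical connection struct rather than the real tf_ssb_connection_t:

#include <stdbool.h>
#include <uv.h>

/* Simplified stand-in for tf_ssb_connection_t; only the fields the sketch needs. */
typedef struct _connection_t
{
	uv_async_t scheduled_async;
	bool is_closing;
} connection_t;

/* Wake the scheduler only while the connection is still live. Once is_closing
** is set, the async handle may already be closing or closed, so skip the send. */
static void _connection_wake_scheduler(connection_t* connection)
{
	if (!connection->is_closing)
	{
		uv_async_send(&connection->scheduled_async);
	}
}

In the diff, is_closing is set at the start of _tf_ssb_connection_destroy, so any later wake-up request takes the guarded path instead of touching a handle that is being closed.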

@@ -272,7 +272,7 @@ typedef struct _tf_ssb_connection_t
uv_async_t scheduled_async;
uv_timer_t handshake_timer;
uv_timer_t linger_timer;
bool closing;
bool is_closing;
tf_ssb_connection_t* tunnel_connection;
int32_t tunnel_request_number;
@@ -600,7 +600,7 @@ static uint32_t _tf_ssb_connection_prng(tf_ssb_connection_t* connection)
static void _tf_ssb_connection_dispatch_scheduled(tf_ssb_connection_t* connection)
{
while (((connection->active_write_count == 0 && connection->read_back_pressure == 0) || connection->closing) && connection->scheduled_count && connection->scheduled)
while (((connection->active_write_count == 0 && connection->read_back_pressure == 0) || connection->is_closing) && connection->scheduled_count && connection->scheduled)
{
int index = _tf_ssb_connection_prng(connection) % connection->scheduled_count;
tf_ssb_connection_scheduled_t scheduled = connection->scheduled[index];
@@ -628,9 +628,9 @@ static int _tf_ssb_connection_scheduled_compare(const void* a, const void* b)
void tf_ssb_connection_schedule_idle(tf_ssb_connection_t* connection, const char* key, tf_ssb_scheduled_callback_t* callback, void* user_data)
{
int index = tf_util_insert_index(key, connection->scheduled, connection->scheduled_count, sizeof(tf_ssb_connection_scheduled_t), _tf_ssb_connection_scheduled_compare);
if (index != connection->scheduled_count && strcmp(key, connection->scheduled[index].key) == 0)
if (connection->is_closing || (index != connection->scheduled_count && strcmp(key, connection->scheduled[index].key) == 0))
{
/* Keep the old request. Skip the new request. */
/* Skip the new request. */
tf_trace_begin(connection->ssb->trace, "scheduled callback (skip)");
PRE_CALLBACK(connection->ssb, callback);
callback(connection, true, user_data);
@@ -647,9 +647,9 @@ void tf_ssb_connection_schedule_idle(tf_ssb_connection_t* connection, const char
};
snprintf(connection->scheduled[index].key, sizeof(connection->scheduled[index].key), "%s", key);
connection->scheduled_count++;
}
uv_async_send(&connection->scheduled_async);
uv_async_send(&connection->scheduled_async);
}
}
static int _request_compare(const void* a, const void* b)
@@ -1340,6 +1340,11 @@ bool tf_ssb_connection_is_connected(tf_ssb_connection_t* connection)
return connection->state == k_tf_ssb_state_verified || connection->state == k_tf_ssb_state_server_verified;
}
bool tf_ssb_connection_is_closing(tf_ssb_connection_t* connection)
{
return connection && connection->is_closing;
}
const char* tf_ssb_connection_get_host(tf_ssb_connection_t* connection)
{
return connection->host;
@@ -1886,9 +1891,9 @@ static void _tf_ssb_connection_linger_timer(uv_timer_t* timer)
static void _tf_ssb_connection_destroy(tf_ssb_connection_t* connection, const char* reason)
{
tf_ssb_t* ssb = connection->ssb;
if (!connection->closing)
if (!connection->is_closing)
{
connection->closing = true;
connection->is_closing = true;
uv_timer_start(&connection->linger_timer, _tf_ssb_connection_linger_timer, 5000, 0);
_tf_ssb_notify_connections_changed(ssb, k_tf_ssb_change_update, connection);
}
@@ -2006,7 +2011,7 @@ static void _tf_ssb_connection_on_close(uv_handle_t* handle)
{
tf_ssb_connection_t* connection = handle->data;
handle->data = NULL;
if (connection && connection->closing)
if (connection && connection->is_closing)
{
_tf_ssb_connection_destroy(connection, "handle closed");
}
@@ -3978,7 +3983,7 @@ static void _tf_ssb_connection_after_work_callback(uv_work_t* work, int status)
tf_trace_end(data->connection->ssb->trace);
}
data->connection->ref_count--;
if (data->connection->ref_count == 0 && data->connection->closing)
if (data->connection->ref_count == 0 && data->connection->is_closing)
{
_tf_ssb_connection_destroy(data->connection, "work completed");
}
@@ -4261,9 +4266,9 @@ void tf_ssb_connection_adjust_read_backpressure(tf_ssb_connection_t* connection,
const int k_threshold = 256;
int old_pressure = connection->read_back_pressure;
connection->read_back_pressure += delta;
uv_async_send(&connection->scheduled_async);
if (!connection->closing)
if (!connection->is_closing)
{
uv_async_send(&connection->scheduled_async);
if (old_pressure < k_threshold && connection->read_back_pressure >= k_threshold)
{
_tf_ssb_connection_read_stop(connection);
@@ -4274,7 +4279,7 @@ void tf_ssb_connection_adjust_read_backpressure(tf_ssb_connection_t* connection,
}
}
connection->ref_count += delta;
if (connection->ref_count == 0 && connection->closing)
if (connection->ref_count == 0 && connection->is_closing)
{
_tf_ssb_connection_destroy(connection, "backpressure released");
}
@@ -4283,7 +4288,10 @@ void tf_ssb_connection_adjust_read_backpressure(tf_ssb_connection_t* connection,
void tf_ssb_connection_adjust_write_count(tf_ssb_connection_t* connection, int delta)
{
connection->active_write_count += delta;
uv_async_send(&connection->scheduled_async);
if (!connection->is_closing)
{
uv_async_send(&connection->scheduled_async);
}
}
const char* tf_ssb_connection_get_destroy_reason(tf_ssb_connection_t* connection)