[glib: 3/4] gthreadpool: Update unused_threads while we still own the pool lock

commit a275ee66796ab0d6d95ed8647f2170be9b136951
Author: Marco Trevisan (Treviño) <mail 3v1n0 net>
Date:   Mon Jul 11 18:45:36 2022 +0200

    gthreadpool: Update unused_threads while we still own the pool lock
    
    As explained in the previous commit, the unused_threads value could
    end up not matching what g_thread_pool_get_num_threads() returns,
    because an about-to-be-unused thread might not yet be counted as
    unused even though the pool's thread count has already been
    decreased.
    
    To avoid that scenario, and to make sure that when all the pool's
    threads are stopped they are also unmarked as unused, increase the
    unused_threads value earlier, while we still own the pool lock, so
    that it always includes a thread that is no longer used but not yet
    queued.
    
    This also lets us update the test so that it no longer repeats the
    stop-unused call, since we can now be sure that once the pool has no
    threads left, the unused-threads value has been updated accordingly.
    
    Also add a test with multiple pools.
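
For illustration only (not part of the patch), a minimal sketch of the
ordering this change guarantees to a caller, mirroring what the updated
test relies on: once g_thread_pool_get_num_threads() reports 0, the idle
threads are already counted by g_thread_pool_get_num_unused_threads(),
so a single g_thread_pool_stop_unused_threads() call is enough. This is
a hypothetical standalone example using only the public GThreadPool API.

    /* Sketch of a caller relying on the guarantee introduced here. */
    #include <glib.h>

    int
    main (void)
    {
      GThreadPool *pool;
      guint i;

      /* Keep idle threads around instead of letting them exit immediately. */
      g_thread_pool_set_max_unused_threads (-1);

      pool = g_thread_pool_new ((GFunc) g_usleep, NULL, -1, FALSE, NULL);

      for (i = 0; i < 5; i++)
        g_thread_pool_push (pool, GUINT_TO_POINTER (100), NULL);

      /* Wait until no thread is running tasks for this pool any more. */
      while (g_thread_pool_get_num_threads (pool) != 0)
        g_usleep (100);

      /* With this change the now-idle threads are already marked as unused. */
      g_assert_cmpuint (g_thread_pool_get_num_unused_threads (), >, 0);

      /* One stop-unused call suffices; just wait for the threads to die. */
      g_thread_pool_stop_unused_threads ();
      while (g_thread_pool_get_num_unused_threads () != 0)
        g_usleep (100);

      g_thread_pool_free (pool, FALSE, TRUE);

      return 0;
    }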

 glib/gthreadpool.c            | 10 +++----
 glib/tests/thread-pool-slow.c | 63 ++++++++++++++++++++++++++++++++++++-------
 2 files changed, 58 insertions(+), 15 deletions(-)
---
diff --git a/glib/gthreadpool.c b/glib/gthreadpool.c
index bcbb9e2d3e..0d656f072b 100644
--- a/glib/gthreadpool.c
+++ b/glib/gthreadpool.c
@@ -167,8 +167,6 @@ g_thread_pool_wait_for_new_pool (void)
   local_max_idle_time = g_atomic_int_get (&max_idle_time);
   last_wakeup_thread_serial = g_atomic_int_get (&wakeup_thread_serial);
 
-  g_atomic_int_inc (&unused_threads);
-
   do
     {
       if ((guint) g_atomic_int_get (&unused_threads) >= local_max_unused_threads)
@@ -237,8 +235,6 @@ g_thread_pool_wait_for_new_pool (void)
     }
   while (pool == wakeup_thread_marker);
 
-  g_atomic_int_add (&unused_threads, -1);
-
   return pool;
 }
 
@@ -405,12 +401,16 @@ g_thread_pool_thread_proxy (gpointer data)
                 }
             }
 
+          g_atomic_int_inc (&unused_threads);
           g_async_queue_unlock (pool->queue);
 
           if (free_pool)
             g_thread_pool_free_internal (pool);
 
-          if ((pool = g_thread_pool_wait_for_new_pool ()) == NULL)
+          pool = g_thread_pool_wait_for_new_pool ();
+          g_atomic_int_add (&unused_threads, -1);
+
+          if (pool == NULL)
             break;
 
           g_async_queue_lock (pool->queue);
diff --git a/glib/tests/thread-pool-slow.c b/glib/tests/thread-pool-slow.c
index a7d3039ec3..ae16426be1 100644
--- a/glib/tests/thread-pool-slow.c
+++ b/glib/tests/thread-pool-slow.c
@@ -80,18 +80,13 @@ test_thread_stop_unused (void)
     g_usleep (100);
 
   g_assert_cmpuint (g_thread_pool_get_num_threads (pool), ==, 0);
+  g_assert_cmpuint (g_thread_pool_get_num_unused_threads (), >, 0);
 
   /* Wait for threads to die. */
-  do {
-    /* We may need to repeat this in case we tried to stop unused threads
-     * while some thread was still active, and not yet marked as non-used,
-     * despite what g_thread_pool_get_num_threads() tells us.
-     * And if this happens the thread will be kept in the unused queue
-     * indefinitely, so we need to stop it again, until we're really done.
-     */
-    g_thread_pool_stop_unused_threads ();
+  g_thread_pool_stop_unused_threads ();
+
+  while (g_thread_pool_get_num_unused_threads () != 0)
     g_usleep (100);
-  } while (g_thread_pool_get_num_unused_threads () != 0);
 
   g_assert_cmpuint (g_thread_pool_get_num_unused_threads (), ==, 0);
 
@@ -103,6 +98,51 @@ test_thread_stop_unused (void)
   g_thread_pool_free (pool, FALSE, TRUE);
 }
 
+static void
+test_thread_stop_unused_multiple (void)
+{
+  GThreadPool *pools[10];
+  guint i, j;
+  const guint limit = 10;
+  gboolean all_stopped;
+
+  /* Spawn a few threads. */
+  g_thread_pool_set_max_unused_threads (-1);
+
+  for (i = 0; i < G_N_ELEMENTS (pools); i++)
+    {
+      pools[i] = g_thread_pool_new ((GFunc) g_usleep, NULL, -1, FALSE, NULL);
+
+      for (j = 0; j < limit; j++)
+        g_thread_pool_push (pools[i], GUINT_TO_POINTER (100), NULL);
+    }
+
+  all_stopped = FALSE;
+  while (!all_stopped)
+    {
+      all_stopped = TRUE;
+      for (i = 0; i < G_N_ELEMENTS (pools); i++)
+        all_stopped &= (g_thread_pool_get_num_threads (pools[i]) == 0);
+    }
+
+  for (i = 0; i < G_N_ELEMENTS (pools); i++)
+    {
+      g_assert_cmpuint (g_thread_pool_get_num_threads (pools[i]), ==, 0);
+      g_assert_cmpuint (g_thread_pool_get_num_unused_threads (), >, 0);
+    }
+
+  /* Wait for threads to die. */
+  g_thread_pool_stop_unused_threads ();
+
+  while (g_thread_pool_get_num_unused_threads () != 0)
+    g_usleep (100);
+
+  g_assert_cmpuint (g_thread_pool_get_num_unused_threads (), ==, 0);
+
+  for (i = 0; i < G_N_ELEMENTS (pools); i++)
+    g_thread_pool_free (pools[i], FALSE, TRUE);
+}
+
 static void
 test_thread_pools_entry_func (gpointer data, gpointer user_data)
 {
@@ -344,6 +384,9 @@ test_check_start_and_stop (gpointer user_data)
       test_thread_stop_unused ();
       break;
     case 7:
+      test_thread_stop_unused_multiple ();
+      break;
+    case 8:
       test_thread_idle_time ();
       break;
     default:
@@ -373,7 +416,7 @@ test_check_start_and_stop (gpointer user_data)
     G_UNLOCK (thread_counter_sort);
   }
 
-  if (test_number == 7) {
+  if (test_number == 8) {
     guint idle;
 
     idle = g_thread_pool_get_num_unused_threads ();

