From: Thomas Markwalder Date: Thu, 18 Mar 2021 20:24:28 +0000 (-0400) Subject: [#1730] Fix pitfall in unit test per review comment X-Git-Tag: Kea-1.9.6~154 X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=2c14f898094bc46d01e86f0b245838065df2e37f;p=thirdparty%2Fkea.git [#1730] Fix pitfall in unit test per review comment src/lib/config/tests/cmd_http_listener_unittests.cc Replaced thread coordination to avoid "spurious wakeups" possible with use of ConditionVariable::wait() --- diff --git a/src/lib/config/tests/cmd_http_listener_unittests.cc b/src/lib/config/tests/cmd_http_listener_unittests.cc index 0594bcd890..e3feacac5d 100644 --- a/src/lib/config/tests/cmd_http_listener_unittests.cc +++ b/src/lib/config/tests/cmd_http_listener_unittests.cc @@ -53,7 +53,7 @@ public: /// from CommandMgr, and enables multi-threading mode. CmdHttpListenerTest() : io_service_(), test_timer_(io_service_), run_io_service_timer_(io_service_), - clients_(), num_threads_(), num_clients_() { + clients_(), num_threads_(), num_clients_(), num_in_progress_(0), num_finished_(0) { test_timer_.setup(std::bind(&CmdHttpListenerTest::timeoutHandler, this, true), TEST_TIMEOUT, IntervalTimer::ONE_SHOT); @@ -252,16 +252,20 @@ public: // notify everyone and finish. The idea is to force each thread // to handle the same number of requests over the course of the // test, making verification reliable. - if (num_clients_ > 1) { + if (num_clients_ >= num_threads_) { std::unique_lock lck(mutex_); ++num_in_progress_; - if (num_in_progress_ < num_threads_) { - cv_.wait(lck); - } else { - num_in_progress_ = 0; + if (num_in_progress_ == num_threads_) { + num_finished_ = 0; cv_.notify_all(); + } else { + bool ret = cv_.wait_for(lck, std::chrono::seconds(10), + [&]() { return (num_in_progress_ == num_threads_); }); + if (!ret) { + ADD_FAILURE() << "clients failed to start work"; + } } - } + } // Create the map of response arguments. 
ElementPtr arguments = Element::createMap(); @@ -278,6 +282,25 @@ public: ss << std::this_thread::get_id(); arguments->set("thread-id", Element::create(ss.str())); + // If we have more clients than threads, we need to wait + // for each block of in-progress clients to finish. + if (num_clients_ >= num_threads_) { + std::unique_lock lck(mutex_); + num_finished_++; + if (num_finished_ == num_threads_) { + // We're all done, notify the others and finish. + num_in_progress_ = 0; + cv_.notify_all(); + } else { + // I'm done but others aren't wait here. + bool ret = cv_.wait_for(lck, std::chrono::seconds(10), + [&]() { return (num_finished_ == num_threads_); }); + if (!ret) { + ADD_FAILURE() << "clients failed to finish work"; + } + } + } + // We're done, ship it! return (createAnswer(CONTROL_RESULT_SUCCESS, arguments)); } @@ -458,6 +481,9 @@ public: /// @brief Number of requests currently in progress. size_t num_in_progress_; + /// @brief Number of requests that have finished. + size_t num_finished_; + /// @brief Mutex used to lock during thread coordination. std::mutex mutex_;