diff --git a/contrib/epee/include/net/abstract_tcp_server2.inl b/contrib/epee/include/net/abstract_tcp_server2.inl index db3f9e322..7f326c863 100644 --- a/contrib/epee/include/net/abstract_tcp_server2.inl +++ b/contrib/epee/include/net/abstract_tcp_server2.inl @@ -535,6 +535,7 @@ POP_WARNINGS // Create a pool of threads to run all of the io_services. CRITICAL_REGION_BEGIN(m_threads_lock); + m_threads.clear(); for (std::size_t i = 0; i < threads_count; ++i) { boost::shared_ptr<boost::thread> thread(new boost::thread( @@ -546,8 +547,8 @@ POP_WARNINGS if(wait) { for (std::size_t i = 0; i < m_threads.size(); ++i) - m_threads[i]->join(); - m_threads.clear(); + if (!m_stop_signal_sent) + m_threads[i]->join(); }else { @@ -592,13 +593,34 @@ POP_WARNINGS bool boosted_tcp_server<t_protocol_handler>::timed_wait_server_stop(uint64_t wait_mseconds) { TRY_ENTRY(); - boost::chrono::milliseconds ms(wait_mseconds); - for (std::size_t i = 0; i < m_threads.size(); ++i) + CRITICAL_REGION_LOCAL(m_threads_lock); + + bool all_done = false; + boost::chrono::milliseconds ms(50); + for (uint64_t tries = 1; tries * 50 <= wait_mseconds; tries++) { - if(m_threads[i]->joinable() && !m_threads[i]->try_join_for(ms)) + all_done = true; + for (std::size_t i = 0; i < m_threads.size(); ++i) { - LOG_PRINT_L0("Interrupting thread " << m_threads[i]->native_handle()); - m_threads[i]->interrupt(); + if(m_threads[i]->joinable() && !m_threads[i]->try_join_for(ms)) + { + all_done = false; + } + } + if (all_done) break; + } + if (!all_done) + { + for (std::size_t i = 0; i < m_threads.size(); ++i) + { + if (m_threads[i]->joinable()) + { + auto handle = m_threads[i]->native_handle(); + LOG_PRINT_L0("Interrupting thread " << std::hex << handle << " and waiting for join."); + m_threads[i]->interrupt(); + m_threads[i]->join(); + LOG_PRINT_L0("Thread " << std::hex << handle << " joined successfully."); + } } } return true; diff --git a/contrib/epee/include/syncobj.h b/contrib/epee/include/syncobj.h index b7273da8e..2e1827dbf 100644 ---
a/contrib/epee/include/syncobj.h +++ b/contrib/epee/include/syncobj.h @@ -56,7 +56,10 @@ namespace epee { std::unique_lock<std::mutex> lock(m_mx); while (!m_rised) - m_cond_var.wait(lock); + { + m_cond_var.wait_for(lock, std::chrono::milliseconds(50)); + boost::this_thread::interruption_point(); + } m_rised = false; } @@ -70,7 +73,9 @@ namespace epee class critical_section { - boost::recursive_mutex m_section; + boost::recursive_timed_mutex m_section; + + const boost::posix_time::time_duration m_ms{boost::posix_time::milliseconds(2000)}; public: //to make copy fake! @@ -88,7 +93,10 @@ namespace epee void lock() { - m_section.lock(); + while (!m_section.timed_lock(m_ms)) + { + boost::this_thread::interruption_point(); + } //EnterCriticalSection( &m_section ); } diff --git a/src/daemon/daemon.cpp b/src/daemon/daemon.cpp index 728b239a0..7506e054b 100644 --- a/src/daemon/daemon.cpp +++ b/src/daemon/daemon.cpp @@ -114,7 +114,12 @@ bool t_daemon::run() mp_internals->core.run(); mp_internals->rpc.run(); mp_internals->p2p.run(); - mp_internals->rpc.stop(); + + // the stop() method clears internals + if (mp_internals != nullptr) + { + mp_internals->rpc.stop(); + } LOG_PRINT("Node stopped.", LOG_LEVEL_0); return true; } diff --git a/src/daemonizer/posix_daemonizer.inl b/src/daemonizer/posix_daemonizer.inl index e06d43d61..5a905635a 100644 --- a/src/daemonizer/posix_daemonizer.inl +++ b/src/daemonizer/posix_daemonizer.inl @@ -49,7 +49,7 @@ namespace daemonizer auto daemon = executor.create_daemon(vm); tools::success_msg_writer() << "Forking to background..."; posix::fork(); - return daemon.run(); + return daemon.run() ?
0 : 1; } else { diff --git a/src/p2p/net_node.inl b/src/p2p/net_node.inl index 6ed861e10..851ff1fca 100644 --- a/src/p2p/net_node.inl +++ b/src/p2p/net_node.inl @@ -495,7 +495,9 @@ namespace nodetool bool node_server<t_payload_net_handler>::send_stop_signal() { m_net_server.send_stop_signal(); - LOG_PRINT_L0("[node] Stop signal sent"); + LOG_PRINT_L0("[node] Stop signal sent" << std::endl + << "Please be patient while the daemon shuts down gracefully."); + m_net_server.timed_wait_server_stop(5000); return true; } //-----------------------------------------------------------------------------------