From 1b167ec5ad101ac44451381e26cc73ab5d67d2a1 Mon Sep 17 00:00:00 2001
From: lichao <lichao@aiotlink.com>
Date: Mon, 26 Apr 2021 16:37:52 +0800
Subject: [PATCH] fix socket busy loop; del locked readall; refactor.

---
 utest/api_test.cpp | 189 +++++++++++++++++++++++++++++++++++------------
 1 file changed, 141 insertions(+), 48 deletions(-)

diff --git a/utest/api_test.cpp b/utest/api_test.cpp
index 200ae99..6682aaf 100644
--- a/utest/api_test.cpp
+++ b/utest/api_test.cpp
@@ -18,6 +18,7 @@
 #include "bh_api.h"
 #include "util.h"
 #include <atomic>
+#include <boost/lockfree/queue.hpp>

 using namespace bhome_msg;

@@ -49,7 +50,6 @@
     static MsgStatus st;
     return st;
 }
-} // namespace

 void SubRecvProc(const void *proc_id,
                  const int proc_id_len,
@@ -59,7 +59,7 @@
     std::string proc((const char *) proc_id, proc_id_len);
     MsgPublish pub;
     pub.ParseFromArray(data, data_len);
-    // printf("Sub data, %s : %s\n", pub.topic().c_str(), pub.data().c_str());
+    printf("Sub data, %s : %s\n", pub.topic().c_str(), pub.data().c_str());
 }

 void ServerProc(const void *proc_id,
@@ -98,8 +98,8 @@
 class TLMutex
 {
-    // typedef boost::interprocess::interprocess_mutex MutexT;
-    typedef CasMutex MutexT;
+    typedef boost::interprocess::interprocess_mutex MutexT;
+    // typedef CasMutex MutexT;
     // typedef std::mutex MutexT;
     typedef std::chrono::steady_clock Clock;
     typedef Clock::duration Duration;
@@ -108,6 +108,7 @@
     const Duration limit_;
     std::atomic<Duration> last_lock_time_;
     MutexT mutex_;
+    bool Expired(const Duration diff) { return diff > limit_; }

 public:
     struct Status {
@@ -127,16 +128,18 @@
     {
         if (mutex_.try_lock()) {
             auto old_time = last_lock_time_.load();
-            if (Now() - old_time > limit_) {
-                return last_lock_time_.compare_exchange_strong(old_time, Now());
+            auto cur = Now();
+            if (Expired(cur - old_time)) {
+                return last_lock_time_.compare_exchange_strong(old_time, cur);
             } else {
                 last_lock_time_.store(Now());
                 return true;
             }
         } else {
             auto old_time = last_lock_time_.load();
-            if (Now() - old_time > limit_) {
-                return last_lock_time_.compare_exchange_strong(old_time, Now());
+            auto cur = Now();
+            if (Expired(cur - old_time)) {
+                return last_lock_time_.compare_exchange_strong(old_time, cur);
             } else {
                 return false;
             }
@@ -151,52 +154,91 @@
         }
         st_.nlock_ += n;
     }
-    void unlock() { mutex_.unlock(); }
+    void unlock()
+    {
+        // Only release the mutex if our ownership has not expired; an expired
+        // lock may already have been taken over by another locker through the
+        // CAS path in try_lock(), so it must be left alone.
+        auto old_time = last_lock_time_.load();
+        auto cur = Now();
+        if (!Expired(cur - old_time)) {
+            if (last_lock_time_.compare_exchange_strong(old_time, cur)) {
+                mutex_.unlock();
+            }
+        }
+    }
 };

-namespace
+// The robust attribute does NOT seem to work; maybe the OS does not support it.
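+// A robust mutex is meant to let a surviving process recover the lock after
+// its owner dies. A minimal recovery sketch (hypothetical, not wired into
+// this test):
+//   int r = pthread_mutex_lock(&m);
+//   if (r == EOWNERDEAD) {            // previous owner died holding m
+//       pthread_mutex_consistent(&m); // mark state consistent, keep the lock
+//   }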
+class RobustMutex
 {
-typedef int64_t Offset;
-Offset Addr(void *ptr) { return reinterpret_cast<Offset>(ptr); }
-void *Ptr(const Offset offset) { return reinterpret_cast<void *>(offset); }
+public:
+    RobustMutex()
+    {
+        pthread_mutexattr_t mutex_attr;
+        auto attr = [&]() { return &mutex_attr; };
+        int r = pthread_mutexattr_init(attr());
+        r |= pthread_mutexattr_setpshared(attr(), PTHREAD_PROCESS_SHARED);
+        r |= pthread_mutexattr_setrobust_np(attr(), PTHREAD_MUTEX_ROBUST_NP);
+        r |= pthread_mutex_init(mtx(), attr());
+        int rob = 0;
+        pthread_mutexattr_getrobust_np(attr(), &rob);
+        int shared = 0;
+        pthread_mutexattr_getpshared(attr(), &shared);
+        printf("robust : %d, shared : %d\n", rob, shared);
+        r |= pthread_mutexattr_destroy(attr());
+        if (r) {
+            throw("init mutex error.");
+        }
+    }
+    ~RobustMutex()
+    {
+        pthread_mutex_destroy(mtx());
+    }
+
+public:
+    void lock() { Lock(); }
+    bool try_lock()
+    {
+        int r = TryLock();
+        printf("TryLock ret: %d\n", r);
+        return r == 0;
+    }
+
+    void unlock() { Unlock(); }
+
+    // private:
+    int TryLock() { return pthread_mutex_trylock(mtx()); }
+    int Lock() { return pthread_mutex_lock(mtx()); }
+    int Unlock() { return pthread_mutex_unlock(mtx()); }
+
+private:
+    pthread_mutex_t *mtx() { return &mutex_; }
+    pthread_mutex_t mutex_;
+};
+
+class LockFreeQueue
+{
+    typedef int64_t Data;
+    typedef boost::lockfree::queue<Data, boost::lockfree::capacity<1024>> LFQueue;
+    // NOTE: push() returns false when the fixed capacity is exhausted;
+    // push_back() currently discards that signal.
+    void push_back(Data d) { queue_.push(d); }
+
+private:
+    LFQueue queue_;
+};
+
 } // namespace

 BOOST_AUTO_TEST_CASE(MutexTest)
 {
     SharedMemory &shm = TestShm();
-    MsgI::BindShm(shm);
-
-    void *base_ptr = shm.get_address();
-    auto PrintPtr = [&](void *p) {
-        printf("addr: %ld, ptr: %p, offset: %ld\n", Addr(p), p, Addr(p) - Addr(base_ptr));
-    };
-
-    printf("base");
-    PrintPtr(base_ptr);
-
-    MsgI msg;
-    msg.Make("string data");
-    for (int i = 0; i < 10; ++i) {
-        int n = msg.AddRef();
-        printf("add %d ref: %d\n", i, n);
-    }
-    for (int i = 0; i < 10; ++i) {
-        int n = msg.Release();
-        printf("release %d, ref : %d\n", i, n);
-    }
-    std::this_thread::sleep_for(1s);
-    msg.Release();
+    // shm.Remove();
+    // return;
+    GlobalInit(shm);

     const std::string mtx_name("test_mutex");
     const std::string int_name("test_int");
-    auto mtx = shm.find_or_construct<Mutex>(mtx_name.c_str())();
-    auto pi = shm.find_or_construct<int>(int_name.c_str())(100);
+    auto mtx = shm.FindOrCreate<TLMutex>(mtx_name);
+    auto pi = shm.FindOrCreate<int>(int_name, 100);

-    printf("mutetx ");
-    PrintPtr(mtx);
-    printf("int ");
-    PrintPtr(pi);
-
+    std::mutex m;
     typedef std::chrono::steady_clock Clock;
     auto Now = []() { return Clock::now().time_since_epoch(); };
     if (pi) {
@@ -210,7 +252,7 @@
     TLMutex mutex;
     // CasMutex mutex;
     auto Lock = [&]() {
-        for (int i = 0; i < 1000 * 100; ++i) {
+        for (int i = 0; i < 10; ++i) {
             mutex.lock();
             mutex.unlock();
         }
@@ -301,7 +343,7 @@

     const std::string topic_ = "topic_";

-    {
+    { // Server Register Topics
         MsgTopicList topics;
         for (int i = 0; i < 10; ++i) {
             topics.add_topic_list(topic_ + std::to_string(i));
@@ -315,7 +357,7 @@
         Sleep(1s);
     }

-    {
+    { // Subscribe
         MsgTopicList topics;
         for (int i = 0; i < 10; ++i) {
             topics.add_topic_list(topic_ + std::to_string(i * 2));
@@ -328,8 +370,56 @@
         printf("subscribe topic : %s\n", r ? "ok" : "failed");
     }

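+    // ServerLoop below is a polling alternative to the callback-style
+    // ServerProc; BHStartWorker is now called later, just before the worker
+    // threads start. ServerLoop is not launched in the visible part of this
+    // diff; a sketch of running it (assuming ThreadManager::Launch as used
+    // further down) would be:
+    //   std::atomic<bool> server_run(true);
+    //   threads.Launch(ServerLoop, &server_run);
+    //   ...
+    //   server_run = false; // stop the loop before joining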
"ok" : "failed"); } - BHStartWorker(&ServerProc, &SubRecvProc, &ClientProc); + auto ServerLoop = [&](std::atomic<bool> *run) { + while (*run) { + void *proc_id = 0; + int proc_id_len = 0; + DEFER1(BHFree(proc_id, proc_id_len);); + void *input = 0; + int input_len = 0; + DEFER1(BHFree(input, input_len)); + void *src = 0; + if (BHReadRequest(&proc_id, &proc_id_len, &input, &input_len, &src, 10) && src) { + MsgRequestTopic request; + if (request.ParseFromArray(input, input_len)) { + MsgRequestTopicReply reply; + reply.set_data(" reply: " + request.data()); + std::string s(reply.SerializeAsString()); + // printf("%s", reply.data().c_str()); + BHSendReply(src, s.data(), s.size()); + ++Status().nserved_; + } + src = 0; + } + } + }; + + auto SyncRequest = [&](int idx) { // SyncRequest + MsgRequestTopic req; + req.set_topic(topic_ + std::to_string(idx)); + req.set_data("request_data_" + std::to_string(idx)); + std::string s(req.SerializeAsString()); + // Sleep(10ms, false); + std::string dest(BHAddress().SerializeAsString()); + void *proc_id = 0; + int proc_id_len = 0; + DEFER1(BHFree(proc_id, proc_id_len);); + void *reply = 0; + int reply_len = 0; + DEFER1(BHFree(reply, reply_len)); + bool r = BHRequest(dest.data(), dest.size(), s.data(), s.size(), &proc_id, &proc_id_len, &reply, &reply_len, 100); + if (!r) { + int ec = 0; + std::string msg; + GetLastError(ec, msg); + printf("request error: %s\n", msg.c_str()); + } else { + MsgRequestTopicReply ret; + ret.ParseFromArray(reply, reply_len); + printf("request result: %s\n", ret.data().c_str()); + } + }; { for (int i = 0; i < 1; ++i) { MsgPublish pub; @@ -349,10 +439,10 @@ std::string s(req.SerializeAsString()); void *msg_id = 0; int len = 0; + DEFER1(BHFree(msg_id, len);); // Sleep(10ms, false); std::string dest(BHAddress().SerializeAsString()); bool r = BHAsyncRequest(dest.data(), dest.size(), s.data(), s.size(), 0, 0); - DEFER1(BHFree(msg_id, len);); if (r) { ++Status().nrequest_; } else { @@ -388,13 +478,16 @@ printf("heartbeat: %s\n", r ? "ok" : "failed"); } }; + std::atomic<bool> run(true); + + BHStartWorker(&ServerProc, &SubRecvProc, &ClientProc); ThreadManager threads; boost::timer::auto_cpu_timer timer; threads.Launch(hb, &run); threads.Launch(showStatus, &run); int ncli = 10; - const uint64_t nreq = 1000 * 100; + const uint64_t nreq = 1000 * 10; for (int i = 0; i < ncli; ++i) { threads.Launch(asyncRequest, nreq); } -- Gitblit v1.8.0