/*
 * =====================================================================================
 *
 *       Filename:  api_test.cpp
 *
 *    Description:
 *
 *        Version:  1.0
 *        Created:  2021-04-13 14:31:46
 *       Revision:  none
 *       Compiler:  gcc
 *
 *         Author:  Li Chao (), lichao@aiotlink.com
 *   Organization:
 *
 * =====================================================================================
 */
#include "bh_api.h"
#include "util.h"
// NOTE(review): the two standard include names below were lost in the
// checked-in text ("#include #include"); <atomic> and <thread> cover the std
// symbols used directly in this file -- confirm against the original source.
#include <atomic>
#include <thread>

using namespace bhome_msg;

namespace
{

// Shared counter type. The template argument was stripped from the checked-in
// text; int64_t is reconstructed from the int64_t stats and the "%ld" printfs
// that consume these counters.
typedef std::atomic<int64_t> Number;

// std::atomic is neither copyable nor assignable; copy value-wise instead.
void Assign(Number &a, const Number &b) { a.store(b.load()); }

// Request/serve/reply counters shared by all test threads (see Status()).
struct MsgStatus {
    Number nrequest_; // async requests successfully queued
    Number nfailed_;  // async requests rejected by BHAsyncRequest
    Number nreply_;   // replies received in ClientProc
    Number nserved_;  // requests answered by the server side

    // Zero-init every counter. The original ctor skipped nfailed_, leaving a
    // default-constructed std::atomic with an indeterminate value (pre-C++20).
    MsgStatus() :
        nrequest_(0), nfailed_(0), nreply_(0), nserved_(0) {}

    // Member-wise atomic copy, used by the stats printer to keep a snapshot.
    MsgStatus &operator=(const MsgStatus &a)
    {
        Assign(nrequest_, a.nrequest_);
        Assign(nserved_, a.nserved_);
        Assign(nreply_, a.nreply_);
        Assign(nfailed_, a.nfailed_);
        return *this;
    }
};

// Process-wide counters (function-local static: initialized on first use).
MsgStatus &Status()
{
    static MsgStatus st;
    return st;
}

// Subscription callback: decode the published message and print it.
void SubRecvProc(const void *proc_id, const int proc_id_len, const void *data, const int data_len)
{
    std::string proc((const char *) proc_id, proc_id_len);
    MsgPublish pub;
    pub.ParseFromArray(data, data_len);
    printf("Sub data, %s : %s\n", pub.topic().c_str(), pub.data().c_str());
}

// Server-side request callback: echo the request data back with a prefix.
// `src` is the opaque reply handle that BHSendReply routes on.
void ServerProc(const void *proc_id, const int proc_id_len, const void *data, const int data_len, void *src)
{
    // printf("ServerProc: ");
    // DEFER1(printf("\n"););
    MsgRequestTopic request;
    if (request.ParseFromArray(data, data_len)) {
        MsgRequestTopicReply reply;
        reply.set_data(" reply: " + request.data());
        std::string s(reply.SerializeAsString());
        // printf("%s", reply.data().c_str());
        BHSendReply(src, s.data(), s.size());
        ++Status().nserved_;
    }
}

// Client-side reply callback: count every reply that parses correctly.
void ClientProc(const void *proc_id, const int proc_id_len, const void *msg_id, const int msg_id_len, const void *data, const int data_len)
{
    std::string proc((const char *) proc_id, proc_id_len);
    MsgRequestTopicReply reply;
    if (reply.ParseFromArray(data, data_len)) {
        ++Status().nreply_;
    }
    // printf("client Recv reply : %s\n", reply.data().c_str());
}

// Experimental "time-limited" mutex: a lock older than limit_ is treated as
// expired and may be stolen by another contender via CAS on the lock
// timestamp. The branch structure below is order-sensitive; it is preserved
// exactly as written and only documented here.
class TLMutex
{
    typedef boost::interprocess::interprocess_mutex MutexT;
    // typedef CasMutex MutexT;
    // typedef std::mutex MutexT;
    typedef std::chrono::steady_clock Clock;
    typedef Clock::duration Duration;
    static Duration Now() { return Clock::now().time_since_epoch(); }

    const Duration limit_;
    // Template argument reconstructed as Duration: the destructor's
    // static_assert on std::is_pod (see below) and the load/store of Now()
    // values fix the stored type. Explicitly zero-initialized; the original
    // left the atomic default-constructed (indeterminate pre-C++20), making
    // the very first Expired() check read garbage.
    std::atomic<Duration> last_lock_time_{Duration::zero()};
    MutexT mutex_;
    bool Expired(const Duration diff) { return diff > limit_; }

public:
    // Diagnostic counters printed by the MutexTest case.
    struct Status {
        int64_t nlock_ = 0;            // total spin iterations in lock()
        int64_t nupdate_time_fail = 0; // (unused here)
        int64_t nfail = 0;             // (unused here)
        int64_t nexcept = 0;           // (unused here)
    };
    Status st_;

    explicit TLMutex(Duration limit) :
        limit_(limit) {}
    TLMutex() :
        TLMutex(std::chrono::seconds(1)) {}
    // Duration must be POD so std::atomic<Duration> is lock-free-capable and
    // trivially copyable across processes.
    ~TLMutex() { static_assert(std::is_pod<Duration>::value); }

    bool try_lock()
    {
        if (mutex_.try_lock()) {
            auto old_time = last_lock_time_.load();
            auto cur = Now();
            if (Expired(cur - old_time)) {
                // Stale timestamp: claim it via CAS; losing the race means
                // another thread stole the expired lock first.
                return last_lock_time_.compare_exchange_strong(old_time, cur);
            } else {
                last_lock_time_.store(Now());
                return true;
            }
        } else {
            // Underlying mutex is held: we may still steal it if the holder's
            // timestamp has expired (e.g. the holder died).
            auto old_time = last_lock_time_.load();
            auto cur = Now();
            if (Expired(cur - old_time)) {
                return last_lock_time_.compare_exchange_strong(old_time, cur);
            } else {
                return false;
            }
        }
    }

    // Spin with yield until try_lock succeeds; record the spin count.
    void lock()
    {
        int n = 0;
        while (!try_lock()) {
            n++;
            std::this_thread::yield();
        }
        st_.nlock_ += n;
    }

    // Only release the underlying mutex while our timestamp is still fresh
    // and unchanged; an expired/stolen lock must not be unlocked twice.
    void unlock()
    {
        auto old_time = last_lock_time_.load();
        auto cur = Now();
        if (!Expired(cur - old_time)) {
            if (last_lock_time_.compare_exchange_strong(old_time, cur)) {
                mutex_.unlock();
            }
        }
    }
};

//robust attr does NOT work, maybe os does not support it.
class RobustMutex { public: RobustMutex() { pthread_mutexattr_t mutex_attr; auto attr = [&]() { return &mutex_attr; }; int r = pthread_mutexattr_init(attr()); r |= pthread_mutexattr_setpshared(attr(), PTHREAD_PROCESS_SHARED); r |= pthread_mutexattr_setrobust_np(attr(), PTHREAD_MUTEX_ROBUST_NP); r |= pthread_mutex_init(mtx(), attr()); int rob = 0; pthread_mutexattr_getrobust_np(attr(), &rob); int shared = 0; pthread_mutexattr_getpshared(attr(), &shared); printf("robust : %d, shared : %d\n", rob, shared); r |= pthread_mutexattr_destroy(attr()); if (r) { throw("init mutex error."); } } ~RobustMutex() { pthread_mutex_destroy(mtx()); } public: void lock() { Lock(); } bool try_lock() { int r = TryLock(); printf("TryLock ret: %d\n", r); return r == 0; } void unlock() { Unlock(); } // private: int TryLock() { return pthread_mutex_trylock(mtx()); } int Lock() { return pthread_mutex_lock(mtx()); } int Unlock() { return pthread_mutex_unlock(mtx()); } private: pthread_mutex_t *mtx() { return &mutex_; } pthread_mutex_t mutex_; }; class LockFreeQueue { typedef int64_t Data; typedef boost::lockfree::queue> LFQueue; void push_back(Data d) { queue_.push(d); } private: LFQueue queue_; }; } // namespace BOOST_AUTO_TEST_CASE(MutexTest) { SharedMemory &shm = TestShm(); // shm.Remove(); // return; GlobalInit(shm); const std::string mtx_name("test_mutex"); const std::string int_name("test_int"); auto mtx = shm.FindOrCreate(mtx_name); auto pi = shm.FindOrCreate(int_name, 100); std::mutex m; typedef std::chrono::steady_clock Clock; auto Now = []() { return Clock::now().time_since_epoch(); }; if (pi) { auto old = *pi; printf("int : %d, add1: %d\n", old, ++*pi); } { boost::timer::auto_cpu_timer timer; printf("test time: "); TLMutex mutex; // CasMutex mutex; auto Lock = [&]() { for (int i = 0; i < 10; ++i) { mutex.lock(); mutex.unlock(); } }; std::thread t1(Lock), t2(Lock); t1.join(); t2.join(); printf("mutex nlock: %ld, update time error: %ld, normal fail: %ld, error wait: %ld\n", 
mutex.st_.nlock_, mutex.st_.nupdate_time_fail, mutex.st_.nfail, mutex.st_.nexcept); } auto MSFromNow = [](const int ms) { using namespace boost::posix_time; ptime cur = boost::posix_time::microsec_clock::universal_time(); return cur + millisec(ms); }; auto TryLock = [&]() { if (mtx->try_lock()) { printf("try_lock ok\n"); return true; } else { printf("try_lock failed\n"); return false; } }; auto Unlock = [&]() { mtx->unlock(); printf("unlocked\n"); }; if (mtx) { printf("mtx exists\n"); if (TryLock()) { auto op = [&]() { if (TryLock()) { Unlock(); } }; op(); std::thread t(op); t.join(); // Unlock(); } else { // mtx->unlock(); } } else { printf("mtx not exists\n"); } } BOOST_AUTO_TEST_CASE(ApiTest) { auto max_time = std::chrono::steady_clock::time_point::max(); auto dur = max_time.time_since_epoch(); auto nsec = std::chrono::duration_cast(dur).count(); auto nmin = nsec / 60; auto nhour = nmin / 60; auto nday = nhour / 24; auto years = nday / 365; printf("seconds: %ld, hours: %ld , days:%ld, years: %ld\n", nsec, nhour, nday, years); std::chrono::steady_clock::duration a(123456); printf("nowsec: %ld\n", NowSec()); printf("maxsec: %ld\n", CountSeconds(max_time)); bool reg = false; for (int i = 0; i < 3 && !reg; ++i) { ProcInfo proc; proc.set_proc_id("demo_client"); proc.set_public_info("public info of demo_client. etc..."); std::string proc_buf(proc.SerializeAsString()); void *reply = 0; int reply_len = 0; reg = BHRegister(proc_buf.data(), proc_buf.size(), &reply, &reply_len, 2000); printf("register %s\n", reg ? "ok" : "failed"); BHFree(reply, reply_len); Sleep(1s); } if (!reg) { return; } const std::string topic_ = "topic_"; { // Server Register Topics MsgTopicList topics; for (int i = 0; i < 10; ++i) { topics.add_topic_list(topic_ + std::to_string(i)); } std::string s = topics.SerializeAsString(); void *reply = 0; int reply_len = 0; bool r = BHRegisterTopics(s.data(), s.size(), &reply, &reply_len, 1000); BHFree(reply, reply_len); // printf("register topic : %s\n", r ? 
"ok" : "failed"); Sleep(1s); } { // Subscribe MsgTopicList topics; for (int i = 0; i < 10; ++i) { topics.add_topic_list(topic_ + std::to_string(i * 2)); } std::string s = topics.SerializeAsString(); void *reply = 0; int reply_len = 0; bool r = BHSubscribeTopics(s.data(), s.size(), &reply, &reply_len, 1000); BHFree(reply, reply_len); printf("subscribe topic : %s\n", r ? "ok" : "failed"); } auto ServerLoop = [&](std::atomic *run) { while (*run) { void *proc_id = 0; int proc_id_len = 0; DEFER1(BHFree(proc_id, proc_id_len);); void *input = 0; int input_len = 0; DEFER1(BHFree(input, input_len)); void *src = 0; if (BHReadRequest(&proc_id, &proc_id_len, &input, &input_len, &src, 10) && src) { MsgRequestTopic request; if (request.ParseFromArray(input, input_len)) { MsgRequestTopicReply reply; reply.set_data(" reply: " + request.data()); std::string s(reply.SerializeAsString()); // printf("%s", reply.data().c_str()); BHSendReply(src, s.data(), s.size()); ++Status().nserved_; } src = 0; } } }; auto SyncRequest = [&](int idx) { // SyncRequest MsgRequestTopic req; req.set_topic(topic_ + std::to_string(idx)); req.set_data("request_data_" + std::to_string(idx)); std::string s(req.SerializeAsString()); // Sleep(10ms, false); std::string dest(BHAddress().SerializeAsString()); void *proc_id = 0; int proc_id_len = 0; DEFER1(BHFree(proc_id, proc_id_len);); void *reply = 0; int reply_len = 0; DEFER1(BHFree(reply, reply_len)); bool r = BHRequest(dest.data(), dest.size(), s.data(), s.size(), &proc_id, &proc_id_len, &reply, &reply_len, 100); if (!r) { int ec = 0; std::string msg; GetLastError(ec, msg); printf("request error: %s\n", msg.c_str()); } else { MsgRequestTopicReply ret; ret.ParseFromArray(reply, reply_len); printf("request result: %s\n", ret.data().c_str()); } }; { for (int i = 0; i < 1; ++i) { MsgPublish pub; pub.set_topic(topic_ + std::to_string(i)); pub.set_data("pub_data_" + std::string(1024 * 1, 'a')); std::string s(pub.SerializeAsString()); BHPublish(s.data(), s.size(), 
0); // Sleep(1s); } } auto asyncRequest = [&](uint64_t nreq) { for (uint64_t i = 0; i < nreq; ++i) { MsgRequestTopic req; req.set_topic(topic_ + std::to_string(0)); req.set_data("request_data_" + std::to_string(i)); std::string s(req.SerializeAsString()); void *msg_id = 0; int len = 0; DEFER1(BHFree(msg_id, len);); // Sleep(10ms, false); std::string dest(BHAddress().SerializeAsString()); bool r = BHAsyncRequest(dest.data(), dest.size(), s.data(), s.size(), 0, 0); if (r) { ++Status().nrequest_; } else { ++Status().nfailed_; static std::atomic last(0); auto now = NowSec(); if (last.exchange(now) < now) { int ec = 0; std::string msg; GetLastError(ec, msg); printf("request topic error --------- : %s\n", msg.c_str()); } } } }; auto showStatus = [](std::atomic *run) { MsgStatus last; while (*run) { auto &st = Status(); Sleep(1s, false); printf("nreq: %8ld, spd %8ld | failed: %8ld | nsrv: %8ld, spd %8ld | nreply: %8ld, spd %8ld\n", st.nrequest_.load(), st.nrequest_ - last.nrequest_, st.nfailed_.load(), st.nserved_.load(), st.nserved_ - last.nserved_, st.nreply_.load(), st.nreply_ - last.nreply_); last = st; } }; auto hb = [](std::atomic *run) { while (*run) { Sleep(1s, false); bool r = BHHeartbeatEasy(1000); printf("heartbeat: %s\n", r ? "ok" : "failed"); } }; std::atomic run(true); BHStartWorker(&ServerProc, &SubRecvProc, &ClientProc); ThreadManager threads; boost::timer::auto_cpu_timer timer; threads.Launch(hb, &run); threads.Launch(showStatus, &run); int ncli = 10; const uint64_t nreq = 1000 * 10; for (int i = 0; i < ncli; ++i) { threads.Launch(asyncRequest, nreq); } int same = 0; int64_t last = 0; while (last < nreq * ncli && same < 2) { Sleep(1s, false); auto cur = Status().nreply_.load(); if (last == cur) { ++same; } else { last = cur; same = 0; } } run = false; threads.WaitAll(); auto &st = Status(); printf("nreq: %8ld, nsrv: %8ld, nreply: %8ld\n", st.nrequest_.load(), st.nserved_.load(), st.nreply_.load()); }