
#include "bh_api.h"
#include "util.h"
#include <atomic>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/interprocess/sync/interprocess_mutex.hpp>
#include <boost/lockfree/queue.hpp>
#include <boost/timer/timer.hpp>
#include <chrono>
#include <errno.h>
#include <mutex>
#include <pthread.h>
#include <stdexcept>
#include <thread>
#include <type_traits>

using namespace bhome::msg;
using namespace bhome_msg;

namespace
{
// Counters used by the callbacks and test threads below; the struct body was
// lost, so its fields are reconstructed from the ++Status().nrequest_ and
// ++Status().nserved_ call sites later in this file.
struct MsgStatus {
	std::atomic<int64_t> nrequest_{0};
	std::atomic<int64_t> nserved_{0};
};

MsgStatus &Status()
{
	static MsgStatus st;
	return st;
}
} // namespace

void SubRecvProc(const void *proc_id,
                 const int proc_id_len,
                 const void *data,
                 const int data_len)
{
	std::string proc((const char *) proc_id, proc_id_len);
	MsgPublish pub;
	pub.ParseFromArray(data, data_len);
	printf("Sub data, %s : %s\n", pub.topic().c_str(), pub.data().c_str());
}

// Of the two reply paths present in the source, the BHServerCallbackTag
// variant is dropped; the void *src / BHSendReply variant matches the
// ServerLoop code later in this file.
void ServerProc(const void *proc_id,
                const int proc_id_len,
                const void *data,
                const int data_len,
                void *src)
{
	// printf("ServerProc: ");
	// DEFER1(printf("\n"););
	MsgRequestTopic request;
	if (request.ParseFromArray(data, data_len)) {
		MsgRequestTopicReply reply;
		reply.set_data(" reply: " + request.data());
		std::string s(reply.SerializeAsString());
		// printf("%s", reply.data().c_str());
		BHSendReply(src, s.data(), s.size());
		++Status().nserved_;
	}
}

// Only a commented printf of ClientProc survived; the signature below is an
// assumption inferred from the BHStartWorker(&ServerProc, &SubRecvProc,
// &ClientProc) call later in this file.
void ClientProc(const void *proc_id,
                const int proc_id_len,
                const void *msg_id,
                const int msg_id_len,
                const void *data,
                const int data_len)
{
	MsgRequestTopicReply reply;
	if (reply.ParseFromArray(data, data_len)) {
		// printf("client Recv reply : %s\n", reply.data().c_str());
	}
}

namespace
{
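// TLMutex: a "time-limited" mutex experiment. Each successful lock stamps
// last_lock_time_; a holder that has not refreshed the stamp within limit_
// is treated as dead, and the lock may be stolen by compare-exchanging the
// timestamp. A sketch for surviving crashed lock owners in shared memory,
// not a proven primitive.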
class TLMutex
{
	typedef boost::interprocess::interprocess_mutex MutexT;
	// typedef CasMutex MutexT;
	// typedef std::mutex MutexT;
	typedef std::chrono::steady_clock Clock;
	typedef Clock::duration Duration;
	static Duration Now() { return Clock::now().time_since_epoch(); }

	const Duration limit_;
	// Zero-initialize: the stamp is read before the first lock.
	std::atomic<Duration> last_lock_time_{Duration::zero()};
	MutexT mutex_;
	bool Expired(const Duration diff) { return diff > limit_; }

public:
	struct Status {
		int64_t nlock_ = 0;
		int64_t nupdate_time_fail = 0;
		int64_t nfail = 0;
		int64_t nexcept = 0;
	};
	Status st_;

	explicit TLMutex(Duration limit) :
	    limit_(limit) {}
	TLMutex() :
	    TLMutex(std::chrono::seconds(1)) {}
	~TLMutex() { static_assert(std::is_pod<Duration>::value); }
	bool try_lock()
	{
		if (mutex_.try_lock()) {
			auto old_time = last_lock_time_.load();
			auto cur = Now();
			if (Expired(cur - old_time)) {
				// Stamp looks abandoned: claim it by updating the timestamp.
				return last_lock_time_.compare_exchange_strong(old_time, cur);
			} else {
				last_lock_time_.store(Now());
				return true;
			}
		} else {
			auto old_time = last_lock_time_.load();
			auto cur = Now();
			if (Expired(cur - old_time)) {
				// Holder exceeded limit_: steal the lock via the timestamp.
				return last_lock_time_.compare_exchange_strong(old_time, cur);
			} else {
				return false;
			}
		}
	}
	void lock()
	{
		int n = 0;
		while (!try_lock()) {
			n++;
			std::this_thread::yield();
		}
		st_.nlock_ += n;
	}
	void unlock()
	{
		auto old_time = last_lock_time_.load();
		auto cur = Now();
		if (!Expired(cur - old_time)) {
			if (last_lock_time_.compare_exchange_strong(old_time, cur)) {
				mutex_.unlock();
			}
		}
	}
};
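
// TLMutex satisfies the standard Lockable requirements (lock / try_lock /
// unlock), so the usual RAII guards work with it, e.g.
//   std::lock_guard<TLMutex> guard(mutex);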

// The robust attribute does NOT seem to work; maybe the OS does not support it.
class RobustMutex
{
public:
	RobustMutex()
	{
		pthread_mutexattr_t mutex_attr;
		auto attr = [&]() { return &mutex_attr; };
		int r = pthread_mutexattr_init(attr());
		r |= pthread_mutexattr_setpshared(attr(), PTHREAD_PROCESS_SHARED);
		r |= pthread_mutexattr_setrobust_np(attr(), PTHREAD_MUTEX_ROBUST_NP);
		r |= pthread_mutex_init(mtx(), attr());
		int rob = 0;
		pthread_mutexattr_getrobust_np(attr(), &rob);
		int shared = 0;
		pthread_mutexattr_getpshared(attr(), &shared);
		printf("robust : %d, shared : %d\n", rob, shared);
		r |= pthread_mutexattr_destroy(attr());
		if (r) {
			throw std::runtime_error("init mutex error.");
		}
	}
	~RobustMutex()
	{
		pthread_mutex_destroy(mtx());
	}

public:
	void lock() { Lock(); }
	bool try_lock()
	{
		int r = TryLock();
		printf("TryLock ret: %d\n", r);
		return r == 0;
	}

	void unlock() { Unlock(); }

	// private:
	int TryLock() { return pthread_mutex_trylock(mtx()); }
	int Lock() { return pthread_mutex_lock(mtx()); }
	int Unlock() { return pthread_mutex_unlock(mtx()); }

private:
	pthread_mutex_t *mtx() { return &mutex_; }
	pthread_mutex_t mutex_;
};
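
// A minimal sketch (not exercised by these tests) of the recovery protocol a
// robust mutex is meant to enable: when the lock owner dies, the next
// pthread_mutex_lock returns EOWNERDEAD, and the new owner must mark the
// mutex consistent before unlocking, or the mutex becomes unusable.
inline int LockRobust(pthread_mutex_t *m)
{
	int r = pthread_mutex_lock(m);
	if (r == EOWNERDEAD) {
		// The previous owner died while holding the lock; any state it
		// guarded may be inconsistent. Repair that state here, then mark
		// the mutex consistent so later lockers see a normal mutex.
		r = pthread_mutex_consistent_np(m);
	}
	return r;
}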

class LockFreeQueue
{
	typedef int64_t Data;
	typedef boost::lockfree::queue<Data, boost::lockfree::capacity<1024>> LFQueue;
	// push returns false when the fixed-capacity ring is full; surface that
	// to callers instead of dropping data silently.
	bool push_back(Data d) { return queue_.push(d); }

private:
	LFQueue queue_;
};

} // namespace

BOOST_AUTO_TEST_CASE(MutexTest)
{
	const std::string shm_name("ShmMutex");
	// ShmRemover auto_remove(shm_name);
	SharedMemory &shm = TestShm();
	// shm.Remove();
	// return;
	GlobalInit(shm);

	const std::string mtx_name("test_mutex");
	const std::string int_name("test_int");
	auto mtx = shm.FindOrCreate<TLMutex>(mtx_name);
	auto pi = shm.FindOrCreate<int>(int_name, 100);

	std::mutex m;
	typedef std::chrono::steady_clock Clock;
	auto Now = []() { return Clock::now().time_since_epoch(); };
	if (pi) {
		auto old = *pi;
		printf("int : %d, add1: %d\n", old, ++*pi);
	}

	{
		boost::timer::auto_cpu_timer timer;
		printf("test time: ");
		TLMutex mutex;
		// CasMutex mutex;
		auto Lock = [&]() {
			for (int i = 0; i < 10; ++i) {
				mutex.lock();
				mutex.unlock();
			}
		};
		std::thread t1(Lock), t2(Lock);
		t1.join();
		t2.join();
		printf("mutex nlock: %ld, update time error: %ld, normal fail: %ld, error wait: %ld\n",
		       mutex.st_.nlock_,
		       mutex.st_.nupdate_time_fail,
		       mutex.st_.nfail,
		       mutex.st_.nexcept);
	}

	auto MSFromNow = [](const int ms) {
		using namespace boost::posix_time;
		ptime cur = boost::posix_time::microsec_clock::universal_time();
		return cur + millisec(ms);
	};

	// The TryLock/Unlock helper bodies were lost; they are reconstructed
	// here from their call sites below.
	auto TryLock = [&]() {
		if (mtx->try_lock()) {
			printf("try_lock ok\n");
			return true;
		} else {
			printf("try_lock failed\n");
			return false;
		}
	};
	auto Unlock = [&]() {
		mtx->unlock();
		printf("unlock\n");
	};

	if (mtx) {
		printf("mtx exists\n");
		if (TryLock()) {
			if (TryLock()) {
				Unlock();
			}
			auto op = [&]() {
				if (TryLock()) {
					Unlock();
				}
			};
			op();
			std::thread t(op);
			t.join();
			// Unlock();
		} else {
			// mtx->unlock();
		}
	} else {
		printf("mtx not exists\n");
	}
}

namespace
{
struct CCC {
};
void F(CCC &&c) {}

template <class... T>
void Pass(T &&...t)
{
	F(std::forward<decltype(t)>(t)...);
}
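
// Note: Pass perfectly forwards its arguments, so Pass(CCC{}) binds the
// temporary to F(CCC &&); passing an lvalue CCC would fail to compile.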

} // namespace
BOOST_AUTO_TEST_CASE(ApiTest)
{
	auto max_time = std::chrono::steady_clock::time_point::max();
	printf("maxsec: %ld\n", CountSeconds(max_time));

	bool reg = false;
	for (int i = 0; i < 3 && !reg; ++i) {
		ProcInfo proc;
		proc.set_proc_id("demo_client");
		proc.set_public_info("public info of demo_client. etc...");
		// The registration call was lost; BHRegister and its argument order
		// are assumed from the surrounding reply/BHFree usage.
		std::string s(proc.SerializeAsString());
		void *reply = 0;
		int reply_len = 0;
		reg = BHRegister(s.data(), s.size(), &reply, &reply_len, 2000);
		printf("register %s\n", reg ? "ok" : "failed");
		BHFree(reply, reply_len);
		Sleep(1s);
	}
	if (!reg) {
		return;
	}

	const std::string topic_ = "topic_";

	{ // Server Register Topics
		MsgTopicList topics;
		for (int i = 0; i < 10; ++i) {
			topics.add_topic_list(topic_ + std::to_string(i));
		}
		// Call reconstructed: BHRegisterTopics and its argument order are
		// assumed, mirroring the Subscribe block below.
		std::string s(topics.SerializeAsString());
		void *reply = 0;
		int reply_len = 0;
		bool r = BHRegisterTopics(s.data(), s.size(), &reply, &reply_len, 1000);
		BHFree(reply, reply_len);
		printf("register topics : %s\n", r ? "ok" : "failed");
		Sleep(1s);
	}

	{ // Subscribe
		MsgTopicList topics;
		for (int i = 0; i < 10; ++i) {
			topics.add_topic_list(topic_ + std::to_string(i * 2));
		}
		// Call reconstructed: BHSubscribeTopics is assumed.
		std::string s(topics.SerializeAsString());
		void *reply = 0;
		int reply_len = 0;
		bool r = BHSubscribeTopics(s.data(), s.size(), &reply, &reply_len, 1000);
		BHFree(reply, reply_len);
		printf("subscribe topic : %s\n", r ? "ok" : "failed");
	}

	auto ServerLoop = [&](std::atomic<bool> *run) {
		while (*run) {
			void *proc_id = 0;
			int proc_id_len = 0;
			DEFER1(BHFree(proc_id, proc_id_len););
			void *input = 0;
			int input_len = 0;
			DEFER1(BHFree(input, input_len));
			void *src = 0;
			if (BHReadRequest(&proc_id, &proc_id_len, &input, &input_len, &src, 10) && src) {
				MsgRequestTopic request;
				if (request.ParseFromArray(input, input_len)) {
					MsgRequestTopicReply reply;
					reply.set_data(" reply: " + request.data());
					std::string s(reply.SerializeAsString());
					// printf("%s", reply.data().c_str());
					BHSendReply(src, s.data(), s.size());
					++Status().nserved_;
				}
				src = 0;
			}
		}
	};
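
	// ServerLoop polls BHReadRequest with a 10 ms timeout. proc_id and input
	// are allocated by the library and owned by the caller, hence the
	// DEFER1(BHFree(...)) guards; src is an opaque reply handle that is
	// handed back via BHSendReply.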

	auto SyncRequest = [&](int idx) { // SyncRequest
		MsgRequestTopic req;
		req.set_topic(topic_ + std::to_string(idx));
		req.set_data("request_data_" + std::to_string(idx));
		std::string s(req.SerializeAsString());
		// Sleep(10ms, false);
		std::string dest(BHAddress().SerializeAsString());
		void *proc_id = 0;
		int proc_id_len = 0;
		DEFER1(BHFree(proc_id, proc_id_len););
		void *reply = 0;
		int reply_len = 0;
		DEFER1(BHFree(reply, reply_len));
		bool r = BHRequest(dest.data(), dest.size(), s.data(), s.size(), &proc_id, &proc_id_len, &reply, &reply_len, 100);
		if (!r) {
			int ec = 0;
			std::string msg;
			GetLastError(ec, msg);
			printf("request error: %s\n", msg.c_str());
		} else {
			MsgRequestTopicReply ret;
			ret.ParseFromArray(reply, reply_len);
			printf("request result: %s\n", ret.data().c_str());
		}
	};
	{ // Publish
		for (int i = 0; i < 1; ++i) {
			MsgPublish pub;
			pub.set_topic(topic_ + std::to_string(i));
			pub.set_data("pub_data_" + std::string(1024 * 1, 'a'));
			std::string s(pub.SerializeAsString());
			BHPublish(s.data(), s.size(), 0);
			// Sleep(1s);
		}
	}

	// The asyncRequest lambda header was lost; it is reconstructed from the
	// threads.Launch(asyncRequest, nreq) call site below. Topic and payload
	// choices are assumptions.
	auto asyncRequest = [&](const uint64_t nreq) {
		for (uint64_t n = 0; n < nreq; ++n) {
			MsgRequestTopic req;
			req.set_topic(topic_ + std::to_string(0));
			req.set_data("request_data_" + std::to_string(n));
			std::string s(req.SerializeAsString());
			void *msg_id = 0;
			int len = 0;
			DEFER1(BHFree(msg_id, len););
			// Sleep(10ms, false);
			std::string dest(BHAddress().SerializeAsString());
			bool r = BHAsyncRequest(dest.data(), dest.size(), s.data(), s.size(), 0, 0);
			if (r) {
				++Status().nrequest_;
			} else {
				printf("request failed\n"); // else branch body was lost
			}
		}
	};

	auto hb = [](std::atomic<bool> *run) {
		while (*run) {
			Sleep(1s, false);
			bool r = BHHeartbeatEasy(1000);
			printf("heartbeat: %s\n", r ? "ok" : "failed");
		}
	};

	// showStatus is referenced below but its definition was lost; this
	// minimal version dumps the request/served counters once per second.
	auto showStatus = [](std::atomic<bool> *run) {
		while (*run) {
			Sleep(1s, false);
			printf("nrequest: %ld, nserved: %ld\n",
			       Status().nrequest_.load(),
			       Status().nserved_.load());
		}
	};

	std::atomic<bool> run(true);

	BHStartWorker(&ServerProc, &SubRecvProc, &ClientProc);
	ThreadManager threads;
	boost::timer::auto_cpu_timer timer;
	threads.Launch(hb, &run);
	threads.Launch(showStatus, &run);
	int ncli = 10;
	const uint64_t nreq = 1000 * 10;
	for (int i = 0; i < ncli; ++i) {
		threads.Launch(asyncRequest, nreq);
	}