| | |
| | | const int proc_id_len, |
| | | const void *data, |
| | | const int data_len, |
| | | const void *tag) |
| | | void *src) |
| | | { |
| | | // printf("ServerProc: "); |
| | | // DEFER1(printf("\n");); |
| | |
| | | reply.set_data(" reply: " + request.data()); |
| | | std::string s(reply.SerializeAsString()); |
| | | // printf("%s", reply.data().c_str()); |
| | | BHServerCallbackReply(tag, s.data(), s.size()); |
| | | BHSendReply(src, s.data(), s.size()); |
| | | ++Status().nserved_; |
| | | } |
| | | } |
| | |
| | | // printf("client Recv reply : %s\n", reply.data().c_str()); |
| | | } |
| | | |
| | | class TLMutex |
| | | { |
| | | // typedef boost::interprocess::interprocess_mutex MutexT; |
| | | typedef CasMutex MutexT; |
| | | // typedef std::mutex MutexT; |
| | | typedef std::chrono::steady_clock Clock; |
| | | typedef Clock::duration Duration; |
| | | static Duration Now() { return Clock::now().time_since_epoch(); } |
| | | |
| | | const Duration limit_; |
| | | std::atomic<Duration> last_lock_time_; |
| | | MutexT mutex_; |
| | | |
| | | public: |
| | | struct Status { |
| | | int64_t nlock_ = 0; |
| | | int64_t nupdate_time_fail = 0; |
| | | int64_t nfail = 0; |
| | | int64_t nexcept = 0; |
| | | }; |
| | | Status st_; |
| | | |
| | | explicit TLMutex(Duration limit) : |
| | | limit_(limit) {} |
| | | TLMutex() : |
| | | TLMutex(std::chrono::seconds(1)) {} |
| | | ~TLMutex() { static_assert(std::is_pod<Duration>::value); } |
| | | bool try_lock() |
| | | { |
| | | if (mutex_.try_lock()) { |
| | | auto old_time = last_lock_time_.load(); |
| | | if (Now() - old_time > limit_) { |
| | | return last_lock_time_.compare_exchange_strong(old_time, Now()); |
| | | } else { |
| | | last_lock_time_.store(Now()); |
| | | return true; |
| | | } |
| | | } else { |
| | | auto old_time = last_lock_time_.load(); |
| | | if (Now() - old_time > limit_) { |
| | | return last_lock_time_.compare_exchange_strong(old_time, Now()); |
| | | } else { |
| | | return false; |
| | | } |
| | | } |
| | | } |
| | | void lock() |
| | | { |
| | | int n = 0; |
| | | while (!try_lock()) { |
| | | n++; |
| | | std::this_thread::yield(); |
| | | } |
| | | st_.nlock_ += n; |
| | | } |
| | | void unlock() |
| | | { |
| | | auto old_time = last_lock_time_.load(); |
| | | if (Now() - old_time > limit_) { |
| | | } else { |
| | | if (last_lock_time_.compare_exchange_strong(old_time, Now())) { |
| | | mutex_.unlock(); |
| | | } |
| | | } |
| | | } |
| | | }; |
| | | |
namespace
{
// Shared-memory-friendly handle helpers: flatten a pointer into a 64-bit
// integer offset and restore it.  The two conversions are exact inverses
// on the same process/address space.
using Offset = int64_t;
Offset Addr(void *ptr) { return reinterpret_cast<Offset>(ptr); }
void *Ptr(const Offset offset) { return reinterpret_cast<void *>(offset); }
} // namespace
| | | |
// Wrapper around a robust pthread mutex: if the owning thread/process dies
// while holding it, the next locker receives EOWNERDEAD instead of
// deadlocking.  Lock/TryLock/Unlock return the raw pthread error codes.
// NOTE(review): for cross-process use in shared memory this would also need
// PTHREAD_PROCESS_SHARED on the attribute — confirm intended scope.
class RobustMutex
{
public:
    RobustMutex()
    {
        pthread_mutexattr_t attr;
        pthread_mutexattr_init(&attr);
        // BUG FIX: pass PTHREAD_MUTEX_ROBUST rather than the magic number 1;
        // the constant's numeric value is not portable across libcs.
        pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
        // BUG FIX: record whether init actually succeeded; valid() used to
        // return false unconditionally, so this constructor always threw.
        valid_ = (pthread_mutex_init(mtx(), &attr) == 0);
        pthread_mutexattr_destroy(&attr); // BUG FIX: attribute object was leaked
        if (!valid()) {
            throw("init mutex error."); // kept as-is: callers may catch const char*
        }
    }
    // BUG FIX: the mutex was never destroyed.
    ~RobustMutex()
    {
        if (valid_) {
            pthread_mutex_destroy(mtx());
        }
    }
    int TryLock() { return pthread_mutex_trylock(mtx()); }
    int Lock() { return pthread_mutex_lock(mtx()); }
    int Unlock() { return pthread_mutex_unlock(mtx()); }
    // True once pthread_mutex_init has succeeded.
    bool valid() const { return valid_; }

private:
    pthread_mutex_t *mtx() { return &mutex_; }
    pthread_mutex_t mutex_;
    bool valid_ = false;
};
| | | |
| | | BOOST_AUTO_TEST_CASE(MutexTest) |
| | | { |
| | | const std::string shm_name("ShmMutex"); |
| | | // ShmRemover auto_remove(shm_name); |
| | | SharedMemory shm(shm_name, 1024 * 1024 * 10); |
| | | SharedMemory &shm = TestShm(); |
| | | GlobalInit(shm); |
| | | |
| | | const std::string mtx_name("test_mutex"); |
| | | const std::string int_name("test_int"); |
| | | auto mtx = shm.find_or_construct<Mutex>(mtx_name.c_str())(); |
| | | auto pi = shm.find_or_construct<int>(int_name.c_str())(100); |
| | | auto mtx = shm.FindOrCreate<Mutex>(mtx_name); |
| | | auto pi = shm.FindOrCreate<int>(int_name, 100); |
| | | |
| | | typedef std::chrono::steady_clock Clock; |
| | | auto Now = []() { return Clock::now().time_since_epoch(); }; |
| | | if (pi) { |
| | | auto old = *pi; |
| | | printf("int : %d, add1: %d\n", old, ++*pi); |
| | | } |
| | | |
| | | { |
| | | boost::timer::auto_cpu_timer timer; |
| | | printf("test time: "); |
| | | TLMutex mutex; |
| | | // CasMutex mutex; |
| | | auto Lock = [&]() { |
| | | for (int i = 0; i < 10; ++i) { |
| | | mutex.lock(); |
| | | mutex.unlock(); |
| | | } |
| | | }; |
| | | std::thread t1(Lock), t2(Lock); |
| | | t1.join(); |
| | | t2.join(); |
| | | printf("mutex nlock: %ld, update time error: %ld, normal fail: %ld, error wait: %ld\n", |
| | | mutex.st_.nlock_, |
| | | mutex.st_.nupdate_time_fail, |
| | | mutex.st_.nfail, |
| | | mutex.st_.nexcept); |
| | | } |
| | | |
| | | auto MSFromNow = [](const int ms) { |
| | | using namespace boost::posix_time; |
| | | ptime cur = boost::posix_time::microsec_clock::universal_time(); |
| | | return cur + millisec(ms); |
| | | }; |
| | | |
| | | auto TryLock = [&]() { |
| | | if (mtx->try_lock()) { |
| | |
| | | if (mtx) { |
| | | printf("mtx exists\n"); |
| | | if (TryLock()) { |
| | | if (TryLock()) { |
| | | Unlock(); |
| | | } |
| | | auto op = [&]() { |
| | | if (TryLock()) { |
| | | Unlock(); |
| | | } |
| | | }; |
| | | op(); |
| | | std::thread t(op); |
| | | t.join(); |
| | | // Unlock(); |
| | | } else { |
| | | // mtx->unlock(); |
| | | } |
| | | } else { |
| | | printf("mtx not exists\n"); |
| | |
| | | printf("maxsec: %ld\n", CountSeconds(max_time)); |
| | | |
| | | bool reg = false; |
| | | for (int i = 0; i < 10 && !reg; ++i) { |
| | | for (int i = 0; i < 3 && !reg; ++i) { |
| | | ProcInfo proc; |
| | | proc.set_proc_id("demo_client"); |
| | | proc.set_public_info("public info of demo_client. etc..."); |
| | |
| | | BHFree(reply, reply_len); |
| | | Sleep(1s); |
| | | } |
| | | if (!reg) { |
| | | return; |
| | | } |
| | | |
| | | const std::string topic_ = "topic_"; |
| | | |
| | | { |
| | | { // Server Register Topics |
| | | MsgTopicList topics; |
| | | for (int i = 0; i < 10; ++i) { |
| | | topics.add_topic_list(topic_ + std::to_string(i)); |
| | |
| | | Sleep(1s); |
| | | } |
| | | |
| | | { |
| | | { // Subscribe |
| | | MsgTopicList topics; |
| | | for (int i = 0; i < 10; ++i) { |
| | | topics.add_topic_list(topic_ + std::to_string(i * 2)); |
| | |
| | | printf("subscribe topic : %s\n", r ? "ok" : "failed"); |
| | | } |
| | | |
| | | BHStartWorker(&ServerProc, &SubRecvProc, &ClientProc); |
| | | // BHStartWorker(&ServerProc, &SubRecvProc, &ClientProc); |
| | | auto ServerLoop = [&](std::atomic<bool> *run) { |
| | | while (*run) { |
| | | void *proc_id = 0; |
| | | int proc_id_len = 0; |
| | | DEFER1(BHFree(proc_id, proc_id_len);); |
| | | void *input = 0; |
| | | int input_len = 0; |
| | | DEFER1(BHFree(input, input_len)); |
| | | void *src = 0; |
| | | if (BHReadRequest(&proc_id, &proc_id_len, &input, &input_len, &src, 10) && src) { |
| | | |
| | | MsgRequestTopic request; |
| | | if (request.ParseFromArray(input, input_len)) { |
| | | MsgRequestTopicReply reply; |
| | | reply.set_data(" reply: " + request.data()); |
| | | std::string s(reply.SerializeAsString()); |
| | | // printf("%s", reply.data().c_str()); |
| | | BHSendReply(src, s.data(), s.size()); |
| | | ++Status().nserved_; |
| | | } |
| | | src = 0; |
| | | } |
| | | } |
| | | }; |
| | | |
| | | auto SyncRequest = [&](int idx) { // SyncRequest |
| | | MsgRequestTopic req; |
| | | req.set_topic(topic_ + std::to_string(idx)); |
| | | req.set_data("request_data_" + std::to_string(idx)); |
| | | std::string s(req.SerializeAsString()); |
| | | // Sleep(10ms, false); |
| | | std::string dest(BHAddress().SerializeAsString()); |
| | | void *proc_id = 0; |
| | | int proc_id_len = 0; |
| | | DEFER1(BHFree(proc_id, proc_id_len);); |
| | | void *reply = 0; |
| | | int reply_len = 0; |
| | | DEFER1(BHFree(reply, reply_len)); |
| | | bool r = BHRequest(dest.data(), dest.size(), s.data(), s.size(), &proc_id, &proc_id_len, &reply, &reply_len, 100); |
| | | if (!r) { |
| | | int ec = 0; |
| | | std::string msg; |
| | | GetLastError(ec, msg); |
| | | printf("request error: %s\n", msg.c_str()); |
| | | } else { |
| | | MsgRequestTopicReply ret; |
| | | ret.ParseFromArray(reply, reply_len); |
| | | printf("request result: %s\n", ret.data().c_str()); |
| | | } |
| | | }; |
| | | { |
| | | for (int i = 0; i < 1; ++i) { |
| | | MsgPublish pub; |
| | | pub.set_topic(topic_ + std::to_string(i)); |
| | | pub.set_data("pub_data_" + std::string(1024 * 1024, 'a')); |
| | | pub.set_data("pub_data_" + std::string(1024 * 1, 'a')); |
| | | std::string s(pub.SerializeAsString()); |
| | | BHPublish(s.data(), s.size(), 0); |
| | | // Sleep(1s); |
| | |
| | | std::string s(req.SerializeAsString()); |
| | | void *msg_id = 0; |
| | | int len = 0; |
| | | // Sleep(10ms, false); |
| | | bool r = BHAsyncRequest(s.data(), s.size(), 0, 0); |
| | | DEFER1(BHFree(msg_id, len);); |
| | | // Sleep(10ms, false); |
| | | std::string dest(BHAddress().SerializeAsString()); |
| | | bool r = BHAsyncRequest(dest.data(), dest.size(), s.data(), s.size(), 0, 0); |
| | | if (r) { |
| | | ++Status().nrequest_; |
| | | } else { |
| | |
| | | printf("heartbeat: %s\n", r ? "ok" : "failed"); |
| | | } |
| | | }; |
| | | |
| | | std::atomic<bool> run(true); |
| | | |
| | | ThreadManager threads; |
| | | boost::timer::auto_cpu_timer timer; |
| | | threads.Launch(hb, &run); |
| | | threads.Launch(ServerLoop, &run); |
| | | threads.Launch(showStatus, &run); |
| | | int ncli = 10; |
| | | const uint64_t nreq = 1000 * 100; |
| | | const uint64_t nreq = 1000 * 1; |
| | | for (int i = 0; i < ncli; ++i) { |
| | | threads.Launch(asyncRequest, nreq); |
| | | // threads.Launch(asyncRequest, nreq); |
| | | } |
| | | |
| | | for (int i = 0; i < 10; ++i) { |
| | | SyncRequest(i); |
| | | } |
| | | // run.store(false); |
| | | // server_thread.join(); |
| | | // return; |
| | | |
| | | int same = 0; |
| | | int64_t last = 0; |
| | | while (last < nreq * ncli && same < 2) { |
| | | while (last < nreq * ncli && same < 1) { |
| | | Sleep(1s, false); |
| | | auto cur = Status().nreply_.load(); |
| | | if (last == cur) { |