
void *Ptr(const Offset offset) { return reinterpret_cast<void *>(offset); }
} // namespace

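// RobustMutex: thin wrapper around a robust pthread mutex so a lock held by a
// crashed owner can be detected and recovered instead of blocking forever.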
class RobustMutex
{
public:
    RobustMutex()
    {
        pthread_mutexattr_t attr;
        pthread_mutexattr_init(&attr);
        // Robust + process-shared, so a lock abandoned by a dead process is
        // reported as EOWNERDEAD instead of deadlocking the next locker.
        pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
        pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
        init_rc_ = pthread_mutex_init(mtx(), &attr);
        pthread_mutexattr_destroy(&attr);
        if (!valid()) {
            throw std::runtime_error("init mutex error.");
        }
    }
    int TryLock() { return pthread_mutex_trylock(mtx()); }
    int Lock() { return pthread_mutex_lock(mtx()); } // may return EOWNERDEAD for a robust mutex
    int Unlock() { return pthread_mutex_unlock(mtx()); }
    bool valid() const { return init_rc_ == 0; }

private:
    pthread_mutex_t *mtx() { return &mutex_; }
    pthread_mutex_t mutex_;
    int init_rc_ = -1;
};

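// MutexTest: exercises shared-memory message reference counting, then drives a
// register/subscribe/publish/request round trip through the bus API.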
BOOST_AUTO_TEST_CASE(MutexTest)
{
    SharedMemory &shm = TestShm();
    MsgI::BindShm(shm);

    void *base_ptr = shm.get_address();
    auto PrintPtr = [&](void *p) {
        printf("addr: %ld, ptr: %p, offset: %ld\n", Addr(p), p, Addr(p) - Addr(base_ptr));
    };

    printf("base ");
    PrintPtr(base_ptr);

    // Pair up AddRef/Release calls on a shared-memory message and watch the count.
    MsgI msg;
    msg.Make("string data");
    for (int i = 0; i < 10; ++i) {
        int n = msg.AddRef();
        printf("add %d ref: %d\n", i, n);
    }
    for (int i = 0; i < 10; ++i) {
        int n = msg.Release();
        printf("release %d, ref : %d\n", i, n);
    }
    std::this_thread::sleep_for(1s);
    msg.Release();
    GlobalInit(shm);

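    // Named objects created (or found) directly in the managed shared-memory segment.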
    const std::string mtx_name("test_mutex");
    const std::string int_name("test_int");
    auto mtx = shm.FindOrCreate<Mutex>(mtx_name);
    auto pi = shm.FindOrCreate<int>(int_name, 100);

    printf("mutex ");
    PrintPtr(mtx);
    printf("int ");
    PrintPtr(pi);

    typedef std::chrono::steady_clock Clock;
    auto Now = []() { return Clock::now().time_since_epoch(); };

    const std::string topic_ = "topic_";

    {
        { // Server Register Topics
            MsgTopicList topics;
            for (int i = 0; i < 10; ++i) {
                topics.add_topic_list(topic_ + std::to_string(i));
            }
            std::string s(topics.SerializeAsString());
            void *reply = 0;
            int reply_len = 0;
            DEFER1(BHFree(reply, reply_len));
            // Registration call reconstructed here; BHRegisterTopics is an assumed bh_api name.
            bool r = BHRegisterTopics(s.data(), s.size(), &reply, &reply_len, 1000);
            printf("register topics: %s\n", r ? "ok" : "failed");
            Sleep(1s);
        }

        {
            { // Subscribe
                MsgTopicList topics;
                for (int i = 0; i < 10; ++i) {
                    topics.add_topic_list(topic_ + std::to_string(i * 2));
                }
                std::string s(topics.SerializeAsString());
                void *reply = 0;
                int reply_len = 0;
                DEFER1(BHFree(reply, reply_len));
                // Subscribe call reconstructed here; BHSubscribeTopics is an assumed bh_api name.
                bool r = BHSubscribeTopics(s.data(), s.size(), &reply, &reply_len, 1000);
                printf("subscribe topic : %s\n", r ? "ok" : "failed");
            }

            BHStartWorker(&ServerProc, &SubRecvProc, &ClientProc);
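
            // ServerLoop: poll the bus for incoming requests and answer each one inline.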
            auto ServerLoop = [&](std::atomic<bool> *run) {
                while (*run) {
                    void *proc_id = 0;
                    int proc_id_len = 0;
                    DEFER1(BHFree(proc_id, proc_id_len););
                    void *input = 0;
                    int input_len = 0;
                    DEFER1(BHFree(input, input_len));
                    void *src = 0;
                    if (BHReadRequest(&proc_id, &proc_id_len, &input, &input_len, &src, 10) && src) {
                        MsgRequestTopic request;
                        if (request.ParseFromArray(input, input_len)) {
                            MsgRequestTopicReply reply;
                            reply.set_data(" reply: " + request.data());
                            std::string s(reply.SerializeAsString());
                            // printf("%s", reply.data().c_str());
                            BHSendReply(src, s.data(), s.size());
                            ++Status().nserved_;
                        }
                        src = 0;
                    }
                }
            };

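            // SyncRequest: blocking request/reply against the bus address with a 100 ms
            // timeout; on failure it prints the last recorded error message.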
            auto SyncRequest = [&](int idx) { // SyncRequest
                MsgRequestTopic req;
                req.set_topic(topic_ + std::to_string(idx));
                req.set_data("request_data_" + std::to_string(idx));
                std::string s(req.SerializeAsString());
                // Sleep(10ms, false);
                std::string dest(BHAddress().SerializeAsString());
                void *proc_id = 0;
                int proc_id_len = 0;
                DEFER1(BHFree(proc_id, proc_id_len););
                void *reply = 0;
                int reply_len = 0;
                DEFER1(BHFree(reply, reply_len));
                bool r = BHRequest(dest.data(), dest.size(), s.data(), s.size(), &proc_id, &proc_id_len, &reply, &reply_len, 100);
                if (!r) {
                    int ec = 0;
                    std::string msg;
                    GetLastError(ec, msg);
                    printf("request error: %s\n", msg.c_str());
                } else {
                    MsgRequestTopicReply ret;
                    ret.ParseFromArray(reply, reply_len);
                    printf("request result: %s\n", ret.data().c_str());
                }
            };
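
            // Helpers for the load phase: a single publish, a bulk async requester,
            // and a periodic heartbeat.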
            { // Publish (reconstructed; BHPublish is an assumed bh_api name)
                for (int i = 0; i < 1; ++i) {
                    MsgPublish pub;
                    pub.set_topic(topic_ + std::to_string(i));
                    pub.set_data("pub_data_" + std::to_string(i));
                    std::string s(pub.SerializeAsString());
                    bool r = BHPublish(s.data(), s.size(), 100);
                    printf("publish: %s\n", r ? "ok" : "failed");
                }
            }

            auto asyncRequest = [&](const uint64_t nreq) { // fire-and-forget requests
                for (uint64_t i = 0; i < nreq; ++i) {
                    MsgRequestTopic req;
                    req.set_topic(topic_ + std::to_string(0));
                    req.set_data("request_data_" + std::to_string(i));
                    std::string s(req.SerializeAsString());
                    void *msg_id = 0;
                    int len = 0;
                    DEFER1(BHFree(msg_id, len););
                    std::string dest(BHAddress().SerializeAsString());
                    bool r = BHAsyncRequest(dest.data(), dest.size(), s.data(), s.size(), 0, 0);
                    if (r) {
                        ++Status().nrequest_;
                    } else {
                        printf("async request failed\n");
                    }
                }
            };

            auto hb = [&](std::atomic<bool> *run) { // heartbeat loop; BHHeartbeatEasy is an assumed name
                while (*run) {
                    bool r = BHHeartbeatEasy(100);
                    printf("heartbeat: %s\n", r ? "ok" : "failed");
                    Sleep(1s, false);
                }
            };

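            // Run heartbeat, server loop, status printer, and ncli async clients
            // concurrently, then issue a few synchronous requests from this thread.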
            std::atomic<bool> run(true);

            ThreadManager threads;
            boost::timer::auto_cpu_timer timer;
            threads.Launch(hb, &run);
            threads.Launch(ServerLoop, &run);
            threads.Launch(showStatus, &run);
            int ncli = 10;
            // const uint64_t nreq = 1000 * 100;
            const uint64_t nreq = 1000 * 1;
            for (int i = 0; i < ncli; ++i) {
                threads.Launch(asyncRequest, nreq);
            }

            for (int i = 0; i < 10; ++i) {
                SyncRequest(i);
            }
            // run.store(false);
            // server_thread.join();
            // return;

            int same = 0;
            int64_t last = 0;
            // Wait until every async reply has arrived, or the reply count stalls.
            while (last < nreq * ncli && same < 2) {
                Sleep(1s, false);
                auto cur = Status().nreply_.load();
                if (last == cur) {