From b3b9d91eccd3f54be112ac5389b49969fea93b4c Mon Sep 17 00:00:00 2001
From: lichao <lichao@aiotlink.com>
Date: Wed, 21 Apr 2021 13:22:55 +0800
Subject: [PATCH] trivial.

---
 utest/utest.cpp | 38 ++++++++++++++++++++++++--------------
 1 file changed, 24 insertions(+), 14 deletions(-)

diff --git a/utest/utest.cpp b/utest/utest.cpp
index 12d4396..b2de97f 100644
--- a/utest/utest.cpp
+++ b/utest/utest.cpp
@@ -102,7 +102,7 @@
 	Sleep(100ms);

 	std::atomic<uint64_t> total_count(0);
-	std::atomic<ptime> last_time(Now() - seconds(1));
+	std::atomic<int64_t> last_time(NowSec() - 1);
 	std::atomic<uint64_t> last_count(0);

 	const uint64_t nmsg = 100 * 2;
@@ -125,7 +125,7 @@
 	auto OnTopicData = [&](const std::string &proc_id, const MsgPublish &pub) {
 		++total_count;
-		auto cur = Now();
+		auto cur = NowSec();
 		if (last_time.exchange(cur) < cur) {
 			std::cout << "time: " << cur;
 			printf("sub recv, total msg:%10ld, speed:[%8ld/s], used mem:%8ld \n",
@@ -177,7 +177,7 @@
 	threads.Launch(Pub, "some_else");
 	threads.WaitAll();

-	std::cout << "end : " << Now();
+	printf("sub recv, total msg:%10ld, speed:[%8ld/s], used mem:%8ld \n", total_count.load(), total_count - last_count.exchange(total_count), init_avail - Avail());
 }
@@ -198,7 +198,7 @@
 {
 	const std::string shm_name("ShmReqRep");
 	ShmRemover auto_remove(shm_name);
-	SharedMemory shm(shm_name, 1024 * 1024 * 50);
+	SharedMemory shm(shm_name, 1024 * 1024 * 512);
 	auto Avail = [&]() { return shm.get_free_memory(); };
 	auto init_avail = Avail();
@@ -224,25 +224,29 @@
 			printf("count: %d\n", count.load());
 		}
 	};

+	MsgRequestTopic req;
+	req.set_topic(topic);
+	req.set_data("data " + std::string(100, 'a'));
+	client.ClientStartWorker(onRecv, 2);
+	boost::timer::auto_cpu_timer timer;
 	for (int i = 0; i < nreq; ++i) {
-		MsgRequestTopic req;
-		req.set_topic(topic);
-		req.set_data("data " + std::to_string(i));
 		std::string msg_id;
 		if (!client.ClientAsyncRequest(req, msg_id)) {
 			printf("client request failed\n");
 			++count;
 		}
-		// if (!client.SyncRequest(topic, "data " + std::to_string(i), reply, 1000)) {
+		// std::string proc_id;
+		// MsgRequestTopicReply reply;
+		// if (!client.ClientSyncRequest(req, proc_id, reply, 1000)) {
 		// 	printf("client request failed\n");
 		// }
-		// ++count;
+		// ++count;
 	}

 	do {
-		std::this_thread::yield();
+		std::this_thread::sleep_for(100ms);
 	} while (count.load() < nreq);
 	client.Stop();
 	printf("request %s %d done ", topic.c_str(), count.load());
@@ -252,12 +256,18 @@
 	auto Server = [&](const std::string &name, const std::vector<std::string> &topics) {
 		DemoNode server(name, shm);

-		auto onData = [&](const std::string &proc_id, const MsgRequestTopic &request, MsgRequestTopicReply &reply) {
+		auto onDataSync = [&](const std::string &proc_id, const MsgRequestTopic &request, MsgRequestTopicReply &reply) {
 			++server_msg_count;
 			reply.set_data(request.topic() + ':' + request.data());
 			return true;
 		};
-		server.ServerStart(onData);
+		auto onDataAsync = [&](void *src, std::string &proc_id, MsgRequestTopic &request) {
+			++server_msg_count;
+			MsgRequestTopicReply reply;
+			reply.set_data(request.topic() + ':' + request.data());
+			server.ServerSendReply(src, reply);
+		};
+		server.ServerStart(onDataAsync);

 		MsgTopicList rpc;
 		for (auto &topic : topics) {
@@ -270,7 +280,7 @@
 		}

 		while (run) {
-			std::this_thread::yield();
+			std::this_thread::sleep_for(100ms);
 		}
 	};
 	ThreadManager clients, servers;
@@ -278,7 +288,7 @@
 	servers.Launch(Server, "server", topics);
 	Sleep(100ms);
 	for (auto &t : topics) {
-		clients.Launch(Client, t, 1000 * 100);
+		clients.Launch(Client, t, 1000 * 100 * 2);
 	}
 	clients.WaitAll();
 	printf("clients done, server replyed: %ld\n", server_msg_count.load());
--
Gitblit v1.8.0
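
Note on the change above: the functional core of this patch is the ReqRep test's move from the synchronous server callback (fill in the reply and return true) to the asynchronous one, which receives an opaque src handle and answers through ServerSendReply. The sketch below contrasts the two handler shapes. It is illustrative only: it assumes the DemoNode API exactly as exercised in utest/utest.cpp (ServerStart, ServerSendReply) and the MsgRequestTopic / MsgRequestTopicReply protobuf messages; the wrapper function RegisterHandlersSketch is hypothetical and not part of the test.

	#include <string>
	// The project headers that declare DemoNode and the protobuf messages are
	// assumed to be included here.

	// Sketch only: contrasts the two server handler shapes touched by this patch.
	void RegisterHandlersSketch(DemoNode &server)
	{
		// Synchronous style (old code path): fill the reply in place and
		// return true so the framework sends it back immediately.
		auto onDataSync = [&](const std::string &proc_id, const MsgRequestTopic &request,
		                      MsgRequestTopicReply &reply) {
			reply.set_data(request.topic() + ':' + request.data());
			return true;
		};
		(void)onDataSync; // kept only for comparison, not registered here

		// Asynchronous style (what the patch registers): the handler receives
		// an opaque src handle and replies explicitly via ServerSendReply, so
		// the reply could also be produced later or on another thread.
		auto onDataAsync = [&](void *src, std::string &proc_id, MsgRequestTopic &request) {
			MsgRequestTopicReply reply;
			reply.set_data(request.topic() + ':' + request.data());
			server.ServerSendReply(src, reply);
		};
		server.ServerStart(onDataAsync);
	}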