From 3931f83205f153f2bc7fc36d1a894cdc3f14b4db Mon Sep 17 00:00:00 2001
From: lichao <lichao@aiotlink.com>
Date: Wed, 21 Apr 2021 16:52:51 +0800
Subject: [PATCH] change node socket to vector; try lock free queue.

---
 utest/speed_test.cpp |  116 +++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 66 insertions(+), 50 deletions(-)

diff --git a/utest/speed_test.cpp b/utest/speed_test.cpp
index dc64cc0..86367b9 100644
--- a/utest/speed_test.cpp
+++ b/utest/speed_test.cpp
@@ -26,18 +26,24 @@
 	ShmRemover auto_remove(shm_name);
 	const int mem_size = 1024 * 1024 * 50;
 	MQId id = boost::uuids::random_generator()();
-	const int timeout = 100;
+	const int timeout = 1000;
 	const uint32_t data_size = 4000;
+	const std::string proc_id = "demo_proc";
 
 	auto Writer = [&](int writer_id, uint64_t n) {
 		SharedMemory shm(shm_name, mem_size);
 		ShmMsgQueue mq(shm, 64);
 		std::string str(data_size, 'a');
 		MsgI msg;
+		MsgRequestTopic body;
+		body.set_topic("topic");
+		body.set_data(str);
+		auto head(InitMsgHead(GetType(body), proc_id));
+		msg.MakeRC(shm, head, body);
+		assert(msg.IsCounted());
 		DEFER1(msg.Release(shm););
-		msg.MakeRC(shm, MakeRequest(mq.Id(), "topic", str.data(), str.size()));
+
 		for (uint64_t i = 0; i < n; ++i) {
-			// mq.Send(id, str.data(), str.size(), timeout);
 			mq.Send(id, msg, timeout);
 		}
 	};
@@ -45,8 +51,10 @@
 		SharedMemory shm(shm_name, mem_size);
 		ShmMsgQueue mq(id, shm, 1000);
 		while (*run) {
-			BHMsg msg;
+			MsgI msg;
+			BHMsgHead head;
 			if (mq.Recv(msg, timeout)) {
+				DEFER1(msg.Release(shm));
 				// ok
 			} else if (isfork) {
 				exit(0); // for forked quit after 1s.
@@ -82,6 +90,7 @@
 					www.Launch(Writer, i, nmsg);
 				}
 				www.WaitAll();
+				printf("writer finished\n");
 				run.store(false);
 				rrr.WaitAll();
 				printf("Write %ld msg  R(%3d) W(%3d), : ", total_msg, nreader, nwriter);
@@ -110,66 +119,75 @@
 	const std::string shm_name("ShmSendRecv");
 	ShmRemover auto_remove(shm_name);
 	const int qlen = 64;
-	const size_t msg_length = 1000;
+	const size_t msg_length = 100;
 	std::string msg_content(msg_length, 'a');
 	msg_content[20] = '\0';
+	const std::string client_proc_id = "client_proc";
+	const std::string server_proc_id = "server_proc";
 
-	SharedMemory shm(shm_name, 1024 * 1024 * 50);
+	SharedMemory shm(shm_name, 1024 * 1024 * 512);
 	auto Avail = [&]() { return shm.get_free_memory(); };
 	auto init_avail = Avail();
-	ShmMsgQueue srv(shm, qlen);
-	ShmMsgQueue cli(shm, qlen);
+	ShmSocket srv(shm, qlen);
+	ShmSocket cli(shm, qlen);
 
-	MsgI request_rc;
-	request_rc.MakeRC(shm, MakeRequest(cli.Id(), "topic", msg_content.data(), msg_content.size()));
-	MsgI reply_rc;
-	reply_rc.MakeRC(shm, MakeReply("fakemsgid", msg_content.data(), msg_content.size()));
-
+	int ncli = 1;
+	uint64_t nmsg = 1000 * 1000 * 1;
 	std::atomic<uint64_t> count(0);
 
-	std::atomic<ptime> last_time(Now() - seconds(1));
+	std::atomic<int64_t> last_time(NowSec() - 1);
 	std::atomic<uint64_t> last_count(0);
+
+	auto PrintStatus = [&](int64_t cur) {
+		std::cout << "time: " << cur;
+		printf(", total msg:%10ld, speed:[%8ld/s], used mem:%8ld\n",
+		       count.load(), count - last_count.exchange(count), init_avail - Avail());
+	};
+	auto onRecv = [&](ShmSocket &sock, MsgI &msg, BHMsgHead &head) {
+		++count;
+		auto cur = NowSec();
+		if (last_time.exchange(cur) < cur) {
+			PrintStatus(cur);
+		}
+	};
+	cli.Start(onRecv, 2);
 
 	auto Client = [&](int cli_id, int nmsg) {
 		for (int i = 0; i < nmsg; ++i) {
 			auto Req = [&]() {
-				return cli.Send(srv.Id(), MakeRequest(cli.Id(), "topic", msg_content.data(), msg_content.size()), 100);
+				MsgRequestTopic req_body;
+				req_body.set_topic("topic");
+				req_body.set_data(msg_content);
+				auto req_head(InitMsgHead(GetType(req_body), client_proc_id));
+				req_head.add_route()->set_mq_id(&cli.id(), cli.id().size());
+				return cli.Send(&srv.id(), req_head, req_body);
 			};
-			auto ReqRC = [&]() { return cli.Send(srv.Id(), request_rc, 1000); };
 
-			if (!ReqRC()) {
-				printf("********** client send error.\n");
-				continue;
-			}
-			BHMsg msg;
-			if (!cli.Recv(msg, 1000)) {
-				printf("********** client recv error.\n");
-			} else {
-				++count;
-				auto cur = Now();
-				if (last_time.exchange(cur) < cur) {
-					std::cout << "time: " << cur;
-					printf(", total msg:%10ld, speed:[%8ld/s], used mem:%8ld, refcount:%d\n",
-					       count.load(), count - last_count.exchange(count), init_avail - Avail(), request_rc.Count());
-				}
-			}
+			Req();
 		}
 	};
 
 	std::atomic<bool> stop(false);
 	auto Server = [&]() {
-		BHMsg req;
-		while (!stop) {
-			if (srv.Recv(req, 100) && req.type() == kMsgTypeRequest) {
-				auto &mqid = req.route()[0].mq_id();
-				MQId src_id;
-				memcpy(&src_id, mqid.data(), sizeof(src_id));
-				auto Reply = [&]() {
-					return srv.Send(src_id, MakeReply(req.msg_id(), msg_content.data(), msg_content.size()), 100);
-				};
-				auto ReplyRC = [&]() { return srv.Send(src_id, reply_rc, 100); };
+		MsgI req;
+		BHMsgHead req_head;
 
-				if (ReplyRC()) {
+		while (!stop) {
+			if (srv.SyncRecv(req, req_head, 10)) {
+				DEFER1(req.Release(shm));
+
+				if (req.ParseHead(req_head) && req_head.type() == kMsgTypeRequestTopic) {
+					auto &mqid = req_head.route()[0].mq_id();
+					MQId src_id;
+					memcpy(&src_id, mqid.data(), sizeof(src_id));
+					auto Reply = [&]() {
+						MsgRequestTopic reply_body;
+						reply_body.set_topic("topic");
+						reply_body.set_data(msg_content);
+						auto reply_head(InitMsgHead(GetType(reply_body), server_proc_id, req_head.msg_id()));
+						return srv.Send(&src_id, reply_head, reply_body);
+					};
+					Reply();
 				}
 			}
 		}
@@ -179,18 +197,16 @@
 	DEFER1(printf("Request Reply Test:"););
 
 	ThreadManager clients, servers;
-	for (int i = 0; i < qlen; ++i) { servers.Launch(Server); }
-	int ncli = 100 * 1;
-	uint64_t nmsg = 100 * 100 * 2;
+	for (int i = 0; i < 2; ++i) { servers.Launch(Server); }
 	printf("client threads: %d, msgs : %ld, total msg: %ld\n", ncli, nmsg, ncli * nmsg);
 	for (int i = 0; i < ncli; ++i) { clients.Launch(Client, i, nmsg); }
 	clients.WaitAll();
 	printf("request ok: %ld\n", count.load());
+	do {
+		std::this_thread::sleep_for(100ms);
+	} while (count.load() < ncli * nmsg);
+	PrintStatus(NowSec());
 	stop = true;
 	servers.WaitAll();
-	BOOST_CHECK(request_rc.IsCounted());
-	BOOST_CHECK_EQUAL(request_rc.Count(), 1);
-	request_rc.Release(shm);
-	BOOST_CHECK(!request_rc.IsCounted());
 	// BOOST_CHECK_THROW(reply.Count(), int);
 }

--
Gitblit v1.8.0