From 34cd75f77d0ca94dbdba4e6cc9451fe4d33e78b3 Mon Sep 17 00:00:00 2001
From: lichao <lichao@aiotlink.com>
Date: Wed, 19 May 2021 19:14:13 +0800
Subject: [PATCH] add api BHQueryProcs.

---
 utest/utest.cpp |  274 ++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 231 insertions(+), 43 deletions(-)

diff --git a/utest/utest.cpp b/utest/utest.cpp
index e24d34a..7cb9587 100644
--- a/utest/utest.cpp
+++ b/utest/utest.cpp
@@ -1,15 +1,33 @@
+#include "center.h"
 #include "defs.h"
-#include "pubsub.h"
-#include "socket.h"
+#include "log.h"
 #include "util.h"
 #include <atomic>
-#include <boost/uuid/uuid_generators.hpp>
-#include <boost/uuid/uuid_io.hpp>
 #include <condition_variable>
 #include <stdio.h>
 #include <string>
 #include <thread>
 #include <vector>
+
+namespace
+{
+bool InitLog()
+{
+	ns_log::AddLog("/tmp/bhshmq_test.log", true, true);
+	ns_log::ResetLogLevel(ns_log::LogLevel::debug);
+	return true;
+}
+static bool g_test_init_log = InitLog();
+} // namespace
+
+using namespace bhome_shm;
+using namespace bhome_msg;
+
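+// Shared 512 MB test segment ("utest_0") reused by the test cases below.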
+SharedMemory &TestShm()
+{
+	static SharedMemory shm("utest_0", 1024 * 1024 * 512);
+	return shm;
+}
 
 template <class A, class B>
 struct IsSameType {
@@ -22,7 +40,25 @@
 
 BOOST_AUTO_TEST_CASE(Temp)
 {
-	std::string topics[] = {
+	const std::string shm_name("ShmTemp");
+	ShmRemover auto_remove(shm_name); // remove any leftover segment in case a previous run was killed
+	SharedMemory shm(shm_name, 1024 * 1024 * 10);
+
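+	// Timestamp n yields to get a rough per-thread-switch latency figure.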
+	typedef std::chrono::steady_clock clock;
+	int n = 1000 * 1000;
+	std::vector<clock::time_point> tps(n);
+	{
+		printf("thread switch %d times, ", n);
+		boost::timer::auto_cpu_timer timer;
+		for (auto &tp : tps) {
+			tp = clock::now();
+			std::this_thread::yield();
+		}
+	}
+	printf("time: %ld ns\n", (tps.back() - tps.front()).count());
+	return;
+	// sub topic partial match.
+	Topic topics[] = {
 	    "",
 	    ".",
 	    "a",
@@ -65,93 +101,245 @@
 
 BOOST_AUTO_TEST_CASE(PubSubTest)
 {
-	const std::string shm_name("ShmPubSub");
-	ShmRemover auto_remove(shm_name); //remove twice? in case of killed?
-	SharedMemory shm(shm_name, 1024 * 1024 * 50);
+	SharedMemory &shm = TestShm();
+	GlobalInit(shm);
+
 	auto Avail = [&]() { return shm.get_free_memory(); };
 	auto init_avail = Avail();
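+	// "flag" lives in the shared segment, so its value carries over between test cases that reuse it.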
+	int *flag = shm.FindOrCreate<int>("flag", 123);
+	printf("flag = %d\n", *flag);
+	++*flag;
+	const std::string sub_proc_id = "subscriber";
+	const std::string pub_proc_id = "publisher";
 
-	BusManager bus(shm);
-	bus.Start(1);
-	std::this_thread::sleep_for(100ms);
+	BHCenter center(shm);
+	center.Start();
 
-	std::atomic<uint64_t> count(0);
-	std::atomic<ptime> last_time(Now() - seconds(1));
+	Sleep(100ms);
+
+	std::atomic<uint64_t> total_count(0);
+	std::atomic<int64_t> last_time(NowSec() - 1);
 	std::atomic<uint64_t> last_count(0);
 
-	const uint64_t nmsg = 100;
+	const uint64_t nmsg = 100 * 2;
 	const int timeout = 1000;
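+	// Sub blocks until nmsg * topics.size() messages arrive; Pub publishes nmsg ~1KB messages per topic.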
 	auto Sub = [&](int id, const std::vector<std::string> &topics) {
-		ShmSocket client(ShmSocket::eSockSubscribe, shm);
-		bool r = client.Subscribe(topics, timeout);
+		DemoNode client("client_" + std::to_string(id), shm);
+		MsgTopicList tlist;
+		for (auto &t : topics) {
+			tlist.add_topic_list(t);
+		}
+		MsgCommonReply reply_body;
+		bool r = client.Subscribe(tlist, reply_body, timeout);
+		if (!r) {
+			printf("client subscribe failed.\n");
+		}
 		std::mutex mutex;
 		std::condition_variable cv;
 
-		int i = 0;
-		auto OnRecv = [&](BHMsg &msg) {
-			if (msg.type() != kMsgTypePublish) {
-				BOOST_CHECK(false);
-			}
-			DataPub pub;
-			if (!pub.ParseFromString(msg.body())) {
-				BOOST_CHECK(false);
-			}
-			++count;
+		std::atomic<uint64_t> n(0);
+		auto OnTopicData = [&](const std::string &proc_id, const MsgPublish &pub) {
+			++total_count;
 
-			auto cur = Now();
+			auto cur = NowSec();
 			if (last_time.exchange(cur) < cur) {
 				std::cout << "time: " << cur;
 				printf("sub recv, total msg:%10ld, speed:[%8ld/s], used mem:%8ld \n",
-				       count.load(), count - last_count.exchange(count), init_avail - Avail());
+				       total_count.load(), total_count - last_count.exchange(total_count), init_avail - Avail());
 			}
-			if (++i >= nmsg * topics.size()) {
+			if (++n >= nmsg * topics.size()) {
 				cv.notify_one();
 			}
 			// printf("sub %2d recv: %s/%s\n", id, pub.topic().c_str(), pub.data().c_str());
 		};
-		client.Start(OnRecv);
+		client.SubscribeStartWorker(OnTopicData, 1);
 
 		std::unique_lock<std::mutex> lk(mutex);
 		cv.wait(lk);
 	};
 
 	auto Pub = [&](const std::string &topic) {
-		ShmSocket provider(ShmSocket::eSockPublish, shm);
-		for (int i = 0; i < nmsg; ++i) {
-			std::string data = topic + std::to_string(i) + std::string(1000, '-');
+		DemoNode provider("server_" + topic, shm);
 
-			bool r = provider.Publish(topic, data.data(), data.size(), timeout);
-			// bool r = provider.Send(kBHBusQueueId, MakePub(topic, data.data(), data.size()), timeout);
+		for (unsigned i = 0; i < nmsg; ++i) {
+			std::string data = topic + std::to_string(i) + std::string(1000, '-');
+			MsgPublish pub;
+			pub.set_topic(topic);
+			pub.set_data(data);
+			bool r = provider.Publish(pub, 0);
 			if (!r) {
-				printf("pub ret: %s\n", r ? "ok" : "fail");
+				static std::atomic<int> an(0);
+				int n = ++an;
+				printf("pub %d ret: %s\n", n, r ? "ok" : "fail");
 			}
 		}
 	};
 	ThreadManager threads;
-	typedef std::vector<std::string> Topics;
+	typedef std::vector<Topic> Topics;
 	Topics topics;
 	for (int i = 0; i < 100; ++i) {
 		topics.push_back("t" + std::to_string(i));
 	}
 	Topics part;
-	for (int i = 0; i < topics.size(); ++i) {
+	boost::timer::auto_cpu_timer pubsub_timer;
+	for (size_t i = 0; i < topics.size(); ++i) {
 		part.push_back(topics[i]);
 		threads.Launch(Sub, i, topics);
 	}
-	std::this_thread::sleep_for(100ms);
+	Sleep(100ms);
 	for (auto &topic : topics) {
 		threads.Launch(Pub, topic);
 	}
 	threads.Launch(Pub, "some_else");
 
 	threads.WaitAll();
-	std::cout << "end : " << Now();
-	printf("sub recv, total msg:%10ld, speed:[%8ld/s], used mem:%8ld \n",
-	       count.load(), count - last_count.exchange(count), init_avail - Avail());
 
-	bus.Stop();
+	printf("sub recv, total msg:%10ld, speed:[%8ld/s], used mem:%8ld \n",
+	       total_count.load(), total_count - last_count.exchange(total_count), init_avail - Avail());
 }
 
+namespace
+{
+struct C {
+	C() { printf("+C\n"); }
+	C(const C &c) { printf("+C(const C&)\n"); }
+	void F() { printf("C::F()\n"); }
+	~C() { printf("-C\n"); }
+	char arr[100];
+};
+int F(C &c) { return printf(":::::::::::::F()\n"); }
+} // namespace
+
+BOOST_AUTO_TEST_CASE(ReqRepTest)
+{
+	SharedMemory &shm = TestShm();
+	GlobalInit(shm);
+
+	auto Avail = [&]() { return shm.get_free_memory(); };
+	auto init_avail = Avail();
+	int *flag = shm.FindOrCreate<int>("flag", 123);
+	printf("flag = %d\n", *flag);
+	++*flag;
+
+	const std::string client_proc_id = "client_proc_";
+	const std::string server_proc_id = "server_proc_";
+
+	BHCenter center(shm);
+	center.Start();
+	std::atomic<bool> run(true);
+
+	auto Client = [&](const std::string &topic, const int nreq) {
+		DemoNode client(client_proc_id + topic, shm);
+
+		std::atomic<int> count(0);
+		std::string reply;
+		auto onRecv = [&](const BHMsgHead &head, const MsgRequestTopicReply &msg) {
+			reply = msg.data();
+			if (++count >= nreq) {
+				printf("count: %d\n", count.load());
+			}
+		};
+		MsgRequestTopic req;
+		req.set_topic(topic);
+		req.set_data("data " + std::string(100, 'a'));
+
+		client.ClientStartWorker(onRecv, 2);
+
+		boost::timer::auto_cpu_timer timer;
+		for (int i = 0; i < nreq; ++i) {
+			std::string msg_id;
+			if (!client.ClientAsyncRequest(BHAddress(), req, msg_id)) {
+				printf("client request failed\n");
+				++count;
+			}
+
+			// std::string proc_id;
+			// MsgRequestTopicReply reply;
+			// if (!client.ClientSyncRequest(req, proc_id, reply, 1000)) {
+			// 	printf("client request failed\n");
+			// }
+			// ++count;
+		}
+		do {
+			std::this_thread::sleep_for(100ms);
+		} while (count.load() < nreq);
+		client.Stop();
+		printf("request %s %d done ", topic.c_str(), count.load());
+	};
+
+	std::atomic_uint64_t server_msg_count(0);
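+	// Server: register the RPC topics and echo "topic:data" back for each request until run is cleared.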
+	auto Server = [&](const std::string &name, const std::vector<std::string> &topics) {
+		DemoNode server(name, shm);
+
+		auto onDataSync = [&](const std::string &proc_id, const MsgRequestTopic &request, MsgRequestTopicReply &reply) {
+			++server_msg_count;
+			reply.set_data(request.topic() + ':' + request.data());
+			return true;
+		};
+		auto onDataAsync = [&](void *src, std::string &proc_id, MsgRequestTopic &request) {
+			++server_msg_count;
+			MsgRequestTopicReply reply;
+			reply.set_data(request.topic() + ':' + request.data());
+			server.ServerSendReply(src, reply);
+		};
+		server.ServerStart(onDataAsync);
+
+		MsgTopicList rpc;
+		for (auto &topic : topics) {
+			rpc.add_topic_list(topic);
+		}
+		MsgCommonReply reply_body;
+		if (!server.ServerRegisterRPC(rpc, reply_body, 100)) {
+			printf("server register topic failed\n");
+			return;
+		}
+
+		while (run) {
+			std::this_thread::sleep_for(100ms);
+		}
+	};
+	ThreadManager clients, servers;
+	std::vector<Topic> topics = {"topic1", "topic2"};
+	servers.Launch(Server, "server", topics);
+	Sleep(100ms);
+	for (auto &t : topics) {
+		clients.Launch(Client, t, 1000 * 100 * 2);
+	}
+	clients.WaitAll();
+	printf("clients done, server replyed: %ld\n", server_msg_count.load());
+	run = false;
+	servers.WaitAll();
+}
+
+BOOST_AUTO_TEST_CASE(HeartbeatTest)
+{
+	const std::string shm_name("ShmHeartbeat");
+	ShmRemover auto_remove(shm_name);
+	SharedMemory shm(shm_name, 1024 * 1024 * 50);
+
+	BHCenter center(shm);
+	center.Start();
+
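+	// Heartbeat at 1s intervals, pause 4s, then resume to observe how the center handles the gap.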
+	{
+		DemoNode node("demo_node", shm);
+		auto Check = [&]() {
+			bool r = node.Heartbeat(100);
+			printf("hearbeat ret : %s\n", r ? "ok" : "failed");
+		};
+		Check();
+		for (int i = 0; i < 3; ++i) {
+			Sleep(1s);
+			Check();
+		}
+		Sleep(4s);
+		for (int i = 0; i < 2; ++i) {
+			Sleep(1s);
+			Check();
+		}
+	}
+	Sleep(8s);
+}
 inline int MyMin(int a, int b)
 {
 	printf("MyMin\n");

--
Gitblit v1.8.0