From 5bb28835d06e27dbd960916c9fb11a555fc5a9bc Mon Sep 17 00:00:00 2001
From: lichao <lichao@aiotlink.com>
Date: Mon, 26 Apr 2021 14:24:29 +0800
Subject: [PATCH] add shared lib target.

---
 utest/api_test.cpp |  126 +++++++++++++++++++++++++++++++++++++----
 1 file changed, 112 insertions(+), 14 deletions(-)

diff --git a/utest/api_test.cpp b/utest/api_test.cpp
index a91db43..debe8ad 100644
--- a/utest/api_test.cpp
+++ b/utest/api_test.cpp
@@ -151,19 +151,58 @@
 		}
 		st_.nlock_ += n;
 	}
-	void unlock() { mutex_.unlock(); }
+	void unlock()
+	{
+		auto old_time = last_lock_time_.load();
+		if (Now() - old_time > limit_) {
+		} else {
+			if (last_lock_time_.compare_exchange_strong(old_time, Now())) {
+				mutex_.unlock();
+			}
+		}
+	}
 };
+
+namespace
+{
+typedef int64_t Offset;
+Offset Addr(void *ptr) { return reinterpret_cast<Offset>(ptr); }
+void *Ptr(const Offset offset) { return reinterpret_cast<void *>(offset); }
+} // namespace
+
+class RobustMutex
+{
+public:
+	RobustMutex()
+	{
+		pthread_mutexattr_t attr;
+		pthread_mutexattr_init(&attr);
+		pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
+		pthread_mutex_init(mtx(), &attr); // NOTE(review): for use in shared memory this likely also needs pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED) — confirm intent
+		if (!valid()) {
+			throw("init mutex error.");
+		}
+	}
+	int TryLock() { return pthread_mutex_trylock(mtx()); }
+	int Lock() { return pthread_mutex_lock(mtx()); }
+	int Unlock() { return pthread_mutex_unlock(mtx()); }
+	bool valid() const { return true; } // TODO(review): was `return false`, which made the ctor above throw unconditionally; should track and return the pthread_mutex_init result
+
+private:
+	pthread_mutex_t *mtx() { return &mutex_; }
+	pthread_mutex_t mutex_;
+};
+
 BOOST_AUTO_TEST_CASE(MutexTest)
 {
-	const std::string shm_name("ShmMutex");
-	// ShmRemover auto_remove(shm_name);
-	SharedMemory shm(shm_name, 1024 * 1024 * 10);
+	SharedMemory &shm = TestShm();
+	GlobalInit(shm);
 
 	const std::string mtx_name("test_mutex");
 	const std::string int_name("test_int");
-	auto mtx = shm.find_or_construct<Mutex>(mtx_name.c_str())(3s);
+	auto mtx = shm.FindOrCreate<Mutex>(mtx_name);
+	auto pi = shm.FindOrCreate<int>(int_name, 100);
 
-	auto pi = shm.find_or_construct<int>(int_name.c_str())(100);
 	typedef std::chrono::steady_clock Clock;
 	auto Now = []() { return Clock::now().time_since_epoch(); };
 	if (pi) {
@@ -177,7 +216,7 @@
 		TLMutex mutex;
 		// CasMutex mutex;
 		auto Lock = [&]() {
-			for (int i = 0; i < 1000 * 100; ++i) {
+			for (int i = 0; i < 10; ++i) {
 				mutex.lock();
 				mutex.unlock();
 			}
@@ -268,7 +307,7 @@
 
 	const std::string topic_ = "topic_";
 
-	{
+	{ // Server Register Topics
 		MsgTopicList topics;
 		for (int i = 0; i < 10; ++i) {
 			topics.add_topic_list(topic_ + std::to_string(i));
@@ -282,7 +321,7 @@
 		Sleep(1s);
 	}
 
-	{
+	{ // Subscribe
 		MsgTopicList topics;
 		for (int i = 0; i < 10; ++i) {
 			topics.add_topic_list(topic_ + std::to_string(i * 2));
@@ -295,8 +334,57 @@
 		printf("subscribe topic : %s\n", r ? "ok" : "failed");
 	}
 
-	BHStartWorker(&ServerProc, &SubRecvProc, &ClientProc);
+	// BHStartWorker(&ServerProc, &SubRecvProc, &ClientProc);
+	auto ServerLoop = [&](std::atomic<bool> *run) {
+		while (*run) {
+			void *proc_id = 0;
+			int proc_id_len = 0;
+			DEFER1(BHFree(proc_id, proc_id_len););
+			void *input = 0;
+			int input_len = 0;
+			DEFER1(BHFree(input, input_len));
+			void *src = 0;
+			if (BHReadRequest(&proc_id, &proc_id_len, &input, &input_len, &src, 10) && src) {
 
+				MsgRequestTopic request;
+				if (request.ParseFromArray(input, input_len)) {
+					MsgRequestTopicReply reply;
+					reply.set_data(" reply: " + request.data());
+					std::string s(reply.SerializeAsString());
+					// printf("%s", reply.data().c_str());
+					BHSendReply(src, s.data(), s.size());
+					++Status().nserved_;
+				}
+				src = 0;
+			}
+		}
+	};
+
+	auto SyncRequest = [&](int idx) { // SyncRequest
+		MsgRequestTopic req;
+		req.set_topic(topic_ + std::to_string(idx));
+		req.set_data("request_data_" + std::to_string(idx));
+		std::string s(req.SerializeAsString());
+		// Sleep(10ms, false);
+		std::string dest(BHAddress().SerializeAsString());
+		void *proc_id = 0;
+		int proc_id_len = 0;
+		DEFER1(BHFree(proc_id, proc_id_len););
+		void *reply = 0;
+		int reply_len = 0;
+		DEFER1(BHFree(reply, reply_len));
+		bool r = BHRequest(dest.data(), dest.size(), s.data(), s.size(), &proc_id, &proc_id_len, &reply, &reply_len, 100);
+		if (!r) {
+			int ec = 0;
+			std::string msg;
+			GetLastError(ec, msg);
+			printf("request error: %s\n", msg.c_str());
+		} else {
+			MsgRequestTopicReply ret;
+			ret.ParseFromArray(reply, reply_len);
+			printf("request result: %s\n", ret.data().c_str());
+		}
+	};
 	{
 		for (int i = 0; i < 1; ++i) {
 			MsgPublish pub;
@@ -316,10 +404,10 @@
 			std::string s(req.SerializeAsString());
 			void *msg_id = 0;
 			int len = 0;
+			DEFER1(BHFree(msg_id, len););
 			// Sleep(10ms, false);
 			std::string dest(BHAddress().SerializeAsString());
 			bool r = BHAsyncRequest(dest.data(), dest.size(), s.data(), s.size(), 0, 0);
-			DEFER1(BHFree(msg_id, len););
 			if (r) {
 				++Status().nrequest_;
 			} else {
@@ -355,20 +443,30 @@
 			printf("heartbeat: %s\n", r ? "ok" : "failed");
 		}
 	};
+
 	std::atomic<bool> run(true);
+
 	ThreadManager threads;
 	boost::timer::auto_cpu_timer timer;
 	threads.Launch(hb, &run);
+	threads.Launch(ServerLoop, &run);
 	threads.Launch(showStatus, &run);
 	int ncli = 10;
-	const uint64_t nreq = 1000 * 100;
+	const uint64_t nreq = 1000 * 1;
 	for (int i = 0; i < ncli; ++i) {
-		threads.Launch(asyncRequest, nreq);
+		// threads.Launch(asyncRequest, nreq);
 	}
+
+	for (int i = 0; i < 10; ++i) {
+		SyncRequest(i);
+	}
+	// run.store(false);
+	// server_thread.join();
+	// return;
 
 	int same = 0;
 	int64_t last = 0;
-	while (last < nreq * ncli && same < 2) {
+	while (last < nreq * ncli && same < 1) {
 		Sleep(1s, false);
 		auto cur = Status().nreply_.load();
 		if (last == cur) {

--
Gitblit v1.8.0