From 1b167ec5ad101ac44451381e26cc73ab5d67d2a1 Mon Sep 17 00:00:00 2001
From: lichao <lichao@aiotlink.com>
Date: Mon, 26 Apr 2021 16:37:52 +0800
Subject: [PATCH] fix socket busy loop; del locked readall; refactor.

---
 utest/api_test.cpp |  253 +++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 237 insertions(+), 16 deletions(-)

diff --git a/utest/api_test.cpp b/utest/api_test.cpp
index 766c0f8..6682aaf 100644
--- a/utest/api_test.cpp
+++ b/utest/api_test.cpp
@@ -18,6 +18,7 @@
 #include "bh_api.h"
 #include "util.h"
 #include <atomic>
+#include <boost/lockfree/queue.hpp>
 
 using namespace bhome_msg;
 
@@ -49,7 +50,6 @@
 	static MsgStatus st;
 	return st;
 }
-} // namespace
 
 void SubRecvProc(const void *proc_id,
                  const int proc_id_len,
@@ -59,7 +59,7 @@
 	std::string proc((const char *) proc_id, proc_id_len);
 	MsgPublish pub;
 	pub.ParseFromArray(data, data_len);
-	// printf("Sub data, %s : %s\n", pub.topic().c_str(), pub.data().c_str());
+	printf("Sub data, %s : %s\n", pub.topic().c_str(), pub.data().c_str());
 }
 
 void ServerProc(const void *proc_id,
@@ -96,20 +96,182 @@
 	// printf("client Recv reply : %s\n", reply.data().c_str());
 }
 
+class TLMutex // Time-leased lock: a holder is presumed dead once limit_ elapses, so the lock may be stolen. NOTE(review): intentionally racy design for crash recovery — see notes below.
+{
+	typedef boost::interprocess::interprocess_mutex MutexT;
+	// typedef CasMutex MutexT;
+	// typedef std::mutex MutexT;
+	typedef std::chrono::steady_clock Clock;
+	typedef Clock::duration Duration;
+	static Duration Now() { return Clock::now().time_since_epoch(); } // monotonic time as a plain Duration so it fits in std::atomic
+
+	const Duration limit_; // lease length: a lock held longer than this is treated as abandoned
+	std::atomic<Duration> last_lock_time_; // timestamp of the most recent (re)acquisition or refresh
+	MutexT mutex_;
+	bool Expired(const Duration diff) { return diff > limit_; }
+
+public:
+	struct Status { // diagnostic counters; updated without synchronization (best-effort)
+		int64_t nlock_ = 0;
+		int64_t nupdate_time_fail = 0;
+		int64_t nfail = 0;
+		int64_t nexcept = 0;
+	};
+	Status st_;
+
+	explicit TLMutex(Duration limit) :
+	    limit_(limit) {}
+	TLMutex() :
+	    TLMutex(std::chrono::seconds(1)) {} // default lease: 1 second
+	~TLMutex() { static_assert(std::is_pod<Duration>::value); } // Duration must be POD for the atomic/shared-memory usage above; checked here for lack of a better place
+	bool try_lock()
+	{
+		if (mutex_.try_lock()) {
+			auto old_time = last_lock_time_.load();
+			auto cur = Now();
+			if (Expired(cur - old_time)) {
+				return last_lock_time_.compare_exchange_strong(old_time, cur); // NOTE(review): if this CAS loses, we return false while still holding mutex_ — the underlying mutex is leaked; consider unlocking before returning false
+			} else {
+				last_lock_time_.store(Now());
+				return true;
+			}
+		} else {
+			auto old_time = last_lock_time_.load();
+			auto cur = Now();
+			if (Expired(cur - old_time)) {
+				return last_lock_time_.compare_exchange_strong(old_time, cur); // steal path: previous holder presumed dead; CAS winner takes ownership WITHOUT holding mutex_
+			} else {
+				return false;
+			}
+		}
+	}
+	void lock()
+	{
+		int n = 0;
+		while (!try_lock()) { // spin with yield only; no sleep/backoff, so this busy-waits under contention
+			n++;
+			std::this_thread::yield();
+		}
+		st_.nlock_ += n; // unsynchronized statistics update (racy by design)
+	}
+	void unlock()
+	{
+		auto old_time = last_lock_time_.load();
+		auto cur = Now();
+		if (!Expired(cur - old_time)) { // if our lease expired, another thread may have stolen the lock: do not touch mutex_
+			if (last_lock_time_.compare_exchange_strong(old_time, cur)) {
+				mutex_.unlock(); // only the thread whose lease is still current releases the real mutex
+			}
+		}
+	}
+};
+
+//robust attr does NOT work, maybe os does not support it.
+class RobustMutex // pthread robust-mutex wrapper for inter-process use; per the note above, robustness did not take effect on the tested OS
+{
+public:
+	RobustMutex()
+	{
+		pthread_mutexattr_t mutex_attr;
+		auto attr = [&]() { return &mutex_attr; };
+		int r = pthread_mutexattr_init(attr());
+		r |= pthread_mutexattr_setpshared(attr(), PTHREAD_PROCESS_SHARED); // shareable across processes (shared memory)
+		r |= pthread_mutexattr_setrobust_np(attr(), PTHREAD_MUTEX_ROBUST_NP); // request EOWNERDEAD semantics when a holder dies
+		r |= pthread_mutex_init(mtx(), attr());
+		int rob = 0;
+		pthread_mutexattr_getrobust_np(attr(), &rob); // read the attributes back to verify they actually stuck
+		int shared = 0;
+		pthread_mutexattr_getpshared(attr(), &shared);
+		printf("robust : %d, shared : %d\n", rob, shared);
+		r |= pthread_mutexattr_destroy(attr());
+		if (r) {
+			throw("init mutex error."); // NOTE(review): throws a const char*, not std::exception — callers must catch const char*
+		}
+	}
+	~RobustMutex()
+	{
+		pthread_mutex_destroy(mtx());
+	}
+
+public:
+	void lock() { Lock(); } // NOTE(review): return code (e.g. EOWNERDEAD) is discarded; pthread_mutex_consistent is never called, so a recovered mutex stays inconsistent
+	bool try_lock()
+	{
+		int r = TryLock();
+		printf("TryLock ret: %d\n", r); // debug trace of the raw errno-style result
+		return r == 0;
+	}
+
+	void unlock() { Unlock(); }
+
+	// private:
+	int TryLock() { return pthread_mutex_trylock(mtx()); }
+	int Lock() { return pthread_mutex_lock(mtx()); }
+	int Unlock() { return pthread_mutex_unlock(mtx()); }
+
+private:
+	pthread_mutex_t *mtx() { return &mutex_; }
+	pthread_mutex_t mutex_;
+};
+
+class LockFreeQueue // thin wrapper over boost::lockfree::queue; push_back is private and nothing is exposed yet (appears to be scaffolding)
+{
+	typedef int64_t Data;
+	typedef boost::lockfree::queue<Data, boost::lockfree::capacity<1024>> LFQueue; // fixed capacity of 1024; no dynamic node allocation
+	void push_back(Data d) { queue_.push(d); } // NOTE(review): push() returns false when the queue is full — the result is ignored here
+
+private:
+	LFQueue queue_;
+};
+
+} // namespace
+
 BOOST_AUTO_TEST_CASE(MutexTest)
 {
-	const std::string shm_name("ShmMutex");
-	// ShmRemover auto_remove(shm_name);
-	SharedMemory shm(shm_name, 1024 * 1024 * 10);
+	SharedMemory &shm = TestShm();
+	// shm.Remove();
+	// return;
+	GlobalInit(shm);
 
 	const std::string mtx_name("test_mutex");
 	const std::string int_name("test_int");
-	auto mtx = shm.find_or_construct<Mutex>(mtx_name.c_str())();
-	auto pi = shm.find_or_construct<int>(int_name.c_str())(100);
+	auto mtx = shm.FindOrCreate<TLMutex>(mtx_name);
+	auto pi = shm.FindOrCreate<int>(int_name, 100);
+
+	std::mutex m;
+	typedef std::chrono::steady_clock Clock;
+	auto Now = []() { return Clock::now().time_since_epoch(); };
 	if (pi) {
 		auto old = *pi;
 		printf("int : %d, add1: %d\n", old, ++*pi);
 	}
+
+	{
+		boost::timer::auto_cpu_timer timer;
+		printf("test time: ");
+		TLMutex mutex;
+		// CasMutex mutex;
+		auto Lock = [&]() {
+			for (int i = 0; i < 10; ++i) {
+				mutex.lock();
+				mutex.unlock();
+			}
+		};
+		std::thread t1(Lock), t2(Lock);
+		t1.join();
+		t2.join();
+		printf("mutex nlock: %ld, update time error: %ld, normal fail: %ld, error wait: %ld\n",
+		       mutex.st_.nlock_,
+		       mutex.st_.nupdate_time_fail,
+		       mutex.st_.nfail,
+		       mutex.st_.nexcept);
+	}
+
+	auto MSFromNow = [](const int ms) {
+		using namespace boost::posix_time;
+		ptime cur = boost::posix_time::microsec_clock::universal_time();
+		return cur + millisec(ms);
+	};
 
 	auto TryLock = [&]() {
 		if (mtx->try_lock()) {
@@ -128,10 +290,17 @@
 	if (mtx) {
 		printf("mtx exists\n");
 		if (TryLock()) {
-			if (TryLock()) {
-				Unlock();
-			}
+			auto op = [&]() {
+				if (TryLock()) {
+					Unlock();
+				}
+			};
+			op();
+			std::thread t(op);
+			t.join();
 			// Unlock();
+		} else {
+			// mtx->unlock();
 		}
 	} else {
 		printf("mtx not exists\n");
@@ -174,7 +343,7 @@
 
 	const std::string topic_ = "topic_";
 
-	{
+	{ // Server Register Topics
 		MsgTopicList topics;
 		for (int i = 0; i < 10; ++i) {
 			topics.add_topic_list(topic_ + std::to_string(i));
@@ -188,7 +357,7 @@
 		Sleep(1s);
 	}
 
-	{
+	{ // Subscribe
 		MsgTopicList topics;
 		for (int i = 0; i < 10; ++i) {
 			topics.add_topic_list(topic_ + std::to_string(i * 2));
@@ -201,8 +370,56 @@
 		printf("subscribe topic : %s\n", r ? "ok" : "failed");
 	}
 
-	BHStartWorker(&ServerProc, &SubRecvProc, &ClientProc);
+	auto ServerLoop = [&](std::atomic<bool> *run) {
+		while (*run) {
+			void *proc_id = 0;
+			int proc_id_len = 0;
+			DEFER1(BHFree(proc_id, proc_id_len););
+			void *input = 0;
+			int input_len = 0;
+			DEFER1(BHFree(input, input_len));
+			void *src = 0;
+			if (BHReadRequest(&proc_id, &proc_id_len, &input, &input_len, &src, 10) && src) {
 
+				MsgRequestTopic request;
+				if (request.ParseFromArray(input, input_len)) {
+					MsgRequestTopicReply reply;
+					reply.set_data(" reply: " + request.data());
+					std::string s(reply.SerializeAsString());
+					// printf("%s", reply.data().c_str());
+					BHSendReply(src, s.data(), s.size());
+					++Status().nserved_;
+				}
+				src = 0;
+			}
+		}
+	};
+
+	auto SyncRequest = [&](int idx) { // SyncRequest
+		MsgRequestTopic req;
+		req.set_topic(topic_ + std::to_string(idx));
+		req.set_data("request_data_" + std::to_string(idx));
+		std::string s(req.SerializeAsString());
+		// Sleep(10ms, false);
+		std::string dest(BHAddress().SerializeAsString());
+		void *proc_id = 0;
+		int proc_id_len = 0;
+		DEFER1(BHFree(proc_id, proc_id_len););
+		void *reply = 0;
+		int reply_len = 0;
+		DEFER1(BHFree(reply, reply_len));
+		bool r = BHRequest(dest.data(), dest.size(), s.data(), s.size(), &proc_id, &proc_id_len, &reply, &reply_len, 100);
+		if (!r) {
+			int ec = 0;
+			std::string msg;
+			GetLastError(ec, msg);
+			printf("request error: %s\n", msg.c_str());
+		} else {
+			MsgRequestTopicReply ret;
+			ret.ParseFromArray(reply, reply_len);
+			printf("request result: %s\n", ret.data().c_str());
+		}
+	};
 	{
 		for (int i = 0; i < 1; ++i) {
 			MsgPublish pub;
@@ -222,9 +439,10 @@
 			std::string s(req.SerializeAsString());
 			void *msg_id = 0;
 			int len = 0;
-			// Sleep(10ms, false);
-			bool r = BHAsyncRequest(s.data(), s.size(), 0, 0);
 			DEFER1(BHFree(msg_id, len););
+			// Sleep(10ms, false);
+			std::string dest(BHAddress().SerializeAsString());
+			bool r = BHAsyncRequest(dest.data(), dest.size(), s.data(), s.size(), 0, 0);
 			if (r) {
 				++Status().nrequest_;
 			} else {
@@ -260,13 +478,16 @@
 			printf("heartbeat: %s\n", r ? "ok" : "failed");
 		}
 	};
+
 	std::atomic<bool> run(true);
+
+	BHStartWorker(&ServerProc, &SubRecvProc, &ClientProc);
 	ThreadManager threads;
 	boost::timer::auto_cpu_timer timer;
 	threads.Launch(hb, &run);
 	threads.Launch(showStatus, &run);
 	int ncli = 10;
-	const uint64_t nreq = 1000 * 100;
+	const uint64_t nreq = 1000 * 10;
 	for (int i = 0; i < ncli; ++i) {
 		threads.Launch(asyncRequest, nreq);
 	}

--
Gitblit v1.8.0