From 72851db66655912cb9c92300a80985fb9797d168 Mon Sep 17 00:00:00 2001
From: lichao <lichao@aiotlink.com>
Date: Tue, 01 Jun 2021 16:25:23 +0800
Subject: [PATCH] remove AtomicQueue, not used.
---
utest/api_test.cpp | 396 +++++++++++++++++++++++++++++---------------------------
 1 file changed, 203 insertions(+), 193 deletions(-)
diff --git a/utest/api_test.cpp b/utest/api_test.cpp
index dd59b09..bddcbf7 100644
--- a/utest/api_test.cpp
+++ b/utest/api_test.cpp
@@ -16,14 +16,25 @@
* =====================================================================================
*/
#include "bh_api.h"
+#include "robust.h"
#include "util.h"
#include <atomic>
+#include <boost/lockfree/queue.hpp>
using namespace bhome_msg;
namespace
{
typedef std::atomic<uint64_t> Number;
+
+void GetApiError(int &ec, std::string &msg)
+{
+ void *pmsg = 0;
+ int msg_len = 0;
+ ec = BHGetLastError(&pmsg, &msg_len);
+ msg.assign((char *) pmsg, msg_len);
+ BHFree(pmsg, msg_len);
+}
void Assign(Number &a, const Number &b) { a.store(b.load()); }
struct MsgStatus {
@@ -49,7 +60,6 @@
static MsgStatus st;
return st;
}
-} // namespace
void SubRecvProc(const void *proc_id,
const int proc_id_len,
@@ -59,7 +69,7 @@
std::string proc((const char *) proc_id, proc_id_len);
MsgPublish pub;
pub.ParseFromArray(data, data_len);
- // printf("Sub data, %s : %s\n", pub.topic().c_str(), pub.data().c_str());
+ printf("****************************************** Sub data, %s : %s\n", pub.topic().c_str(), pub.data().c_str());
}
void ServerProc(const void *proc_id,
@@ -96,183 +106,10 @@
// printf("client Recv reply : %s\n", reply.data().c_str());
}
-class TLMutex
-{
- // typedef boost::interprocess::interprocess_mutex MutexT;
- typedef CasMutex MutexT;
- // typedef std::mutex MutexT;
- typedef std::chrono::steady_clock Clock;
- typedef Clock::duration Duration;
- static Duration Now() { return Clock::now().time_since_epoch(); }
-
- const Duration limit_;
- std::atomic<Duration> last_lock_time_;
- MutexT mutex_;
-
-public:
- struct Status {
- int64_t nlock_ = 0;
- int64_t nupdate_time_fail = 0;
- int64_t nfail = 0;
- int64_t nexcept = 0;
- };
- Status st_;
-
- explicit TLMutex(Duration limit) :
- limit_(limit) {}
- TLMutex() :
- TLMutex(std::chrono::seconds(1)) {}
- ~TLMutex() { static_assert(std::is_pod<Duration>::value); }
- bool try_lock()
- {
- if (mutex_.try_lock()) {
- auto old_time = last_lock_time_.load();
- if (Now() - old_time > limit_) {
- return last_lock_time_.compare_exchange_strong(old_time, Now());
- } else {
- last_lock_time_.store(Now());
- return true;
- }
- } else {
- auto old_time = last_lock_time_.load();
- if (Now() - old_time > limit_) {
- return last_lock_time_.compare_exchange_strong(old_time, Now());
- } else {
- return false;
- }
- }
- }
- void lock()
- {
- int n = 0;
- while (!try_lock()) {
- n++;
- std::this_thread::yield();
- }
- st_.nlock_ += n;
- }
- void unlock()
- {
- auto old_time = last_lock_time_.load();
- if (Now() - old_time > limit_) {
- } else {
- if (last_lock_time_.compare_exchange_strong(old_time, Now())) {
- mutex_.unlock();
- }
- }
- }
-};
-
-namespace
-{
-typedef int64_t Offset;
-Offset Addr(void *ptr) { return reinterpret_cast<Offset>(ptr); }
-void *Ptr(const Offset offset) { return reinterpret_cast<void *>(offset); }
} // namespace
-
-BOOST_AUTO_TEST_CASE(MutexTest)
-{
- SharedMemory &shm = TestShm();
- MsgI::BindShm(shm);
-
- void *base_ptr = shm.get_address();
- auto PrintPtr = [&](void *p) {
- printf("addr: %ld, ptr: %p, offset: %ld\n", Addr(p), p, Addr(p) - Addr(base_ptr));
- };
-
- printf("base");
- PrintPtr(base_ptr);
-
- MsgI msg;
- msg.Make("string data");
- for (int i = 0; i < 10; ++i) {
- int n = msg.AddRef();
- printf("add %d ref: %d\n", i, n);
- }
- for (int i = 0; i < 10; ++i) {
- int n = msg.Release();
- printf("release %d, ref : %d\n", i, n);
- }
- std::this_thread::sleep_for(1s);
- msg.Release();
-
- const std::string mtx_name("test_mutex");
- const std::string int_name("test_int");
- auto mtx = shm.FindOrCreate<Mutex>(mtx_name);
- auto pi = shm.FindOrCreate<int>(int_name, 100);
-
- printf("mutetx ");
- PrintPtr(mtx);
- printf("int ");
- PrintPtr(pi);
-
- typedef std::chrono::steady_clock Clock;
- auto Now = []() { return Clock::now().time_since_epoch(); };
- if (pi) {
- auto old = *pi;
- printf("int : %d, add1: %d\n", old, ++*pi);
- }
-
- {
- boost::timer::auto_cpu_timer timer;
- printf("test time: ");
- TLMutex mutex;
- // CasMutex mutex;
- auto Lock = [&]() {
- for (int i = 0; i < 10; ++i) {
- mutex.lock();
- mutex.unlock();
- }
- };
- std::thread t1(Lock), t2(Lock);
- t1.join();
- t2.join();
- printf("mutex nlock: %ld, update time error: %ld, normal fail: %ld, error wait: %ld\n",
- mutex.st_.nlock_,
- mutex.st_.nupdate_time_fail,
- mutex.st_.nfail,
- mutex.st_.nexcept);
- }
-
- auto MSFromNow = [](const int ms) {
- using namespace boost::posix_time;
- ptime cur = boost::posix_time::microsec_clock::universal_time();
- return cur + millisec(ms);
- };
-
- auto TryLock = [&]() {
- if (mtx->try_lock()) {
- printf("try_lock ok\n");
- return true;
- } else {
- printf("try_lock failed\n");
- return false;
- }
- };
- auto Unlock = [&]() {
- mtx->unlock();
- printf("unlocked\n");
- };
-
- if (mtx) {
- printf("mtx exists\n");
- if (TryLock()) {
- auto op = [&]() {
- if (TryLock()) {
- Unlock();
- }
- };
- op();
- std::thread t(op);
- t.join();
- // Unlock();
- } else {
- // mtx->unlock();
- }
- } else {
- printf("mtx not exists\n");
- }
-}
+#include <chrono>
+using namespace std::chrono;
+// using namespace std::chrono_literals;
BOOST_AUTO_TEST_CASE(ApiTest)
{
@@ -290,27 +127,46 @@
printf("maxsec: %ld\n", CountSeconds(max_time));
+ // BHCleanup();
+ // return;
+ const std::string proc_id = "demo_client";
bool reg = false;
for (int i = 0; i < 3 && !reg; ++i) {
ProcInfo proc;
- proc.set_proc_id("demo_client");
+ proc.set_proc_id(proc_id);
proc.set_public_info("public info of demo_client. etc...");
std::string proc_buf(proc.SerializeAsString());
void *reply = 0;
int reply_len = 0;
reg = BHRegister(proc_buf.data(), proc_buf.size(), &reply, &reply_len, 2000);
- printf("register %s\n", reg ? "ok" : "failed");
+ if (reg) {
+ printf("register ok\n");
+ // bool r = BHUnregister(proc_buf.data(), proc_buf.size(), &reply, &reply_len, 2000);
+ // printf("unregister %s\n", r ? "ok" : "failed");
+ // reg = BHRegister(proc_buf.data(), proc_buf.size(), &reply, &reply_len, 2000);
+ // if (!reg) {
+ // int ec = 0;
+ // std::string msg;
+ // GetApiError(ec, msg);
+ // printf("reg error: %s\n", msg.c_str());
+ // }
+ } else {
+ int ec = 0;
+ std::string msg;
+ GetApiError(ec, msg);
+ printf("register failed, %d, %s\n", ec, msg.c_str());
+ }
BHFree(reply, reply_len);
- Sleep(1s);
+ // Sleep(1s);
}
if (!reg) {
return;
}
- const std::string topic_ = "topic_";
+ const std::string topic_ = proc_id + "_topic_";
- {
+ { // Server Register Topics
MsgTopicList topics;
for (int i = 0; i < 10; ++i) {
topics.add_topic_list(topic_ + std::to_string(i));
@@ -319,13 +175,98 @@
void *reply = 0;
int reply_len = 0;
bool r = BHRegisterTopics(s.data(), s.size(), &reply, &reply_len, 1000);
- BHFree(reply, reply_len);
+ DEFER1(BHFree(reply, reply_len));
+ }
+ { // Server Register Topics
+ MsgTopicList topics;
+ topics.add_topic_list("@should_fail");
+ std::string s = topics.SerializeAsString();
+ void *reply = 0;
+ int reply_len = 0;
+ bool r = BHRegisterTopics(s.data(), s.size(), &reply, &reply_len, 1000);
+ DEFER1(BHFree(reply, reply_len));
+ if (!r) {
+ int ec = 0;
+ std::string msg;
+ GetApiError(ec, msg);
+ printf("register rpc failed, %d, %s\n", ec, msg.c_str());
+ }
+ }
+ auto PrintProcs = [](MsgQueryProcReply const &result) {
+ printf("query proc result: %d\n", result.proc_list().size());
+ for (int i = 0; i < result.proc_list().size(); ++i) {
+ auto &info = result.proc_list(i);
+ printf("proc [%d] %s, %s, %s\n\ttopics\n", i,
+ (info.online() ? "online" : "offline"),
+ info.proc().proc_id().c_str(), info.proc().name().c_str());
+ for (auto &t : info.topics().topic_list()) {
+ printf("\t\t %s\n", t.c_str());
+ }
+ printf("\n");
+ }
+ printf("\n");
+ };
+ if (0) {
+ // query procs
+ std::string dest(BHAddress().SerializeAsString());
+ MsgQueryProc query;
+ std::string s = query.SerializeAsString();
+ void *reply = 0;
+ int reply_len = 0;
+ bool r = BHQueryProcs(dest.data(), dest.size(), s.data(), s.size(), &reply, &reply_len, 1000);
+ DEFER1(BHFree(reply, reply_len));
+ MsgQueryProcReply result;
+ if (result.ParseFromArray(reply, reply_len) && IsSuccess(result.errmsg().errcode())) {
+ PrintProcs(result);
+ } else {
+ printf("query proc error\n");
+ }
// printf("register topic : %s\n", r ? "ok" : "failed");
- Sleep(1s);
+ // Sleep(1s);
+ }
+ for (int i = 0; i < 3; ++i) {
+ // query procs with normal topic request
+ MsgRequestTopic req;
+ req.set_topic("#center_query_procs");
+ // req.set_data("{\"proc_id\":\"#center.node\"}");
+ std::string s(req.SerializeAsString());
+ // Sleep(10ms, false);
+ BHAddress host;
+ printf("query with ip set\n");
+ host.set_ip("127.0.0.1");
+ host.set_port(kBHCenterPort);
+ host.set_mq_id(1000011);
+ host.set_abs_addr(10296);
+
+ std::string dest(host.SerializeAsString());
+ void *proc_id = 0;
+ int proc_id_len = 0;
+ DEFER1(BHFree(proc_id, proc_id_len););
+ void *reply = 0;
+ int reply_len = 0;
+ DEFER1(BHFree(reply, reply_len));
+ bool r = BHRequest(dest.data(), dest.size(), s.data(), s.size(), &proc_id, &proc_id_len, &reply, &reply_len, 1000);
+ if (!r) {
+ int ec = 0;
+ std::string msg;
+ GetApiError(ec, msg);
+ printf("topic query proc error: %s\n", msg.c_str());
+ } else {
+ MsgRequestTopicReply ret;
+ ret.ParseFromArray(reply, reply_len);
+ printf("\ntopic query proc : %s\n", ret.data().c_str());
+ // MsgQueryProcReply result;
+ // if (result.ParseFromArray(ret.data().data(), ret.data().size()) && IsSuccess(result.errmsg().errcode())) {
+ // PrintProcs(result);
+ // } else {
+ // printf("topic query proc error\n");
+ // }
+ }
}
- {
+ { // Subscribe
MsgTopicList topics;
+ topics.add_topic_list("#center.node");
for (int i = 0; i < 10; ++i) {
topics.add_topic_list(topic_ + std::to_string(i * 2));
}
@@ -337,13 +278,61 @@
printf("subscribe topic : %s\n", r ? "ok" : "failed");
}
- BHStartWorker(&ServerProc, &SubRecvProc, &ClientProc);
+ auto ServerLoop = [&](std::atomic<bool> *run) {
+ while (*run) {
+ void *proc_id = 0;
+ int proc_id_len = 0;
+ DEFER1(BHFree(proc_id, proc_id_len););
+ void *input = 0;
+ int input_len = 0;
+ DEFER1(BHFree(input, input_len));
+ void *src = 0;
+ if (BHReadRequest(&proc_id, &proc_id_len, &input, &input_len, &src, 10) && src) {
+ MsgRequestTopic request;
+ if (request.ParseFromArray(input, input_len)) {
+ MsgRequestTopicReply reply;
+ reply.set_data(" reply: " + request.data());
+ std::string s(reply.SerializeAsString());
+ // printf("%s", reply.data().c_str());
+ BHSendReply(src, s.data(), s.size());
+ ++Status().nserved_;
+ }
+ src = 0;
+ }
+ }
+ };
+
+ auto SyncRequest = [&](int idx) { // SyncRequest
+ MsgRequestTopic req;
+ req.set_topic(topic_ + std::to_string(0));
+ req.set_data("request_data_" + std::to_string(idx));
+ std::string s(req.SerializeAsString());
+ // Sleep(10ms, false);
+ std::string dest(BHAddress().SerializeAsString());
+ void *proc_id = 0;
+ int proc_id_len = 0;
+ DEFER1(BHFree(proc_id, proc_id_len););
+ void *reply = 0;
+ int reply_len = 0;
+ DEFER1(BHFree(reply, reply_len));
+ bool r = BHRequest(dest.data(), dest.size(), s.data(), s.size(), &proc_id, &proc_id_len, &reply, &reply_len, 1000);
+ if (!r) {
+ int ec = 0;
+ std::string msg;
+ GetApiError(ec, msg);
+ printf("request error: %s\n", msg.c_str());
+ } else {
+ MsgRequestTopicReply ret;
+ ret.ParseFromArray(reply, reply_len);
+ printf("request result: %s\n", ret.data().c_str());
+ }
+ };
{
for (int i = 0; i < 1; ++i) {
MsgPublish pub;
pub.set_topic(topic_ + std::to_string(i));
- pub.set_data("pub_data_" + std::string(1024 * 1, 'a'));
+ pub.set_data("pub_data_" + std::string(104 * 1, 'a'));
std::string s(pub.SerializeAsString());
BHPublish(s.data(), s.size(), 0);
// Sleep(1s);
@@ -358,10 +347,11 @@
std::string s(req.SerializeAsString());
void *msg_id = 0;
int len = 0;
+ DEFER1(BHFree(msg_id, len););
// Sleep(10ms, false);
std::string dest(BHAddress().SerializeAsString());
+
bool r = BHAsyncRequest(dest.data(), dest.size(), s.data(), s.size(), 0, 0);
- DEFER1(BHFree(msg_id, len););
if (r) {
++Status().nrequest_;
} else {
@@ -371,7 +361,7 @@
if (last.exchange(now) < now) {
int ec = 0;
std::string msg;
- GetLastError(ec, msg);
+ GetApiError(ec, msg);
printf("request topic error --------- : %s\n", msg.c_str());
}
}
@@ -397,32 +387,52 @@
printf("heartbeat: %s\n", r ? "ok" : "failed");
}
};
+
std::atomic<bool> run(true);
+
ThreadManager threads;
+
+#if 1
+ BHStartWorker(&ServerProc, &SubRecvProc, &ClientProc);
+#else
+ BHStartWorker(FServerCallback(), &SubRecvProc, &ClientProc);
+ threads.Launch(ServerLoop, &run);
+#endif
+
boost::timer::auto_cpu_timer timer;
threads.Launch(hb, &run);
threads.Launch(showStatus, &run);
int ncli = 10;
- const uint64_t nreq = 1000 * 100;
+ const int64_t nreq = 1000 * 100;
+
+ for (int i = 0; i < 10; ++i) {
+ SyncRequest(i);
+ }
+
for (int i = 0; i < ncli; ++i) {
threads.Launch(asyncRequest, nreq);
}
int same = 0;
- int64_t last = 0;
- while (last < nreq * ncli && same < 2) {
+ uint64_t last = 0;
+ while (last < nreq * ncli && same < 3) {
Sleep(1s, false);
auto cur = Status().nreply_.load();
if (last == cur) {
++same;
+ printf("same %d\n", same);
} else {
last = cur;
same = 0;
}
}
+ Sleep(1s);
+
run = false;
threads.WaitAll();
auto &st = Status();
printf("nreq: %8ld, nsrv: %8ld, nreply: %8ld\n", st.nrequest_.load(), st.nserved_.load(), st.nreply_.load());
+ BHCleanup();
+ printf("after cleanup\n");
}
\ No newline at end of file
--
Gitblit v1.8.0