/*
|
* =====================================================================================
|
*
|
* Filename: api_test.cpp
|
*
|
* Description:
|
*
|
* Version: 1.0
|
* Created: 2021年04月13日 14时31分46秒
|
* Revision: none
|
* Compiler: gcc
|
*
|
* Author: Li Chao (), lichao@aiotlink.com
|
* Organization:
|
*
|
* =====================================================================================
|
*/
|
#include "bh_api.h"
|
#include "util.h"
|
#include <atomic>
|
|
using namespace bhome_msg;
|
|
namespace
{
typedef std::atomic<uint64_t> Number;

// Copy one atomic counter into another (std::atomic itself is not copyable).
void Assign(Number &a, const Number &b) { a.store(b.load()); }

// Aggregated counters updated by the request/reply test callbacks below.
struct MsgStatus {
    Number nrequest_; // requests successfully submitted
    Number nfailed_;  // requests that failed to submit
    Number nreply_;   // replies received by the client callback
    Number nserved_;  // requests answered by the server callback
    // BUGFIX: the original ctor left nfailed_ uninitialized (a
    // default-constructed std::atomic holds an indeterminate value pre-C++20),
    // so the failure counter started at garbage. Initialize all four.
    MsgStatus() :
        nrequest_(0), nfailed_(0), nreply_(0), nserved_(0) {}
    // Member-wise copy via load/store, since atomics delete operator=.
    MsgStatus &operator=(const MsgStatus &a)
    {
        Assign(nrequest_, a.nrequest_);
        Assign(nserved_, a.nserved_);
        Assign(nreply_, a.nreply_);
        Assign(nfailed_, a.nfailed_);
        return *this;
    }
};

// Process-wide singleton shared by the free-function callbacks in this file.
MsgStatus &Status()
{
    static MsgStatus st;
    return st;
}
} // namespace
|
|
void SubRecvProc(const void *proc_id,
|
const int proc_id_len,
|
const void *data,
|
const int data_len)
|
{
|
std::string proc((const char *) proc_id, proc_id_len);
|
MsgPublish pub;
|
pub.ParseFromArray(data, data_len);
|
// printf("Sub data, %s : %s\n", pub.topic().c_str(), pub.data().c_str());
|
}
|
|
void ServerProc(const void *proc_id,
|
const int proc_id_len,
|
const void *data,
|
const int data_len,
|
void *src)
|
{
|
// printf("ServerProc: ");
|
// DEFER1(printf("\n"););
|
MsgRequestTopic request;
|
if (request.ParseFromArray(data, data_len)) {
|
MsgRequestTopicReply reply;
|
reply.set_data(" reply: " + request.data());
|
std::string s(reply.SerializeAsString());
|
// printf("%s", reply.data().c_str());
|
BHSendReply(src, s.data(), s.size());
|
++Status().nserved_;
|
}
|
}
|
|
void ClientProc(const void *proc_id,
|
const int proc_id_len,
|
const void *msg_id,
|
const int msg_id_len,
|
const void *data,
|
const int data_len)
|
{
|
std::string proc((const char *) proc_id, proc_id_len);
|
MsgRequestTopicReply reply;
|
if (reply.ParseFromArray(data, data_len)) {
|
++Status().nreply_;
|
}
|
// printf("client Recv reply : %s\n", reply.data().c_str());
|
}
|
|
class TLMutex
|
{
|
// typedef boost::interprocess::interprocess_mutex MutexT;
|
typedef CasMutex MutexT;
|
// typedef std::mutex MutexT;
|
typedef std::chrono::steady_clock Clock;
|
typedef Clock::duration Duration;
|
static Duration Now() { return Clock::now().time_since_epoch(); }
|
|
const Duration limit_;
|
std::atomic<Duration> last_lock_time_;
|
MutexT mutex_;
|
|
public:
|
struct Status {
|
int64_t nlock_ = 0;
|
int64_t nupdate_time_fail = 0;
|
int64_t nfail = 0;
|
int64_t nexcept = 0;
|
};
|
Status st_;
|
|
explicit TLMutex(Duration limit) :
|
limit_(limit) {}
|
TLMutex() :
|
TLMutex(std::chrono::seconds(1)) {}
|
~TLMutex() { static_assert(std::is_pod<Duration>::value); }
|
bool try_lock()
|
{
|
if (mutex_.try_lock()) {
|
auto old_time = last_lock_time_.load();
|
if (Now() - old_time > limit_) {
|
return last_lock_time_.compare_exchange_strong(old_time, Now());
|
} else {
|
last_lock_time_.store(Now());
|
return true;
|
}
|
} else {
|
auto old_time = last_lock_time_.load();
|
if (Now() - old_time > limit_) {
|
return last_lock_time_.compare_exchange_strong(old_time, Now());
|
} else {
|
return false;
|
}
|
}
|
}
|
void lock()
|
{
|
int n = 0;
|
while (!try_lock()) {
|
n++;
|
std::this_thread::yield();
|
}
|
st_.nlock_ += n;
|
}
|
void unlock() { mutex_.unlock(); }
|
};
|
|
namespace
{
using Offset = int64_t;
// View a raw pointer as its integral address value.
Offset Addr(void *ptr) { return reinterpret_cast<Offset>(ptr); }
// Inverse of Addr: view an integral address value as a raw pointer.
void *Ptr(const Offset offset) { return reinterpret_cast<void *>(offset); }
} // namespace
|
|
// Exercises three things against the shared-memory segment:
//  1. MsgI reference counting (AddRef/Release round trip),
//  2. TLMutex throughput under two contending threads,
//  3. try_lock semantics of a named shared-memory Mutex across threads.
BOOST_AUTO_TEST_CASE(MutexTest)
{
    SharedMemory &shm = TestShm();
    MsgI::BindShm(shm);

    void *base_ptr = shm.get_address();
    // Print a pointer both as an absolute address and as an offset from the
    // segment base; offsets are what stay comparable across processes.
    auto PrintPtr = [&](void *p) {
        printf("addr: %ld, ptr: %p, offset: %ld\n", Addr(p), p, Addr(p) - Addr(base_ptr));
    };

    printf("base");
    PrintPtr(base_ptr);

    // Ref-count smoke test: 10 AddRef then 10 Release should return the
    // count to its initial value; the trailing Release drops the last ref.
    MsgI msg;
    msg.Make("string data");
    for (int i = 0; i < 10; ++i) {
        int n = msg.AddRef();
        printf("add %d ref: %d\n", i, n);
    }
    for (int i = 0; i < 10; ++i) {
        int n = msg.Release();
        printf("release %d, ref : %d\n", i, n);
    }
    std::this_thread::sleep_for(1s);
    msg.Release();

    // Named objects in shared memory: found if a previous run created them,
    // constructed otherwise (find_or_construct).
    const std::string mtx_name("test_mutex");
    const std::string int_name("test_int");
    auto mtx = shm.find_or_construct<Mutex>(mtx_name.c_str())();
    auto pi = shm.find_or_construct<int>(int_name.c_str())(100);

    printf("mutetx ");
    PrintPtr(mtx);
    printf("int ");
    PrintPtr(pi);

    typedef std::chrono::steady_clock Clock;
    auto Now = []() { return Clock::now().time_since_epoch(); };
    if (pi) {
        // Increments persist across runs, since *pi lives in shared memory.
        auto old = *pi;
        printf("int : %d, add1: %d\n", old, ++*pi);
    }

    {
        // Time two threads doing 100k lock/unlock cycles each on TLMutex;
        // auto_cpu_timer prints the elapsed time when the scope exits.
        boost::timer::auto_cpu_timer timer;
        printf("test time: ");
        TLMutex mutex;
        // CasMutex mutex;
        auto Lock = [&]() {
            for (int i = 0; i < 1000 * 100; ++i) {
                mutex.lock();
                mutex.unlock();
            }
        };
        std::thread t1(Lock), t2(Lock);
        t1.join();
        t2.join();
        printf("mutex nlock: %ld, update time error: %ld, normal fail: %ld, error wait: %ld\n",
               mutex.st_.nlock_,
               mutex.st_.nupdate_time_fail,
               mutex.st_.nfail,
               mutex.st_.nexcept);
    }

    // Absolute deadline `ms` milliseconds from now, in the ptime format the
    // interprocess timed-lock APIs expect. NOTE(review): currently unused.
    auto MSFromNow = [](const int ms) {
        using namespace boost::posix_time;
        ptime cur = boost::posix_time::microsec_clock::universal_time();
        return cur + millisec(ms);
    };

    auto TryLock = [&]() {
        if (mtx->try_lock()) {
            printf("try_lock ok\n");
            return true;
        } else {
            printf("try_lock failed\n");
            return false;
        }
    };
    auto Unlock = [&]() {
        mtx->unlock();
        printf("unlocked\n");
    };

    // Probe the shared mutex's try_lock behavior: lock it once here, then
    // try again on this thread and on a second thread while it is held.
    // The final Unlock is deliberately commented out, so the mutex is left
    // locked for the next run to observe.
    if (mtx) {
        printf("mtx exists\n");
        if (TryLock()) {
            auto op = [&]() {
                if (TryLock()) {
                    Unlock();
                }
            };
            op();
            std::thread t(op);
            t.join();
            // Unlock();
        } else {
            // mtx->unlock();
        }
    } else {
        printf("mtx not exists\n");
    }
}
|
|
// End-to-end API exercise: register this process on the bus, register and
// subscribe topics, publish one message, then hammer the async-request path
// from 10 client threads while heartbeat and status-report threads run.
BOOST_AUTO_TEST_CASE(ApiTest)
{
    // Sanity-print how far steady_clock's max time_point reaches.
    auto max_time = std::chrono::steady_clock::time_point::max();
    auto dur = max_time.time_since_epoch();
    auto nsec = std::chrono::duration_cast<std::chrono::seconds>(dur).count();
    auto nmin = nsec / 60;
    auto nhour = nmin / 60;
    auto nday = nhour / 24;
    auto years = nday / 365;
    printf("seconds: %ld, hours: %ld , days:%ld, years: %ld\n",
           nsec, nhour, nday, years);
    std::chrono::steady_clock::duration a(123456);
    printf("nowsec: %ld\n", NowSec());

    printf("maxsec: %ld\n", CountSeconds(max_time));

    // Register this process with the bus; retry up to 3 times, 1s apart.
    bool reg = false;
    for (int i = 0; i < 3 && !reg; ++i) {
        ProcInfo proc;
        proc.set_proc_id("demo_client");
        proc.set_public_info("public info of demo_client. etc...");
        std::string proc_buf(proc.SerializeAsString());
        void *reply = 0;
        int reply_len = 0;
        reg = BHRegister(proc_buf.data(), proc_buf.size(), &reply, &reply_len, 2000);
        printf("register %s\n", reg ? "ok" : "failed");

        BHFree(reply, reply_len);
        Sleep(1s);
    }
    if (!reg) {
        // Nothing below can work without a registered process.
        return;
    }

    const std::string topic_ = "topic_";

    {
        // Register topic_0 .. topic_9 as topics this process serves.
        MsgTopicList topics;
        for (int i = 0; i < 10; ++i) {
            topics.add_topic_list(topic_ + std::to_string(i));
        }
        std::string s = topics.SerializeAsString();
        void *reply = 0;
        int reply_len = 0;
        bool r = BHRegisterTopics(s.data(), s.size(), &reply, &reply_len, 1000);
        BHFree(reply, reply_len);
        // printf("register topic : %s\n", r ? "ok" : "failed");
        Sleep(1s);
    }

    {
        // Subscribe to even-numbered topics (topic_0, topic_2, ... topic_18);
        // only topic_0 .. topic_8 overlap with the registered set above.
        MsgTopicList topics;
        for (int i = 0; i < 10; ++i) {
            topics.add_topic_list(topic_ + std::to_string(i * 2));
        }
        std::string s = topics.SerializeAsString();
        void *reply = 0;
        int reply_len = 0;
        bool r = BHSubscribeTopics(s.data(), s.size(), &reply, &reply_len, 1000);
        BHFree(reply, reply_len);
        printf("subscribe topic : %s\n", r ? "ok" : "failed");
    }

    // Install the three callbacks defined above and start the worker.
    BHStartWorker(&ServerProc, &SubRecvProc, &ClientProc);

    {
        // Publish one ~1KB message on topic_0 (loop kept for easy scaling).
        for (int i = 0; i < 1; ++i) {
            MsgPublish pub;
            pub.set_topic(topic_ + std::to_string(i));
            pub.set_data("pub_data_" + std::string(1024 * 1, 'a'));
            std::string s(pub.SerializeAsString());
            BHPublish(s.data(), s.size(), 0);
            // Sleep(1s);
        }
    }

    // Client worker: fire nreq async requests at topic_0; replies arrive
    // asynchronously via ClientProc, which bumps Status().nreply_.
    auto asyncRequest = [&](uint64_t nreq) {
        for (uint64_t i = 0; i < nreq; ++i) {
            MsgRequestTopic req;
            req.set_topic(topic_ + std::to_string(0));
            req.set_data("request_data_" + std::to_string(i));
            std::string s(req.SerializeAsString());
            void *msg_id = 0;
            int len = 0;
            // Sleep(10ms, false);
            std::string dest(BHAddress().SerializeAsString());
            // NOTE(review): msg_id/len are never passed to BHAsyncRequest
            // (literal 0, 0 instead), so DEFER1 below frees a null id —
            // presumably intentional fire-and-forget; confirm against bh_api.h.
            bool r = BHAsyncRequest(dest.data(), dest.size(), s.data(), s.size(), 0, 0);
            DEFER1(BHFree(msg_id, len););
            if (r) {
                ++Status().nrequest_;
            } else {
                ++Status().nfailed_;
                // Throttle error printing to at most once per second.
                static std::atomic<int64_t> last(0);
                auto now = NowSec();
                if (last.exchange(now) < now) {
                    int ec = 0;
                    std::string msg;
                    GetLastError(ec, msg);
                    printf("request topic error --------- : %s\n", msg.c_str());
                }
            }
        }
    };
    // Monitor: print absolute counters and per-second rates once a second.
    auto showStatus = [](std::atomic<bool> *run) {
        MsgStatus last;
        while (*run) {
            auto &st = Status();
            Sleep(1s, false);
            printf("nreq: %8ld, spd %8ld | failed: %8ld | nsrv: %8ld, spd %8ld | nreply: %8ld, spd %8ld\n",
                   st.nrequest_.load(), st.nrequest_ - last.nrequest_,
                   st.nfailed_.load(),
                   st.nserved_.load(), st.nserved_ - last.nserved_,
                   st.nreply_.load(), st.nreply_ - last.nreply_);
            last = st;
        }
    };
    // Keep the registration alive: heartbeat once a second until stopped.
    auto hb = [](std::atomic<bool> *run) {
        while (*run) {
            Sleep(1s, false);
            bool r = BHHeartbeatEasy(1000);
            printf("heartbeat: %s\n", r ? "ok" : "failed");
        }
    };
    std::atomic<bool> run(true);
    ThreadManager threads;
    boost::timer::auto_cpu_timer timer;
    threads.Launch(hb, &run);
    threads.Launch(showStatus, &run);
    int ncli = 10;
    const uint64_t nreq = 1000 * 100;
    for (int i = 0; i < ncli; ++i) {
        threads.Launch(asyncRequest, nreq);
    }

    // Wait until every expected reply arrived, or the reply count has been
    // flat for two consecutive 1s polls (i.e. traffic has stalled).
    int same = 0;
    int64_t last = 0;
    while (last < nreq * ncli && same < 2) {
        Sleep(1s, false);
        auto cur = Status().nreply_.load();
        if (last == cur) {
            ++same;
        } else {
            last = cur;
            same = 0;
        }
    }

    run = false;
    threads.WaitAll();
    auto &st = Status();
    printf("nreq: %8ld, nsrv: %8ld, nreply: %8ld\n", st.nrequest_.load(), st.nserved_.load(), st.nreply_.load());
}
|