From 3931f83205f153f2bc7fc36d1a894cdc3f14b4db Mon Sep 17 00:00:00 2001 From: lichao <lichao@aiotlink.com> Date: 星期三, 21 四月 2021 16:52:51 +0800 Subject: [PATCH] change node socket to vector; try lock free queue. --- src/socket.cpp | 102 ++++++++++++++++++++++++++------------------------ 1 files changed, 53 insertions(+), 49 deletions(-) diff --git a/src/socket.cpp b/src/socket.cpp index 4c2fc6b..c664982 100644 --- a/src/socket.cpp +++ b/src/socket.cpp @@ -24,24 +24,15 @@ using namespace bhome_msg; using namespace bhome_shm; -namespace +ShmSocket::ShmSocket(Shm &shm, const MQId &id, const int len) : + run_(false), mq_(id, shm, len) { - -} // namespace - -ShmSocket::ShmSocket(Shm &shm, const void *id, const int len) : - shm_(shm), run_(false) -{ - if (id && len > 0) { - mq_.reset(new Queue(*static_cast<const MQId *>(id), shm, len)); - } + Start(); } ShmSocket::ShmSocket(bhome_shm::SharedMemory &shm, const int len) : - shm_(shm), run_(false) + run_(false), mq_(shm, len) { - if (len > 0) { - mq_.reset(new Queue(shm_, len)); - } + Start(); } ShmSocket::~ShmSocket() @@ -49,35 +40,53 @@ Stop(); } -bool ShmSocket::StartRaw(const RecvRawCB &onData, int nworker) +bool ShmSocket::Start(int nworker, const RecvCB &onData, const IdleCB &onIdle) { - if (!mq_) { - return false; - } + auto ioProc = [this, onData, onIdle]() { + auto DoSend = [this]() { return send_buffer_.TrySend(mq()); }; + auto DoRecv = [=] { + auto onRecvWithPerMsgCB = [this, onData](ShmSocket &socket, MsgI &imsg, BHMsgHead &head) { + RecvCB cb; + if (per_msg_cbs_->Pick(head.msg_id(), cb)) { + cb(socket, imsg, head); + } else if (onData) { + onData(socket, imsg, head); + } + }; - std::lock_guard<std::mutex> lock(mutex_); - StopNoLock(); - auto RecvProc = [this, onData]() { - while (run_) { - try { - MsgI imsg; - DEFER1(imsg.Release(shm_)); - if (mq_->Recv(imsg, 100)) { onData(imsg); } - } catch (...) { + // do not recv if no cb is set. 
+ if (!onData && per_msg_cbs_->empty()) { + return false; + } + auto onMsg = [&](MsgI &imsg) { + DEFER1(imsg.Release(shm())); + BHMsgHead head; + if (imsg.ParseHead(head)) { + onRecvWithPerMsgCB(*this, imsg, head); + } + }; + return mq().TryRecvAll(onMsg) > 0; // this will recv all msgs. + }; + + try { + bool more_to_send = DoSend(); + bool more_to_recv = DoRecv(); + if (onIdle) { onIdle(*this); } + if (!more_to_send && !more_to_recv) { + std::this_thread::yield(); + } + } catch (...) { } }; + std::lock_guard<std::mutex> lock(mutex_); + StopNoLock(); + run_.store(true); for (int i = 0; i < nworker; ++i) { - workers_.emplace_back(RecvProc); + workers_.emplace_back([this, ioProc]() { while (run_) { ioProc(); } }); } return true; -} - -bool ShmSocket::Start(const RecvCB &onData, int nworker) -{ - return StartRaw([this, onData](MsgI &imsg) { BHMsg m; if (imsg.Unpack(m)) { onData(m); } }, nworker); } bool ShmSocket::Stop() @@ -100,22 +109,17 @@ return false; } -bool ShmSocket::SyncSend(const void *id, const bhome_msg::BHMsg &msg, const int timeout_ms) +//maybe reimplement, using async cbs? +bool ShmSocket::SyncRecv(bhome_msg::MsgI &msg, bhome_msg::BHMsgHead &head, const int timeout_ms) { - std::lock_guard<std::mutex> lock(mutex_); - if (!mq_ || RunningNoLock()) { - return false; - } else { - return mq_->Send(*static_cast<const MQId *>(id), msg, timeout_ms); + // std::lock_guard<std::mutex> lock(mutex_); // seems no need to lock mutex_. + bool got = (timeout_ms == 0) ? mq().TryRecv(msg) : mq().Recv(msg, timeout_ms); + if (got) { + if (msg.ParseHead(head)) { + return true; + } else { + msg.Release(shm()); + } } -} - -bool ShmSocket::SyncRecv(bhome_msg::BHMsg &msg, const int timeout_ms) -{ - std::lock_guard<std::mutex> lock(mutex_); - if (!mq_ || RunningNoLock()) { - return false; - } else { - return mq_->Recv(msg, timeout_ms); - } + return false; } -- Gitblit v1.8.0