From c64c54d8e75b9354dc49a7b6b2d326e7dd59eb37 Mon Sep 17 00:00:00 2001
From: lichao <lichao@aiotlink.com>
Date: Thu, 15 Apr 2021 19:32:16 +0800
Subject: [PATCH] add API; fix memory leaks in send and socknode.
---
src/socket.cpp | 137 ++++++++++++++++++++-------------------------
1 file changed, 62 insertions(+), 75 deletions(-)
diff --git a/src/socket.cpp b/src/socket.cpp
index 4d9fcc9..0ba195a 100644
--- a/src/socket.cpp
+++ b/src/socket.cpp
@@ -29,98 +29,69 @@
} // namespace
-ShmSocket::ShmSocket(Type type, bhome_shm::SharedMemory &shm) :
- shm_(shm), type_(type), run_(false)
+ShmSocket::ShmSocket(Shm &shm, const MQId &id, const int len) :
+ run_(false), mq_(id, shm, len)
{
- switch (type) {
- case eSockBus: mq_.reset(new Queue(kBHBusQueueId, shm_, 1000)); break;
- case eSockRequest: mq_.reset(new Queue(shm_, 12)); break;
- case eSockReply: mq_.reset(new Queue(shm_, 64)); break;
- case eSockSubscribe: mq_.reset(new Queue(shm_, 64)); break;
- case eSockPublish: break; // no recv mq needed
- default: break;
- }
+ Start();
}
-
-ShmSocket::ShmSocket(Type type) :
- ShmSocket(type, BHomeShm()) {}
+ShmSocket::ShmSocket(bhome_shm::SharedMemory &shm, const int len) :
+ run_(false), mq_(shm, len)
+{
+ Start();
+}
ShmSocket::~ShmSocket()
{
- Stop();
+ Stop(); // TODO: stop in the subclass instead, in case a worker thread accesses subclass data.
}
-bool ShmSocket::Publish(const std::string &topic, const void *data, const size_t size, const int timeout_ms)
+bool ShmSocket::Start(int nworker, const RecvCB &onData, const IdleCB &onIdle)
{
- if (type_ != eSockPublish) {
- return false;
- }
- assert(!mq_);
- try {
- MsgI imsg;
- if (!imsg.MakeRC(shm_, MakePub(topic, data, size))) {
- return false;
- }
- DEFER1(imsg.Release(shm_));
- return Queue::Send(shm_, kBHBusQueueId, imsg, timeout_ms);
- } catch (...) {
- return false;
- }
-}
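+ // One pass of the worker loop: flush buffered sends, dispatch any received messages, run the idle callback, and yield when there was nothing to do.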
+ auto ioProc = [this, onData, onIdle]() {
+ auto DoSend = [this]() { return send_buffer_.TrySend(mq()); };
+ auto DoRecv = [=] {
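+ // Prefer a per-message callback registered for this msg_id; otherwise fall back to the general onData callback.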
+ auto onRecvWithPerMsgCB = [this, onData](ShmSocket &socket, MsgI &imsg, BHMsgHead &head) {
+ RecvCB cb;
+ if (per_msg_cbs_->Find(head.msg_id(), cb)) {
+ cb(socket, imsg, head);
+ } else if (onData) {
+ onData(socket, imsg, head);
+ }
+ };
-bool ShmSocket::Subscribe(const std::vector<std::string> &topics, const int timeout_ms)
-{
- if (type_ != eSockSubscribe) {
- return false;
- }
- assert(mq_);
- try {
- return mq_->Send(kBHBusQueueId, MakeSub(mq_->Id(), topics), timeout_ms);
- } catch (...) {
- return false;
- }
-}
-
-bool ShmSocket::StartRaw(const RecvRawCB &onData, int nworker)
-{
- auto CanRecv = [this]() {
- switch (type_) {
- case eSockRequest:
- case eSockReply:
- case eSockBus:
- case eSockSubscribe:
- return true;
- default:
- return false;
- }
- };
- if (!CanRecv()) {
- return false;
- }
- std::lock_guard<std::mutex> lock(mutex_);
-
- StopNoLock();
- auto RecvProc = [this, onData]() {
- while (run_) {
- try {
- MsgI imsg;
- DEFER1(imsg.Release(shm_));
- if (mq_->Recv(imsg, 100)) { onData(imsg); }
- } catch (...) {
+ // do not recv if no cb is set.
+ if (!onData && per_msg_cbs_->empty()) {
+ return false;
}
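+ // Parse each head and dispatch; DEFER1 releases the message once the handler returns.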
+ auto onMsg = [&](MsgI &imsg) {
+ DEFER1(imsg.Release(shm()));
+ BHMsgHead head;
+ if (imsg.ParseHead(head)) {
+ onRecvWithPerMsgCB(*this, imsg, head);
+ }
+ };
+ return mq().TryRecvAll(onMsg) > 0; // this will recv all msgs.
+ };
+
+ try {
+ bool more_to_send = DoSend();
+ bool more_to_recv = DoRecv();
+ if (onIdle) { onIdle(*this); }
+ if (!more_to_send && !more_to_recv) {
+ std::this_thread::yield();
+ }
+ } catch (...) {
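+ // Swallow exceptions so a throwing callback cannot kill the worker thread.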
}
};
+
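+ // (Re)start the worker pool under the lock: stop any existing workers, then spawn nworker I/O loops.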
+ std::lock_guard<std::mutex> lock(mutex_);
+ StopNoLock();
run_.store(true);
for (int i = 0; i < nworker; ++i) {
- workers_.emplace_back(RecvProc);
+ workers_.emplace_back([this, ioProc]() { while (run_) { ioProc(); } });
}
return true;
-}
-
-bool ShmSocket::Start(const RecvCB &onData, int nworker)
-{
- return StartRaw([this, onData](MsgI &imsg) { BHMsg m; if (imsg.Unpack(m)) { onData(m); } }, nworker);
}
bool ShmSocket::Stop()
@@ -137,7 +108,23 @@
w.join();
}
}
+ workers_.clear();
return true;
}
return false;
}
+
+// maybe reimplement this using async callbacks?
+bool ShmSocket::SyncRecv(bhome_msg::MsgI &msg, bhome::msg::BHMsgHead &head, const int timeout_ms)
+{
+ // std::lock_guard<std::mutex> lock(mutex_); // locking mutex_ here seems unnecessary.
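+ // timeout_ms == 0 polls without blocking; otherwise wait up to timeout_ms.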
+ bool got = (timeout_ms == 0) ? mq().TryRecv(msg) : mq().Recv(msg, timeout_ms);
+ if (got) {
+ if (msg.ParseHead(head)) {
+ return true;
+ } else {
+ msg.Release(shm());
+ }
+ }
+ return false;
+}
--
Gitblit v1.8.0
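
Reviewer note: below is a minimal usage sketch of the API as reworked by this
patch (construct, Start with a recv callback, SyncRecv, Stop). It is not part
of the patch. The "socket.h" include, the visibility of MsgI/BHMsgHead, and
the default arguments on Start() (implied by the bare Start() call in the
constructors) are assumptions about the rest of the repo.

    #include "socket.h" // assumed header declaring ShmSocket, MsgI, BHMsgHead

    void Demo(bhome_shm::SharedMemory &shm)
    {
        ShmSocket sock(shm, 64); // anonymous queue, 64 slots; the ctor already calls Start()

        // Restart with one worker and a general recv callback; per-message
        // callbacks registered in per_msg_cbs_ still take priority over it.
        sock.Start(1, [](ShmSocket &s, MsgI &imsg, BHMsgHead &head) {
            // handle imsg/head here; the worker loop releases imsg afterwards
        });

        // Blocking receive with a 100 ms timeout. On success the caller must
        // release the message; SyncRecv releases it itself if ParseHead fails.
        MsgI msg;
        BHMsgHead head;
        if (sock.SyncRecv(msg, head, 100)) {
            msg.Release(shm);
        }

        sock.Stop();
    }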