From cb6e8a6831613006603c14337291174fa29d2f57 Mon Sep 17 00:00:00 2001
From: zhangmeng <775834166@qq.com>
Date: Thu, 16 Dec 2021 15:50:41 +0800
Subject: [PATCH] add optional per-call context argument to the pub/sub/survey interface
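
The pub/sub and survey wrappers previously operated on file-scope static
state (pub_, sub_, survey_). This patch threads an optional context
pointer through the public interface instead:

* publish(), subscribe_center(), subscribe_topic(), unsubscribe_topic(),
  subscribe_read() and respond_survey() take a trailing void* arg
  (default NULL); when it is NULL a process-wide singleton<_ps>() /
  singleton<_ps_sub>() / singleton<_sv>() context is used, so existing
  callers keep their behaviour while new callers can hold independent
  contexts.
* The shared declarations (_nn/_ps/_ps_sub/_sv, TAG/PRNTVITAG,
  timeout_req_rep, singleton<>) now come from common.h.
* The req/rep server implementation (request2, start_reply, read_request,
  send_reply and the aio worker machinery) is removed from nng_wrap.cpp.

Rough usage sketch (URL and topic names below are placeholders, not part
of this patch; passing NULL as arg keeps the old singleton behaviour):

    // assumes the nng_wrap public header and <string> are included
    // subscriber side, using the default singleton context
    subscribe_center("ipc:///tmp/pubsub_center", NULL);
    subscribe_topic("sensor/", NULL);
    std::string topic, msg;
    if (subscribe_read(&topic, &msg, 100, NULL) == 0) {
        // one message published under "sensor/" arrived
    }

    // publisher side: a (NULL, 0) payload connects to the center,
    // later calls queue data for the background send thread
    std::string payload{"hello"};
    publish("ipc:///tmp/pubsub_center", NULL, 0, NULL);
    publish("sensor/", payload.data(), (int)payload.size(), NULL);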

---
 src/nng_wrap.cpp |  585 +++++++++------------------------------------------------
 1 file changed, 99 insertions(+), 486 deletions(-)

diff --git a/src/nng_wrap.cpp b/src/nng_wrap.cpp
index 85c555e..1972e07 100644
--- a/src/nng_wrap.cpp
+++ b/src/nng_wrap.cpp
@@ -2,54 +2,15 @@
 
 #include <string.h>
 
-#include <random>
 #include <vector>
-#include <unordered_map>
-#include <unordered_set>
-#include <thread>
-#include <atomic>
-#include <deque>
-#include <mutex>
-#include <condition_variable>
-#include <functional>
-#include <chrono>
-#include <future>
+#include "common.h"
 using namespace std;
 
-#include <unistd.h>
-
-#include <nng/nng.h>
-#include <nng/protocol/reqrep0/rep.h>
-#include <nng/supplemental/util/platform.h>
-
-#include "nng/compat/nanomsg/nn.h"
 #include "nng/compat/nanomsg/reqrep.h"
 #include "nng/compat/nanomsg/pubsub.h"
 #include "nng/compat/nanomsg/survey.h"
 
 namespace nng_wrap {
-
-// common function
-static int client_socket(const string& url, const int protocol, int family=AF_SP){
-    int sock = nn_socket(family, protocol);
-    if (sock < 0) return sock;
-    int rc = nn_connect(sock, url.c_str());
-    if (rc < 0) {
-        nn_close(sock);
-        return rc;
-    }
-    return sock;
-}
-
-static void remove_exist(const string& url){
-    if (url.find("ipc://") == 0){
-        string address(url);
-        address = address.substr(6);
-        if (access(address.c_str(), F_OK) == 0){
-            remove(address.c_str());
-        }
-    }
-}
 
 // static int server_socket(const string& url, const int protocol, int family=AF_SP){
 //     int sock = nn_socket(family, protocol);
@@ -62,6 +23,17 @@
 //     }
 //     return sock;
 // }
+
+static int client_socket(const string& url, const int protocol, int family=AF_SP){
+    int sock = nn_socket(family, protocol);
+    if (sock < 0) return sock;
+    int rc = nn_connect(sock, url.c_str());
+    if (rc < 0) {
+        nn_close(sock);
+        return rc;
+    }
+    return sock;
+}
 
 static void set_socket_timeout(int sock, const int to_ms){
     nn_setsockopt(sock, NN_SOL_SOCKET, NN_SNDTIMEO, &to_ms, sizeof(to_ms));
@@ -106,17 +78,6 @@
         *dest_len = src_len;
 }
 
-static thread_local string verbose_info{};
-#ifndef PRNTVITAG
-#define TAG do{ verbose_info.clear(); \
-                verbose_info=string("function [")+__FUNCTION__+string("]"); \
-            }while(0)
-#define PRNTVITAG(msg) do{ \
-            verbose_info+=string("-> (") + msg + string(")"); \
-        }while(0)
-// #define TAG
-// #define PRNTVITAG(args)
-#endif
 void get_last_error(int* ec, void** emsg, int* emsg_len){
     *emsg = NULL;
     *emsg_len = 0;
@@ -132,7 +93,6 @@
 ///////////////////////////////////////////////////////
 // simple request waiting reply
 
-static constexpr int timeout_req_rep = 5162;
 int simple_request(const std::string& url,
     const void* in, const int in_len,
     void** out, int *out_len, const int to_ms){
@@ -160,55 +120,12 @@
     return true;
 }
 
-///////////////////////////////////////////////////////////
-// base class
-#define DISABLE_COPY_AND_ASSIGN(className) \
-        className(const className&)=delete; \
-        className(className&&)=delete; \
-        className& operator=(const className&)=delete; \
-        className& operator=(className&&)=delete
-
-class _nn{
-public:
-    DISABLE_COPY_AND_ASSIGN(_nn);
-    _nn()=default;
-    virtual ~_nn(){ if (socket_ > 0) nn_close(socket_); }
-    int                         socket_{-1};
-    string                      url_{};
-};
-
-///////////////////////////////////////////////
+/////////////////////////////////////////////////////
 // publish
 
-class _ps : public _nn{
-public:
-    struct psmsg{
-        DISABLE_COPY_AND_ASSIGN(psmsg);
-        psmsg()=delete;
-        psmsg(const std::string& t, std::string&& m)
-        :topic_(t),data_(std::move(m)){}
-        std::string topic_{};
-        std::string data_{};
-    };
-public:
-    DISABLE_COPY_AND_ASSIGN(_ps);
-    _ps()=default;
-    virtual ~_ps(){
-        t_quit_.store(true, memory_order_relaxed);
-        if (t_.joinable()) t_.join();
-    }
-
-    thread              t_;
-    atomic_bool         t_quit_{false};
-    deque<psmsg>        msg_{};
-    mutex               mtx_msg_{};
-    condition_variable  cv_msg_{};
-};
-
-static _ps pub_;
-static int connect_to_center(const string& topic){
-    if (pub_.socket_ > 0) return pub_.socket_;
-    pub_.url_ = topic;
+static int pub_connect_to_center(const string& topic, _ps* pub){
+    if (pub->socket_ > 0) return pub->socket_;
+    pub->url_ = topic;
 
     TAG;
     int sock = client_socket(topic, NN_REQ);
@@ -217,29 +134,29 @@
         return -1;
     }
     set_socket_timeout(sock, timeout_req_rep);
-    pub_.socket_ = sock;
-    pub_.t_ = thread([]{
-        while (!pub_.t_quit_.load()) {
+    pub->socket_ = sock;
+    pub->t_ = thread([pub]{
+        while (!pub->t_quit_.load()) {
             _ps::psmsg *msg{NULL};
             {
-                unique_lock<mutex> l{pub_.mtx_msg_};
-                pub_.cv_msg_.wait(l, []{
-                    return !pub_.msg_.empty() || pub_.t_quit_.load();
+                unique_lock<mutex> l{pub->mtx_msg_};
+                pub->cv_msg_.wait(l, [pub]{
+                    return !pub->msg_.empty() || pub->t_quit_.load();
                 });
-                if(pub_.t_quit_.load()) break;
-                msg = &pub_.msg_.front();
-                if (msg->topic_.empty()) {pub_.msg_.pop_front(); continue;}
+                if(pub->t_quit_.load()) break;
+                msg = &pub->msg_.front();
+                if (msg->topic_.empty()) {pub->msg_.pop_front(); continue;}
             }
             string sndmsg(msg->topic_ + msg->data_);
-            int rc = nn_send(pub_.socket_, sndmsg.data(), sndmsg.size(), 0);
+            int rc = nn_send(pub->socket_, sndmsg.data(), sndmsg.size(), 0);
             if (rc == (int)sndmsg.size()){
                 char* tmp{};
-                rc = nn_recv(pub_.socket_, &tmp, NN_MSG, 0);
+                rc = nn_recv(pub->socket_, &tmp, NN_MSG, 0);
                 if (rc > 0){
                     nn_freemsg(tmp);
                     printf("======>> publish topic %s data length %lu\n", msg->topic_.c_str(), msg->data_.size());
-                    lock_guard<mutex> l{pub_.mtx_msg_};
-                    pub_.msg_.pop_front();
+                    lock_guard<mutex> l{pub->mtx_msg_};
+                    pub->msg_.pop_front();
                     continue;
                 }else{
                     PRNTVITAG("publish req-rep thread nn_recv faild");
@@ -253,44 +170,38 @@
     return sock;
 }
 
-int publish(const std::string& topic, const void* data, const int data_len){
+int publish(const std::string& topic, const void* data, const int data_len, void* arg/*=NULL*/){
+    _ps* pub = (_ps*)arg;
+    if (!pub) pub = singleton<_ps>();
+
     if (!data && data_len == 0){
         // printf("======>> publish start url %s\n", topic.c_str());
-        return connect_to_center(topic);
+        return pub_connect_to_center(topic, pub);
     }
-    if (pub_.socket_ < 0){
-        connect_to_center(pub_.url_);
+    if (pub->socket_ < 0){
+        pub_connect_to_center(pub->url_, pub);
     }
-    if(pub_.socket_ < 0) {
+    if(pub->socket_ < 0) {
         PRNTVITAG("publish socket_ < 0");
         return -1;
     }
 
     // printf("======>> publish topic %s\n", topic.c_str());
-    lock_guard<mutex> l{pub_.mtx_msg_};
-    pub_.msg_.emplace_back(topic, string{(const char*)data, (const size_t)data_len});
-    pub_.cv_msg_.notify_one();
-    return pub_.msg_.size();
+    lock_guard<mutex> l{pub->mtx_msg_};
+    pub->msg_.emplace_back(topic, string{(const char*)data, (const size_t)data_len});
+    pub->cv_msg_.notify_one();
+    return pub->msg_.size();
 }
 
 ///////////////////////////////////////////////
 // subscribe
-class _ps_sub : public _ps{
-public:
-    DISABLE_COPY_AND_ASSIGN(_ps_sub);
-    _ps_sub()=default;
-    ~_ps_sub()=default;
 
-    unordered_set<string>   topics_{};
-    mutex                   mtx_topics_{};
-    unordered_set<string>   failed_topics_{};
-    mutex                   mtx_failed_topics_{};
-};
+int subscribe_center(const std::string& url, void* arg/*=NULL*/){
+    _ps_sub* sub = (_ps_sub*)arg;
+    if (!sub) sub = singleton<_ps_sub>();
 
-static _ps_sub sub_;
-int subscribe_center(const std::string& url){
-    if (sub_.socket_ > 0) return 0;
-    sub_.url_ = url;
+    if (sub->socket_ > 0) return 0;
+    sub->url_ = url;
 
     TAG;
     int sock = client_socket(url, NN_SUB);
@@ -299,18 +210,18 @@
         return -1;
     }
     // set_socket_timeout(sock, timeout_req_rep);
-    sub_.socket_ = sock;
-    sub_.t_ = thread([]{
-        while (!sub_.t_quit_.load()) {
+    sub->socket_ = sock;
+    sub->t_ = thread([sub]{
+        while (!sub->t_quit_.load()) {
             char* m;
-            int m_len = nn_recv(sub_.socket_, &m, NN_MSG, NN_DONTWAIT);
+            int m_len = nn_recv(sub->socket_, &m, NN_MSG, NN_DONTWAIT);
             if (m_len > 0){
                 string tmp_msg{m, (size_t)m_len};
                 nn_freemsg(m);
                 string topic{}, msg{};
                 {
-                    lock_guard<mutex> l{sub_.mtx_topics_};
-                    for(auto && i : sub_.topics_){
+                    lock_guard<mutex> l{sub->mtx_topics_};
+                    for(auto && i : sub->topics_){
                         if (tmp_msg.size() < i.size()) continue;
                         topic = tmp_msg.substr(0, i.size());
                         if (topic == i){
@@ -321,18 +232,18 @@
                 }
                 printf("======>> subscribe recv topic %s msg length %lu\n", topic.c_str(), msg.length());
                 if (!msg.empty()){
-                    lock_guard<mutex> l(sub_.mtx_msg_);
-                    sub_.msg_.emplace_back(topic, move(msg));
-                    sub_.cv_msg_.notify_all();
+                    lock_guard<mutex> l(sub->mtx_msg_);
+                    sub->msg_.emplace_back(topic, move(msg));
+                    sub->cv_msg_.notify_all();
                 }
 
             }else {
                 {
-                    lock_guard<mutex> l{sub_.mtx_failed_topics_};
-                    if (!sub_.failed_topics_.empty()){
-                        for(auto iter = sub_.failed_topics_.begin(); iter != sub_.failed_topics_.end();){
-                            if (nn_setsockopt(sub_.socket_, NN_SUB, NN_SUB_UNSUBSCRIBE, iter->c_str(), iter->length()) >= 0){
-                                iter = sub_.failed_topics_.erase(iter);
+                    lock_guard<mutex> l{sub->mtx_failed_topics_};
+                    if (!sub->failed_topics_.empty()){
+                        for(auto iter = sub->failed_topics_.begin(); iter != sub->failed_topics_.end();){
+                            if (nn_setsockopt(sub->socket_, NN_SUB, NN_SUB_UNSUBSCRIBE, iter->c_str(), iter->length()) >= 0){
+                                iter = sub->failed_topics_.erase(iter);
                             }else{
                                 iter++;
                             }
@@ -347,58 +258,66 @@
     return 0;
 }
 
-int subscribe_topic(const std::string& topic){
+int subscribe_topic(const std::string& topic, void* arg/*=NULL*/){
+    _ps_sub* sub = (_ps_sub*)arg;
+    if (!sub) sub = singleton<_ps_sub>();
+
     TAG;
-    if (sub_.socket_ < 0){
-        subscribe_center(sub_.url_);
+    if (sub->socket_ < 0){
+        subscribe_center(sub->url_, sub);
     }
-    if (sub_.socket_ < 0) {
+    if (sub->socket_ < 0) {
         PRNTVITAG("socket_ < 0");
         return -1;
     }
 
-    auto ret = nn_setsockopt(sub_.socket_, NN_SUB, NN_SUB_SUBSCRIBE, topic.c_str(), topic.length());
+    auto ret = nn_setsockopt(sub->socket_, NN_SUB, NN_SUB_SUBSCRIBE, topic.c_str(), topic.length());
     // printf("set NN_SUB_SUBSCRIBE topic %s ret %d\n", topic.c_str(), ret);
     if (ret < 0){
         PRNTVITAG("nn_setsockopt failed");
-        lock_guard<mutex> l{sub_.mtx_failed_topics_};
-        sub_.failed_topics_.insert(topic);
+        lock_guard<mutex> l{sub->mtx_failed_topics_};
+        sub->failed_topics_.insert(topic);
     }
-    lock_guard<mutex> l{sub_.mtx_topics_};
-    sub_.topics_.insert(topic);
+    lock_guard<mutex> l{sub->mtx_topics_};
+    sub->topics_.insert(topic);
 
     return 0;
 }
 
-int unsubscribe_topic(const std::string& topic){
-    lock_guard<mutex> l(sub_.mtx_topics_);
-    auto iter = sub_.topics_.find(topic);
-    if (iter != sub_.topics_.end()){
-        nn_setsockopt(sub_.socket_, NN_SUB, NN_SUB_UNSUBSCRIBE, topic.c_str(), topic.length());
-        sub_.topics_.erase(iter);
+int unsubscribe_topic(const std::string& topic, void* arg/*=NULL*/){
+    _ps_sub* sub = (_ps_sub*)arg;
+    if (!sub) sub = singleton<_ps_sub>();
+
+    lock_guard<mutex> l(sub->mtx_topics_);
+    auto iter = sub->topics_.find(topic);
+    if (iter != sub->topics_.end()){
+        nn_setsockopt(sub->socket_, NN_SUB, NN_SUB_UNSUBSCRIBE, topic.c_str(), topic.length());
+        sub->topics_.erase(iter);
     }
 
     return 0;
 }
 
-int subscribe_read(std::string* topic, std::string* msg, const int to_ms){
+int subscribe_read(std::string* topic, std::string* msg, const int to_ms, void* arg/*=NULL*/){
+    _ps_sub* sub = (_ps_sub*)arg;
+    if (!sub) sub = singleton<_ps_sub>();
 
     TAG;
 
     int tm = to_ms > 0 ? to_ms : 30;
 
-    unique_lock<mutex> l(sub_.mtx_msg_);
-    auto status = sub_.cv_msg_.wait_for(l, chrono::milliseconds{tm}, []{
-        return !sub_.msg_.empty();
+    unique_lock<mutex> l(sub->mtx_msg_);
+    auto status = sub->cv_msg_.wait_for(l, chrono::milliseconds{tm}, [sub]{
+        return !sub->msg_.empty();
     });
     if (!status){
         PRNTVITAG("subscribe_read timeout");
         return -1;
     }
-    const auto& tmp = sub_.msg_.front();
+    const auto& tmp = sub->msg_.front();
     *topic = tmp.topic_;
     *msg = tmp.data_;
-    sub_.msg_.pop_front();
+    sub->msg_.pop_front();
 
     return 0;
 }
@@ -406,33 +325,21 @@
 ///////////////////////////////////////////////////////////
 // survey respondent for heartbeat
 
-class _sv : public _nn{
-public:
-    DISABLE_COPY_AND_ASSIGN(_sv);
-    _sv()=default;
-    ~_sv(){
-        t_quit_.store(true, memory_order_relaxed);
-        if (t_.joinable()) t_.join();
-    }
+int respond_survey(const std::string& url, std::string&& fixed_msg, void* arg/*=NULL*/){
+    _sv* sv = (_sv*)arg;
+    if (!sv) sv = singleton<_sv>();
 
-    thread      t_;
-    atomic_bool t_quit_{false};
-    string      fixed_msg_{};
-};
-
-static _sv survey_;
-int respond_survey(const std::string& url, std::string&& fixed_msg){
-    survey_.url_ = url;
-    survey_.fixed_msg_ = move(fixed_msg);
-    survey_.t_ = thread([]{
+    sv->url_ = url;
+    sv->fixed_msg_ = move(fixed_msg);
+    sv->t_ = thread([sv]{
 
         TAG;
 
-        int& sock = survey_.socket_;
-        const auto& msg = survey_.fixed_msg_;
-        while (!survey_.t_quit_.load()) {
+        int& sock = sv->socket_;
+        const auto& msg = sv->fixed_msg_;
+        while (!sv->t_quit_.load()) {
             if (sock < 0){
-                sock = client_socket(survey_.url_, NN_RESPONDENT);
+                sock = client_socket(sv->url_, NN_RESPONDENT);
                 if (sock > 0) set_socket_timeout(sock, 126);
             }
             if (sock < 0) continue;
@@ -452,299 +359,5 @@
 
     return 0;
 }
-
-//////////////////////////////////////////////
-// reply for request
-
-enum { INIT, RECV, WAIT, SEND };
-struct work {
-    int state{-1};
-    nng_aio *aio{};
-    nng_msg *msg{};
-    nng_ctx  ctx;
-    void(*cb_recv)(work*){};
-};
-
-class _rr : public _nn{
-public:
-    DISABLE_COPY_AND_ASSIGN(_rr);
-    _rr()=default;
-    ~_rr(){
-        if(sock_local_.id > 0) nng_close(sock_local_);
-        if(sock_remote_.id > 0) nng_close(sock_remote_);
-        t_quit_.store(true, memory_order_relaxed);
-        if (t_unblock_&&t_unblock_->joinable()) t_unblock_->join();
-    }
-
-    const string unblocking_msg_{"~!@#$%^&*()-=<<UNBLOCKING>>=-()*&^%$#@!~"};
-
-    unique_ptr<thread>                              t_unblock_{nullptr};
-    atomic_bool                                     t_quit_{false};
-
-    nng_socket                                      sock_local_{0};
-    nng_socket                                      sock_remote_{0};
-    int                                             port_{-1};
-
-    unordered_map<uint64_t, string>                 msg_{};
-    class worker{
-        worker& in_op(const worker& w){if(&w!=this){w_=w.w_;life_=w.life_;}return *this;};
-    public:
-        worker()=default;
-        ~worker()=default;
-        worker(struct work* w):w_(w),life_(0){}
-        worker(const worker& w):w_(w.w_),life_(w.life_){}
-        worker(worker&& w):w_(w.w_),life_(w.life_){}
-        worker& operator=(const worker& w){return in_op(w);}
-        worker& operator=(worker&& w){return in_op(w);}
-        operator struct work*() const{return w_;}
-        operator int&() {return life_;}
-        struct work* w_{};
-        int life_{};
-    };
-    unordered_map<uint64_t, worker>                 works_{};
-    uint64_t                                        work_index_{0};
-    mutex                                           mtx_msg_{};
-    condition_variable                              cv_msg_{};
-
-};
-
-static _rr reply_;
-
-int request2(const std::string &ipc, const void* r, const int r_len,
-    void** reply, int* reply_len, const int to_ms)
-{
-    const auto suc = simple_request(ipc, r, r_len, reply, reply_len, to_ms);
-    if (suc){
-        const size_t sl = reply_.unblocking_msg_.size();
-        const size_t rl = *reply_len;
-        if (sl != rl) return true;
-
-        const auto& s = reply_.unblocking_msg_;
-        auto r = (const char*)(*reply);
-        if (s.compare(0, sl, r, rl) == 0){
-            free(*reply);
-            *reply = NULL;
-            *reply_len = 0;
-            return false;
-        }
-    }
-    return suc;
-}
-
-static void server_cb(void *arg)
-{
-    if (!arg) return;
-
-    struct work *work = (struct work*)arg;
-    nng_msg *    msg;
-    int          rv;
-    // uint32_t     when{0};
-
-    switch (work->state) {
-    case INIT:
-        work->state = RECV;
-        nng_ctx_recv(work->ctx, work->aio);
-        break;
-    case RECV:
-        if ((rv = nng_aio_result(work->aio)) != 0) {
-            break;
-        }
-        msg = nng_aio_get_msg(work->aio);
-
-        work->msg   = msg;
-        work->state = WAIT;
-        if (work->cb_recv) work->cb_recv(work);
-        // nng_sleep_aio(when, work->aio);
-        break;
-    case WAIT:
-        // We could add more data to the message here.
-        nng_aio_set_msg(work->aio, work->msg);
-        work->msg   = NULL;
-        work->state = SEND;
-        nng_ctx_send(work->ctx, work->aio);
-        break;
-    case SEND:
-        if ((rv = nng_aio_result(work->aio)) != 0) {
-            nng_msg_free(work->msg);
-        }
-        work->state = RECV;
-        nng_ctx_recv(work->ctx, work->aio);
-        break;
-    default:
-        break;
-    }
-}
-
-static void cb_recv_for_aio(work* w){
-    nng_msg *om = w->msg;
-    if (!om) return;
-
-    string msg{(const char*)nng_msg_body(om), nng_msg_len(om)};
-    nng_msg_free(om);
-
-    lock_guard<mutex> l{reply_.mtx_msg_};
-    reply_.works_.emplace(reply_.work_index_, w);
-    reply_.msg_.emplace(reply_.work_index_, move(msg));
-    // reply_.works_.insert({reply_.work_index_, w});
-    // reply_.msg_.insert({reply_.work_index_, msg});
-    // reply_.works_[reply_.work_index_] = w;
-    // reply_.msg_[reply_.work_index_] = msg;
-    reply_.work_index_++;
-    reply_.cv_msg_.notify_all();
-}
-
-static struct work *alloc_work(nng_socket sock)
-{
-    struct work *w;
-    int          rv;
-
-    if ((w = (struct work*)nng_alloc(sizeof(*w))) == NULL) {
-        return NULL;;
-    }
-    w->cb_recv = cb_recv_for_aio;
-
-    if ((rv = nng_aio_alloc(&w->aio, server_cb, w)) != 0) {
-        return NULL;
-    }
-    if ((rv = nng_ctx_open(&w->ctx, sock)) != 0) {
-        return NULL;
-    }
-    w->state = INIT;
-    return (w);
-}
-
-static int create_server(nng_socket* sock, const string& url, const int count){
-    TAG;
-    if (sock->id > 0) return 0;
-
-    int rv = nng_rep0_open(sock);
-    if (rv < 0){
-        PRNTVITAG("create_server nng_rep0_open faild");
-        PRNTVITAG(url);
-        return rv;
-    }
-
-    work** works = (work**)malloc(sizeof(work*) * count);
-    for (int i = 0; i < count; i++) {
-        works[i] = alloc_work(*sock);
-    }
-
-    remove_exist(url);
-    rv = nng_listen(*sock, url.c_str(), NULL, 0);
-    if (rv < 0){
-        free(works);
-        PRNTVITAG("create_server nng_listen failed");
-        PRNTVITAG(url);
-        return rv;
-    }
-
-    for (int i = 0; i < count; i++) {
-        server_cb(works[i]); // this starts them going (INIT state)
-    }
-
-    free(works);
-    return 0;
-}
-
-static void aio_unblock(work* w, const void* msg, const int msg_len){
-    nng_msg_alloc(&w->msg, 0);
-    nng_msg_append(w->msg, msg, msg_len);
-
-    nng_sleep_aio(0, w->aio);
-}
-
-int start_reply(const std::string& url, const int port){
-
-    string ipc = "ipc:///tmp/" + url;
-    if (url.find("ipc://") == 0){
-        ipc = url;
-    }
-    reply_.url_ = ipc;
-    if(create_server(&reply_.sock_local_, ipc, 62) != 0) return -1;
-
-    if (port > 0){
-        reply_.port_ = port;
-        ipc = "tcp://0.0.0.0:" + to_string(port);
-        if(create_server(&reply_.sock_remote_, ipc, 62) != 0) return -1;
-    }else {
-        reply_.sock_remote_.id = numeric_limits<int32_t>::max();
-    }
-
-    if (!reply_.t_unblock_){
-        reply_.t_unblock_.reset(new thread([]{
-            constexpr int idle = 10;
-            const auto data = reply_.unblocking_msg_.data();
-            const auto data_size = reply_.unblocking_msg_.size();
-            while (!reply_.t_quit_.load()) {
-                this_thread::sleep_for(chrono::milliseconds{10});
-                vector<struct work*> tmp{};
-                {
-                    lock_guard<mutex> l{reply_.mtx_msg_};
-                    for(auto iter = reply_.works_.begin(); iter != reply_.works_.end();){
-                        if ((iter->second+=idle) > timeout_req_rep){
-                            tmp.push_back(iter->second.w_);
-                            iter = reply_.works_.erase(iter);
-                        }else {
-                            ++iter;
-                        }
-                    }
-                }
-                for(auto && w : tmp){
-                    aio_unblock(w, data, data_size);
-                }
-            }
-        }));
-    }
-
-    return 0;
-}
-
-int read_request(void** src, std::string* msg, const int to_ms){
-
-    if (reply_.sock_local_.id == 0 || reply_.sock_remote_.id == 0)
-        if (start_reply(reply_.url_, reply_.port_) != 0)
-            return -1;
-
-    int tm = to_ms > 0 ? to_ms : 30;
-
-    uint64_t key{};
-    {
-        unique_lock<mutex> l(reply_.mtx_msg_);
-        auto status = reply_.cv_msg_.wait_for(l, chrono::milliseconds{tm}, []{
-            return !reply_.msg_.empty();
-        });
-        if (!status){
-            PRNTVITAG("subscribe_read timeout");
-            return -1;
-        }
-        auto iter = reply_.msg_.begin();
-        key = iter->first;
-        *msg = move(iter->second);
-        reply_.msg_.erase(iter);
-    }
-
-    *src = malloc(sizeof(uint64_t));
-    *(uint64_t*)(*src) = key;
-
-    return 0;
-}
-
-int send_reply(const void* src, const void* msg, const int msg_len){
-    struct work* w{};
-    {
-        auto key = *(static_cast<uint64_t*>(const_cast<void*>(src)));
-
-        lock_guard<mutex> l{reply_.mtx_msg_};
-        auto iter = reply_.works_.find(key);
-        if (iter == reply_.works_.end()) return -1;
-        w = iter->second;
-        reply_.works_.erase(iter);
-    }
-
-    aio_unblock(w, msg, msg_len);
-
-    return 0;
-}
-
 
 }

--
Gitblit v1.8.0