#include "common.h"
using namespace std;

#include <nng/protocol/reqrep0/rep.h>
#include <nng/supplemental/util/platform.h>

#include "nng/compat/nanomsg/reqrep.h"
#include "nng/compat/nanomsg/pubsub.h"
#include "nng/compat/nanomsg/survey.h"

namespace nng_wrap {

// static int server_socket(const string& url, const int protocol, int family=AF_SP){
//     int sock = nn_socket(family, protocol);
//     if (sock < 0) return sock;
//     remove_exist(url);
//     int rc = nn_bind(sock, url.c_str());
//     if (rc < 0) {
//         nn_close(sock);
//         return rc;
//     }
//     return sock;
// }

static int client_socket(const string& url, const int protocol, int family=AF_SP){
    int sock = nn_socket(family, protocol);
    if (sock < 0) return sock;
    // NOTE: body reconstructed to mirror server_socket() above, with nn_connect
    // in place of nn_bind
    int rc = nn_connect(sock, url.c_str());
    if (rc < 0) {
        nn_close(sock);
        return rc;
    }
    return sock;
}

// NOTE: the original opening of this function was lost; the name and signature
// are a reconstruction inferred from the body, which fills in a `_ps* pub` and
// a fresh socket. NN_REQ is an assumption: the loop below waits for an ack
// after every send, i.e. req/rep semantics.
int start_publish(const std::string& url, void* arg/*=NULL*/){
    _ps* pub = (_ps*)arg;
    if (!pub) pub = singleton<_ps>();

    int sock = client_socket(url, NN_REQ);
    if (sock < 0) return sock;

    set_socket_timeout(sock, timeout_req_rep);
    pub->socket_ = sock;
    pub->t_ = get_thread([](const auto pub){
        while (!pub->t_quit_.load()) {
            _ps::psmsg *msg{NULL};
            {
                unique_lock<mutex> l{pub->mtx_msg_};
                pub->cv_msg_.wait(l, [pub]{ return !pub->msg_.empty() || pub->t_quit_.load(); });
                if (pub->t_quit_.load()) break;
                msg = &pub->msg_.front();
                if (msg->topic_.empty()) { pub->msg_.pop_front(); continue; }
            }
            // wire format: topic, a single '\0' separator, then the payload
            string sndmsg = (string{msg->topic_}+='\0')+=msg->data_;
            int rc = nn_send(pub->socket_, sndmsg.data(), sndmsg.size(), 0);
            if (rc == (int)sndmsg.size()){
                char* tmp{};
                rc = nn_recv(pub->socket_, &tmp, NN_MSG, 0);
                if (rc > 0){
                    nn_freemsg(tmp);
                    // printf("======>> publish topic %s data length %lu\n", msg->topic_.c_str(), msg->data_.size());
                    lock_guard<mutex> l{pub->mtx_msg_};
                    pub->msg_.pop_front();
                    continue;
                }
            }
            // send or ack failed: keep the message queued and retry next pass
        }
    }, pub);
    return sock;
}

int publish(const std::string& topic, const void* data, const int data_len, void* arg/*=NULL*/){
    // printf("======>> publish topic %s\n", topic.c_str());
    _ps* pub = (_ps*)arg;
    if (!pub) pub = singleton<_ps>();

    lock_guard<mutex> l{pub->mtx_msg_};
    pub->msg_.emplace_back(topic, string{(const char*)data, (const size_t)data_len});
    pub->cv_msg_.notify_one();
    return pub->msg_.size();
}
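
// Usage sketch (hypothetical caller; the url and topic strings are illustrative):
//
//     start_publish("ipc:///tmp/pub.ipc");
//     const char payload[] = "hello";
//     publish("sensor.temp", payload, sizeof(payload) - 1);
//
// publish() only enqueues; the worker thread created above drains the queue,
// blocking until the peer acknowledges each message.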

///////////////////////////////////////////////

// NOTE: the opening of this function was lost; the name and signature are a
// reconstruction inferred from the body, which fills in a `_ps_sub* sub`.
int start_subscribe(const std::string& url, void* arg/*=NULL*/){
    _ps_sub* sub = (_ps_sub*)arg;
    if (!sub) sub = singleton<_ps_sub>();

    int sock = client_socket(url, NN_SUB);
    if (sock < 0) return sock;
    // set_socket_timeout(sock, timeout_req_rep);
    sub->socket_ = sock;
    sub->t_ = get_thread([](const auto sub){
        while (!sub->t_quit_.load()) {
            char* m;
            int m_len = nn_recv(sub->socket_, &m, NN_MSG, NN_DONTWAIT);
            if (m_len > 0){
                string tmp_msg{m, (size_t)m_len};
                nn_freemsg(m);
                string topic{}, msg{};
                {
                    lock_guard<mutex> l{sub->mtx_topics_};
                    for(auto && i : sub->topics_){
                        if (tmp_msg.size() < i.size()) continue;
                        topic = tmp_msg.substr(0, i.size());
                        if (topic == i){
                            // skip the '\0' byte separating topic and payload
                            msg = tmp_msg.substr(i.size()+1);
                            break;
                        }
                    }
                }
                // printf("======>> subscribe recv topic %s msg length %lu\n", topic.c_str(), msg.length());
                if (!msg.empty()){
                    lock_guard<mutex> l(sub->mtx_msg_);
                    sub->msg_.emplace_back(topic, move(msg));
                }
            }else {
                // nothing pending: retry topics whose NN_SUB_SUBSCRIBE failed earlier
                {
                    lock_guard<mutex> l{sub->mtx_failed_topics_};
                    for(auto iter = sub->failed_topics_.begin(); iter != sub->failed_topics_.end();){
                        if (nn_setsockopt(sub->socket_, NN_SUB, NN_SUB_SUBSCRIBE, iter->c_str(), iter->length()) >= 0){
                            iter = sub->failed_topics_.erase(iter);
                        }else{
                            ++iter;
                        }
                    }
                }
                // printf("======>> subscribe nn_recv failed %s\n", nn_strerror(nn_errno()));
                this_thread::sleep_for(chrono::milliseconds{10}); // NN_DONTWAIT: back off instead of spinning
            }
        }
    }, sub);
    return 0;
}
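
// Subscriber-side usage sketch (hypothetical caller, illustrative strings):
//
//     start_subscribe("ipc:///tmp/pub.ipc");
//     subscribe("sensor.temp");
//     // matching messages accumulate in sub->msg_ as (topic, payload) pairs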

// NOTE: opening reconstructed. A failed NN_SUB_SUBSCRIBE parks the topic in
// failed_topics_; the receive loop above retries it.
int subscribe(const std::string& topic, void* arg/*=NULL*/){
    _ps_sub* sub = (_ps_sub*)arg;
    if (!sub) sub = singleton<_ps_sub>();

    if (nn_setsockopt(sub->socket_, NN_SUB, NN_SUB_SUBSCRIBE, topic.c_str(), topic.length()) < 0){
        lock_guard<mutex> l{sub->mtx_failed_topics_};
        sub->failed_topics_.insert(topic);
    }
    lock_guard<mutex> l{sub->mtx_topics_};
    sub->topics_.insert(topic);

    return 0;
}

// NOTE: opening reconstructed from the body below.
int unsubscribe(const std::string& topic, void* arg/*=NULL*/){
    _ps_sub* sub = (_ps_sub*)arg;
    if (!sub) sub = singleton<_ps_sub>();

    lock_guard<mutex> l(sub->mtx_topics_);
    auto iter = sub->topics_.find(topic);
    if (iter != sub->topics_.end()){
        nn_setsockopt(sub->socket_, NN_SUB, NN_SUB_UNSUBSCRIBE, topic.c_str(), topic.length());
        sub->topics_.erase(iter);
    }
    return 0;
}

///////////////////////////////////////////////

// NOTE: opening reconstructed; the `_sv` type name and the signature are
// assumptions inferred from the body (a respondent that answers every survey
// with a fixed heartbeat message).
int start_survey(const std::string& url, std::string&& fixed_msg, void* arg/*=NULL*/){
    _sv* sv = (_sv*)arg;
    if (!sv) sv = singleton<_sv>();

    sv->url_ = url;
    sv->fixed_msg_ = move(fixed_msg);
    sv->t_ = get_thread([](const auto sv){

        TAG;

        int& sock = sv->socket_;
        const auto& msg = sv->fixed_msg_;

        while (!sv->t_quit_.load()) {
            if (sock < 0){
                sock = client_socket(sv->url_, NN_RESPONDENT);
                if (sock < 0){
                    this_thread::sleep_for(chrono::milliseconds{100}); // reconnect back-off (reconstructed)
                    continue;
                }
            }
            char* tmp{};
            int rc = nn_recv(sock, &tmp, NN_MSG, 0);
            if (rc > 0){
                nn_freemsg(tmp);
                rc = nn_send(sock, msg.data(), msg.size(), 0);
                if (rc < 0){
                    PRNTVITAG("heartbeat survey failed");
                    PRNTVITAG(nn_strerror(nn_errno()));
                }
            }
        }
    }, sv);

    return 0;
}

int request2(const std::string &ipc, const void* r, const int r_len,
             void** reply, int* reply_len, const int to_ms, void* arg/*=NULL*/)
{
    const auto suc = simple_request(ipc, r, r_len, reply, reply_len, to_ms);
    if (suc){
        // a reply that is exactly rr_unblocking_msg_ is the server's unblock
        // sentinel, not real data: swallow it and report failure
        const size_t sl = rr_unblocking_msg_.size();
        const size_t rl = *reply_len;
        if (sl != rl) return true;

        const auto& s = rr_unblocking_msg_;
        auto rep_data = (const char*)(*reply);   // renamed: `r` would shadow the request buffer
        if (s.compare(0, sl, rep_data, rl) == 0){
            free(*reply);
            *reply = NULL;
            *reply_len = 0;
            return false;
        }
    }
    return suc;
}
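
// The sentinel check above pairs with the t_unblock_ thread in start_reply()
// below: requests the server never answers within life_span get answered with
// rr_unblocking_msg_, so request2() returns false instead of blocking forever.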

static void server_cb(void *arg)
{
    if (!arg) return;

    struct work *work = (struct work*)arg;
    nng_msg * msg;
    int rv;
    // uint32_t when{0};

    switch (work->state) {
    case INIT:
        work->state = RECV;
        nng_ctx_recv(work->ctx, work->aio);
        break;
    case RECV:
        if ((rv = nng_aio_result(work->aio)) != 0) {
            break;
        }
        msg = nng_aio_get_msg(work->aio);

        work->msg = msg;
        work->state = WAIT;
        if (work->cb_recv) work->cb_recv(work);
        // nng_sleep_aio(when, work->aio);
        break;
    case WAIT:
        // We could add more data to the message here.
        nng_aio_set_msg(work->aio, work->msg);
        work->msg = NULL;
        work->state = SEND;
        nng_ctx_send(work->ctx, work->aio);
        break;
    case SEND:
        if ((rv = nng_aio_result(work->aio)) != 0) {
            nng_msg_free(work->msg);
            work->msg = NULL;
        }
        work->state = RECV;
        nng_ctx_recv(work->ctx, work->aio);
        break;
    default:
        break;
    }
}
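
// State machine recap: each work item cycles INIT -> RECV -> WAIT -> SEND -> RECV.
// In RECV the inbound message is handed to cb_recv, which queues it for
// read_request() and leaves the item parked in WAIT; the item only advances to
// SEND when send_reply() (or the unblocking thread) fires its aio again via
// aio_unblock().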

static void cb_recv_for_aio(work* w){
    nng_msg *om = w->msg;
    if (!om) return;

    _rr* rep = (_rr*)w->user_data;

    string msg{(const char*)nng_msg_body(om), nng_msg_len(om)};
    nng_msg_free(om);

    auto t = (*rep)();
    lock_guard<mutex> l{rep->mtx_msg_};
    rep->works_.emplace(get<0>(t), w);
    get<1>(t).emplace(get<0>(t), move(msg));
    get<0>(t)++;
    rep->cv_msg_.notify_all();
}
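
// Bookkeeping note: (*rep)() appears to yield a tuple of (monotonic key counter,
// pending-request map). Each request is filed under a fresh key in both works_
// and that map, so read_request() can hand the key to the caller and
// send_reply() can locate the matching work item later.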

static struct work *alloc_work(nng_socket sock, _rr* rep)
{
    struct work *w;
    int rv;

    if ((w = (struct work*)nng_alloc(sizeof(*w))) == NULL) {
        return NULL;
    }
    w->cb_recv = cb_recv_for_aio;
    w->user_data = rep;

    if ((rv = nng_aio_alloc(&w->aio, server_cb, w)) != 0) {
        nng_free(w, sizeof(*w));
        return NULL;
    }
    if ((rv = nng_ctx_open(&w->ctx, sock)) != 0) {
        nng_aio_free(w->aio);  // also release the aio allocated above
        nng_free(w, sizeof(*w));
        return NULL;
    }
    w->state = INIT;
    return (w);
}

static int create_server(nng_socket* sock, const string& url, const int count, _rr* rep){
    TAG;
    if (sock->id > 0) return 0;

    // nng returns 0 on success and a positive error code on failure
    int rv = nng_rep0_open(sock);
    if (rv != 0){
        PRNTVITAG("create_server nng_rep0_open failed");
        PRNTVITAG(url);
        return rv;
    }

    work** works = (work**)malloc(sizeof(void*) * count);
    for (int i = 0; i < count; i++) {
        works[i] = alloc_work(*sock, rep);
    }

    remove_exist(url);
    rv = nng_listen(*sock, url.c_str(), NULL, 0);
    if (rv != 0){
        for(int i = 0; i < count; i++) if(works[i]) nng_free(works[i], sizeof(work));
        free(works);
        PRNTVITAG("create_server nng_listen failed");
        PRNTVITAG(url);
        return rv;
    }

    for (int i = 0; i < count; i++) {
        server_cb(works[i]); // this starts them going (INIT state)
    }

    free(works);
    return 0;
}

static void aio_unblock(work* w, const void* msg, const int msg_len){
    nng_msg_alloc(&w->msg, 0);
    nng_msg_append(w->msg, msg, msg_len);

    nng_sleep_aio(0, w->aio);
}
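
// nng_sleep_aio(0, ...) completes the parked aio immediately, re-entering
// server_cb in the WAIT state, which then sends the message staged in w->msg.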

int start_reply(const std::string& url, const int port, void* arg/*=NULL*/){
    _rr* rep = (_rr*)arg;
    if (!rep) rep = singleton<_rr>();

    string ipc = "ipc:///tmp/" + url;
    if (url.find("ipc://") == 0){
        ipc = url;
    }
    rep->url_ = ipc;
    // socks_ layout: get<0> is the ipc listener; get<1> is a (tcp socket, port) pair
    if(create_server(&get<0>(rep->socks_), ipc, 62, rep) != 0) return -1;

    if (port > 0){
        get<1>(get<1>(rep->socks_)) = port;
        ipc = "tcp://0.0.0.0:" + to_string(port);
        if(create_server(&get<0>(get<1>(rep->socks_)), ipc, 62, rep) != 0) return -1;
        // printf("======>> create server for remote port %d\n", port);
    }else {
        // no tcp listener requested: mark the slot as "already created"
        get<0>(get<1>(rep->socks_)).id = numeric_limits<int32_t>::max();
    }

    if (!rep->t_unblock_){
        rep->t_unblock_.reset(new thread(get_thread([](const auto rep){
            constexpr int idle = 10;
            const auto data = rr_unblocking_msg_.data();
            const auto data_size = rr_unblocking_msg_.size();
            constexpr int life_span = timeout_req_rep*10;

            // collect work items that have been parked longer than life_span
            auto f = [rep]{
                vector<struct work*> tmp{};
                lock_guard<mutex> l{rep->mtx_msg_};
                for(auto iter = rep->works_.begin(); iter != rep->works_.end();){
                    if ((iter->second+=idle) > life_span){
                        tmp.push_back(iter->second.w_);
                        iter = rep->works_.erase(iter);
                    }else {
                        ++iter;
                    }
                }
                return tmp;
            };
            while (!rep->t_quit_.load()) {
                this_thread::sleep_for(chrono::milliseconds{10});
                vector<struct work*> tmp = f();
                for(auto && w : tmp){
                    aio_unblock(w, data, data_size);
                }
            }
        }, rep)));
    }

    return 0;
}

int read_request(void** src, std::string* msg, const int to_ms, void* arg/*=NULL*/){
    _rr* rep = (_rr*)arg;
    if (!rep) rep = singleton<_rr>();

    if (get<0>(rep->socks_).id == 0 || get<0>(get<1>(rep->socks_)).id == 0)
        if (start_reply(rep->url_, get<1>(get<1>(rep->socks_))) != 0)
            return -1;

    int tm = to_ms > 0 ? to_ms : 30;

    uint64_t key{};
    {
        unique_lock<mutex> l(rep->mtx_msg_);
        auto status = rep->cv_msg_.wait_for(l, chrono::milliseconds{tm}, [rep]{
            return !rep->msg_.empty();
        });
        if (!status){
            PRNTVITAG("read_request timeout");
            return -1;
        }
        auto iter = rep->msg_.begin();
        key = iter->first;
        *msg = move(iter->second);
        rep->msg_.erase(iter);
    }

    // hand the caller an opaque token holding the request key
    *src = malloc(sizeof(uint64_t));
    *(uint64_t*)(*src) = key;

    return 0;
}

int send_reply(const void* src, const void* msg, const int msg_len, void* arg/*=NULL*/){
    _rr* rep = (_rr*)arg;
    if (!rep) rep = singleton<_rr>();

    struct work* w{};
    {
        auto key = *(static_cast<uint64_t*>(const_cast<void*>(src)));

        lock_guard<mutex> l{rep->mtx_msg_};
        auto iter = rep->works_.find(key);
        if (iter == rep->works_.end()) return -1;
        w = iter->second;
        rep->works_.erase(iter);
    }

    aio_unblock(w, msg, msg_len);

    return 0;
}
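
// Reply-side usage sketch (hypothetical caller; url/port are illustrative):
//
//     start_reply("demo_rep", 0);
//     void* src{}; std::string req{};
//     if (read_request(&src, &req, 50) == 0){
//         send_reply(src, req.data(), req.size());  // echo the request back
//         free(src);                                // token malloc'd by read_request
//     }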