From 68c7bef33e74f23aa0136ccd6f7faa654d671ebc Mon Sep 17 00:00:00 2001
From: lichao <lichao@aiotlink.com>
Date: Fri, 21 May 2021 09:23:01 +0800
Subject: [PATCH] center publish notify; fix topic partial match.
---
utest/robust_test.cpp | 208 +++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 166 insertions(+), 42 deletions(-)
diff --git a/utest/robust_test.cpp b/utest/robust_test.cpp
index 0ffbcd6..ea6144c 100644
--- a/utest/robust_test.cpp
+++ b/utest/robust_test.cpp
@@ -1,5 +1,6 @@
#include "robust.h"
#include "util.h"
+#include <boost/circular_buffer.hpp>
using namespace robust;
@@ -15,10 +16,39 @@
/////////////////////////////////////////////////////////////////////////////////////////
+BOOST_AUTO_TEST_CASE(InitTest)
+{
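+    // Smoke test for AtomicReqRep: two client threads each issue 20 requests; the server thread answers every request with req + 100.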
+ AtomicReqRep rr;
+ auto client = [&]() {
+ for (int i = 0; i < 20; ++i) {
+ int64_t reply = 0;
+ bool r = rr.ClientRequest(i, reply);
+            printf("init request %d, %s, reply %ld\n", i, (r ? "ok" : "failed"), reply);
+ }
+ };
+
+    std::atomic<bool> run{true}; // stop flag shared with the server thread
+ auto server = [&]() {
+ auto onReq = [](int64_t req) { return req + 100; };
+ while (run) {
+ rr.ServerProcess(onReq);
+ }
+ };
+
+ ThreadManager clients, servers;
+ servers.Launch(server);
+ for (int i = 0; i < 2; ++i) {
+ clients.Launch(client);
+ }
+ clients.WaitAll();
+ run = false;
+ servers.WaitAll();
+}
+
BOOST_AUTO_TEST_CASE(QueueTest)
{
const int nthread = 100;
- const uint64_t nmsg = 1000 * 1000 * 100;
+ const uint64_t nmsg = 1000 * 1000 * 10;
SharedMemory &shm = TestShm();
shm.Remove();
@@ -33,37 +63,37 @@
BOOST_CHECK_EQUAL((u64 & 255), i);
}
-#if 1
- typedef AtomicQueue<4> Rcb;
-
- Rcb tmp;
- BOOST_CHECK(tmp.like_empty());
- BOOST_CHECK(tmp.push_back(1));
- BOOST_CHECK(tmp.tail() == 1);
- BOOST_CHECK(tmp.head() == 0);
- int64_t d;
- BOOST_CHECK(tmp.pop_front(d));
- BOOST_CHECK(tmp.like_empty());
- BOOST_CHECK(tmp.head() == 1);
- BOOST_CHECK(tmp.tail() == 1);
-
- ShmObject<Rcb> rcb(shm, "test_rcb");
-#else
- typedef Circular<int64_t> Rcb;
- ShmObject<Rcb> rcb(shm, "test_rcb", 64, shm.get_segment_manager());
-#endif
-
- const int nsize = sizeof(Rcb);
-
- bool try_more = false;
uint64_t correct_total = nmsg * (nmsg - 1) / 2;
std::atomic<uint64_t> total(0);
std::atomic<uint64_t> nwrite(0);
std::atomic<uint64_t> writedone(0);
+
+#if 1
+ const int kPower = 0;
+ typedef AtomicQueue<kPower> Rcb;
+
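+    // Single push/pop sanity check on a local queue; some head/tail index assertions are skipped when kPower == 0.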
+ Rcb tmp;
+ // BOOST_CHECK(tmp.like_empty());
+ BOOST_CHECK(tmp.push(1));
+ if (kPower != 0) {
+ BOOST_CHECK(tmp.tail() == 1);
+ }
+ BOOST_CHECK(tmp.head() == 0);
+ int64_t d;
+ BOOST_CHECK(tmp.pop(d));
+ if (kPower != 0) {
+ // BOOST_CHECK(tmp.like_empty());
+ BOOST_CHECK(tmp.head() == 1);
+ BOOST_CHECK(tmp.tail() == 1);
+ }
+
+ ShmObject<Rcb> rcb(shm, "test_rcb");
+ bool try_more = true;
+
auto Writer = [&]() {
uint64_t n = 0;
while ((n = nwrite++) < nmsg) {
- while (!rcb->push_back(n, try_more)) {
+ while (!rcb->push(n, try_more)) {
// MySleep();
}
++writedone;
@@ -73,7 +103,7 @@
auto Reader = [&]() {
while (nread.load() < nmsg) {
int64_t d;
- if (rcb->pop_front(d, try_more)) {
+ if (rcb->pop(d, try_more)) {
++nread;
total += d;
} else {
@@ -81,6 +111,58 @@
}
}
};
+
+#else
+ typedef Circular<int64_t> Rcb;
+ ShmObject<Rcb> rcb(shm, "test_rcb", 16, shm.get_segment_manager());
+
+ typedef FMutex Mutex;
+ // typedef SemMutex Mutex;
+ Mutex mtx(123);
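+    // Lock-protected variant: every push/pop on the Circular buffer is serialized by a robust Guard on the mutex.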
+ auto Writer = [&]() {
+ uint64_t n = 0;
+ while ((n = nwrite++) < nmsg) {
+ auto Write = [&]() {
+ robust::Guard<Mutex> lk(mtx);
+ if (rcb->full()) {
+ return false;
+ } else {
+ rcb->push_back(n);
+ return true;
+ }
+ // return rcb->push_back(n);
+ };
+ while (!Write()) {
+ // MySleep();
+ }
+ ++writedone;
+ }
+ };
+ std::atomic<uint64_t> nread(0);
+ auto Reader = [&]() {
+ while (nread.load() < nmsg) {
+ int64_t d;
+ auto Read = [&]() {
+ robust::Guard<Mutex> lk(mtx);
+ if (rcb->empty()) {
+ return false;
+ } else {
+ d = rcb->front();
+ rcb->pop_front();
+ return true;
+ }
+ // return rcb->pop_front(d);
+ };
+ if (Read()) {
+ ++nread;
+ total += d;
+ } else {
+ // MySleep();
+ }
+ }
+ };
+
+#endif
auto status = [&]() {
auto next = steady_clock::now();
@@ -102,7 +184,8 @@
{
ThreadManager threads;
boost::timer::auto_cpu_timer timer;
- printf("Testing Robust Buffer, msgs %ld, queue size: %d, threads: %d \n", nmsg, Rcb::capacity, nthread);
+ // printf("Testing Robust Buffer, msgs %ld, queue size: %d, threads: %d \n", nmsg, Rcb::capacity, nthread);
+ printf("Testing Robust Buffer, msgs %ld, queue size: %d, threads: %d \n", nmsg, 16, nthread);
for (int i = 0; i < nthread; ++i) {
threads.Launch(Reader);
threads.Launch(Writer);
@@ -116,7 +199,26 @@
BOOST_AUTO_TEST_CASE(MutexTest)
{
- typedef robust::Mutex RobustMutex;
+ {
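+        // Manual System V semaphore experiment: create a one-element set (key 100), post it 10 times with SEM_UNDO, sleep, then return early, skipping the rest of this test.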
+ int sem_id = semget(100, 1, 0666 | IPC_CREAT);
+ auto P = [&]() {
+ sembuf op = {0, -1, SEM_UNDO};
+ semop(sem_id, &op, 1);
+ };
+ auto V = [&]() {
+ sembuf op = {0, 1, SEM_UNDO};
+ semop(sem_id, &op, 1);
+ };
+ for (int i = 0; i < 10; ++i) {
+ V();
+ }
+ Sleep(10s);
+
+ return;
+ }
+
+ // typedef robust::MFMutex RobustMutex;
+ typedef robust::SemMutex RobustMutex;
for (int i = 0; i < 20; ++i) {
int size = i;
@@ -131,40 +233,61 @@
const std::string mtx_name("test_mutex");
const std::string int_name("test_int");
- auto mtx = shm.FindOrCreate<RobustMutex>(mtx_name);
+ // auto mtx = shm.FindOrCreate<RobustMutex>(mtx_name, 12345);
+ RobustMutex rmtx(12345);
+ auto mtx = &rmtx;
auto pi = shm.FindOrCreate<int>(int_name, 100);
std::mutex m;
typedef std::chrono::steady_clock Clock;
- auto Now = []() { return Clock::now().time_since_epoch(); };
+
if (pi) {
auto old = *pi;
printf("int : %d, add1: %d\n", old, ++*pi);
}
- {
- const int ntimes = 1000 * 1000;
- RobustMutex mutex;
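+    // Benchmark helper: time ntimes lock/unlock iterations on the given mutex with 1, 4, 16, 100 and 1000 threads.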
+ auto LockSpeed = [](auto &mutex, const std::string &name) {
+ const int ntimes = 1000 * 1;
auto Lock = [&]() {
for (int i = 0; i < ntimes; ++i) {
mutex.lock();
mutex.unlock();
}
};
-
+ printf("\nTesting %s lock/unlock %d times\n", name.c_str(), ntimes);
{
boost::timer::auto_cpu_timer timer;
- printf("test lock/unlock %d times: ", ntimes);
+ printf("1 thread: ");
Lock();
}
- {
+ auto InThread = [&](int nthread) {
boost::timer::auto_cpu_timer timer;
- printf("test lock/unlock %d times, 2 thread: ", ntimes);
- std::thread t1(Lock), t2(Lock);
- t1.join();
- t2.join();
- }
- }
+ printf("%d threads: ", nthread);
+ std::vector<std::thread> vt;
+ for (int i = 0; i < nthread; ++i) {
+ vt.emplace_back(Lock);
+ }
+ for (auto &t : vt) {
+ t.join();
+ }
+ };
+ InThread(4);
+ InThread(16);
+ InThread(100);
+ InThread(1000);
+ };
+ NullMutex null_mtx;
+ std::mutex std_mtx;
+ CasMutex cas_mtx;
+ FMutex mfmtx(3);
+ boost::interprocess::interprocess_mutex ipc_mutex;
+ SemMutex sem_mtx(3);
+ LockSpeed(null_mtx, "null mutex");
+ LockSpeed(std_mtx, "std::mutex");
+ // LockSpeed(cas_mtx, "CAS mutex");
+ LockSpeed(ipc_mutex, "boost ipc mutex");
+ LockSpeed(mfmtx, "mutex+flock");
+ LockSpeed(sem_mtx, "sem mutex");
auto TryLock = [&]() {
if (mtx->try_lock()) {
@@ -183,6 +306,7 @@
if (mtx) {
printf("mtx exists\n");
if (TryLock()) {
+ // Sleep(10s);
auto op = [&]() {
if (TryLock()) {
Unlock();
--
Gitblit v1.8.0