summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLu Qiuwen <[email protected]>2023-08-29 18:18:54 +0800
committerLu Qiuwen <[email protected]>2023-08-30 14:52:00 +0800
commit5e5afdd30f9274038a3ed6130a8f163cd4f4391b (patch)
tree2b16e669f6865b1ca5fcd05cd12c4bfb172e0ff7
parent3b9d656d676b3501894d691f55f6513f994ad237 (diff)
vnode支持共享队列以提高对突发流量的缓冲能力。v4.6.48-20230830
-rw-r--r--infra/include/vnode.h4
-rw-r--r--infra/src/vnode_common.c87
-rw-r--r--infra/src/vnode_common.h21
-rw-r--r--infra/src/vnode_mirror.c109
-rw-r--r--infra/test/TestVNode.cc253
-rw-r--r--service/include/sc_vdev.h4
-rw-r--r--service/src/devmgr.c70
-rw-r--r--service/src/vdata.c98
8 files changed, 409 insertions, 237 deletions
diff --git a/infra/include/vnode.h b/infra/include/vnode.h
index 0728270..0593b22 100644
--- a/infra/include/vnode.h
+++ b/infra/include/vnode.h
@@ -147,8 +147,8 @@ int vnode_mirror_enqueue_bulk(struct vnode_prod * prod,
int vnode_mirror_dequeue_burst(struct vnode_cons * cons,
unsigned int consq, struct rte_mbuf * objects[], int nr_max_objects);
-struct vnode * vnode_mirror_create(const char * sym, unsigned int sz_tunnel, unsigned int sz_tunnel_buffer,
- struct rte_mempool * indirect_pool, unsigned int notify_cons_when_rx,
+struct vnode * vnode_mirror_create(const char * sym, unsigned int sz_exclusive, unsigned int sz_shared,
+ unsigned int sz_buffer, unsigned int notify_cons_when_rx,
unsigned int batch_interval_us);
int vnode_mirror_delete(struct vnode * vnode);
diff --git a/infra/src/vnode_common.c b/infra/src/vnode_common.c
index f7a51a8..1f077aa 100644
--- a/infra/src/vnode_common.c
+++ b/infra/src/vnode_common.c
@@ -42,12 +42,15 @@
* Len = sizeof(nr_prodq) + sizeof(nr_consq) + sizeof(descs) * nr_prodq * nr_consq
*/
-static struct tunnel_desc * tunnel_new(const char * symbol, unsigned int size, unsigned int sz_buffer)
+static struct tunnel_desc * tunnel_new(const char * symbol, unsigned int sz_exclusive, unsigned int sz_shared,
+ unsigned int sz_buffer, rte_atomic32_t * shared_counter)
{
struct tunnel_desc * desc = ZMALLOC(sizeof(struct tunnel_desc));
MR_VERIFY_MALLOC(desc);
- desc->tunnel_object = rte_ring_create(symbol, size, SOCKET_ID_ANY, RING_F_SC_DEQ | RING_F_SP_ENQ);
+ desc->tunnel_object =
+ rte_ring_create(symbol, sz_exclusive + sz_shared, SOCKET_ID_ANY, RING_F_SC_DEQ | RING_F_SP_ENQ | RING_F_EXACT_SZ);
+
if (desc->tunnel_object == NULL)
{
MR_ERROR("Create tunnel %s failed : %s", symbol, MR_STR_ERRNO(errno));
@@ -55,13 +58,11 @@ static struct tunnel_desc * tunnel_new(const char * symbol, unsigned int size, u
}
snprintf(desc->symbol, sizeof(desc->symbol), "%s", symbol);
- desc->tunnel_size = size;
+ desc->tunnel_size = sz_exclusive + sz_shared;
- // 禁用buffer,就是让buffer为1,写入一个后立即发出
- if (sz_buffer == 0)
- {
- sz_buffer = 1;
- }
+ desc->tunnel_exclusive_size = sz_exclusive;
+ desc->shared_credict_counter = shared_counter;
+ desc->shared_credict_used = 0;
desc->en_buffer = ZMALLOC(sizeof(void *) * MR_LIBVNODE_MAX_SZ_BURST);
MR_VERIFY_MALLOC(desc->en_buffer);
@@ -122,41 +123,10 @@ static int tunnel_block_delete(struct tunnel_block * block)
return 0;
}
-static void tunnel_unpoison(struct tunnel_desc * desc)
-{
- MR_ASAN_UNPOISON_MEMORY_REGION(desc, sizeof(struct tunnel_desc));
- MR_ASAN_UNPOISON_MEMORY_REGION(desc->en_buffer, sizeof(void *) * desc->sz_en_buffer);
-
- MR_ASAN_UNPOISON_MEMORY_REGION(desc->tunnel_object, sizeof(struct rte_ring));
- ssize_t sz_tunnel_object = rte_ring_get_memsize(rte_ring_get_size(desc->tunnel_object));
- assert(sz_tunnel_object >= 0);
- MR_ASAN_UNPOISON_MEMORY_REGION(desc->tunnel_object, (size_t)sz_tunnel_object);
-}
-
-static void tunnel_block_unpoison(struct tunnel_block * block)
-{
- /* Tunnel-Block Header */
- MR_ASAN_UNPOISON_MEMORY_REGION(block, sizeof(struct tunnel_block));
-
- /* Tunnel-Block Body */
- void * __tunnel_block_body = (void *)block + sizeof(struct tunnel_block);
- size_t __tunnel_block_body_size = sizeof(struct tunnel_desc *) * (block->nr_prodq * block->nr_consq);
- MR_ASAN_UNPOISON_MEMORY_REGION(__tunnel_block_body, __tunnel_block_body_size);
-
- /* Tunnel Objects */
- for (unsigned int prodq_id = 0; prodq_id < block->nr_prodq; prodq_id++)
- {
- for (unsigned int consq_id = 0; consq_id < block->nr_consq; consq_id++)
- {
- struct tunnel_desc * tunnel_desc = *tunnel_block_locate(block, prodq_id, consq_id);
- tunnel_unpoison(tunnel_desc);
- }
- }
-}
-
/* Alloc a block of tunnels, and init all the tunnels */
-static struct tunnel_block * tunnel_block_new(const char * symbol,
- struct vnode_prod * prod, struct vnode_cons * cons, unsigned int tunnel_size, unsigned int tun_sz_buffer)
+static struct tunnel_block * tunnel_block_new(const char * symbol, struct vnode_prod * prod, struct vnode_cons * cons,
+ unsigned int sz_exclusive, unsigned int sz_shared,
+ unsigned int sz_buffer, rte_atomic32_t * shared_counter)
{
unsigned int nr_prodq = prod->nr_prodq;
unsigned int nr_consq = cons->nr_consq;
@@ -175,25 +145,25 @@ static struct tunnel_block * tunnel_block_new(const char * symbol,
// create tunnel for each prodq and consq in block
for (int prodq_id = 0; prodq_id < nr_prodq; prodq_id++)
- {
- for (int consq_id = 0; consq_id < nr_consq; consq_id++)
- {
- char tunnel_sym[MR_SYMBOL_MAX];
- snprintf(tunnel_sym, sizeof(tunnel_sym), "%s-%d-%d",
- symbol, prodq_id, consq_id);
+ {
+ for (int consq_id = 0; consq_id < nr_consq; consq_id++)
+ {
+ char tunnel_sym[MR_SYMBOL_MAX];
+ snprintf(tunnel_sym, sizeof(tunnel_sym), "%s-%d-%d", symbol, prodq_id, consq_id);
- struct tunnel_desc * tdesc = tunnel_new(tunnel_sym, tunnel_size, tun_sz_buffer);
- if (tdesc == NULL) goto err;
+ struct tunnel_desc * tdesc = tunnel_new(tunnel_sym, sz_exclusive, sz_shared, sz_buffer, shared_counter);
+ if (tdesc == NULL)
+ goto err;
- *tunnel_block_locate(block, prodq_id, consq_id) = tdesc;
- }
- }
+ *tunnel_block_locate(block, prodq_id, consq_id) = tdesc;
+ }
+ }
return block;
err:
MR_ERROR("Create tunnel block %s failed, tunnel size = %d, tunnel buffer = %d",
- symbol, tunnel_size, tun_sz_buffer);
+ symbol, sz_exclusive, sz_buffer);
if (block) tunnel_block_delete(block);
return NULL;
}
@@ -209,7 +179,9 @@ static int do_producer_join_unsafe(struct vnode * vnode, struct vnode_prod * pro
snprintf(block_sym, sizeof(block_sym), "%s-%s-%s", vnode->symbol, prod->symbol, cons->symbol);
// create communication tunnel for each cons and prods
- block = tunnel_block_new(block_sym, prod, cons, vnode->sz_tunnel, vnode->sz_tunnel_buffer);
+ block = tunnel_block_new(block_sym, prod, cons, vnode->sz_tunnel, vnode->sz_shared, vnode->sz_tunnel_buffer,
+ &vnode->shared_credict_counter);
+
if (block == NULL)
{
goto error;
@@ -237,8 +209,9 @@ static int do_consumer_join_unsafe(struct vnode * vnode, struct vnode_cons * con
char block_sym[MR_SYMBOL_MAX];
snprintf(block_sym, sizeof(block_sym), "%s-%s-%s", vnode->symbol, prod->symbol, cons->symbol);
- // create commucation tunnel for each cons and prods
- block = tunnel_block_new(block_sym, prod, cons, vnode->sz_tunnel, vnode->sz_tunnel_buffer);
+ // create communication tunnel for each cons and prods
+ block = tunnel_block_new(block_sym, prod, cons, vnode->sz_tunnel, vnode->sz_shared, vnode->sz_tunnel_buffer,
+ &vnode->shared_credict_counter);
if (block == NULL)
goto error;
diff --git a/infra/src/vnode_common.h b/infra/src/vnode_common.h
index 466760a..c089bf8 100644
--- a/infra/src/vnode_common.h
+++ b/infra/src/vnode_common.h
@@ -12,6 +12,12 @@
#define VNODE_STAT_ENABLE 1
#endif
+struct shared_credict_counter
+{
+ rte_spinlock_t lock;
+ unsigned int counter;
+};
+
/* Tunnel Description Structure */
struct tunnel_desc
{
@@ -24,6 +30,9 @@ struct tunnel_desc
struct rte_ring * tunnel_object;
/* Tunnel Size */
unsigned int tunnel_size;
+ /* tunnel exclusive size */
+ unsigned int tunnel_exclusive_size;
+
/* Tunnel Enqueue Buffer */
struct rte_mbuf ** en_buffer;
@@ -38,6 +47,10 @@ struct tunnel_desc
unsigned int sz_en_buffer;
/* Tunnel Enqueue Buffer Used */
unsigned int sz_en_buffer_used;
+ /* shared tunnel use */
+ unsigned int shared_credict_used;
+ /* counter */
+ rte_atomic32_t * shared_credict_counter;
#if 0
/* Tunnel Enqueue Buffer Returned */
@@ -110,16 +123,16 @@ struct vnode
unsigned int sz_tunnel;
/* Tunnel Enqueue Buffer Size */
unsigned int sz_tunnel_buffer;
+ /* shared credict */
+ unsigned int sz_shared;
/* allow to notify cons when packet arrived. */
unsigned int notify_cons_when_rx;
/* batch interval */
unsigned int batch_interval_tsc;
- /* Spinlock */
/* Guarantees one operator(consumer or producer, create or destroy) a time */
- rte_spinlock_t lock;
- /* Private */
- void * priv;
+ rte_spinlock_t lock __rte_cache_aligned;
+ rte_atomic32_t shared_credict_counter __rte_cache_aligned;
};
diff --git a/infra/src/vnode_mirror.c b/infra/src/vnode_mirror.c
index 4939bf2..e878bbd 100644
--- a/infra/src/vnode_mirror.c
+++ b/infra/src/vnode_mirror.c
@@ -17,46 +17,49 @@
#include "vnode_common.h"
-struct __vnode_priv_mirror
-{
- struct rte_mempool * clone;
-};
-
static inline void dist_tunnel_flush(struct vnode_prod * prod, struct vnode_cons * cons, unsigned int prodq,
unsigned int consq, struct tunnel_desc * desc)
{
- unsigned int n_to_send = desc->sz_en_buffer_used;
- if (n_to_send == 0)
+ struct vnode_cons_notify * cons_notify_ctx;
+
+ /* nothing to send */
+ if (desc->sz_en_buffer_used == 0)
{
return;
}
-#if 0
- struct vnode * vnode = prod->vnode;
- if (n_to_send < desc->sz_en_buffer)
+ unsigned int nr_ring_count = rte_ring_count(desc->tunnel_object);
+ unsigned int nr_ring_to_use = nr_ring_count + desc->sz_en_buffer_used;
+ unsigned int nr_shared_credict = 0;
+
+ if (nr_ring_to_use > desc->tunnel_exclusive_size)
{
- uint64_t tsc_now = rte_get_timer_cycles();
- uint64_t tsc_delta = tsc_now - desc->tsc_prev;
+ /* need to apply shared credict */
+ nr_shared_credict = nr_ring_to_use - desc->tunnel_exclusive_size;
+ uint32_t cur_value = rte_atomic32_read(desc->shared_credict_counter);
- if (tsc_delta < vnode->batch_interval_tsc)
+ while (1)
{
- return;
+ uint32_t new_value = cur_value > nr_shared_credict ? cur_value - nr_shared_credict : 0;
+ if (rte_atomic32_cmpset((volatile uint32_t *)desc->shared_credict_counter, cur_value, new_value))
+ {
+ nr_shared_credict = cur_value > new_value ? cur_value - new_value : 0;
+ break;
+ }
}
+
+ desc->shared_credict_used += nr_shared_credict;
}
-#endif
- size_t n_send_len = 0;
- struct vnode_cons_notify * cons_notify_ctx;
+ unsigned int n_can_send = desc->tunnel_exclusive_size + nr_shared_credict - nr_ring_count;
+ unsigned int n_to_send = RTE_MIN(desc->sz_en_buffer_used, n_can_send);
+ size_t n_send_len = 0;
for (unsigned int k = 0; k < n_to_send; k++)
{
n_send_len += rte_pktmbuf_data_len(desc->en_buffer[k]);
}
-#if 0
- desc->tsc_prev = rte_get_timer_cycles();
-#endif
-
for (unsigned int k = 0; k < n_to_send; k++)
{
struct rte_mbuf * mbuf = desc->en_buffer[k];
@@ -65,7 +68,7 @@ static inline void dist_tunnel_flush(struct vnode_prod * prod, struct vnode_cons
rte_cldemote(rte_mbuf_data_addr_default(mbuf));
/* flush the mbuf's data first, and then flush the metadata of mbufs */
- /* because to know where is mbuf's data we need to access mbuf's first cache line */
+ /* because to know where is mbuf's data, we need to access mbuf's first cache line */
rte_cldemote(mbuf);
rte_cldemote(RTE_PTR_ADD(mbuf, RTE_CACHE_LINE_SIZE));
rte_cldemote(RTE_PTR_ADD(mbuf, RTE_CACHE_LINE_SIZE * 2));
@@ -74,10 +77,12 @@ static inline void dist_tunnel_flush(struct vnode_prod * prod, struct vnode_cons
unsigned int n_send = rte_ring_sp_enqueue_burst(desc->tunnel_object, (void **)desc->en_buffer, n_to_send, NULL);
/* 没有丢包 */
- if (likely(n_send == n_to_send))
+ if (likely(n_send == desc->sz_en_buffer_used))
+ {
goto out;
+ }
- for (unsigned int k = n_send; k < n_to_send; k++)
+ for (unsigned int k = n_send; k < desc->sz_en_buffer_used; k++)
{
struct rte_mbuf * object_to_be_free = desc->en_buffer[k];
n_send_len -= rte_pktmbuf_data_len(object_to_be_free);
@@ -153,26 +158,16 @@ out:
static inline int dist_tunnel_dequeue(struct tunnel_desc * desc, void * obj, int nr_max_obj)
{
-#if RTE_VERSION >= RTE_VERSION_NUM(17, 5, 0, 0)
- return rte_ring_sc_dequeue_burst(desc->tunnel_object, obj, nr_max_obj, NULL);
-#else
- return rte_ring_sc_dequeue_burst(desc->tunnel_object, obj, nr_max_obj);
-#endif
-}
-
-static inline void dist_tunnel_enqueue_without_buffer(struct tunnel_desc * desc, void * obj)
-{
- assert(desc->en_buffer == NULL && desc->sz_en_buffer == 0);
+ unsigned int nr_deq = rte_ring_sc_dequeue_burst(desc->tunnel_object, obj, nr_max_obj, NULL);
-#if RTE_VERSION >= RTE_VERSION_NUM(17, 5, 0, 0)
- int ret = rte_ring_sp_enqueue_burst(desc->tunnel_object, &obj, 1, NULL);
-#else
- int ret = rte_ring_sp_enqueue_burst(desc->tunnel_object, &obj, 1);
-#endif
+ unsigned int shared_credict_to_release = RTE_MIN(nr_deq, desc->shared_credict_used);
+ if (shared_credict_to_release > 0)
+ {
+ rte_atomic32_add(desc->shared_credict_counter, (int32_t)desc->shared_credict_used);
+ desc->shared_credict_used -= shared_credict_to_release;
+ }
- if (unlikely(ret != 1))
- PROTECT_rte_pktmbuf_free(obj);
- return;
+ return (int)nr_deq;
}
// Tunnel Block Data Operation, send objects according for their hash value,
@@ -321,34 +316,26 @@ static inline int __mirror_clone_objects(struct rte_mempool * clone_pool, struct
int vnode_mirror_enqueue_bulk(struct vnode_prod * prod, unsigned int prodq, struct rte_mbuf * objects[],
uint32_t hash[], int nr_objects)
{
- int ret = 0;
assert(nr_objects <= MR_LIBVNODE_MAX_SZ_BURST);
struct tunnel_block * block = prod->block;
- if (likely(block != NULL))
- {
- dist_tunnel_block_enqueue_with_hash(block, prodq, objects, hash, nr_objects);
- }
- else
+ if (unlikely(block == NULL))
{
goto failure;
}
- ret = 0;
- goto out;
+ dist_tunnel_block_enqueue_with_hash(block, (int)prodq, objects, hash, nr_objects);
+ return 0;
failure:
for (int i = 0; i < nr_objects; i++)
{
- PROTECT_rte_pktmbuf_free(objects[i]);
+ rte_pktmbuf_free(objects[i]);
}
VNODE_STAT_UPDATE(prod, prodq, on_line, nr_objects);
VNODE_STAT_UPDATE(prod, prodq, missed, nr_objects);
return 0;
-
-out:
- return ret;
}
int vnode_mirror_dequeue_burst(struct vnode_cons * cons, unsigned int consq, struct rte_mbuf * objects[],
@@ -374,11 +361,11 @@ void vnode_mirror_flush(struct vnode_prod * prod, unsigned int prodq)
}
}
-struct vnode * vnode_mirror_create(const char * sym, unsigned int sz_tunnel, unsigned int sz_tunnel_buffer,
- struct rte_mempool * indirect_pool, unsigned int notify_cons_when_rx,
+struct vnode * vnode_mirror_create(const char * sym, unsigned int sz_exclusive, unsigned int sz_shared,
+ unsigned int sz_buffer, unsigned int notify_cons_when_rx,
unsigned int batch_interval_us)
{
- struct vnode * vnode_common = __vnode_common_create(sym, sz_tunnel, sz_tunnel_buffer, notify_cons_when_rx);
+ struct vnode * vnode_common = __vnode_common_create(sym, sz_exclusive, sz_buffer, notify_cons_when_rx);
if (vnode_common == NULL)
{
@@ -387,24 +374,20 @@ struct vnode * vnode_mirror_create(const char * sym, unsigned int sz_tunnel, uns
}
vnode_common->batch_interval_tsc = batch_interval_us * rte_get_timer_cycles() / US_PER_S;
- vnode_common->priv = ZMALLOC(sizeof(struct __vnode_priv_mirror));
- MR_VERIFY_MALLOC(vnode_common->priv);
+ vnode_common->sz_shared = sz_shared;
- ((struct __vnode_priv_mirror *)(vnode_common->priv))->clone = indirect_pool;
+ rte_atomic32_set(&vnode_common->shared_credict_counter, (int32_t)sz_shared);
return vnode_common;
}
int vnode_mirror_delete(struct vnode * vnode)
{
- if (vnode->priv)
- rte_free(vnode->priv);
return __vnode_common_delete(vnode);
}
void vnode_mirror_common_unpoison(struct vnode * vnode)
{
__vnode_common_unpoison(vnode);
- MR_ASAN_UNPOISON_MEMORY_REGION(vnode->priv, sizeof(struct __vnode_priv_mirror));
}
__USE_COMMON_VNODE_CREATE_PROD(mirror)
diff --git a/infra/test/TestVNode.cc b/infra/test/TestVNode.cc
index 4609979..a72bb78 100644
--- a/infra/test/TestVNode.cc
+++ b/infra/test/TestVNode.cc
@@ -39,7 +39,7 @@ class TestCaseVNodeQueue : public TestCaseVNode
void SetUp() override
{
- vnode_ = vnode_mirror_create("m-vnode", 1024, 32, nullptr, 0, 0);
+ vnode_ = vnode_mirror_create("m-vnode", 1024, 0, 32, 0, 0);
ASSERT_NE(vnode_, nullptr);
assert(prod_ == nullptr);
@@ -82,7 +82,7 @@ struct rte_mempool * TestCaseVNode::pktmbuf_pool_ = nullptr;
TEST_F(TestCaseVNode, CreateAndDeleteInEmptyNode)
{
- struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 1024, 32, nullptr, 0, 0);
+ struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 1024, 0, 32, 0, 0);
EXPECT_NE(vnode_ptr, nullptr);
int ret = vnode_mirror_delete(vnode_ptr);
@@ -91,7 +91,7 @@ TEST_F(TestCaseVNode, CreateAndDeleteInEmptyNode)
TEST_F(TestCaseVNode, CreateAndDelete)
{
- struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 1024, 32, nullptr, 0, 0);
+ struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 1024, 0, 32, 0, 0);
ASSERT_NE(vnode_ptr, nullptr);
struct vnode_prod * prod;
@@ -109,7 +109,7 @@ TEST_F(TestCaseVNode, CreateAndDelete)
TEST_F(TestCaseVNode, CreateAndDeleteMultiThread)
{
- struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 1024, 32, nullptr, 0, 0);
+ struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 1024, 0, 32, 0, 0);
ASSERT_NE(vnode_ptr, nullptr);
/* create multiple thread and run them at same time */
@@ -141,7 +141,7 @@ TEST_F(TestCaseVNode, CreateAndDeleteMultiThread)
TEST_F(TestCaseVNode, TestVNodeProdAndConsLookup)
{
- struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 1024, 32, nullptr, 0, 0);
+ struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 1024, 0, 32, 0, 0);
ASSERT_NE(vnode_ptr, nullptr);
struct vnode_prod * prod;
@@ -164,7 +164,7 @@ TEST_F(TestCaseVNode, TestVNodeProdAndConsLookup)
TEST_F(TestCaseVNode, TestVNodeEnqueue)
{
- struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 1024, 32, nullptr, 0, 0);
+ struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 1024, 0, 32, 0, 0);
ASSERT_NE(vnode_ptr, nullptr);
struct vnode_prod * prod;
@@ -209,9 +209,246 @@ TEST_F(TestCaseVNode, TestVNodeEnqueue)
vnode_mirror_delete(vnode_ptr);
}
+TEST_F(TestCaseVNode, TestVNodeMultipleThreadEnqueueUseSharedCredict)
+{
+ /* create multiple thread */
+ struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 32, 2048, 32, 0, 0);
+ ASSERT_NE(vnode_ptr, nullptr);
+
+ struct vnode_prod * prod;
+ struct vnode_cons * cons;
+
+ prod = vnode_mirror_create_prod(vnode_ptr, "prod", 4);
+ ASSERT_NE(prod, nullptr);
+
+ cons = vnode_mirror_create_cons(vnode_ptr, "cons", 1);
+ ASSERT_NE(cons, nullptr);
+
+ constexpr unsigned int TEST_MBUFS_COUNT = 2048;
+ struct rte_mbuf * enq_objs[TEST_MBUFS_COUNT] = {};
+
+ int ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool_, enq_objs, RTE_DIM(enq_objs));
+ ASSERT_EQ(ret, 0);
+
+ uint32_t enq_hashs[TEST_MBUFS_COUNT] = {};
+ for (unsigned int i = 0; i < RTE_DIM(enq_hashs); i++)
+ {
+ enq_hashs[i] = 0x4d5a;
+ }
+
+ std::thread test_thread_1([&prod, &enq_objs, &enq_hashs] {
+ int enq_ret = vnode_mirror_enqueue_bulk(prod, 0, enq_objs, enq_hashs, 512);
+ EXPECT_EQ(enq_ret, 0);
+ });
+
+ std::thread test_thread_2([&prod, &enq_objs, &enq_hashs] {
+ int enq_ret = vnode_mirror_enqueue_bulk(prod, 1, enq_objs + 512, enq_hashs, 512);
+ EXPECT_EQ(enq_ret, 0);
+ });
+
+ std::thread test_thread_3([&prod, &enq_objs, &enq_hashs] {
+ int enq_ret = vnode_mirror_enqueue_bulk(prod, 2, enq_objs + 1024, enq_hashs, 512);
+ EXPECT_EQ(enq_ret, 0);
+ });
+
+ std::thread test_thread_4([&prod, &enq_objs, &enq_hashs] {
+ int enq_ret = vnode_mirror_enqueue_bulk(prod, 3, enq_objs + 1536, enq_hashs, 512);
+ EXPECT_EQ(enq_ret, 0);
+ });
+
+ test_thread_1.join();
+ test_thread_2.join();
+ test_thread_3.join();
+ test_thread_4.join();
+
+ int deq_ret = vnode_mirror_dequeue_burst(cons, 0, enq_objs, RTE_DIM(enq_objs));
+ EXPECT_EQ(deq_ret, 2048);
+
+ rte_pktmbuf_free_bulk(enq_objs, deq_ret);
+ vnode_mirror_delete(vnode_ptr);
+}
+
+TEST_F(TestCaseVNode, TestVNodeEnqueueAndDequeueUseSharedCredict)
+{
+ struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 32, 512, 32, 0, 0);
+ ASSERT_NE(vnode_ptr, nullptr);
+
+ struct vnode_prod * prod;
+ struct vnode_cons * cons;
+
+ prod = vnode_mirror_create_prod(vnode_ptr, "prod", 1);
+ ASSERT_NE(prod, nullptr);
+
+ cons = vnode_mirror_create_cons(vnode_ptr, "cons", 1);
+ ASSERT_NE(cons, nullptr);
+
+ constexpr unsigned int TEST_MBUFS_COUNT = 2048;
+ struct rte_mbuf * enq_objs[TEST_MBUFS_COUNT] = {};
+ struct rte_mbuf * deq_objs[TEST_MBUFS_COUNT] = {};
+
+ int ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool_, enq_objs, RTE_DIM(enq_objs));
+ ASSERT_EQ(ret, 0);
+
+ uint32_t enq_hashs[TEST_MBUFS_COUNT] = {};
+
+ for (unsigned int i = 0; i < RTE_DIM(enq_hashs); i++)
+ {
+ enq_hashs[i] = 0x4d5a;
+ }
+
+ /* first 512 mbufs, use the exclusive credit */
+ int enq_ret = vnode_mirror_enqueue_bulk(prod, 0, enq_objs, enq_hashs, 512);
+ EXPECT_EQ(enq_ret, 0);
+
+ /* second 512 mbufs, use the shared credict */
+ int enq_ret_2 = vnode_mirror_enqueue_bulk(prod, 0, enq_objs + 512, enq_hashs, 512);
+ EXPECT_EQ(enq_ret_2, 0);
+
+ /* until here, we have 512 + 32 credict, so only 544 mbufs can be enqueue. */
+ int deq_ret_1 = vnode_mirror_dequeue_burst(cons, 0, deq_objs, RTE_DIM(deq_objs));
+ EXPECT_EQ(deq_ret_1, 544);
+
+ /* free these mbufs */
+ rte_pktmbuf_free_bulk(deq_objs, deq_ret_1);
+
+ /* put another 512 mbufs */
+ int enq_ret_3 = vnode_mirror_enqueue_bulk(prod, 0, enq_objs + 1024, enq_hashs, 512);
+ EXPECT_EQ(enq_ret_3, 0);
+
+ int enq_ret_4 = vnode_mirror_enqueue_bulk(prod, 0, enq_objs + 1536, enq_hashs, 512);
+ EXPECT_EQ(enq_ret_4, 0);
+
+ int deq_ret_2 = vnode_mirror_dequeue_burst(cons, 0, deq_objs, RTE_DIM(deq_objs));
+ EXPECT_EQ(deq_ret_2, 544);
+
+ rte_pktmbuf_free_bulk(deq_objs, deq_ret_2);
+ vnode_mirror_delete(vnode_ptr);
+}
+
+TEST_F(TestCaseVNode, TestVNodeMultipleQueueUseSharedCredict)
+{
+ struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 32, 512, 32, 0, 0);
+ ASSERT_NE(vnode_ptr, nullptr);
+
+ struct vnode_prod * prod;
+ struct vnode_cons * cons;
+
+ prod = vnode_mirror_create_prod(vnode_ptr, "prod", 2);
+ ASSERT_NE(prod, nullptr);
+
+ cons = vnode_mirror_create_cons(vnode_ptr, "cons", 1);
+ ASSERT_NE(cons, nullptr);
+
+ /* queue 1 */
+ struct rte_mbuf * enq_objs_q1_exclusive[32] = {};
+ struct rte_mbuf * enq_objs_q1_shared[32] = {};
+
+ /* queue 2 */
+ struct rte_mbuf * enq_objs_q2_exclusive[32] = {};
+ struct rte_mbuf * enq_objs_q2_shared[32] = {};
+
+ /* alloc mbufs */
+ int ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool_, enq_objs_q1_exclusive, RTE_DIM(enq_objs_q1_exclusive));
+ ASSERT_EQ(ret, 0);
+
+ ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool_, enq_objs_q1_shared, RTE_DIM(enq_objs_q1_shared));
+ ASSERT_EQ(ret, 0);
+
+ ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool_, enq_objs_q2_exclusive, RTE_DIM(enq_objs_q2_exclusive));
+ ASSERT_EQ(ret, 0);
+
+ ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool_, enq_objs_q2_shared, RTE_DIM(enq_objs_q2_shared));
+ ASSERT_EQ(ret, 0);
+
+ /* enqueue */
+ uint32_t enq_hashs[32] = {};
+ for (unsigned int i = 0; i < RTE_DIM(enq_hashs); i++)
+ {
+ enq_hashs[i] = 0x4d5a;
+ }
+
+ /* first 32 mbufs, use the exclusive credit */
+ int enq_ret = vnode_mirror_enqueue_bulk(prod, 0, enq_objs_q1_exclusive, enq_hashs, 32);
+ EXPECT_EQ(enq_ret, 0);
+
+ /* second 32 mbufs, use the shared credict */
+ int enq_ret_2 = vnode_mirror_enqueue_bulk(prod, 0, enq_objs_q1_shared, enq_hashs, 32);
+ EXPECT_EQ(enq_ret_2, 0);
+
+ /* third 32 mbufs, use the exclusive credit */
+ int enq_ret_3 = vnode_mirror_enqueue_bulk(prod, 1, enq_objs_q2_exclusive, enq_hashs, 32);
+ EXPECT_EQ(enq_ret_3, 0);
+
+ /* fourth 32 mbufs, use the shared credict */
+ int enq_ret_4 = vnode_mirror_enqueue_bulk(prod, 1, enq_objs_q2_shared, enq_hashs, 32);
+ EXPECT_EQ(enq_ret_4, 0);
+
+ /* dequeue */
+ struct rte_mbuf * deq_objs[128] = {};
+ int deq_ret = vnode_mirror_dequeue_burst(cons, 0, deq_objs, RTE_DIM(deq_objs));
+ EXPECT_EQ(deq_ret, 128);
+
+ rte_pktmbuf_free_bulk(deq_objs, deq_ret);
+ vnode_mirror_delete(vnode_ptr);
+}
+
+TEST_F(TestCaseVNode, TestVNodeEnqueueUseSharedCredict)
+{
+ struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 512, 512, 32, 0, 0);
+ ASSERT_NE(vnode_ptr, nullptr);
+
+ struct vnode_prod * prod;
+ struct vnode_cons * cons;
+
+ prod = vnode_mirror_create_prod(vnode_ptr, "prod", 1);
+ ASSERT_NE(prod, nullptr);
+
+ cons = vnode_mirror_create_cons(vnode_ptr, "cons", 1);
+ ASSERT_NE(cons, nullptr);
+
+ constexpr unsigned int TEST_MBUFS_COUNT = 2048;
+ struct rte_mbuf * enq_objs[TEST_MBUFS_COUNT] = {};
+ struct rte_mbuf * deq_objs[TEST_MBUFS_COUNT] = {};
+
+ int ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool_, enq_objs, RTE_DIM(enq_objs));
+ ASSERT_EQ(ret, 0);
+
+ for (unsigned int i = 0; i < RTE_DIM(enq_objs); i++)
+ {
+ enq_objs[i]->hash.usr = 0x4d5a;
+ }
+
+ uint32_t enq_hashs[TEST_MBUFS_COUNT] = {};
+ for (unsigned int i = 0; i < RTE_DIM(enq_hashs); i++)
+ {
+ enq_hashs[i] = 0x4d5a;
+ }
+
+ /* first 512 mbufs, use the exclusive credit */
+ int enq_ret = vnode_mirror_enqueue_bulk(prod, 0, enq_objs, enq_hashs, 512);
+ EXPECT_EQ(enq_ret, 0);
+
+ /* second 512 mbufs, use the shared credict */
+ int enq_ret_2 = vnode_mirror_enqueue_bulk(prod, 0, enq_objs + 512, enq_hashs, 512);
+ EXPECT_EQ(enq_ret_2, 0);
+
+ /* third 512 mbufs should be rejected */
+ int enq_ret_3 = vnode_mirror_enqueue_bulk(prod, 0, enq_objs + 1024, enq_hashs, 512);
+ EXPECT_EQ(enq_ret_3, 0);
+
+ int enq_ret_4 = vnode_mirror_enqueue_bulk(prod, 0, enq_objs + 1536, enq_hashs, 512);
+ EXPECT_EQ(enq_ret_4, 0);
+
+ int deq_ret = vnode_mirror_dequeue_burst(cons, 0, deq_objs, RTE_DIM(deq_objs));
+ EXPECT_EQ(deq_ret, 1024);
+
+ rte_pktmbuf_free_bulk(deq_objs, deq_ret);
+ vnode_mirror_delete(vnode_ptr);
+}
+
TEST_F(TestCaseVNodeQueue, MultQueueEnqueue)
{
- struct vnode * vnode_ptr = vnode_mirror_create("vnode", 1024, 32, nullptr, 0, 0);
+ struct vnode * vnode_ptr = vnode_mirror_create("vnode", 1024, 0, 32, 0, 0);
ASSERT_NE(vnode_ptr, nullptr);
struct vnode_prod * prod;
@@ -291,7 +528,7 @@ TEST_F(TestCaseVNodeQueue, MultiQueueDequeue)
int main(int argc, char * argv[])
{
/* generate eal parameters */
- const char * eal_args[] = {"unittest", "-c", "0x1", "-n", "4", "--proc-type", "auto", "--no-huge"};
+ const char * eal_args[] = {"unittest", "-c", "0x1", "-n", "4", "--proc-type", "auto", "--no-huge", "-m", "2048"};
if (rte_eal_init(RTE_DIM(eal_args), (char **)eal_args) < 0)
{
return -1;
diff --git a/service/include/sc_vdev.h b/service/include/sc_vdev.h
index 8a0eb53..ce12e0c 100644
--- a/service/include/sc_vdev.h
+++ b/service/include/sc_vdev.h
@@ -137,8 +137,8 @@ void vdev_stats_last_save(struct vdev * vdev, struct vdev_stat_info * stat_info_
void vdev_stats_last_get(struct vdev * vdev, struct vdev_stat_info * stat_info_last);
int vdev_data_create(struct vdev_main * v_main, const char * symbol, unsigned int nr_rxstream, unsigned int nr_txstream,
- unsigned int sz_tunnel, unsigned int sz_buffer, unsigned int batch_interval_in_us,
- struct rte_mempool * direct_pool, struct rte_mempool * indirect_pool);
+ unsigned int sz_tunnel_rx_exclusive, unsigned int sz_tunnel_rx_shared, unsigned int sz_tunnel_tx,
+ unsigned int sz_buffer, unsigned int batch_interval_in_us, struct rte_mempool * direct_pool);
int vdev_loop_create(struct vdev_main * v_main, const char * symbol, unsigned int sz_tunnel, unsigned int sz_buffer,
struct rte_mempool * direct_pool, struct rte_mempool * indirect_pool);
diff --git a/service/src/devmgr.c b/service/src/devmgr.c
index e48d883..e46eb7a 100644
--- a/service/src/devmgr.c
+++ b/service/src/devmgr.c
@@ -453,31 +453,46 @@ static char ** gcfg_device_syms_get_by_drv(struct sc_main * sc, enum mr_dev_driv
return out_dev_symbols;
}
-void shmdev_config_load(struct devmgr_main * devmgr_main, const char * devsym, unsigned int * sz_tunnel,
- unsigned int * sz_buffer, unsigned int * batch_interval_in_us, char mempool_direct_pool[MR_SYMBOL_MAX],
- char mempool_indirect_pool[MR_SYMBOL_MAX])
+struct shmdev_config
{
- unsigned int default_sz_tunnel;
- unsigned int default_sz_buffer;
- unsigned int default_batch_interval_in_us;
+ unsigned int sz_tun_rx_exclusive;
+ unsigned int sz_tun_rx_shared;
+ unsigned int sz_tun_tx;
+
+ unsigned int sz_buffer;
+ unsigned int batch_interval_in_us;
+};
+void shmdev_config_load(struct devmgr_main * devmgr_main, const char * devsym, struct shmdev_config * cfg_out)
+{
const char * cfgfile = devmgr_main->sc->local_cfgfile;
+ /* load the old tunnel settings */
+ unsigned int default_sz_tunnel;
MESA_load_profile_uint_def(cfgfile, "device", "sz_tunnel", &default_sz_tunnel, 4096);
- MESA_load_profile_uint_def(cfgfile, "device", "sz_buffer", &default_sz_buffer, 32);
- MESA_load_profile_uint_def(cfgfile, "device", "batch_interval_tsc", &default_batch_interval_in_us, 50);
- char str_dev_section[MR_SYMBOL_MAX];
- snprintf(str_dev_section, sizeof(str_dev_section), "device:%s", devsym);
+ unsigned int default_sz_tun_rx_exclusive = default_sz_tunnel;
+ unsigned int default_sz_tun_tx = default_sz_tunnel;
+ unsigned int default_sz_tun_rx_shared = 0;
+
+ /* override configration */
+ MESA_load_profile_uint_def(cfgfile, "device", "sz_rx_tunnel", &default_sz_tun_rx_exclusive,
+ default_sz_tun_rx_exclusive);
+ MESA_load_profile_uint_def(cfgfile, "device", "sz_tx_tunnel", &default_sz_tun_tx, default_sz_tun_tx);
+ MESA_load_profile_uint_def(cfgfile, "device", "sz_rx_tunnel_shared", &default_sz_tun_rx_shared,
+ default_sz_tun_rx_shared);
- MESA_load_profile_uint_def(cfgfile, str_dev_section, "sz_tunnel", sz_tunnel, default_sz_tunnel);
- MESA_load_profile_uint_def(cfgfile, str_dev_section, "sz_buffer", sz_buffer, default_sz_buffer);
- MESA_load_profile_uint_def(cfgfile, str_dev_section, "batch_interval_tsc", batch_interval_in_us,
- default_batch_interval_in_us);
+ unsigned int default_sz_buffer;
+ unsigned int default_batch_interval_in_us;
+
+ MESA_load_profile_uint_def(cfgfile, "device", "sz_buffer", &default_sz_buffer, 32);
+ MESA_load_profile_uint_def(cfgfile, "device", "batch_interval_tsc", &default_batch_interval_in_us, 50);
- MESA_load_profile_string_def(cfgfile, str_dev_section, "direct-pool", mempool_direct_pool, MR_SYMBOL_MAX, devsym);
- MESA_load_profile_string_def(cfgfile, str_dev_section, "indirect-pool", mempool_indirect_pool, MR_SYMBOL_MAX,
- mempool_direct_pool);
+ cfg_out->sz_tun_rx_exclusive = default_sz_tun_rx_exclusive;
+ cfg_out->sz_tun_tx = default_sz_tun_tx;
+ cfg_out->sz_tun_rx_shared = default_sz_tun_rx_shared;
+ cfg_out->sz_buffer = default_sz_buffer;
+ cfg_out->batch_interval_in_us = default_batch_interval_in_us;
}
int shmdev_setup_one_device(struct devmgr_main * devmgr_main, const char * devsym)
@@ -492,21 +507,15 @@ int shmdev_setup_one_device(struct devmgr_main * devmgr_main, const char * devsy
return RT_ERR;
}
- /* TODO: check the name conflict */
- unsigned int sz_tunnel = UINT32_MAX;
- unsigned int sz_buffer = UINT32_MAX;
- unsigned int batch_interval_in_us = 30;
-
char sym_direct_mempool[MR_SYMBOL_MAX] = {0};
char sym_indirect_mempool[MR_SYMBOL_MAX] = {0};
- shmdev_config_load(devmgr_main, devsym, &sz_tunnel, &sz_buffer, &batch_interval_in_us, sym_direct_mempool, sym_indirect_mempool);
- assert(sz_tunnel != UINT32_MAX && sz_buffer != UINT32_MAX);
+ struct shmdev_config shmdev_config = {};
+ shmdev_config_load(devmgr_main, devsym, &shmdev_config);
struct sc_main * sc = devmgr_main->sc;
unsigned int nr_rxstream = CPU_COUNT(&dev_desc->rx_cpu_set);
unsigned int nr_txstream = CPU_COUNT(&dev_desc->tx_cpu_set);
-
assert(nr_rxstream > 0 && nr_txstream > 0);
/* Get indirect pool */
@@ -517,15 +526,10 @@ int shmdev_setup_one_device(struct devmgr_main * devmgr_main, const char * devsy
return RT_ERR;
}
- struct rte_mempool * indirect_pool = mrb_indirect_mempool_locate(sc->mrb_pool_main, sym_indirect_mempool, 0, 0);
- if (indirect_pool == NULL)
- {
- MR_ERROR("Indirect mempool %s for virtual device %s is not existed. ", sym_indirect_mempool, devsym);
- return RT_ERR;
- }
+ ret = vdev_data_create(sc->vdev_main, devsym, nr_rxstream, nr_txstream, shmdev_config.sz_tun_rx_exclusive,
+ shmdev_config.sz_tun_rx_shared, shmdev_config.sz_tun_tx,
+ shmdev_config.sz_buffer, shmdev_config.batch_interval_in_us, direct_pool);
- ret = vdev_data_create(sc->vdev_main, devsym, nr_rxstream, nr_txstream, sz_tunnel, sz_buffer, batch_interval_in_us, direct_pool,
- indirect_pool);
if (unlikely(ret < 0))
{
MR_ERROR("Failed at creating shmdev resources for device %s", devsym);
diff --git a/service/src/vdata.c b/service/src/vdata.c
index 0e8560c..e81558f 100644
--- a/service/src/vdata.c
+++ b/service/src/vdata.c
@@ -13,35 +13,6 @@
#include <sc_common.h>
#include <sc_vdev.h>
-/* ============================= Common Functions =========================================== */
-
-static int vdev_data_create_loop_buffer(const char * prefix, const char * devsym, unsigned int nr_stream,
- unsigned int sz_tunnel, struct rte_ring * object[])
-{
- for (int i = 0; i < nr_stream; i++)
- {
- char str_loop_buffer[MR_SYMBOL_MAX];
- snprintf(str_loop_buffer, sizeof(str_loop_buffer), "%s-%s-%d", prefix, devsym, i);
-
- object[i] = rte_ring_create(str_loop_buffer, sz_tunnel, SOCKET_ID_ANY, RING_F_SC_DEQ | RING_F_SP_ENQ);
-
- if (object[i] == NULL)
- {
- MR_WARNING("Create loop buffer for vdev %s(sym=%s, size=%d) failed. : %s\n", devsym, str_loop_buffer,
- sz_tunnel, __str_rte_errno());
- goto err_out;
- }
- }
-
- return 0;
-
-err_out:
- for (int i = 0; i < nr_stream; i++)
- if (object[i] != NULL)
- rte_ring_free(object[i]);
- return -1;
-}
-
static int vdev_data_dispatch(struct _vdev * _vdev, queue_id_t qid, struct rte_mbuf * pkts[], unsigned int nr_pkts,
int flags)
{
@@ -254,9 +225,11 @@ static int vdev_data_destory(struct _vdev * _vdev)
return 0;
}
+
+
int vdev_data_create(struct vdev_main * v_main, const char * symbol, unsigned int nr_rxstream, unsigned int nr_txstream,
- unsigned int sz_tunnel, unsigned int sz_buffer, unsigned int batch_interval_in_us,
- struct rte_mempool * direct_pool, struct rte_mempool * indirect_pool)
+ unsigned int sz_tunnel_rx_exclusive, unsigned int sz_tunnel_rx_shared, unsigned int sz_tunnel_tx,
+ unsigned int sz_buffer, unsigned int batch_interval_in_us, struct rte_mempool * direct_pool)
{
// 检查设备是否已经存在,不允许重复创建
struct vdev * vdev_info = vdev_lookup(v_main, symbol);
@@ -266,16 +239,6 @@ int vdev_data_create(struct vdev_main * v_main, const char * symbol, unsigned in
return RT_IGNORE;
}
-#define _ERR_VERIFY(x, ...) \
- do \
- { \
- if (x == NULL) \
- { \
- MR_ERROR(__VA_ARGS__); \
- goto errout; \
- } \
- } while (0)
-
// 申请Info结构体的空间
struct _vdev * _vdev = ZMALLOC(sizeof(struct _vdev));
MR_VERIFY_MALLOC(_vdev);
@@ -288,9 +251,8 @@ int vdev_data_create(struct vdev_main * v_main, const char * symbol, unsigned in
_vdev->nr_rxstream = nr_rxstream;
_vdev->nr_txstream = nr_txstream;
_vdev->sz_buffer = sz_buffer;
- _vdev->sz_tunnel = sz_tunnel;
+ _vdev->sz_tunnel = sz_tunnel_rx_exclusive;
_vdev->direct_pool = direct_pool;
- _vdev->indirect_pool = indirect_pool;
char vnode_sym_rx[MR_SYMBOL_MAX * 2];
snprintf(vnode_sym_rx, sizeof(vnode_sym_rx), "%s-rx", vdev_info->symbol);
@@ -305,16 +267,28 @@ int vdev_data_create(struct vdev_main * v_main, const char * symbol, unsigned in
snprintf(vnode_sym_ltx, sizeof(vnode_sym_ltx), "%s-ltx", vdev_info->symbol);
/* 创建VNODE */
- _vdev->vnode_rx = vnode_mirror_create(vnode_sym_rx, sz_tunnel, sz_buffer, indirect_pool, 1, batch_interval_in_us);
- _vdev->vnode_tx = vnode_mirror_create(vnode_sym_tx, sz_tunnel, sz_buffer, indirect_pool, 0, 0);
- _vdev->vnode_ftx = vnode_mirror_create(vnode_sym_ftx, sz_tunnel, 0, indirect_pool, 0, 0);
- _vdev->vnode_ltx = vnode_mirror_create(vnode_sym_ltx, sz_tunnel, 0, indirect_pool, 0, 0);
+ _vdev->vnode_rx = vnode_mirror_create(vnode_sym_rx, sz_tunnel_rx_exclusive, sz_tunnel_rx_shared, sz_buffer, 1,
+ batch_interval_in_us);
+
+ _vdev->vnode_tx = vnode_mirror_create(vnode_sym_tx, sz_tunnel_tx, 0, sz_buffer, 0, 0);
+ _vdev->vnode_ftx = vnode_mirror_create(vnode_sym_ftx, sz_tunnel_tx, 0, 0, 0, 0);
+ _vdev->vnode_ltx = vnode_mirror_create(vnode_sym_ltx, sz_tunnel_tx, 0, 0, 0, 0);
+
+#define ERR_VERIFY(x, ...) \
+ do \
+ { \
+ if (x == NULL) \
+ { \
+ MR_ERROR(__VA_ARGS__); \
+ goto errout; \
+ } \
+ } while (0)
/* 错误校验 */
- _ERR_VERIFY(_vdev->vnode_rx, "Create vdev %s rx vnode failed.", vdev_info->symbol);
- _ERR_VERIFY(_vdev->vnode_tx, "Create vdev %s tx vnode failed.", vdev_info->symbol);
- _ERR_VERIFY(_vdev->vnode_ftx, "Create vdev %s fast tx vnode failed. ", vdev_info->symbol);
- _ERR_VERIFY(_vdev->vnode_ltx, "Create vdev %s lock tx vnode failed. ", vdev_info->symbol);
+ ERR_VERIFY(_vdev->vnode_rx, "Create vdev %s rx vnode failed.", vdev_info->symbol);
+ ERR_VERIFY(_vdev->vnode_tx, "Create vdev %s tx vnode failed.", vdev_info->symbol);
+ ERR_VERIFY(_vdev->vnode_ftx, "Create vdev %s fast tx vnode failed. ", vdev_info->symbol);
+ ERR_VERIFY(_vdev->vnode_ltx, "Create vdev %s lock tx vnode failed. ", vdev_info->symbol);
_vdev->vnode_rx_prod = vnode_mirror_create_prod(_vdev->vnode_rx, "sv", nr_rxstream);
_vdev->vnode_tx_cons = vnode_mirror_create_cons(_vdev->vnode_tx, "sv", nr_txstream);
@@ -322,22 +296,10 @@ int vdev_data_create(struct vdev_main * v_main, const char * symbol, unsigned in
_vdev->vnode_ctrl_cons = vnode_mirror_create_cons(_vdev->vnode_ltx, "sv", nr_txstream);
/* 校验,申请VNODE、VNODE生产者、消费者是否成功 */
- _ERR_VERIFY(_vdev->vnode_rx_prod, "Create vdev %s rx vnode producer failed. ", vdev_info->symbol);
- _ERR_VERIFY(_vdev->vnode_tx_cons, "Create vdev %s tx vnode consumer failed. ", vdev_info->symbol);
- _ERR_VERIFY(_vdev->vnode_ftx_cons, "Create vdev %s fast tx vnode consumer failed. ", vdev_info->symbol);
- _ERR_VERIFY(_vdev->vnode_ctrl_cons, "Create vdev %s lock tx vnode consumer failed. ", vdev_info->symbol);
-
- /* 创建自循环缓冲区 */
- _vdev->rx_loop_buffers = ZMALLOC(sizeof(struct rte_ring *) * _vdev->nr_rxstream);
- _vdev->tx_loop_buffers = ZMALLOC(sizeof(struct rte_ring *) * _vdev->nr_txstream);
- MR_VERIFY_MALLOC(_vdev->rx_loop_buffers);
- MR_VERIFY_MALLOC(_vdev->tx_loop_buffers);
-
- vdev_data_create_loop_buffer("rx", vdev_info->symbol, _vdev->nr_rxstream, sz_tunnel, _vdev->rx_loop_buffers);
- vdev_data_create_loop_buffer("tx", vdev_info->symbol, _vdev->nr_txstream, sz_tunnel, _vdev->tx_loop_buffers);
-
- _ERR_VERIFY(_vdev->rx_loop_buffers, "Create rx self-loop buffer failed. \n");
- _ERR_VERIFY(_vdev->tx_loop_buffers, "Create tx self-loop buffer failed. \n");
+ ERR_VERIFY(_vdev->vnode_rx_prod, "Create vdev %s rx vnode producer failed. ", vdev_info->symbol);
+ ERR_VERIFY(_vdev->vnode_tx_cons, "Create vdev %s tx vnode consumer failed. ", vdev_info->symbol);
+ ERR_VERIFY(_vdev->vnode_ftx_cons, "Create vdev %s fast tx vnode consumer failed. ", vdev_info->symbol);
+ ERR_VERIFY(_vdev->vnode_ctrl_cons, "Create vdev %s lock tx vnode consumer failed. ", vdev_info->symbol);
/* 注册回调函数 */
_vdev->dispatch = vdev_data_dispatch;
@@ -361,5 +323,5 @@ errout:
rte_free(_vdev);
return RT_ERR;
-#undef _ERR_VERIFY
+#undef ERR_VERIFY
}