author     Lu Qiuwen <[email protected]>    2024-06-12 15:49:16 +0800
committer  Lu Qiuwen <[email protected]>    2024-06-14 14:57:06 +0800
commit     06f515f5fee9457b99841cfd3bb42bcf31f91f7d (patch)
tree       26f08bbeb6af2c36e453b12bcec2f5204891a41e
parent     33f70f1e0a63c04f06b997ff6b65b46f80c23e0d (diff)
OMPUB-1321 bugfix: clear the in-flight loan counter when the prod is recreated. (tag: v4.7.7-20240620)
-rw-r--r--  infra/src/vnode_common.c   32
-rw-r--r--  infra/test/TestVNode.cc   125
2 files changed, 142 insertions, 15 deletions
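
A producer borrows credits from the vnode for every in-flight object, tracked in vnode->credits_on_loan. Deleting a prod did not return its outstanding balance, so a recreated prod inherited a stale count and ran out of credits early. A minimal sketch of the failure mode, assuming an admission check of the form below (vnode_sketch, max_in_flight, and can_enqueue are illustrative names, not the actual code):

    #include <stdint.h>

    struct vnode_sketch {
        uint32_t max_in_flight;   /* illustrative credit limit */
        uint32_t credits_on_loan; /* objects currently in flight */
    };

    /* Illustrative admission check: with a stale non-zero loan balance
     * left behind by a deleted prod, this starts rejecting enqueues
     * long before the real in-flight count reaches the limit. */
    static int can_enqueue(const struct vnode_sketch * vn, uint32_t n)
    {
        return vn->credits_on_loan + n <= vn->max_in_flight;
    }
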
diff --git a/infra/src/vnode_common.c b/infra/src/vnode_common.c
index b27af85..eed2cd9 100644
--- a/infra/src/vnode_common.c
+++ b/infra/src/vnode_common.c
@@ -98,17 +98,28 @@ errout:
static int tunnel_delete(struct tunnel_desc * desc)
{
for (int i = 0; i < desc->sz_en_buffer_used; i++)
- rte_pktmbuf_free(desc->en_buffer[i]);
+ {
+ rte_pktmbuf_free(desc->en_buffer[i]);
+ }
struct rte_mbuf * mbuf;
while (rte_ring_dequeue(desc->tunnel_object, (void **)&mbuf) == 0)
- rte_pktmbuf_free(mbuf);
+ {
+ rte_pktmbuf_free(mbuf);
+ }
+
+ /* free the rt buffer */
+ for (int i = 0; i < desc->sz_rt_buffer_used; i++)
+ {
+ rte_pktmbuf_free(desc->rt_buffer[i]);
+ }
- MR_VERIFY_2(rte_ring_empty(desc->tunnel_object) == 1, "Tunnel %s is not empty", desc->symbol);
- rte_free(desc->en_buffer);
- rte_ring_free(desc->tunnel_object);
- rte_free(desc);
- return 0;
+ MR_VERIFY_2(rte_ring_empty(desc->tunnel_object) == 1, "Tunnel %s is not empty", desc->symbol);
+ rte_free(desc->en_buffer);
+ rte_free(desc->rt_buffer);
+ rte_ring_free(desc->tunnel_object);
+ rte_free(desc);
+ return 0;
}
/* Delete a block of tunnels */
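
Before this hunk, any mbufs still parked in rt_buffer leaked when a tunnel was deleted. The test suite catches exactly this class of leak by asserting the mempool's in-use count returns to zero after teardown; a minimal form of that check (the pool handle is whatever the caller owns):

    #include <assert.h>
    #include <rte_mempool.h>

    /* Teardown leak check: every mbuf allocated from the pool must be
     * back in the pool once all tunnels, prods, and conses are gone. */
    static void assert_no_mbuf_leak(const struct rte_mempool * pool)
    {
        assert(rte_mempool_in_use_count(pool) == 0);
    }
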
@@ -164,8 +176,11 @@ static struct tunnel_block * tunnel_block_new(const char * symbol, struct vnode_
err:
MR_ERROR("Create tunnel block %s failed, tunnel size = %d, tunnel buffer = %d", symbol, sz_exclusive, sz_buffer);
- if (block)
+ if (block != NULL)
+ {
tunnel_block_delete(block);
+ }
+
return NULL;
}
@@ -264,6 +279,7 @@ struct vnode_prod * __vnode_common_create_prod(struct vnode * vnode, const char
int ret = do_producer_join_unsafe(vnode, prod);
+ vnode->credits_on_loan = 0;
rte_spinlock_unlock(&vnode->lock);
if (ret < 0)
{
goto err;
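
The reset itself is the whole fix: a freshly joined prod must start with a clean loan balance. A self-contained sketch of the arithmetic that went wrong (only credits_on_loan is a real field from this patch; the flow around it is assumed):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t credits_on_loan = 0;

        credits_on_loan += 64; /* prod enqueues 64 objects           */
        credits_on_loan -= 48; /* 48 drained, their credits returned */

        /* prod deleted with 16 credits still on loan; before this
         * commit a recreated prod inherited that balance, and each
         * create/delete cycle stranded more credits until every
         * enqueue was rejected. The fix zeroes it on (re)join: */
        credits_on_loan = 0;

        printf("loan after recreate: %u\n", credits_on_loan);
        return 0;
    }
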
diff --git a/infra/test/TestVNode.cc b/infra/test/TestVNode.cc
index 76846be..0cc8756 100644
--- a/infra/test/TestVNode.cc
+++ b/infra/test/TestVNode.cc
@@ -748,6 +748,109 @@ TEST_F(TestCaseVNode, TestVNodeEnableMaxInFlight_RefillCredits)
vnode_mirror_delete(vnode_ptr);
}
+TEST_F(TestCaseVNode, TestVNodeEnableMaxInFlight_CreateConsManyTimes)
+{
+ struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 16384, 64, 0, 0);
+ ASSERT_NE(vnode_ptr, nullptr);
+
+ struct vnode_prod * prod = vnode_mirror_create_prod(vnode_ptr, "prod", 8);
+ ASSERT_NE(prod, nullptr);
+
+ static constexpr int test_object_count = 64;
+ for (unsigned int i = 0; i < 64; i++)
+ {
+ struct rte_mbuf * enq_objs[test_object_count] = {};
+ struct rte_mbuf * deq_objs[test_object_count] = {};
+
+ int ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool_, enq_objs, RTE_DIM(enq_objs));
+ ASSERT_EQ(ret, 0);
+
+ for (auto * enq_obj : enq_objs)
+ {
+ enq_obj->hash.usr = 0x4d5a;
+ }
+
+ uint32_t enq_hashs[test_object_count] = {};
+ for (auto & enq_hash : enq_hashs)
+ {
+ enq_hash = 0x4d5a;
+ }
+
+ struct vnode_cons * cons = vnode_mirror_create_cons(vnode_ptr, "cons", 1);
+ ASSERT_NE(cons, nullptr);
+
+ /* the full credit budget must still be available after recreating the cons: nothing is dropped */
+ int enq_ret = vnode_mirror_enqueue_bulk(prod, 0, enq_objs, enq_hashs, RTE_DIM(enq_hashs));
+ EXPECT_EQ(enq_ret, 0);
+
+ int rt_ret = vnode_mirror_rt_object_retrieve(prod, 0, deq_objs, RTE_DIM(deq_objs));
+ EXPECT_EQ(rt_ret, 0);
+ rte_pktmbuf_free_bulk(deq_objs, rt_ret);
+
+ int deq_ret = vnode_mirror_dequeue_burst(cons, 0, deq_objs, RTE_DIM(deq_objs));
+ EXPECT_EQ(deq_ret, test_object_count);
+
+ rte_pktmbuf_free_bulk(deq_objs, deq_ret);
+ vnode_mirror_delete_cons(cons);
+
+ EXPECT_EQ(rte_mempool_in_use_count(pktmbuf_pool_), 0);
+ }
+
+ vnode_mirror_delete(vnode_ptr);
+}
+
+TEST_F(TestCaseVNode, TestVNodeEnableMaxInFlight_CreateProdManyTimes)
+{
+ struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 16384, 64, 0, 0);
+ ASSERT_NE(vnode_ptr, nullptr);
+
+ struct vnode_cons * cons = vnode_mirror_create_cons(vnode_ptr, "cons", 1);
+ ASSERT_NE(cons, nullptr);
+
+ for (unsigned int i = 0; i < 64; i++)
+ {
+ static constexpr int test_object_count = 64;
+ struct rte_mbuf * enq_objs[test_object_count] = {};
+ struct rte_mbuf * deq_objs[test_object_count] = {};
+
+ int ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool_, enq_objs, RTE_DIM(enq_objs));
+ ASSERT_EQ(ret, 0);
+
+ for (auto * enq_obj : enq_objs)
+ {
+ enq_obj->hash.usr = 0x4d5a;
+ }
+
+ uint32_t enq_hashs[test_object_count] = {};
+ for (auto & enq_hash : enq_hashs)
+ {
+ enq_hash = 0x4d5a;
+ }
+
+ struct vnode_prod * prod = vnode_mirror_create_prod(vnode_ptr, "prod", 8);
+ ASSERT_NE(prod, nullptr);
+
+ /* a recreated prod must start with a clean loan balance: nothing is dropped */
+ int enq_ret = vnode_mirror_enqueue_bulk(prod, 0, enq_objs, enq_hashs, RTE_DIM(enq_hashs));
+ EXPECT_EQ(enq_ret, 0);
+
+ int rt_ret = vnode_mirror_rt_object_retrieve(prod, 0, deq_objs, RTE_DIM(deq_objs));
+ EXPECT_EQ(rt_ret, 0);
+ rte_pktmbuf_free_bulk(deq_objs, rt_ret);
+
+ int deq_ret = vnode_mirror_dequeue_burst(cons, 0, deq_objs, RTE_DIM(deq_objs));
+ EXPECT_EQ(deq_ret, test_object_count);
+
+ /* free the mbufs */
+ rte_pktmbuf_free_bulk(deq_objs, deq_ret);
+
+ /* then, delete the prod */
+ vnode_mirror_delete_prod(prod);
+ }
+
+ vnode_mirror_delete(vnode_ptr);
+}
+
TEST_F(TestCaseVNode, TestVNodeEnableMaxInFlight_MultipleThreadProducer)
{
struct vnode * vnode_ptr = vnode_mirror_create("m-vnode", 16384, 64, 0, 0);
@@ -762,14 +865,18 @@ TEST_F(TestCaseVNode, TestVNodeEnableMaxInFlight_MultipleThreadProducer)
rte_atomic32_t total_dropped_objects;
rte_atomic32_clear(&total_dropped_objects);
- auto f_producer = [vnode_ptr, prod, &total_dropped_objects](unsigned int hash) {
+ rte_atomic32_t aid_atomic_counter;
+ rte_atomic32_clear(&aid_atomic_counter);
+
+ auto f_producer = [vnode_ptr, prod, &total_dropped_objects, &aid_atomic_counter](unsigned int hash) {
struct rte_mbuf * enq_objs[64] = {};
struct rte_mbuf * deq_objs[64] = {};
+ unsigned int qid = rte_atomic32_add_return(&aid_atomic_counter, 1) - 1;
int ret = rte_pktmbuf_alloc_bulk(pktmbuf_pool_, enq_objs, RTE_DIM(enq_objs));
ASSERT_EQ(ret, 0);
- for (auto & enq_obj : enq_objs)
+ for (auto * enq_obj : enq_objs)
{
enq_obj->hash.usr = hash;
}
@@ -780,12 +887,15 @@ TEST_F(TestCaseVNode, TestVNodeEnableMaxInFlight_MultipleThreadProducer)
enq_hash = hash;
}
- int enq_ret = vnode_mirror_enqueue_bulk(prod, 0, enq_objs, enq_hashs, RTE_DIM(enq_hashs));
+ int enq_ret = vnode_mirror_enqueue_bulk(prod, qid, enq_objs, enq_hashs, RTE_DIM(enq_hashs));
EXPECT_EQ(enq_ret, 0);
- int rt_ret = vnode_mirror_rt_object_retrieve(prod, 0, deq_objs, RTE_DIM(deq_objs));
- rte_pktmbuf_free_bulk(deq_objs, rt_ret);
- rte_atomic32_add(&total_dropped_objects, rt_ret);
+ int rt_ret = vnode_mirror_rt_object_retrieve(prod, qid, deq_objs, RTE_DIM(deq_objs));
+ if (rt_ret != 0)
+ {
+ rte_pktmbuf_free_bulk(deq_objs, rt_ret);
+ rte_atomic32_add(&total_dropped_objects, rt_ret);
+ }
};
std::thread t_producer_1(f_producer, 0x4d5a);
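
Each producer thread now enqueues on its own queue id handed out by an atomic counter, so the two threads never contend on one queue (the per-queue path is presumably single-producer). The same hand-out pattern in isolation, using pthreads and C11 atomics instead of the DPDK primitives so the sketch builds standalone:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint next_qid; /* zero-initialized, like rte_atomic32_clear */

    static void * producer(void * arg)
    {
        (void)arg;
        /* fetch-and-increment gives every thread a distinct qid,
         * mirroring rte_atomic32_add_return(&counter, 1) - 1 above */
        unsigned int qid = atomic_fetch_add(&next_qid, 1);
        printf("producer bound to qid %u\n", qid);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;
        pthread_create(&t1, NULL, producer, NULL);
        pthread_create(&t2, NULL, producer, NULL);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
    }
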
@@ -804,13 +914,14 @@ TEST_F(TestCaseVNode, TestVNodeEnableMaxInFlight_MultipleThreadProducer)
struct rte_mbuf * deq_objs[64] = {};
int deq_ret = vnode_mirror_dequeue_burst(cons, 0, deq_objs, RTE_DIM(deq_objs));
EXPECT_EQ(deq_ret, 64);
+
rte_pktmbuf_free_bulk(deq_objs, deq_ret);
/* delete the vnode */
vnode_mirror_delete(vnode_ptr);
}
-TEST_F(TestCaseVNodeQueue, MultQueueEnqueue)
+TEST_F(TestCaseVNodeQueue, MultiQueueEnqueue)
{
struct vnode * vnode_ptr = vnode_mirror_create("vnode", 1024, 0, 0, 0);
ASSERT_NE(vnode_ptr, nullptr);