summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author刘畅 <[email protected]>2024-01-05 07:50:57 +0000
committer刘畅 <[email protected]>2024-01-05 07:50:57 +0000
commit926d48d5f2c3d5a53484554b042e795e1b358eb9 (patch)
treedc90660629257da5831b94ba674882845425f2f3
parent0822899c4f2d2932ce2a3cc1e1c1bba7c2b27ad0 (diff)
parentf54e76984fe50d922c57ad8b8efcd6afc17a9577 (diff)
Merge branch 'bugfix_record_queue_len_on_blocked_profile' into 'rel'v3.1.18
Bugfix: record queue len on blocked profile. See merge request tango/shaping-engine!68
-rw-r--r--shaping/include/shaper.h1
-rw-r--r--shaping/src/shaper.cpp15
-rw-r--r--shaping/src/shaper_global_stat.cpp4
-rw-r--r--shaping/test/gtest_shaper.cpp149
4 files changed, 160 insertions, 9 deletions
diff --git a/shaping/include/shaper.h b/shaping/include/shaper.h
index 5bec4ca..33eb103 100644
--- a/shaping/include/shaper.h
+++ b/shaping/include/shaper.h
@@ -124,6 +124,7 @@ struct shaping_packet_wrapper {
unsigned long long income_time_ns;
unsigned long long enqueue_time_us;//first enqueue time
unsigned int length;
+ int rule_anchor;
unsigned char direction;
TAILQ_ENTRY(shaping_packet_wrapper) node;
};
diff --git a/shaping/src/shaper.cpp b/shaping/src/shaper.cpp
index be68ca3..0977213 100644
--- a/shaping/src/shaper.cpp
+++ b/shaping/src/shaper.cpp
@@ -217,6 +217,7 @@ static int shaper_packet_enqueue(struct shaping_thread_ctx *ctx, struct shaping_
s_pkt->pkt_buff = pkt_buff;
s_pkt->direction = meta->dir;
s_pkt->length = meta->raw_len;
+ s_pkt->rule_anchor = sf->anchor;
s_pkt->income_time_ns = curr_time.tv_sec * NANO_SECONDS_PER_SEC + curr_time.tv_nsec;
s_pkt->enqueue_time_us = curr_time.tv_sec * MICRO_SECONDS_PER_SEC + curr_time.tv_nsec / NANO_SECONDS_PER_MICRO_SEC;
TAILQ_INSERT_TAIL(&sf->packet_queue, s_pkt, node);
@@ -307,6 +308,7 @@ int shaper_flow_push(struct shaping_thread_ctx *ctx, struct shaping_flow *sf, un
END:
if (ret == 0) {//all avl tree success
s_rule_info->primary.enqueue_time_us = enqueue_time;
+ shaper_stat_queueing_pkt_inc(&s_rule_info->primary.stat, pkt_wrapper->direction, ctx->thread_index);
}
return ret;
@@ -673,6 +675,7 @@ static int shaper_profile_is_priority_blocked(struct shaping_thread_ctx *ctx, st
END:
if (profile->hash_node->is_priority_blocked[priority] == 1) {
+ sf->flag |= SESSION_BORROW;
return 1;
} else {
return 0;
@@ -874,7 +877,6 @@ static enum shaping_packet_action shaper_pkt_action_decide_queueing(struct shapi
clock_gettime(CLOCK_MONOTONIC, &curr_time);
enqueue_time = curr_time.tv_sec * MICRO_SECONDS_PER_SEC + curr_time.tv_nsec / NANO_SECONDS_PER_MICRO_SEC;
if (0 == shaper_flow_push(ctx, sf, enqueue_time)) {
- shaper_stat_queueing_pkt_inc(&sf->matched_rule_infos[sf->anchor].primary.stat, pkt_wrapper->direction, ctx->thread_index);
return SHAPING_QUEUED;
} else {
goto DROP;
@@ -923,7 +925,6 @@ FLOW_PUSH:
clock_gettime(CLOCK_MONOTONIC, &curr_time);
enqueue_time = curr_time.tv_sec * MICRO_SECONDS_PER_SEC + curr_time.tv_nsec / NANO_SECONDS_PER_MICRO_SEC;
if (0 == shaper_flow_push(ctx, sf, enqueue_time)) {
- shaper_stat_queueing_pkt_inc(&sf->matched_rule_infos[sf->anchor].primary.stat, meta->dir, ctx->thread_index);
return SHAPING_QUEUED;
} else {
goto DROP;
@@ -989,12 +990,10 @@ static int shaper_polling_first_pkt_token_get(struct shaper *sp, struct shaping_
return 0;
} else {
pkt_wrapper = shaper_first_pkt_get(sf);
- sf->anchor = 0;
+ shaper_stat_queueing_pkt_dec(&sf->matched_rule_infos[pkt_wrapper->rule_anchor].primary.stat, pkt_wrapper->direction, ctx->thread_index);
- if (0 == shaper_flow_push(ctx, sf, pkt_wrapper->enqueue_time_us)) {
- /*in pkt process, when queue not empty, new pkt's queueing stat was added to primary profile of first rule.
- so don't need add queueing stat here*/
- } else {
+ sf->anchor = 0;
+ if (shaper_flow_push(ctx, sf, pkt_wrapper->enqueue_time_us) != 0) {
shaper_queue_clear(sf, ctx);//if the first packet fails, every subsequent packet will fail
if (sf->flag & SESSION_CLOSE) {
sf->flag &= (~SESSION_CLOSE);
@@ -1037,7 +1036,7 @@ void shaping_packet_process(struct shaping_thread_ctx *ctx, marsio_buff_t *rx_bu
}
if (!shaper_queue_empty(sf)) {//already have queueing pkt, enqueue directly
- s_rule = &sf->matched_rule_infos[0];
+ s_rule = &sf->matched_rule_infos[sf->anchor];
if (0 == shaper_packet_enqueue(ctx, sf, rx_buff, meta)) {
shaper_stat_queueing_pkt_inc(&s_rule->primary.stat, meta->dir, ctx->thread_index);
shaper_global_stat_queueing_inc(&ctx->thread_global_stat, meta->raw_len);
diff --git a/shaping/src/shaper_global_stat.cpp b/shaping/src/shaper_global_stat.cpp
index 771438a..03a913f 100644
--- a/shaping/src/shaper_global_stat.cpp
+++ b/shaping/src/shaper_global_stat.cpp
@@ -127,6 +127,10 @@ void shaper_global_stat_destroy(struct shaping_global_stat *stat)
return;
}
+ if (stat->stat_data) {
+ free(stat->stat_data);
+ }
+
if (stat->instance) {
fieldstat_instance_free(stat->instance);
}
diff --git a/shaping/test/gtest_shaper.cpp b/shaping/test/gtest_shaper.cpp
index e27a49f..b129c25 100644
--- a/shaping/test/gtest_shaper.cpp
+++ b/shaping/test/gtest_shaper.cpp
@@ -1506,6 +1506,153 @@ TEST(two_session_diff_priority_same_profile, session_timer_test)
stub_clear_matched_shaping_rules();
}
+/*session1 match rule1 & rule2; session2 match rule3
+ rule1:
+ priority:1
+ primary profile_a: (priority 1)
+ rule2:
+ priority:2
+ primary profile_b: (priority 2)
+ rule3:
+ priority:3
+ primary profile_a: (priority 3)
+
+profile_a(id 0): limit 3000
+profile_b(id 1): limit 1000
+*/
+TEST(two_sessions, priority_non_block)
+{
+ struct stub_pkt_queue expec_tx_queue1;
+ struct stub_pkt_queue expec_tx_queue2;
+ struct stub_pkt_queue *actual_tx_queue;
+ struct shaping_ctx *ctx = NULL;
+ struct shaping_flow *sf1 = NULL;
+ struct shaping_flow *sf2 = NULL;
+ long long rule_ids[] = {1, 2, 3};
+ long long rule_id1[] = {1, 2};
+ long long rule_id2[] = {3};
+ int profile_nums[] = {1, 1, 1};
+ int prioritys[] = {1, 2, 3};
+ int profile_id[][MAX_REF_PROFILE] = {{0}, {1}, {0}};
+
+
+ TAILQ_INIT(&expec_tx_queue1);
+ TAILQ_INIT(&expec_tx_queue2);
+ stub_init();
+
+ ctx = shaping_engine_init();
+ ASSERT_TRUE(ctx != NULL);
+ sf1 = shaping_flow_new(&ctx->thread_ctx[0]);
+ ASSERT_TRUE(sf1 != NULL);
+ sf2 = shaping_flow_new(&ctx->thread_ctx[1]);
+ ASSERT_TRUE(sf2 != NULL);
+
+ stub_set_matched_shaping_rules(3, rule_ids, prioritys, profile_nums, profile_id);
+
+ stub_set_token_bucket_avl_per_sec(0, 3000, SHAPING_DIR_OUT);
+ stub_set_token_bucket_avl_per_sec(1, 1000, SHAPING_DIR_OUT);
+ actual_tx_queue = stub_get_tx_queue();
+ shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 2);
+ shaper_rules_update(&ctx->thread_ctx[1], sf2, rule_id2, 1);
+
+ /*******send packets***********/
+ send_packets(&ctx->thread_ctx[0], sf1, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue1, 2, 0);//sf1 blocked by rule2(profile id 1), while rule3(profile id 0) still has 1000 tokens
+ send_packets(&ctx->thread_ctx[1], sf2, 10, 100, SHAPING_DIR_OUT, &expec_tx_queue2, 1, 0);
+ ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue1, actual_tx_queue, 10));//sf1 should send 10 pkts
+ ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue2, actual_tx_queue, 10));//sf2 should send 10 pkts because rule3(profile id 0) has 1000 tokens
+ ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue));
+
+ while (!TAILQ_EMPTY(&expec_tx_queue1)) {
+ stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(1);
+ polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
+ polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);//two rules per pkt need two pollings
+
+ ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue1, actual_tx_queue, 1));//sf1 remaining 90 pkts
+ }
+
+ shaping_flow_free(&ctx->thread_ctx[0], sf1);
+ shaping_flow_free(&ctx->thread_ctx[1], sf2);
+ fieldstat_global_disable_prometheus_endpoint();
+ shaper_thread_resource_clear();
+ shaping_engine_destroy(ctx);
+ stub_clear_matched_shaping_rules();
+}
+
+/*session1 match rule1; session2 match rule2
+ rule1:
+ priority:1
+ primary profile_a: (priority 1)
+ rule2:
+ priority:2
+ primary profile_a: (priority 2)
+ borrow profile_b: (priority 3)
+
+profile_a(id 0): limit 1000
+profile_b(id 1): limit 1000
+*/
+TEST(two_sessions, priority_block_borrow)
+{
+ struct stub_pkt_queue expec_tx_queue1;
+ struct stub_pkt_queue expec_tx_queue2;
+ struct stub_pkt_queue *actual_tx_queue;
+ struct shaping_ctx *ctx = NULL;
+ struct shaping_flow *sf1 = NULL;
+ struct shaping_flow *sf2 = NULL;
+ long long rule_ids[] = {1, 2};
+ long long rule_id1[] = {1};
+ long long rule_id2[] = {2};
+ int profile_nums[] = {1, 2};
+ int prioritys[] = {1, 2};
+ int profile_id[][MAX_REF_PROFILE] = {{0}, {0, 1}};
+
+
+ TAILQ_INIT(&expec_tx_queue1);
+ TAILQ_INIT(&expec_tx_queue2);
+ stub_init();
+
+ ctx = shaping_engine_init();
+ ASSERT_TRUE(ctx != NULL);
+ sf1 = shaping_flow_new(&ctx->thread_ctx[0]);
+ ASSERT_TRUE(sf1 != NULL);
+ sf2 = shaping_flow_new(&ctx->thread_ctx[1]);
+ ASSERT_TRUE(sf2 != NULL);
+
+ stub_set_matched_shaping_rules(2, rule_ids, prioritys, profile_nums, profile_id);
+
+ stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT);
+ stub_set_token_bucket_avl_per_sec(1, 1000, SHAPING_DIR_OUT);
+ actual_tx_queue = stub_get_tx_queue();
+ shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 1);
+ shaper_rules_update(&ctx->thread_ctx[1], sf2, rule_id2, 1);
+
+ /*******send packets***********/
+ send_packets(&ctx->thread_ctx[0], sf1, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue1, 1, 0);
+ send_packets(&ctx->thread_ctx[1], sf2, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue2, 1, 0);
+ ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue1, actual_tx_queue, 10));
+
+ while (!TAILQ_EMPTY(&expec_tx_queue2)) {
+ stub_refresh_token_bucket(1);
+ polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);//primary profile blocked by priority, send by borrow profile
+
+ ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue2, actual_tx_queue, 1));
+ }
+
+ while (!TAILQ_EMPTY(&expec_tx_queue1)) {
+ stub_refresh_token_bucket(0);
+ polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
+
+ ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue1, actual_tx_queue, 1));
+ }
+
+ shaping_flow_free(&ctx->thread_ctx[0], sf1);
+ shaping_flow_free(&ctx->thread_ctx[1], sf2);
+ fieldstat_global_disable_prometheus_endpoint();
+ shaper_thread_resource_clear();
+ shaping_engine_destroy(ctx);
+ stub_clear_matched_shaping_rules();
+}
+
/*session1 match rule1
rule1:
profile: limit 1000*/
@@ -1674,6 +1821,6 @@ TEST(statistics, udp_queueing_pkt)
int main(int argc, char **argv)
{
testing::InitGoogleTest(&argc, argv);
- //testing::GTEST_FLAG(filter) = "max_min_host_fairness_profile.udp_tx_in_order";
+ //testing::GTEST_FLAG(filter) = "two_sessions.priority_block_borrow";
return RUN_ALL_TESTS();
} \ No newline at end of file