path: root/shaping/test
Diffstat (limited to 'shaping/test')
-rw-r--r--  shaping/test/dummy_swarmkv.cpp              204
-rw-r--r--  shaping/test/gtest_shaper.cpp               500
-rw-r--r--  shaping/test/gtest_shaper_aqm.cpp            30
-rw-r--r--  shaping/test/gtest_shaper_maat.cpp          320
-rw-r--r--  shaping/test/gtest_shaper_send_log.cpp       87
-rw-r--r--  shaping/test/gtest_shaper_with_swarmkv.cpp   51
-rw-r--r--  shaping/test/stub.cpp                       154
-rw-r--r--  shaping/test/stub.h                          42
-rw-r--r--  shaping/test/test_conf/shaping_maat.json    111
-rw-r--r--  shaping/test/test_conf/table_info.json       15
10 files changed, 844 insertions, 670 deletions
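
The change replaces the integer profile and rule IDs used by the shaping test stubs with UUIDs: dummy_swarmkv.cpp drops its fixed-size, integer-indexed arrays (MAX_STUB_PROFILE_NUM entries) in favour of uthash tables keyed by a raw uuid_t, and the gtest cases now pass UUID strings instead of numeric IDs. Below is a minimal, self-contained sketch of the find-or-create hash pattern the stubs rely on, assuming uthash.h and libuuid are available; the names stub_profile_node and node_get are illustrative only and do not appear in the tree.

#include <cstdio>
#include <cstdlib>
#include <uuid/uuid.h>
#include "uthash.h"

struct stub_profile_node {
    uuid_t uuid;                  /* key: 16 raw bytes, compared bytewise by uthash */
    int in_limit_bandwidth;       /* example payload, stored in bits per second */
    UT_hash_handle hh;            /* makes this struct hashable */
};

static struct stub_profile_node *nodes = NULL;

/* Find the node for a textual UUID, creating it on first use. */
static struct stub_profile_node *node_get(const char *uuid_str)
{
    uuid_t key;
    struct stub_profile_node *n = NULL;

    if (uuid_parse(uuid_str, key) != 0)
        return NULL;                                   /* malformed UUID string */

    HASH_FIND(hh, nodes, key, sizeof(uuid_t), n);
    if (!n) {
        n = (struct stub_profile_node *)calloc(1, sizeof(*n));
        uuid_copy(n->uuid, key);
        HASH_ADD(hh, nodes, uuid, sizeof(uuid_t), n);
    }
    return n;
}

int main()
{
    node_get("00000000-0000-0000-0000-000000000001")->in_limit_bandwidth = 1000 * 8;
    printf("%u profiles in the stub table\n", HASH_COUNT(nodes));
    return 0;
}

With uuid_t as the key, HASH_FIND and HASH_ADD hash the full 16 raw bytes, so the same pattern serves both profiles_hash and profile_priority_queue_len_hash in the stub.
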
diff --git a/shaping/test/dummy_swarmkv.cpp b/shaping/test/dummy_swarmkv.cpp
index 85251b8..7671a52 100644
--- a/shaping/test/dummy_swarmkv.cpp
+++ b/shaping/test/dummy_swarmkv.cpp
@@ -9,95 +9,81 @@
using namespace std;
-#define MAX_STUB_RULE_NUM 8
-#define MAX_STUB_PROFILE_NUM 8
-
-#define DEFAULT_AVALIABLE_TOKEN_PER_SEC -1
-
-struct stub_token_thread_arg {
- int profile_id;
- struct swarmkv_reply reply;
- swarmkv_on_reply_callback_t *cb;
- void *cb_arg;
-};
-
-struct stub_avaliable_token {
- int in_limit_bandwidth;
- int out_limit_bandwidth;
- int bidirection_limit_bandwidth;
+struct profile_priority_queue_len {
+ uuid_t profile_uuid;
+ int priority_queue_len[SHAPING_PRIORITY_NUM_MAX][SHAPING_DIR_MAX];
+ UT_hash_handle hh;
};
-static int profile_priority_len[MAX_STUB_PROFILE_NUM][SHAPING_PRIORITY_NUM_MAX][SHAPING_DIR_MAX];
-static struct stub_avaliable_token pf_curr_avl_token[MAX_STUB_PROFILE_NUM];
-static int pf_async_times[MAX_STUB_PROFILE_NUM];
-vector<struct stub_token_thread_arg> pf_async_thread[MAX_STUB_PROFILE_NUM];
-extern struct shaping_profile pf_array[MAX_STUB_PROFILE_NUM];
+struct profile_priority_queue_len *profile_priority_queue_len_hash = NULL;
+extern struct stub_shaping_profile *profiles_hash;
-void dummy_swarmkv_init()
+void stub_set_token_bucket_avl_per_sec(const char *profile_uuid_str, unsigned int tokens, unsigned char direction, enum shaping_profile_limit_direction limit_direction)
{
- memset(&pf_array, 0, MAX_STUB_PROFILE_NUM * sizeof(struct shaping_profile));
- memset(&profile_priority_len, 0, MAX_STUB_PROFILE_NUM * SHAPING_PRIORITY_NUM_MAX * SHAPING_DIR_MAX * sizeof(int));
-
- for (int i = 0; i < MAX_STUB_PROFILE_NUM; i++) {
- pf_curr_avl_token[i].in_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC;
- pf_curr_avl_token[i].out_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC;
- pf_curr_avl_token[i].bidirection_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC;
- pf_array[i].id = i;
- pf_array[i].in_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC;
- pf_array[i].out_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC;
- pf_async_times[i] = 0;
- memset(profile_priority_len[i], 0, 10 * sizeof(int));
+ uuid_t profile_uuid;
+ struct stub_shaping_profile *stub_profile = NULL;
+ unsigned token_bits;
+
+ uuid_parse(profile_uuid_str, profile_uuid);
+ HASH_FIND(hh, profiles_hash, profile_uuid, sizeof(uuid_t), stub_profile);
+ if (!stub_profile) {
+ stub_profile = (struct stub_shaping_profile*)calloc(1, sizeof(struct stub_shaping_profile));
+ uuid_copy(stub_profile->profile.uuid, profile_uuid);
+ HASH_ADD(hh, profiles_hash, profile.uuid, sizeof(uuid_t), stub_profile);
}
-}
-
-void * stub_get_token_thread_func(void *data)
-{
- struct stub_token_thread_arg *thread_arg;
- thread_arg = (struct stub_token_thread_arg*)data;
+ stub_profile->profile.limit_direction = limit_direction;
- thread_arg->cb(&thread_arg->reply, thread_arg->cb_arg);
-
- return NULL;
-}
-
-void stub_set_token_bucket_avl_per_sec(int profile_id, unsigned int tokens, unsigned char direction, enum shaping_profile_limit_direction limit_direction)
-{
- pf_array[profile_id].limit_direction = limit_direction;
+ if (tokens == AVALIABLE_TOKEN_UNLIMITED) {
+ token_bits = tokens;
+ } else {
+ token_bits = tokens * 8;
+ }
if (limit_direction == PROFILE_LIMIT_DIRECTION_BIDIRECTION) {
- pf_array[profile_id].bidirection_limit_bandwidth = tokens * 8;
- pf_curr_avl_token[profile_id].bidirection_limit_bandwidth = tokens * 8;
+ stub_profile->profile.bidirection_limit_bandwidth = token_bits;
+ stub_profile->avaliable_token.bidirection_limit_bandwidth = token_bits;
} else {
if (direction == SHAPING_DIR_IN) {
- pf_array[profile_id].in_limit_bandwidth = tokens * 8;
- pf_curr_avl_token[profile_id].in_limit_bandwidth = tokens * 8;
+ stub_profile->profile.in_limit_bandwidth = token_bits;
+ stub_profile->avaliable_token.in_limit_bandwidth = token_bits;
} else {
- pf_array[profile_id].out_limit_bandwidth = tokens * 8;
- pf_curr_avl_token[profile_id].out_limit_bandwidth = tokens * 8;
+ stub_profile->profile.out_limit_bandwidth = token_bits;
+ stub_profile->avaliable_token.out_limit_bandwidth = token_bits;
}
}
return;
}
-void stub_refresh_token_bucket(int profile_id)
+void stub_refresh_token_bucket(const char *profile_uuid_str)
{
- pf_curr_avl_token[profile_id].bidirection_limit_bandwidth = pf_array[profile_id].bidirection_limit_bandwidth;
- pf_curr_avl_token[profile_id].in_limit_bandwidth = pf_array[profile_id].in_limit_bandwidth;
- pf_curr_avl_token[profile_id].out_limit_bandwidth = pf_array[profile_id].out_limit_bandwidth;
+ uuid_t profile_uuid;
+ struct stub_shaping_profile *stub_profile = NULL;
+
+ uuid_parse(profile_uuid_str, profile_uuid);
+ HASH_FIND(hh, profiles_hash, profile_uuid, sizeof(uuid_t), stub_profile);
+ if (!stub_profile) {
+ return;
+ }
+
+ if (stub_profile->profile.limit_direction == PROFILE_LIMIT_DIRECTION_BIDIRECTION) {
+ stub_profile->avaliable_token.bidirection_limit_bandwidth = stub_profile->profile.bidirection_limit_bandwidth;
+ } else {
+ stub_profile->avaliable_token.in_limit_bandwidth = stub_profile->profile.in_limit_bandwidth;
+ stub_profile->avaliable_token.out_limit_bandwidth = stub_profile->profile.out_limit_bandwidth;
+ }
+
return;
}
-void stub_set_async_token_get_times(int profile_id, int times)
+void stub_swarmkv_clear_resource()
{
- pf_async_times[profile_id] = times;
-
- if (pf_async_times[profile_id] == 0) {
- for (unsigned int i = 0; i < pf_async_thread[profile_id].size(); i++) {
- stub_get_token_thread_func(&pf_async_thread[profile_id][i]);
- }
- pf_async_thread[profile_id].clear();
+ struct profile_priority_queue_len *node, *tmp = NULL;
+
+ HASH_ITER(hh, profile_priority_queue_len_hash, node, tmp) {
+ HASH_DEL(profile_priority_queue_len_hash, node);
+ free(node);
}
return;
@@ -185,21 +171,30 @@ int swarmkv_options_set_log_level(struct swarmkv_options *opts, int loglevel)
static void swarmkv_hincrby_cmd_func(char *cmd_str, swarmkv_on_reply_callback_t * cb, void *cb_arg)
{
- int profile_id;
+ uuid_t profile_uuid;
+ char uuid_str[UUID_STR_LEN] = {0};
int priority;
int value;
char direction[5] = {0};
enum shaping_packet_dir dir;
struct swarmkv_reply *reply = (struct swarmkv_reply*)calloc(1, sizeof(struct swarmkv_reply));
- sscanf(cmd_str, "HINCRBY tsg-shaping-%d priority-%d-%s %d", &profile_id, &priority, direction, &value);
+ sscanf(cmd_str, "HINCRBY tsg-shaping-%s priority-%d-%s %d", uuid_str, &priority, direction, &value);
+ uuid_parse(uuid_str, profile_uuid);
if (strncmp(direction, "in", 2) == 0) {
dir = SHAPING_DIR_IN;
} else {
dir = SHAPING_DIR_OUT;
}
- profile_priority_len[profile_id][priority][dir] += value;
+ struct profile_priority_queue_len *node = NULL;
+ HASH_FIND(hh, profile_priority_queue_len_hash, profile_uuid, sizeof(uuid_t), node);
+ if (!node) {
+ node = (struct profile_priority_queue_len*)calloc(1, sizeof(struct profile_priority_queue_len));
+ uuid_copy(node->profile_uuid, profile_uuid);
+ HASH_ADD(hh, profile_priority_queue_len_hash, profile_uuid, sizeof(uuid_t), node);
+ }
+ node->priority_queue_len[priority][dir] += value;
reply->type = SWARMKV_REPLY_INTEGER;
cb(reply, cb_arg);
@@ -211,7 +206,8 @@ static void swarmkv_hincrby_cmd_func(char *cmd_str, swarmkv_on_reply_callback_t
static void swarmkv_hmget_cmd_func(char *cmd_str, swarmkv_on_reply_callback_t * cb, void *cb_arg)
{
- int profile_id;
+ uuid_t profile_uuid;
+ char uuid_str[UUID_STR_LEN] = {0};
int priority[10];
int ret;
int priority_num;
@@ -219,10 +215,19 @@ static void swarmkv_hmget_cmd_func(char *cmd_str, swarmkv_on_reply_callback_t *
enum shaping_packet_dir dir;
struct swarmkv_reply *reply = (struct swarmkv_reply*)calloc(1, sizeof(struct swarmkv_reply));
- ret = sscanf(cmd_str, "HMGET tsg-shaping-%d priority-%d-%s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s",
- &profile_id, &priority[0], direction, &priority[1], &priority[2], &priority[3], &priority[4], &priority[5], &priority[6], &priority[7], &priority[8]);
+ ret = sscanf(cmd_str, "HMGET tsg-shaping-%s priority-%d-%s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s",
+ uuid_str, &priority[0], direction, &priority[1], &priority[2], &priority[3], &priority[4], &priority[5], &priority[6], &priority[7], &priority[8]);
priority_num = ret - 1;
+ uuid_parse(uuid_str, profile_uuid);
+ struct profile_priority_queue_len *node = NULL;
+ HASH_FIND(hh, profile_priority_queue_len_hash, profile_uuid, sizeof(uuid_t), node);
+ if (!node) {
+ node = (struct profile_priority_queue_len*)calloc(1, sizeof(struct profile_priority_queue_len));
+ uuid_copy(node->profile_uuid, profile_uuid);
+ HASH_ADD(hh, profile_priority_queue_len_hash, profile_uuid, sizeof(uuid_t), node);
+ }
+
if (strncmp(direction, "in", 2) == 0) {
dir = SHAPING_DIR_IN;
} else {
@@ -237,7 +242,7 @@ static void swarmkv_hmget_cmd_func(char *cmd_str, swarmkv_on_reply_callback_t *
reply->elements[i]->type = SWARMKV_REPLY_STRING;
char tmp_str[128] = {0};
- sprintf(tmp_str, "%d", profile_priority_len[profile_id][priority[i]][dir]);
+ sprintf(tmp_str, "%d", node->priority_queue_len[priority[i]][dir]);
reply->elements[i]->str = (char *)calloc(1, strlen(tmp_str));
memcpy(reply->elements[i]->str, tmp_str, strlen(tmp_str));
reply->elements[i]->len = strlen(tmp_str);
@@ -281,59 +286,46 @@ void swarmkv_async_command(struct swarmkv *db, swarmkv_on_reply_callback_t * cb,
void swarmkv_tconsume(struct swarmkv * db, const char * key, size_t keylen, long long tokens, swarmkv_on_reply_callback_t *cb, void *cb_arg)
{
int actual_tokens;
- struct stub_token_thread_arg thread_arg;
struct swarmkv_reply reply;
- int profile_id;
char direction[16] = {0};
-
- sscanf(key, "tsg-shaping-%d-%15s", &profile_id, direction);
+ char uuid_str[UUID_STR_LEN] = {0};
+ uuid_t profile_uuid;
+ struct stub_shaping_profile *stub_profile = NULL;
+
+ sscanf(key, "tsg-shaping-%36s-%15s", uuid_str, direction);
+ uuid_parse(uuid_str, profile_uuid);
+ HASH_FIND(hh, profiles_hash, profile_uuid, sizeof(uuid_t), stub_profile);
+ if (!stub_profile) {
+ return;
+ }
if (strncmp("bidirectional", direction, sizeof(direction)) == 0) {
- if (pf_curr_avl_token[profile_id].bidirection_limit_bandwidth == DEFAULT_AVALIABLE_TOKEN_PER_SEC) {
+ if (stub_profile->avaliable_token.bidirection_limit_bandwidth == AVALIABLE_TOKEN_UNLIMITED) {
actual_tokens = tokens;
} else {
- actual_tokens = pf_curr_avl_token[profile_id].bidirection_limit_bandwidth >= tokens ? tokens : 0;
- pf_curr_avl_token[profile_id].bidirection_limit_bandwidth -= actual_tokens;
+ actual_tokens = stub_profile->avaliable_token.bidirection_limit_bandwidth >= tokens ? tokens : 0;
+ stub_profile->avaliable_token.bidirection_limit_bandwidth -= actual_tokens;
}
} else if (strncmp("incoming", direction, sizeof(direction)) == 0) {
- if (pf_curr_avl_token[profile_id].in_limit_bandwidth == DEFAULT_AVALIABLE_TOKEN_PER_SEC) {
+ if (stub_profile->avaliable_token.in_limit_bandwidth == AVALIABLE_TOKEN_UNLIMITED) {
actual_tokens = tokens;
} else {
- actual_tokens = pf_curr_avl_token[profile_id].in_limit_bandwidth >= tokens ? tokens : 0;
- pf_curr_avl_token[profile_id].in_limit_bandwidth -= actual_tokens;
+ actual_tokens = stub_profile->avaliable_token.in_limit_bandwidth >= tokens ? tokens : 0;
+ stub_profile->avaliable_token.in_limit_bandwidth -= actual_tokens;
}
} else {
- if (pf_curr_avl_token[profile_id].out_limit_bandwidth == DEFAULT_AVALIABLE_TOKEN_PER_SEC) {
+ if (stub_profile->avaliable_token.out_limit_bandwidth == AVALIABLE_TOKEN_UNLIMITED) {
actual_tokens = tokens;
} else {
- actual_tokens = pf_curr_avl_token[profile_id].out_limit_bandwidth >= tokens ? tokens : 0;
- pf_curr_avl_token[profile_id].out_limit_bandwidth -= actual_tokens;
+ actual_tokens = stub_profile->avaliable_token.out_limit_bandwidth >= tokens ? tokens : 0;
+ stub_profile->avaliable_token.out_limit_bandwidth -= actual_tokens;
}
}
- if (pf_async_times[profile_id] == 0) {
- for (unsigned int i = 0; i < pf_async_thread[profile_id].size(); i++) {
- stub_get_token_thread_func(&pf_async_thread[profile_id][i]);
- }
- pf_async_thread[profile_id].clear();
- }
-
reply.integer = actual_tokens;
reply.type = SWARMKV_REPLY_INTEGER;
- if (pf_async_times[profile_id] > 0) {
- pf_async_times[profile_id]--;
-
- thread_arg.profile_id = profile_id;
- thread_arg.reply = reply;
- thread_arg.cb = cb;
- thread_arg.cb_arg = cb_arg;
-
- pf_async_thread[profile_id].push_back(thread_arg);
-
- } else {
- cb(&reply, cb_arg);
- }
+ cb(&reply, cb_arg);
return;
}
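
swarmkv_tconsume above now recovers the profile UUID and direction from keys of the form "tsg-shaping-<uuid>-<direction>". The %36s width works because a textual UUID is always exactly 36 characters and contains no whitespace, so the conversion stops right before the "-<direction>" suffix. A standalone sketch of that parsing, with an illustrative key (the "outgoing" suffix is an assumption, not copied from the tests):

#include <cstdio>
#include <uuid/uuid.h>

int main()
{
    /* Illustrative key; the direction suffix is assumed for this sketch. */
    const char *key = "tsg-shaping-00000000-0000-0000-0000-000000000001-outgoing";
    char uuid_str[37] = {0};     /* 36 UUID characters plus the terminating NUL */
    char direction[16] = {0};
    uuid_t uuid;

    if (sscanf(key, "tsg-shaping-%36s-%15s", uuid_str, direction) != 2)
        return 1;
    if (uuid_parse(uuid_str, uuid) != 0)
        return 1;                /* reject malformed UUIDs */

    printf("uuid=%s direction=%s\n", uuid_str, direction);
    return 0;
}

In swarmkv_hincrby_cmd_func and swarmkv_hmget_cmd_func a plain %s is enough, because there the UUID portion of the key is terminated by a space in the command string.
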
diff --git a/shaping/test/gtest_shaper.cpp b/shaping/test/gtest_shaper.cpp
index 2f9a1b1..e79bd2d 100644
--- a/shaping/test/gtest_shaper.cpp
+++ b/shaping/test/gtest_shaper.cpp
@@ -99,7 +99,7 @@ static int judge_packet_eq(struct stub_pkt_queue *expec_queue, struct stub_pkt_q
}
static void shaping_stat_judge(char *counter_file_line, char *guage_file_line, int counter_json_array_idx,
- int guage_json_array_idx, int rule_id, int profile_id, int priority,
+ int guage_json_array_idx, const char *rule_uuid, const char *profile_uuid, int priority,
unsigned long long tx_pkts, unsigned long long tx_bytes,
unsigned long long drop_pkts, long long queue_len, long long max_latency,
unsigned char direction, char profile_type[])
@@ -127,13 +127,13 @@ static void shaping_stat_judge(char *counter_file_line, char *guage_file_line, i
ASSERT_TRUE(tmp_obj != NULL);
EXPECT_EQ(tmp_obj->valueint, STUB_TEST_VSYS_ID);
- tmp_obj = cJSON_GetObjectItem(json_array_element, "rule_id");
+ tmp_obj = cJSON_GetObjectItem(json_array_element, "rule_uuid");
ASSERT_TRUE(tmp_obj != NULL);
- EXPECT_EQ(rule_id, tmp_obj->valueint);
+ EXPECT_STREQ(rule_uuid, tmp_obj->valuestring);
- tmp_obj = cJSON_GetObjectItem(json_array_element, "profile_id");
+ tmp_obj = cJSON_GetObjectItem(json_array_element, "profile_uuid");
ASSERT_TRUE(tmp_obj != NULL);
- EXPECT_EQ(profile_id, tmp_obj->valueint);
+ EXPECT_STREQ(profile_uuid, tmp_obj->valuestring);
tmp_obj = cJSON_GetObjectItem(json_array_element, "priority");
ASSERT_TRUE(tmp_obj != NULL);
@@ -237,23 +237,24 @@ TEST(single_session, udp_tx_in_order)
struct stub_pkt_queue *actual_tx_queue;
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf = NULL;
- long long rule_id[] = {0};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"};
int priority[] = {1};
int profile_num[] = {1};
- int profile_id[][MAX_REF_PROFILE] = {{0}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}};
TAILQ_INIT(&expec_tx_queue);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
sf = shaping_flow_new(&ctx->thread_ctx[0]);
ASSERT_TRUE(sf != NULL);
- stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id);
- stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1);
+ uuid_t rule_uuid;
+ uuid_parse(rule_uuid_strs[0], rule_uuid);
+ shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1);
/**********send packets*********************/
@@ -265,7 +266,7 @@ TEST(single_session, udp_tx_in_order)
ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue));
while (!TAILQ_EMPTY(&expec_tx_queue)) {//last 90 delay packets
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
for (int i = 0; i < 20; i++) {//even if polling is invoked more than 10 times, only 10 pkts should be sent
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -292,11 +293,11 @@ TEST(single_session, udp_tx_in_order)
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_swarmkv_clear_resource();
/*******test statistics***********/
//judge shaping metric
- shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 100, 10000, 0, 0, 171000, SHAPING_DIR_OUT, profile_type_primary);//max latency is last 10 pkts
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 100, 10000, 0, 0, 171000, SHAPING_DIR_OUT, profile_type_primary);//max latency is last 10 pkts
//judge shaping global metric
shaping_global_stat_judge(global_stat_str, 100, 10000, 0, 0, 0, 0);
@@ -316,24 +317,25 @@ TEST(bidirectional, udp_tx_in_order)
struct stub_pkt_queue *actual_tx_queue;
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf = NULL;
- long long rule_id[] = {0};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"};
int priority[] = {1};
int profile_num[] = {1};
- int profile_id[][MAX_REF_PROFILE] = {{0}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}};
TAILQ_INIT(&expec_tx_queue_in);
TAILQ_INIT(&expec_tx_queue_out);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
sf = shaping_flow_new(&ctx->thread_ctx[0]);
ASSERT_TRUE(sf != NULL);
- stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id);
- stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_BIDIRECTION);
+ stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_BIDIRECTION);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1);
+ uuid_t rule_uuid;
+ uuid_parse(rule_uuid_strs[0], rule_uuid);
+ shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1);
/*******send packets***********/
send_packets(&ctx->thread_ctx[0], sf, 10, 100, SHAPING_DIR_OUT, &expec_tx_queue_out, 1, 0);
@@ -345,12 +347,12 @@ TEST(bidirectional, udp_tx_in_order)
ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue_out, actual_tx_queue, 10));
ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue));
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue_out, actual_tx_queue, 1));
while(!TAILQ_EMPTY(&expec_tx_queue_out)) {
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue_in, actual_tx_queue, 1));
@@ -367,7 +369,7 @@ TEST(bidirectional, udp_tx_in_order)
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_swarmkv_clear_resource();
}
@@ -380,24 +382,25 @@ TEST(max_min_host_fairness_profile, udp_tx_in_order)
struct stub_pkt_queue *actual_tx_queue;
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf = NULL;
- long long rule_id[] = {0};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"};
int priority[] = {1};
int profile_num[] = {1};
- int profile_id[][MAX_REF_PROFILE] = {{0}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}};
TAILQ_INIT(&expec_tx_queue);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
sf = shaping_flow_new(&ctx->thread_ctx[0]);
ASSERT_TRUE(sf != NULL);
- stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id);
- stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
- stub_set_profile_type(0, PROFILE_TYPE_MAX_MIN_HOST_FAIRNESS);
+ stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_profile_type(profile_uuid_strs[0][0], PROFILE_TYPE_MAX_MIN_HOST_FAIRNESS);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1);
+ uuid_t rule_uuid;
+ uuid_parse(rule_uuid_strs[0], rule_uuid);
+ shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1);
/**********send packets*********************/
@@ -409,7 +412,7 @@ TEST(max_min_host_fairness_profile, udp_tx_in_order)
ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue));
while (!TAILQ_EMPTY(&expec_tx_queue)) {//last 90 delay packets
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
for (int i = 0; i < 20; i++) {//even if polling is invoked more than 10 times, only 10 pkts should be sent
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -436,11 +439,12 @@ TEST(max_min_host_fairness_profile, udp_tx_in_order)
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_clear_resource();
+ stub_swarmkv_clear_resource();
/*******test statistics***********/
//judge shaping metric
- shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 100, 10000, 0, 0, 171000, SHAPING_DIR_OUT, profile_type_primary);//max latency is last 10 pkts
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 100, 10000, 0, 0, 171000, SHAPING_DIR_OUT, profile_type_primary);//max latency is last 10 pkts
//judge shaping global metric
shaping_global_stat_judge(global_stat_str, 100, 10000, 0, 0, 0, 0);
@@ -460,24 +464,25 @@ TEST(single_session, tcp_tx_in_order)
struct stub_pkt_queue *actual_tx_queue;
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf = NULL;
- long long rule_id[] = {0};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"};
int priority[] = {1};
int profile_num[] = {1};
- int profile_id[][MAX_REF_PROFILE] = {{0}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}};
TAILQ_INIT(&expec_tx_queue);
TAILQ_INIT(&expec_pure_ctl_tx_queue);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
sf = shaping_flow_new(&ctx->thread_ctx[0]);
ASSERT_TRUE(sf != NULL);
- stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id);
- stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1);
+ uuid_t rule_uuid;
+ uuid_parse(rule_uuid_strs[0], rule_uuid);
+ shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1);
/*******send packets***********/
send_packets(&ctx->thread_ctx[0], sf, 20, 100, SHAPING_DIR_OUT, &expec_tx_queue, 1, 0);
@@ -498,18 +503,18 @@ TEST(single_session, tcp_tx_in_order)
shaper_stat_refresh(&ctx->thread_ctx[0], sf, 1);
fieldstat_easy_output(ctx->thread_ctx[0].stat->counter_instance, &counter_stat_str, &stat_str_len);
fieldstat_easy_output(ctx->thread_ctx[0].stat->guage_instance, &guage_stat_str, &stat_str_len);
- shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 20, 2000, 0, 10, 0, SHAPING_DIR_OUT, profile_type_primary);//*test statistics
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 20, 2000, 0, 10, 0, SHAPING_DIR_OUT, profile_type_primary);//*test statistics
free(counter_stat_str);
free(guage_stat_str);
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
for (int i = 0; i < 10; i++) {
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
}
ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue));//pure ctrl pkts force consume 1000 tokens, current token: -1000--->0, so no pkt can be sent
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
for (int i = 0; i < 11; i++) {//10 pkts which is not pure control, first polling request 10 times token, then 10 loops send 10 pkts
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -524,11 +529,12 @@ TEST(single_session, tcp_tx_in_order)
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_clear_resource();
+ stub_swarmkv_clear_resource();
/*******test statistics***********/
- shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 30, 3000, 0, 0, 31000, SHAPING_DIR_OUT, profile_type_primary);
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 30, 3000, 0, 0, 31000, SHAPING_DIR_OUT, profile_type_primary);
free(counter_stat_str);
free(guage_stat_str);
}
@@ -544,24 +550,26 @@ TEST(single_session, udp_diff_direction)
struct stub_pkt_queue *actual_tx_queue;
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf = NULL;
- long long rule_id[] = {0};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"};
int priority[] = {1};
int profile_num[] = {1};
- int profile_id[][MAX_REF_PROFILE] = {{0}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}};
TAILQ_INIT(&expec_tx_queue_in);
TAILQ_INIT(&expec_tx_queue_out);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
sf = shaping_flow_new(&ctx->thread_ctx[0]);
ASSERT_TRUE(sf != NULL);
- stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id);
- stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], AVALIABLE_TOKEN_UNLIMITED, SHAPING_DIR_IN, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1);
+ uuid_t rule_uuid;
+ uuid_parse(rule_uuid_strs[0], rule_uuid);
+ shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1);
/*******send packets***********/
send_packets(&ctx->thread_ctx[0], sf, 10, 100, SHAPING_DIR_OUT, &expec_tx_queue_out, 1, 0);
@@ -576,7 +584,7 @@ TEST(single_session, udp_diff_direction)
ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue_in, actual_tx_queue, 20));
ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue));
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
for (int i = 0; i < 22; i++) {//first polling just request token and don't send pkt
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -597,12 +605,13 @@ TEST(single_session, udp_diff_direction)
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_clear_resource();
+ stub_swarmkv_clear_resource();
/*******test statistics***********/
- shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 20, 2000, 0, 0, 21000, SHAPING_DIR_OUT, profile_type_primary);
- shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 20, 2000, 0, 0, 0, SHAPING_DIR_IN, profile_type_primary);
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 20, 2000, 0, 0, 21000, SHAPING_DIR_OUT, profile_type_primary);
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 20, 2000, 0, 0, 0, SHAPING_DIR_IN, profile_type_primary);
free(counter_stat_str);
free(guage_stat_str);
}
@@ -620,25 +629,28 @@ TEST(single_session, udp_multi_rules)
struct stub_pkt_queue *actual_tx_queue;
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf = NULL;
- long long rule_id[] = {0, 1, 2};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"};
int priority[] = {1, 2, 3};
int profile_num[] = {1, 1, 1};
- int profile_id[][MAX_REF_PROFILE] = {{0}, {1}, {2}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, {"00000000-0000-0000-0000-000000000002"}, {"00000000-0000-0000-0000-000000000003"}};
TAILQ_INIT(&expec_tx_queue);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
sf = shaping_flow_new(&ctx->thread_ctx[0]);
ASSERT_TRUE(sf != NULL);
- stub_set_matched_shaping_rules(3, rule_id, priority, profile_num, profile_id);
- stub_set_token_bucket_avl_per_sec(0, 3000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
- stub_set_token_bucket_avl_per_sec(1, 2000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
- stub_set_token_bucket_avl_per_sec(2, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_matched_shaping_rules(3, rule_uuid_strs, priority, profile_num, profile_uuid_strs);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 3000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[1][0], 2000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[2][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 3);
+ uuid_t rule_uuids[3];
+ for (int i = 0; i < 3; i++) {
+ uuid_parse(rule_uuid_strs[i], rule_uuids[i]);
+ }
+ shaper_rules_update(&ctx->thread_ctx[0], sf, rule_uuids, 3);
/*******send packets***********/
send_packets(&ctx->thread_ctx[0], sf, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue, 5, 0);
@@ -648,9 +660,9 @@ TEST(single_session, udp_multi_rules)
ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue, actual_tx_queue, 10));
while (!TAILQ_EMPTY(&expec_tx_queue)) {//last 90 delay packets
- stub_refresh_token_bucket(0);
- stub_refresh_token_bucket(1);
- stub_refresh_token_bucket(2);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
+ stub_refresh_token_bucket(profile_uuid_strs[1][0]);
+ stub_refresh_token_bucket(profile_uuid_strs[2][0]);
for (int i = 0; i < 60; i++) {
//there are 3 rules, so sending one packet takes 3 polling passes and 10 packets take 30
//even if polling is invoked more than 30 times, only 10 pkts should be sent
@@ -673,17 +685,18 @@ TEST(single_session, udp_multi_rules)
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_clear_resource();
+ stub_swarmkv_clear_resource();
/*******test statistics***********/
//profile_id 0
- shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 100, 10000, 0, 0, 507000, SHAPING_DIR_OUT, profile_type_primary);
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 100, 10000, 0, 0, 507000, SHAPING_DIR_OUT, profile_type_primary);
//profile_id 1
- shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 1, 1, 1, 1, 100, 10000, 0, 0, 1000, SHAPING_DIR_OUT, profile_type_primary);
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 1, rule_uuid_strs[1], profile_uuid_strs[1][0], 1, 100, 10000, 0, 0, 1000, SHAPING_DIR_OUT, profile_type_primary);
//profile_id 2
- shaping_stat_judge(counter_stat_str, guage_stat_str, 2, 2, 2, 2, 1, 100, 10000, 0, 0, 91000, SHAPING_DIR_OUT, profile_type_primary);//max latency is first queued pkt
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 2, 2, rule_uuid_strs[2], profile_uuid_strs[2][0], 1, 100, 10000, 0, 0, 91000, SHAPING_DIR_OUT, profile_type_primary);//max latency is first queued pkt
free(counter_stat_str);
free(guage_stat_str);
@@ -699,24 +712,25 @@ TEST(single_session, udp_borrow)
struct stub_pkt_queue *actual_tx_queue;
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf = NULL;
- long long rule_id[] = {1};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"};
int priority[] = {1};
int profile_num[] = {2};
- int profile_id[][MAX_REF_PROFILE] = {{1, 2}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"}};
TAILQ_INIT(&expec_tx_queue);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
sf = shaping_flow_new(&ctx->thread_ctx[0]);
ASSERT_TRUE(sf != NULL);
- stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id);
- stub_set_token_bucket_avl_per_sec(1, 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
- stub_set_token_bucket_avl_per_sec(2, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][1], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1);
+ uuid_t rule_uuid;
+ uuid_parse(rule_uuid_strs[0], rule_uuid);
+ shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1);
/*******send packets***********/
send_packets(&ctx->thread_ctx[0], sf, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue, 1, 0);
@@ -726,7 +740,7 @@ TEST(single_session, udp_borrow)
ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue, actual_tx_queue, 10));
while (!TAILQ_EMPTY(&expec_tx_queue)) {//last 90 delay packets
- stub_refresh_token_bucket(2);
+ stub_refresh_token_bucket(profile_uuid_strs[0][1]);
for (int i = 0; i < 20; i++) {//even if polling is invoked more than 10 times, only 10 pkts should be sent
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -747,14 +761,15 @@ TEST(single_session, udp_borrow)
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_clear_resource();
+ stub_swarmkv_clear_resource();
/*******test statistics***********/
//profile_id 1, primary
- shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 1, 1, 1, 0, 0, 0, 0, 171000, SHAPING_DIR_OUT, profile_type_primary);
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 0, 0, 0, 0, 171000, SHAPING_DIR_OUT, profile_type_primary);
//profile_id 2, borrow
- shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 0, 1, 2, 2, 100, 10000, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow);
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 0, rule_uuid_strs[0], profile_uuid_strs[0][1], 2, 100, 10000, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow);
free(counter_stat_str);
free(guage_stat_str);
@@ -772,25 +787,26 @@ TEST(single_session, udp_borrow_same_priority_9)
struct stub_pkt_queue *actual_tx_queue;
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf = NULL;
- long long rule_id[] = {1};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"};
int priority[] = {9};
int profile_num[] = {3};
- int profile_id[][MAX_REF_PROFILE] = {{1, 2, 3}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"}};
TAILQ_INIT(&expec_tx_queue);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
sf = shaping_flow_new(&ctx->thread_ctx[0]);
ASSERT_TRUE(sf != NULL);
- stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id);
- stub_set_token_bucket_avl_per_sec(1, 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
- stub_set_token_bucket_avl_per_sec(2, 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
- stub_set_token_bucket_avl_per_sec(3, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][1], 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][2], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1);
+ uuid_t rule_uuid;
+ uuid_parse(rule_uuid_strs[0], rule_uuid);
+ shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1);
/*******send packets***********/
send_packets(&ctx->thread_ctx[0], sf, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue, 1, 0);
@@ -800,7 +816,7 @@ TEST(single_session, udp_borrow_same_priority_9)
ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue, actual_tx_queue, 10));
while (!TAILQ_EMPTY(&expec_tx_queue)) {//last 90 delay packets
- stub_refresh_token_bucket(3);
+ stub_refresh_token_bucket(profile_uuid_strs[0][2]);
for (int i = 0; i < 20; i++) {//even if polling is invoked more than 10 times, only 10 pkts should be sent
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -821,72 +837,23 @@ TEST(single_session, udp_borrow_same_priority_9)
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_clear_resource();
+ stub_swarmkv_clear_resource();
/*******test statistics***********/
//profile_id 1, primary
- shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 1, 1, 9, 0, 0, 0, 0, 171000, SHAPING_DIR_OUT, profile_type_primary);
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 9, 0, 0, 0, 0, 171000, SHAPING_DIR_OUT, profile_type_primary);
//profile_id 2, borrow
- shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 0, 1, 2, 9, 0, 0, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow);
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 0, rule_uuid_strs[0], profile_uuid_strs[0][1], 9, 0, 0, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow);
//profile_id 3, borrow
- shaping_stat_judge(counter_stat_str, guage_stat_str, 2, 0, 1, 3, 9, 100, 10000, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow);
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 2, 0, rule_uuid_strs[0], profile_uuid_strs[0][2], 9, 100, 10000, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow);
free(counter_stat_str);
free(guage_stat_str);
}
-/*session1 match rule1
- rule1:
- priority:1
- profile1: limit 1000, first 20 pkts async, then sync
-*/
-TEST(single_session_async, udp_close_before_async_exec)
-{
- struct stub_pkt_queue expec_tx_queue;
- struct stub_pkt_queue *actual_tx_queue;
- struct shaping_ctx *ctx = NULL;
- struct shaping_flow *sf = NULL;
- long long rule_id[] = {0};
- int priority[] = {1};
- int profile_num[] = {1};
- int profile_id[][MAX_REF_PROFILE] = {{0}};
-
- TAILQ_INIT(&expec_tx_queue);
- stub_init();
- dummy_swarmkv_init();
- ctx = shaping_engine_init();
- ASSERT_TRUE(ctx != NULL);
- sf = shaping_flow_new(&ctx->thread_ctx[0]);
- ASSERT_TRUE(sf != NULL);
-
- stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id);
- stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
- stub_set_async_token_get_times(0, 20);
- actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1);
-
-
- /*******send packets***********/
- send_packets(&ctx->thread_ctx[0], sf, 10, 100, SHAPING_DIR_OUT, &expec_tx_queue, 1, 0);
- ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue));//async callback haven't been called, no token, no packet be sent
- sf->flag |= SESSION_CLOSE;// receive close ctrlbuf
-
- stub_set_async_token_get_times(0, 0);//refresh async count, async thread will be executed
- sleep(1);//ensure async thread exec complete
-
- for (int i = 0; i < 10; i++) {
- polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
- }
-
- ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue, actual_tx_queue, 10));
-
- shaper_thread_resource_clear();
- shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
-}
-
/*session1 match rule1; session2 match rule2
rule1:
priority:1
@@ -906,18 +873,15 @@ TEST(two_session_diff_priority_same_profile, udp_borrow_in_order)
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf1 = NULL;
struct shaping_flow *sf2 = NULL;
- long long rule_ids[] = {1, 2};
- long long rule_id1[] = {1};
- long long rule_id2[] = {2};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"};
int profile_nums[] = {2, 1};
int prioritys[] = {1, 1};
- int profile_ids[][MAX_REF_PROFILE] = {{1, 2}, {2}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"}, {"00000000-0000-0000-0000-000000000002"}};
TAILQ_INIT(&expec_tx_queue1);
TAILQ_INIT(&expec_tx_queue2);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
@@ -927,13 +891,17 @@ TEST(two_session_diff_priority_same_profile, udp_borrow_in_order)
ASSERT_TRUE(sf2 != NULL);
- stub_set_matched_shaping_rules(2, rule_ids, prioritys, profile_nums, profile_ids);
+ stub_set_matched_shaping_rules(2, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs);
- stub_set_token_bucket_avl_per_sec(1, 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
- stub_set_token_bucket_avl_per_sec(2, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][1], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 1);
- shaper_rules_update(&ctx->thread_ctx[0], sf2, rule_id2, 1);
+ uuid_t rule_uuid1;
+ uuid_t rule_uuid2;
+ uuid_parse(rule_uuid_strs[0], rule_uuid1);
+ uuid_parse(rule_uuid_strs[1], rule_uuid2);
+ shaper_rules_update(&ctx->thread_ctx[0], sf1, &rule_uuid1, 1);
+ shaper_rules_update(&ctx->thread_ctx[0], sf2, &rule_uuid2, 1);
/*******send packets***********/
@@ -945,7 +913,7 @@ TEST(two_session_diff_priority_same_profile, udp_borrow_in_order)
ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue1, actual_tx_queue, 10));
ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue));
- stub_refresh_token_bucket(2);
+ stub_refresh_token_bucket(profile_uuid_strs[0][1]);
for (int i = 0; i < 20; i++) {
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -954,7 +922,7 @@ TEST(two_session_diff_priority_same_profile, udp_borrow_in_order)
ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue));
while (!TAILQ_EMPTY(&expec_tx_queue2)) {
- stub_refresh_token_bucket(2);
+ stub_refresh_token_bucket(profile_uuid_strs[0][1]);
for (int i = 0; i < 20; i++) {//even if polling is invoked more than 10 times, only 10 pkts should be sent
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -966,7 +934,7 @@ TEST(two_session_diff_priority_same_profile, udp_borrow_in_order)
stub_curr_time_s_inc(1);//inc time to refresh hmget interval
while (!TAILQ_EMPTY(&expec_tx_queue1)) {//last 90 delay packets
- stub_refresh_token_bucket(2);
+ stub_refresh_token_bucket(profile_uuid_strs[0][1]);
for (int i = 0; i < 20; i++) {//even if polling is invoked more than 10 times, only 10 pkts should be sent
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -988,17 +956,18 @@ TEST(two_session_diff_priority_same_profile, udp_borrow_in_order)
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_clear_resource();
+ stub_swarmkv_clear_resource();
/*******test statistics***********/
//profile_id 1, primary
- shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1471000, SHAPING_DIR_OUT, profile_type_primary);
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 0, 0, 0, 0, 1471000, SHAPING_DIR_OUT, profile_type_primary);
//profile_id 2, borrow
- shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 0, 1, 2, 2, 100, 10000, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow);
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 0, rule_uuid_strs[0], profile_uuid_strs[0][1], 2, 100, 10000, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow);
//profile_id 2, primary
- shaping_stat_judge(counter_stat_str, guage_stat_str, 2, 1, 2, 2, 1, 100, 10000, 0, 0, 191000, SHAPING_DIR_OUT, profile_type_primary);
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 2, 1, rule_uuid_strs[1], profile_uuid_strs[1][0], 1, 100, 10000, 0, 0, 191000, SHAPING_DIR_OUT, profile_type_primary);
free(counter_stat_str);
free(guage_stat_str);
@@ -1022,18 +991,15 @@ TEST(two_session_diff_priority_same_profile, two_thread_udp_tx_in_order)
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf1 = NULL;
struct shaping_flow *sf2 = NULL;
- long long rule_ids[] = {1, 2};
- long long rule_id1[] = {1};
- long long rule_id2[] = {2};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"};
int profile_nums[] = {1, 1};
int prioritys[] = {1, 2};
- int profile_id[][MAX_REF_PROFILE] = {{0}, {0}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, {"00000000-0000-0000-0000-000000000001"}};
TAILQ_INIT(&expec_tx_queue1);
TAILQ_INIT(&expec_tx_queue2);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
@@ -1042,12 +1008,16 @@ TEST(two_session_diff_priority_same_profile, two_thread_udp_tx_in_order)
sf2 = shaping_flow_new(&ctx->thread_ctx[1]);
ASSERT_TRUE(sf2 != NULL);
- stub_set_matched_shaping_rules(2, rule_ids, prioritys, profile_nums, profile_id);
+ stub_set_matched_shaping_rules(2, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs);
- stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 1);
- shaper_rules_update(&ctx->thread_ctx[1], sf2, rule_id2, 1);
+ uuid_t rule_uuid1;
+ uuid_t rule_uuid2;
+ uuid_parse(rule_uuid_strs[0], rule_uuid1);
+ uuid_parse(rule_uuid_strs[1], rule_uuid2);
+ shaper_rules_update(&ctx->thread_ctx[0], sf1, &rule_uuid1, 1);
+ shaper_rules_update(&ctx->thread_ctx[1], sf2, &rule_uuid2, 1);
/*******send packets***********/
send_packets(&ctx->thread_ctx[0], sf1, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue1, 1, 0);
@@ -1058,13 +1028,13 @@ TEST(two_session_diff_priority_same_profile, two_thread_udp_tx_in_order)
shaper_stat_refresh(&ctx->thread_ctx[0], sf1, 1);//flush thread 0's priority queue lengths to swarmkv
stub_curr_time_s_inc(1);//inc time to refresh hmget interval
for (int i = 0; i < 10; i++) {//the session in thread 1 has priority 2 and is blocked by the priority-1 session in thread 0
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);
ASSERT_EQ(-1, judge_packet_eq(&expec_tx_queue2, actual_tx_queue, 1));//lower priority, so no packets can be sent
}
while (!TAILQ_EMPTY(&expec_tx_queue1)) {
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);//require tokens
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);//send pkt
@@ -1074,7 +1044,7 @@ TEST(two_session_diff_priority_same_profile, two_thread_udp_tx_in_order)
stub_curr_time_s_inc(1);//inc time to refresh hmget interval
shaper_stat_refresh(&ctx->thread_ctx[0], sf1, 1);//flush thread 0's priority queue lengths to swarmkv
while (!TAILQ_EMPTY(&expec_tx_queue2)) {
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);
polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);
@@ -1087,7 +1057,8 @@ TEST(two_session_diff_priority_same_profile, two_thread_udp_tx_in_order)
shaping_flow_free(&ctx->thread_ctx[1], sf2);
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_clear_resource();
+ stub_swarmkv_clear_resource();
}
/*session1 match rule1; session2 match rule2
@@ -1108,18 +1079,15 @@ TEST(two_session_diff_priority_same_profile, profile_timer_test)
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf1 = NULL;
struct shaping_flow *sf2 = NULL;
- long long rule_ids[] = {1, 2};
- long long rule_id1[] = {1};
- long long rule_id2[] = {2};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"};
int profile_nums[] = {1, 1};
int prioritys[] = {1, 2};
- int profile_id[][MAX_REF_PROFILE] = {{0}, {0}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, {"00000000-0000-0000-0000-000000000001"}};
TAILQ_INIT(&expec_tx_queue1);
TAILQ_INIT(&expec_tx_queue2);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
@@ -1128,12 +1096,16 @@ TEST(two_session_diff_priority_same_profile, profile_timer_test)
sf2 = shaping_flow_new(&ctx->thread_ctx[1]);
ASSERT_TRUE(sf2 != NULL);
- stub_set_matched_shaping_rules(2, rule_ids, prioritys, profile_nums, profile_id);
+ stub_set_matched_shaping_rules(2, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs);
- stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 1);
- shaper_rules_update(&ctx->thread_ctx[1], sf2, rule_id2, 1);
+ uuid_t rule_uuid1;
+ uuid_t rule_uuid2;
+ uuid_parse(rule_uuid_strs[0], rule_uuid1);
+ uuid_parse(rule_uuid_strs[1], rule_uuid2);
+ shaper_rules_update(&ctx->thread_ctx[0], sf1, &rule_uuid1, 1);
+ shaper_rules_update(&ctx->thread_ctx[1], sf2, &rule_uuid2, 1);
/*******send packets***********/
send_packets(&ctx->thread_ctx[0], sf1, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue1, 1, 0);
@@ -1150,7 +1122,7 @@ TEST(two_session_diff_priority_same_profile, profile_timer_test)
stub_curr_time_s_inc(1);//inc time to refresh hmget interval
for (int i = 0; i < 10; i++) {//the session in thread 1 has priority 2 and is blocked by the priority-1 session in thread 0
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -1158,7 +1130,7 @@ TEST(two_session_diff_priority_same_profile, profile_timer_test)
}
while (!TAILQ_EMPTY(&expec_tx_queue1)) {
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);//first polling request token
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);//then send pkt
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -1175,7 +1147,7 @@ TEST(two_session_diff_priority_same_profile, profile_timer_test)
stub_curr_time_s_inc(1);//inc time to refresh hmget interval
while (!TAILQ_EMPTY(&expec_tx_queue2)) {
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);//first polling request token
polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);//then send pkt
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -1189,7 +1161,8 @@ TEST(two_session_diff_priority_same_profile, profile_timer_test)
shaping_flow_free(&ctx->thread_ctx[1], sf2);
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_clear_resource();
+ stub_swarmkv_clear_resource();
}
/*session1 match rule1; session2 match rule2
@@ -1210,18 +1183,15 @@ TEST(two_session_diff_priority_same_profile, one_direction_dont_block_another)
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf1 = NULL;
struct shaping_flow *sf2 = NULL;
- long long rule_ids[] = {1, 2};
- long long rule_id1[] = {1};
- long long rule_id2[] = {2};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"};
int profile_nums[] = {1, 1};
int prioritys[] = {1, 2};
- int profile_id[][MAX_REF_PROFILE] = {{0}, {0}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, {"00000000-0000-0000-0000-000000000001"}};
TAILQ_INIT(&expec_tx_queue1);
TAILQ_INIT(&expec_tx_queue2);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
@@ -1230,13 +1200,17 @@ TEST(two_session_diff_priority_same_profile, one_direction_dont_block_another)
sf2 = shaping_flow_new(&ctx->thread_ctx[1]);
ASSERT_TRUE(sf2 != NULL);
- stub_set_matched_shaping_rules(2, rule_ids, prioritys, profile_nums, profile_id);
+ stub_set_matched_shaping_rules(2, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs);
- stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
- stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_IN, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_IN, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 1);
- shaper_rules_update(&ctx->thread_ctx[1], sf2, rule_id2, 1);
+ uuid_t rule_uuid1;
+ uuid_t rule_uuid2;
+ uuid_parse(rule_uuid_strs[0], rule_uuid1);
+ uuid_parse(rule_uuid_strs[1], rule_uuid2);
+ shaper_rules_update(&ctx->thread_ctx[0], sf1, &rule_uuid1, 1);
+ shaper_rules_update(&ctx->thread_ctx[1], sf2, &rule_uuid2, 1);
/*******send packets***********/
send_packets(&ctx->thread_ctx[0], sf1, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue1, 1, 0);
@@ -1254,7 +1228,7 @@ TEST(two_session_diff_priority_same_profile, one_direction_dont_block_another)
stub_curr_time_s_inc(1);//inc time to refresh hmget interval
while (!TAILQ_EMPTY(&expec_tx_queue2)) {//the priority-1 session in thread 0 blocks the OUT direction; the session in thread 1 has priority 2, but its IN direction is unaffected
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);//first polling request token
polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -1263,7 +1237,7 @@ TEST(two_session_diff_priority_same_profile, one_direction_dont_block_another)
}
while (!TAILQ_EMPTY(&expec_tx_queue1)) {
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);//first polling request token
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);//then send pkt
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -1277,7 +1251,8 @@ TEST(two_session_diff_priority_same_profile, one_direction_dont_block_another)
shaping_flow_free(&ctx->thread_ctx[1], sf2);
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_clear_resource();
+ stub_swarmkv_clear_resource();
}
/*session1 match rule1 & rule2; session2 match rule3
@@ -1302,18 +1277,15 @@ TEST(two_sessions, priority_non_block)
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf1 = NULL;
struct shaping_flow *sf2 = NULL;
- long long rule_ids[] = {1, 2, 3};
- long long rule_id1[] = {1, 2};
- long long rule_id2[] = {3};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"};
int profile_nums[] = {1, 1, 1};
int prioritys[] = {1, 2, 3};
- int profile_id[][MAX_REF_PROFILE] = {{0}, {1}, {0}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, {"00000000-0000-0000-0000-000000000002"}, {"00000000-0000-0000-0000-000000000001"}};
TAILQ_INIT(&expec_tx_queue1);
TAILQ_INIT(&expec_tx_queue2);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
@@ -1322,13 +1294,18 @@ TEST(two_sessions, priority_non_block)
sf2 = shaping_flow_new(&ctx->thread_ctx[1]);
ASSERT_TRUE(sf2 != NULL);
- stub_set_matched_shaping_rules(3, rule_ids, prioritys, profile_nums, profile_id);
+ stub_set_matched_shaping_rules(3, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs);
- stub_set_token_bucket_avl_per_sec(0, 3000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
- stub_set_token_bucket_avl_per_sec(1, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 3000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[1][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 2);
- shaper_rules_update(&ctx->thread_ctx[1], sf2, rule_id2, 1);
+ uuid_t rule_uuid1[2];
+ uuid_t rule_uuid2;
+ uuid_parse(rule_uuid_strs[0], rule_uuid1[0]);
+ uuid_parse(rule_uuid_strs[1], rule_uuid1[1]);
+ uuid_parse(rule_uuid_strs[2], rule_uuid2);
+ shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_uuid1, 2);
+ shaper_rules_update(&ctx->thread_ctx[1], sf2, &rule_uuid2, 1);
/*******send packets***********/
send_packets(&ctx->thread_ctx[0], sf1, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue1, 3, 0);//sf1 blocked by rule2(profile id 1), while rule3(profile id 0) still has 1000 token
@@ -1339,8 +1316,8 @@ TEST(two_sessions, priority_non_block)
ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue));
while (!TAILQ_EMPTY(&expec_tx_queue1)) {
- stub_refresh_token_bucket(0);
- stub_refresh_token_bucket(1);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
+ stub_refresh_token_bucket(profile_uuid_strs[1][0]);
for (int i = 0; i < 4; i++) {
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);//two rules, one rule need two polling, request token and send pkt
@@ -1353,7 +1330,8 @@ TEST(two_sessions, priority_non_block)
shaping_flow_free(&ctx->thread_ctx[1], sf2);
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_clear_resource();
+ stub_swarmkv_clear_resource();
}
/*session1 match rule1; session2 match rule2
@@ -1376,18 +1354,15 @@ TEST(two_sessions, borrow_when_primary_profile_priority_blocked)
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf1 = NULL;
struct shaping_flow *sf2 = NULL;
- long long rule_ids[] = {1, 2};
- long long rule_id1[] = {1};
- long long rule_id2[] = {2};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"};
int profile_nums[] = {1, 2};
int prioritys[] = {1, 2};
- int profile_id[][MAX_REF_PROFILE] = {{0}, {0, 1}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"}};
TAILQ_INIT(&expec_tx_queue1);
TAILQ_INIT(&expec_tx_queue2);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
@@ -1396,13 +1371,17 @@ TEST(two_sessions, borrow_when_primary_profile_priority_blocked)
sf2 = shaping_flow_new(&ctx->thread_ctx[1]);
ASSERT_TRUE(sf2 != NULL);
- stub_set_matched_shaping_rules(2, rule_ids, prioritys, profile_nums, profile_id);
+ stub_set_matched_shaping_rules(2, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs);
- stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
- stub_set_token_bucket_avl_per_sec(1, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[1][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[1][1], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 1);
- shaper_rules_update(&ctx->thread_ctx[1], sf2, rule_id2, 1);
+ uuid_t rule_uuid1;
+ uuid_t rule_uuid2;
+ uuid_parse(rule_uuid_strs[0], rule_uuid1);
+ uuid_parse(rule_uuid_strs[1], rule_uuid2);
+ shaper_rules_update(&ctx->thread_ctx[0], sf1, &rule_uuid1, 1);
+ shaper_rules_update(&ctx->thread_ctx[1], sf2, &rule_uuid2, 1);
/*******send packets***********/
send_packets(&ctx->thread_ctx[0], sf1, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue1, 1, 0);
@@ -1410,14 +1389,14 @@ TEST(two_sessions, borrow_when_primary_profile_priority_blocked)
ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue1, actual_tx_queue, 10));
while (!TAILQ_EMPTY(&expec_tx_queue2)) {
- stub_refresh_token_bucket(1);
+ stub_refresh_token_bucket(profile_uuid_strs[1][1]);
polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);//primary profile blocked by priority, send by borrow profile
ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue2, actual_tx_queue, 1));
}
while (!TAILQ_EMPTY(&expec_tx_queue1)) {
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[1][0]);
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
@@ -1428,7 +1407,8 @@ TEST(two_sessions, borrow_when_primary_profile_priority_blocked)
shaping_flow_free(&ctx->thread_ctx[1], sf2);
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_clear_resource();
+ stub_swarmkv_clear_resource();
}
/*session1 match rule1; session2 match rule2
@@ -1451,18 +1431,15 @@ TEST(two_sessions, primary_profile_priority_blocked_by_borrow_profile)
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf1 = NULL;
struct shaping_flow *sf2 = NULL;
- long long rule_ids[] = {1, 2};
- long long rule_id1[] = {1};
- long long rule_id2[] = {2};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"};
int profile_nums[] = {2, 1};
int prioritys[] = {1, 5};
- int profile_id[][MAX_REF_PROFILE] = {{0, 1}, {1}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"}, {"00000000-0000-0000-0000-000000000002"}};
TAILQ_INIT(&expec_tx_queue1);
TAILQ_INIT(&expec_tx_queue2);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
@@ -1471,13 +1448,17 @@ TEST(two_sessions, primary_profile_priority_blocked_by_borrow_profile)
sf2 = shaping_flow_new(&ctx->thread_ctx[1]);
ASSERT_TRUE(sf2 != NULL);
- stub_set_matched_shaping_rules(2, rule_ids, prioritys, profile_nums, profile_id);
+ stub_set_matched_shaping_rules(2, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs);
- stub_set_token_bucket_avl_per_sec(0, 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
- stub_set_token_bucket_avl_per_sec(1, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][1], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 1);
- shaper_rules_update(&ctx->thread_ctx[1], sf2, rule_id2, 1);
+ uuid_t rule_uuid1;
+ uuid_t rule_uuid2;
+ uuid_parse(rule_uuid_strs[0], rule_uuid1);
+ uuid_parse(rule_uuid_strs[1], rule_uuid2);
+ shaper_rules_update(&ctx->thread_ctx[0], sf1, &rule_uuid1, 1);
+ shaper_rules_update(&ctx->thread_ctx[1], sf2, &rule_uuid2, 1);
/*******send packets***********/
send_packets(&ctx->thread_ctx[0], sf1, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue1, 1, 0);
@@ -1487,7 +1468,7 @@ TEST(two_sessions, primary_profile_priority_blocked_by_borrow_profile)
ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue));
while (!TAILQ_EMPTY(&expec_tx_queue1)) {
- stub_refresh_token_bucket(1);
+ stub_refresh_token_bucket(profile_uuid_strs[0][1]);
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);//blocked by priority, sf1 has priority 2 for profile_b(id 1)
@@ -1498,7 +1479,7 @@ TEST(two_sessions, primary_profile_priority_blocked_by_borrow_profile)
stub_curr_time_s_inc(1);//inc time to refresh hmget interval
while (!TAILQ_EMPTY(&expec_tx_queue2)) {
- stub_refresh_token_bucket(1);
+ stub_refresh_token_bucket(profile_uuid_strs[0][1]);
polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);
polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);
@@ -1509,7 +1490,8 @@ TEST(two_sessions, primary_profile_priority_blocked_by_borrow_profile)
shaping_flow_free(&ctx->thread_ctx[1], sf2);
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_clear_resource();
+ stub_swarmkv_clear_resource();
}
/*session1 match rule1
@@ -1521,23 +1503,24 @@ TEST(statistics, udp_drop_pkt)
struct stub_pkt_queue *actual_tx_queue;
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf = NULL;
- long long rule_id[] = {0};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"};
int priority[] = {1};
int profile_num[] = {1};
- int profile_id[][MAX_REF_PROFILE] = {{0}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}};
TAILQ_INIT(&expec_tx_queue);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
sf = shaping_flow_new(&ctx->thread_ctx[0]);
ASSERT_TRUE(sf != NULL);
- stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id);
- stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1);
+ uuid_t rule_uuid;
+ uuid_parse(rule_uuid_strs[0], rule_uuid);
+ shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1);
/*******send packets***********/
send_packets(&ctx->thread_ctx[0], sf, SHAPING_SESSION_QUEUE_LEN + 10, 100, SHAPING_DIR_OUT, &expec_tx_queue, 1, 0);
@@ -1548,7 +1531,7 @@ TEST(statistics, udp_drop_pkt)
ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue));
while (!TAILQ_EMPTY(&expec_tx_queue)) {
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -1572,11 +1555,12 @@ TEST(statistics, udp_drop_pkt)
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_clear_resource();
+ stub_swarmkv_clear_resource();
/*******test statistics***********/
//judge shaping metric
- shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, SHAPING_SESSION_QUEUE_LEN+10, (SHAPING_SESSION_QUEUE_LEN+10)*100, 100, 0, 228000, SHAPING_DIR_OUT, profile_type_primary);//every queued pkt's latency is max
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, SHAPING_SESSION_QUEUE_LEN+10, (SHAPING_SESSION_QUEUE_LEN+10)*100, 100, 0, 228000, SHAPING_DIR_OUT, profile_type_primary);//every queued pkt's latency is max
//judge shaping global metric
shaping_global_stat_judge(global_stat_str, SHAPING_SESSION_QUEUE_LEN+10, (SHAPING_SESSION_QUEUE_LEN+10)*100, 100, 10000, 0, 0);
@@ -1595,24 +1579,25 @@ TEST(statistics, udp_queueing_pkt)
struct stub_pkt_queue *actual_tx_queue;
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf = NULL;
- long long rule_id[] = {0};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"};
int priority[] = {1};
int profile_num[] = {1};
- int profile_id[][MAX_REF_PROFILE] = {{0}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{ "00000000-0000-0000-0000-000000000001"}};
TAILQ_INIT(&expec_tx_queue);
stub_init();
- dummy_swarmkv_init();
ctx = shaping_engine_init();
ASSERT_TRUE(ctx != NULL);
sf = shaping_flow_new(&ctx->thread_ctx[0]);
ASSERT_TRUE(sf != NULL);
- stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id);
- stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs);
+ stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
actual_tx_queue = stub_get_tx_queue();
- shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1);
+ uuid_t rule_uuid;
+ uuid_parse(rule_uuid_strs[0], rule_uuid);
+ shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1);
/*******send packets***********/
send_packets(&ctx->thread_ctx[0], sf, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue, 1, 0);
@@ -1633,7 +1618,7 @@ TEST(statistics, udp_queueing_pkt)
fieldstat_easy_output(ctx->thread_ctx[0].stat->guage_instance, &guage_stat_str, &stat_str_len);
/*******judge metric********/
- shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 10, 1000, 0, 90, 0, SHAPING_DIR_OUT, profile_type_primary);
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 10, 1000, 0, 90, 0, SHAPING_DIR_OUT, profile_type_primary);
shaping_global_stat_judge(global_stat_str, 10, 1000, 0, 0, 90, 9000);
free(global_stat_str);
@@ -1645,7 +1630,7 @@ TEST(statistics, udp_queueing_pkt)
ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue));
while (!TAILQ_EMPTY(&expec_tx_queue)) {//last 90 delay packets
- stub_refresh_token_bucket(0);
+ stub_refresh_token_bucket(profile_uuid_strs[0][0]);
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);
stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET);
@@ -1662,11 +1647,12 @@ TEST(statistics, udp_queueing_pkt)
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
- stub_clear_matched_shaping_rules();
+ stub_clear_resource();
+ stub_swarmkv_clear_resource();
/*******test statistics***********/
//judge shaping metric
- shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 100, 10000, 0, 0, 90000, SHAPING_DIR_OUT, profile_type_primary);
+ shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 100, 10000, 0, 0, 90000, SHAPING_DIR_OUT, profile_type_primary);
//judge global metric
shaping_global_stat_judge(global_stat_str, 100, 10000, 0, 0, 0, 0);
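Every test in gtest_shaper.cpp now converts its UUID string literals with uuid_parse() right before calling shaper_rules_update(). A small helper could fold that boilerplate into one call; the sketch below is hypothetical (parse_rule_uuids is not part of this patch) and assumes only the libuuid API already used in these tests.

#include <uuid/uuid.h>
#include <cassert>

// Parse an array of UUID strings into uuid_t values; asserts on malformed input.
static void parse_rule_uuids(const char *const strs[], int n, uuid_t out[])
{
    for (int i = 0; i < n; i++) {
        int rc = uuid_parse(strs[i], out[i]); // returns 0 on success
        assert(rc == 0);
        (void)rc;
    }
}

Usage would mirror the call sites above, e.g. parse_rule_uuids(rule_uuid_strs, 2, rule_uuids) followed by shaper_rules_update(&ctx->thread_ctx[0], sf1, &rule_uuids[0], 1).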
diff --git a/shaping/test/gtest_shaper_aqm.cpp b/shaping/test/gtest_shaper_aqm.cpp
index 6acb1f3..fd12dfe 100644
--- a/shaping/test/gtest_shaper_aqm.cpp
+++ b/shaping/test/gtest_shaper_aqm.cpp
@@ -8,12 +8,15 @@ TEST(aqm_bule, need_drop)
{
struct shaper_aqm_blue_para para;
int drop_cnt = 0;
+ uuid_t uuid;
+
+ uuid_parse("00000000-0000-0000-0000-000000000001", uuid);
para.update_time = 0;
para.probability = 0;
for (int i = 0; i < 10000; i++) {
- if (shaper_aqm_blue_need_drop(0, &para, BLUE_QUEUE_LEN_MAX + 1)) {
+ if (shaper_aqm_blue_need_drop(uuid, &para, BLUE_QUEUE_LEN_MAX + 1)) {
drop_cnt++;
}
}
@@ -24,7 +27,7 @@ TEST(aqm_bule, need_drop)
EXPECT_LT(drop_cnt, 1100);
sleep(3);
- shaper_aqm_blue_need_drop(0, &para, 0);
+ shaper_aqm_blue_need_drop(uuid, &para, 0);
EXPECT_EQ(para.probability, BLUE_INCREMENT - BLUE_DECREMENT);
}
@@ -32,12 +35,15 @@ TEST(aqm_blue, no_drop)
{
struct shaper_aqm_blue_para para;
int drop_cnt = 0;
+ uuid_t uuid;
+
+ uuid_parse("00000000-0000-0000-0000-000000000001", uuid);
para.update_time = 0;
para.probability = 0;
for (int i = 0; i < 10000; i++) {
- if (shaper_aqm_blue_need_drop(0, &para, BLUE_QUEUE_LEN_MAX - 1)) {
+ if (shaper_aqm_blue_need_drop(uuid, &para, BLUE_QUEUE_LEN_MAX - 1)) {
drop_cnt++;
}
}
@@ -46,7 +52,7 @@ TEST(aqm_blue, no_drop)
EXPECT_EQ(drop_cnt, 0);
sleep(3);
- shaper_aqm_blue_need_drop(0, &para, BLUE_QUEUE_LEN_MAX - 1);
+ shaper_aqm_blue_need_drop(uuid, &para, BLUE_QUEUE_LEN_MAX - 1);
EXPECT_EQ(para.probability, 0);
}
@@ -54,26 +60,29 @@ TEST(aqm_codel, need_drop)
{
struct shaper_aqm_codel_para para;
int curr_time_ms = 0;
+ uuid_t uuid;
+
+ uuid_parse("00000000-0000-0000-0000-000000000001", uuid);
memset(&para, 0, sizeof(para));
- shaper_aqm_codel_need_drop(0, &para, curr_time_ms, CODEL_MAX_LATENCY + 1);
+ shaper_aqm_codel_need_drop(uuid, &para, curr_time_ms, CODEL_MAX_LATENCY + 1);
EXPECT_EQ(para.state, CODEL_STATE_DROPPING_TIMER);
EXPECT_EQ(para.start_drop_time_ms, curr_time_ms + CODEL_DROP_INTERVAL);
curr_time_ms = para.start_drop_time_ms + 1;
- shaper_aqm_codel_need_drop(0, &para, curr_time_ms, CODEL_MAX_LATENCY + 1);
+ shaper_aqm_codel_need_drop(uuid, &para, curr_time_ms, CODEL_MAX_LATENCY + 1);
EXPECT_EQ(para.state, CODEL_STATE_DROPPING_PHASE);
EXPECT_EQ(para.drop_count, 1);
EXPECT_EQ(para.next_drop_time_ms, int(curr_time_ms + CODEL_DROP_INTERVAL / sqrt(para.drop_count)));
curr_time_ms = para.next_drop_time_ms + 1;
- shaper_aqm_codel_need_drop(0, &para, curr_time_ms, CODEL_MAX_LATENCY + 1);
+ shaper_aqm_codel_need_drop(uuid, &para, curr_time_ms, CODEL_MAX_LATENCY + 1);
EXPECT_EQ(para.state, CODEL_STATE_DROPPING_PHASE);
EXPECT_EQ(para.drop_count, 2);
EXPECT_EQ(para.next_drop_time_ms, int(curr_time_ms + CODEL_DROP_INTERVAL / sqrt(para.drop_count)));
- shaper_aqm_codel_need_drop(0, &para, curr_time_ms, CODEL_MAX_LATENCY - 1);
+ shaper_aqm_codel_need_drop(uuid, &para, curr_time_ms, CODEL_MAX_LATENCY - 1);
EXPECT_EQ(para.state, CODEL_STATE_NORMAL);
}
@@ -81,10 +90,13 @@ TEST(aqm_codel, no_drop)
{
struct shaper_aqm_codel_para para;
int curr_time_ms = 0;
+ uuid_t uuid;
+
+ uuid_parse("00000000-0000-0000-0000-000000000001", uuid);
memset(&para, 0, sizeof(para));
- shaper_aqm_codel_need_drop(0, &para, curr_time_ms, CODEL_MAX_LATENCY - 1);
+ shaper_aqm_codel_need_drop(uuid, &para, curr_time_ms, CODEL_MAX_LATENCY - 1);
EXPECT_EQ(para.state, CODEL_STATE_NORMAL);
EXPECT_EQ(para.drop_count, 0);
}
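The aqm_blue tests above exercise the BLUE drop decision: the drop probability climbs while the queue stays over BLUE_QUEUE_LEN_MAX and decays once the queue drains, ending at BLUE_INCREMENT - BLUE_DECREMENT. A minimal sketch of that bookkeeping, assuming shaper_aqm_blue_need_drop follows classic BLUE; the one-second rate-limit interval and the RNG here are placeholders, not the shaper's actual implementation.

#include <cstdlib>
#include <ctime>

struct blue_para { time_t update_time; double probability; };

// Classic BLUE: raise the mark probability when the queue overflows,
// lower it when the queue is idle, both rate-limited to once per interval.
static bool blue_need_drop(blue_para *p, int queue_len, int queue_len_max,
                           double increment, double decrement)
{
    time_t now = time(NULL);
    if (queue_len > queue_len_max) {
        if (now - p->update_time >= 1) {
            p->probability += increment;
            p->update_time = now;
        }
    } else if (queue_len == 0 && now - p->update_time >= 1) {
        p->probability -= decrement;
        p->update_time = now;
    }
    return (double)rand() / RAND_MAX < p->probability;
}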
diff --git a/shaping/test/gtest_shaper_maat.cpp b/shaping/test/gtest_shaper_maat.cpp
index 3b619fc..59381ce 100644
--- a/shaping/test/gtest_shaper_maat.cpp
+++ b/shaping/test/gtest_shaper_maat.cpp
@@ -7,27 +7,46 @@
TEST(shaping_rule, parse)
{
- const char *data = "182\t\
- 2\t\
- 32\t\
- 0\t\
- 1\t\
- {}\t\
- {\"vsys_id\":2333,\"priority\":1,\"fair_factor\":10,\"dscp_marking\":{\"enabled\":1,\"dscp_value\":10},\"profile_chain\":[1,2,3]}\t\
- 0\t\
- 1\t\
- }";
+ const char *data = "{\
+ \"uuid\": \"00000000-0000-0000-0000-000000000182\",\
+ \"service\": 2,\
+ \"action\": 32,\
+ \"do_blacklist\": 0,\
+ \"do_log\": 1,\
+ \"effective_rage\": 0,\
+ \"action_parameter\": {\
+ \"vsys_id\": 2333,\
+ \"priority\": 1,\
+ \"fair_factor\": 10,\
+ \"dscp_marking\": {\
+ \"enabled\": 1,\
+ \"dscp_type\": \"Assured Forwarding (AF)\",\
+ \"dscp_name\": \"af11\",\
+ \"dscp_value\": 10\
+ },\
+ \"profile_chain\": [\
+ \"00000000-0000-0000-0000-000000000001\",\
+ \"00000000-0000-0000-0000-000000000002\",\
+ \"00000000-0000-0000-0000-000000000003\"\
+ ]\
+ }\
+ }";
struct shaping_rule *s_rule = NULL;
struct shaping_rule *s_rule_dup = NULL;
- shaper_rule_ex_new("TRAFFIC_SHAPING_COMPILE", 0, NULL, data, (void**)&s_rule, 0, NULL);
+ shaper_rule_ex_new("TRAFFIC_SHAPING_RULE", NULL, data, (void**)&s_rule, 0, NULL);
EXPECT_EQ(s_rule->vsys_id, 2333);
- EXPECT_EQ(s_rule->id, 182);
- EXPECT_EQ(s_rule->primary_pf_id, 1);
+ char uuid_str[UUID_STR_LEN];
+ uuid_unparse(s_rule->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000182", uuid_str);
+ uuid_unparse(s_rule->primary_pf_uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str);
EXPECT_EQ(s_rule->borrow_pf_num, 2);
- EXPECT_EQ(s_rule->borrow_pf_id_array[0], 2);
- EXPECT_EQ(s_rule->borrow_pf_id_array[1], 3);
+ uuid_unparse(s_rule->borrow_pf_uuid_array[0], uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str);
+ uuid_unparse(s_rule->borrow_pf_uuid_array[1], uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str);
EXPECT_EQ(s_rule->priority, 1);
EXPECT_EQ(s_rule->dscp_enable, 1);
EXPECT_EQ(s_rule->dscp_value, 10);
@@ -40,18 +59,24 @@ TEST(shaping_rule, parse)
}
TEST(shaping_profile, parse)
-{ const char *data = "1\t\
- fair_share\t\
- max_min_host_fairness\t\
- [{\"direction\":\"incoming\",\"bandwidth\":1024},{\"direction\":\"outgoing\",\"bandwidth\":2048}]\t\
- {\"algorithm\":\"codel\"}\t\
- {}\t\
- 1";
+{
+ const char *data = "{\
+ \"uuid\":\"00000000-0000-0000-0000-000000000001\",\
+ \"type\": \"fair_share\",\
+ \"type_argument\": \"max_min_host_fairness\",\
+ \"limits\": [\
+ {\"direction\":\"incoming\",\"bandwidth\":1024},\
+ {\"direction\":\"outcoming\",\"bandwidth\":2048}\
+ ],\
+ \"aqm_options\": {\"algorithm\":\"codel\"},\
+ \"is_valid\":\"yes\"}";
struct shaping_profile *s_pf = NULL;
struct shaping_profile *s_pf_dup = NULL;
- shaper_profile_ex_new("TRAFFIC_SHAPING_PROFILE", 0, NULL, data, (void**)&s_pf, 0, NULL);
- EXPECT_EQ(s_pf->id, 1);
+ shaper_profile_ex_new("TRAFFIC_SHAPING_PROFILE", NULL, data, (void**)&s_pf, 0, NULL);
+ char uuid_str[UUID_STR_LEN];
+ uuid_unparse(s_pf->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str);
EXPECT_EQ(s_pf->in_limit_bandwidth, 1024);
EXPECT_EQ(s_pf->out_limit_bandwidth, 2048);
EXPECT_EQ(s_pf->type, PROFILE_TYPE_MAX_MIN_HOST_FAIRNESS);
@@ -69,51 +94,71 @@ TEST(shaping_flow, update_rule)
struct shaping_ctx *ctx = NULL;
struct shaping_flow sf;
struct shaping_rule_info *rule_info;
- long long rule_ids[] = {1, 2, 3};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"};
int prioritys[] = {1, 2, 3};
int profile_nums[] = {1, 2, 3};
- int profile_ids[][MAX_REF_PROFILE] = {{1}, {2, 3}, {4, 5, 6}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"},
+ {"00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"},
+ {"00000000-0000-0000-0000-000000000004", "00000000-0000-0000-0000-000000000005", "00000000-0000-0000-0000-000000000006"}};
stub_init();
ctx = shaping_engine_init();
- stub_set_matched_shaping_rules(3, rule_ids, prioritys, profile_nums, profile_ids);
-
- ctx->maat_info->rule_table_id = STUB_MAAT_SHAPING_RULE_TABLE_ID;
- ctx->maat_info->profile_table_id = STUB_MAAT_SHAPING_PROFILE_TABLE_ID;
+ stub_set_matched_shaping_rules(3, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000001", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000002", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000003", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000004", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000005", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000006", PROFILE_TYPE_GENERIC);
memset(&sf, 0, sizeof(sf));
sf.priority = SHAPING_PRIORITY_NUM_MAX;
- shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_ids, 3);
+ uuid_t rule_uuids[3];
+ uuid_parse("00000000-0000-0000-0000-000000000001", rule_uuids[0]);
+ uuid_parse("00000000-0000-0000-0000-000000000002", rule_uuids[1]);
+ uuid_parse("00000000-0000-0000-0000-000000000003", rule_uuids[2]);
+ shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_uuids, 3);
EXPECT_EQ(sf.rule_num, 3);
rule_info = &sf.matched_rule_infos[0];
- EXPECT_EQ(rule_info->id, 1);
- EXPECT_EQ(rule_info->primary.id, 1);
+ char uuid_str[UUID_STR_LEN];
+ uuid_unparse(rule_info->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str);
+ uuid_unparse(rule_info->primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str);
EXPECT_EQ(rule_info->primary.priority, 1);
EXPECT_EQ(rule_info->borrowing_num, 0);
rule_info = &sf.matched_rule_infos[1];
- EXPECT_EQ(rule_info->id, 2);
- EXPECT_EQ(rule_info->primary.id, 2);
+ uuid_unparse(rule_info->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str);
+ uuid_unparse(rule_info->primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str);
EXPECT_EQ(rule_info->primary.priority, 1);
EXPECT_EQ(rule_info->borrowing_num, 1);
- EXPECT_EQ(rule_info->borrowing[0].id, 3);
+ uuid_unparse(rule_info->borrowing[0].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str);
EXPECT_EQ(rule_info->borrowing[0].priority, 2);
rule_info = &sf.matched_rule_infos[2];
- EXPECT_EQ(rule_info->id, 3);
- EXPECT_EQ(rule_info->primary.id, 4);
+ uuid_unparse(rule_info->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str);
+ uuid_unparse(rule_info->primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000004", uuid_str);
EXPECT_EQ(rule_info->primary.priority, 1);
EXPECT_EQ(rule_info->borrowing_num, 2);
- EXPECT_EQ(rule_info->borrowing[0].id, 5);
+ uuid_unparse(rule_info->borrowing[0].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000005", uuid_str);
EXPECT_EQ(rule_info->borrowing[0].priority, 2);
- EXPECT_EQ(rule_info->borrowing[1].id, 6);
+ uuid_unparse(rule_info->borrowing[1].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000006", uuid_str);
EXPECT_EQ(rule_info->borrowing[1].priority, 3);
shaping_engine_destroy(ctx);
+ stub_clear_resource();
}
TEST(shaping_flow, update_rule_dup)
@@ -121,65 +166,92 @@ TEST(shaping_flow, update_rule_dup)
struct shaping_ctx *ctx = NULL;
struct shaping_flow sf;
struct shaping_rule_info *rule_info;
- long long rule_ids[] = {1, 2, 3, 4};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003", "00000000-0000-0000-0000-000000000004"};
int prioritys[] = {1, 2, 3, 4};
int profile_nums[] = {1, 2, 3, 1};
- int profile_ids[][MAX_REF_PROFILE] = {{1}, {2, 3}, {4, 5, 6}, {7}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"},
+ {"00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"},
+ {"00000000-0000-0000-0000-000000000004", "00000000-0000-0000-0000-000000000005", "00000000-0000-0000-0000-000000000006"},
+ {"00000000-0000-0000-0000-000000000007"}};
stub_init();
ctx = shaping_engine_init();
- stub_set_matched_shaping_rules(4, rule_ids, prioritys, profile_nums, profile_ids);
-
- ctx->maat_info->rule_table_id = STUB_MAAT_SHAPING_RULE_TABLE_ID;
- ctx->maat_info->profile_table_id = STUB_MAAT_SHAPING_PROFILE_TABLE_ID;
+ stub_set_matched_shaping_rules(4, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000001", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000002", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000003", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000004", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000005", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000006", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000007", PROFILE_TYPE_GENERIC);
memset(&sf, 0, sizeof(sf));
sf.priority = SHAPING_PRIORITY_NUM_MAX;
- long long rule_id1[] = {1, 2, 3};
- shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_id1, 3);
+ uuid_t rule_uuid1[3];
+ uuid_parse("00000000-0000-0000-0000-000000000001", rule_uuid1[0]);
+ uuid_parse("00000000-0000-0000-0000-000000000002", rule_uuid1[1]);
+ uuid_parse("00000000-0000-0000-0000-000000000003", rule_uuid1[2]);
+ shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_uuid1, 3);
EXPECT_EQ(sf.rule_num, 3);
rule_info = &sf.matched_rule_infos[0];
- EXPECT_EQ(rule_info->id, 1);
- EXPECT_EQ(rule_info->primary.id, 1);
+ char uuid_str[UUID_STR_LEN];
+ uuid_unparse(rule_info->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str);
+ uuid_unparse(rule_info->primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str);
EXPECT_EQ(rule_info->primary.priority, 1);
EXPECT_EQ(rule_info->borrowing_num, 0);
rule_info = &sf.matched_rule_infos[1];
- EXPECT_EQ(rule_info->id, 2);
- EXPECT_EQ(rule_info->primary.id, 2);
+ uuid_unparse(rule_info->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str);
+ uuid_unparse(rule_info->primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str);
EXPECT_EQ(rule_info->primary.priority, 1);
EXPECT_EQ(rule_info->borrowing_num, 1);
- EXPECT_EQ(rule_info->borrowing[0].id, 3);
+ uuid_unparse(rule_info->borrowing[0].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str);
EXPECT_EQ(rule_info->borrowing[0].priority, 2);
rule_info = &sf.matched_rule_infos[2];
- EXPECT_EQ(rule_info->id, 3);
- EXPECT_EQ(rule_info->primary.id, 4);
+ uuid_unparse(rule_info->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str);
+ uuid_unparse(rule_info->primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000004", uuid_str);
EXPECT_EQ(rule_info->primary.priority, 1);
EXPECT_EQ(rule_info->borrowing_num, 2);
- EXPECT_EQ(rule_info->borrowing[0].id, 5);
+ uuid_unparse(rule_info->borrowing[0].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000005", uuid_str);
EXPECT_EQ(rule_info->borrowing[0].priority, 2);
- EXPECT_EQ(rule_info->borrowing[1].id, 6);
+ uuid_unparse(rule_info->borrowing[1].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000006", uuid_str);
EXPECT_EQ(rule_info->borrowing[1].priority, 3);
- long long rule_id2[] = {1};
- shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_id2, 1);
+ uuid_t rule_uuid2;
+ uuid_parse("00000000-0000-0000-0000-000000000001", rule_uuid2);
+ shaper_rules_update(&ctx->thread_ctx[0], &sf, &rule_uuid2, 1);
EXPECT_EQ(sf.rule_num, 3);
- long long rule_id3[] = {2, 3, 4};
- shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_id3, 3);
+ uuid_t rule_uuid3[3];
+ uuid_parse("00000000-0000-0000-0000-000000000002", rule_uuid3[0]);
+ uuid_parse("00000000-0000-0000-0000-000000000003", rule_uuid3[1]);
+ uuid_parse("00000000-0000-0000-0000-000000000004", rule_uuid3[2]);
+ shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_uuid3, 3);
EXPECT_EQ(sf.rule_num, 4);
rule_info = &sf.matched_rule_infos[3];
- EXPECT_EQ(rule_info->id, 4);
- EXPECT_EQ(rule_info->primary.id, 7);
+ uuid_unparse(rule_info->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000004", uuid_str);
+ uuid_unparse(rule_info->primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000007", uuid_str);
EXPECT_EQ(rule_info->primary.priority, 1);
shaping_engine_destroy(ctx);
+ stub_clear_resource();
}
TEST(shaping_flow, update_rule_after_priority_confirmed)
@@ -187,57 +259,76 @@ TEST(shaping_flow, update_rule_after_priority_confirmed)
struct shaping_ctx *ctx = NULL;
struct shaping_flow sf;
struct shaping_rule_info *rule_info;
- long long rule_ids[] = {1, 2, 3};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"};
int prioritys[] = {1, 2, 3};
int profile_nums[] = {1, 2, 3};
- int profile_ids[][MAX_REF_PROFILE] = {{1}, {2, 3}, {4, 5, 6}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"},
+ {"00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"},
+ {"00000000-0000-0000-0000-000000000004", "00000000-0000-0000-0000-000000000005", "00000000-0000-0000-0000-000000000006"}};
stub_init();
ctx = shaping_engine_init();
- stub_set_matched_shaping_rules(3, rule_ids, prioritys, profile_nums, profile_ids);
-
- ctx->maat_info->rule_table_id = STUB_MAAT_SHAPING_RULE_TABLE_ID;
- ctx->maat_info->profile_table_id = STUB_MAAT_SHAPING_PROFILE_TABLE_ID;
+ stub_set_matched_shaping_rules(3, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000001", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000002", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000003", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000004", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000005", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000006", PROFILE_TYPE_GENERIC);
memset(&sf, 0, sizeof(sf));
sf.priority = SHAPING_PRIORITY_NUM_MAX;
- long long first_rule_ids[] = {2, 3};
- shaper_rules_update(&ctx->thread_ctx[0], &sf, first_rule_ids, 2);
+ uuid_t first_rule_uuids[2];
+ uuid_parse("00000000-0000-0000-0000-000000000002", first_rule_uuids[0]);
+ uuid_parse("00000000-0000-0000-0000-000000000003", first_rule_uuids[1]);
+ shaper_rules_update(&ctx->thread_ctx[0], &sf, first_rule_uuids, 2);
sf.processed_pkts = CONFIRM_PRIORITY_PKTS + 1;
- long long after_confirm_priority_rule_ids[] = {1};
- shaper_rules_update(&ctx->thread_ctx[0], &sf, after_confirm_priority_rule_ids, 1);
+ uuid_t after_confirm_priority_rule_uuids;
+ uuid_parse("00000000-0000-0000-0000-000000000001", after_confirm_priority_rule_uuids);
+ shaper_rules_update(&ctx->thread_ctx[0], &sf, &after_confirm_priority_rule_uuids, 1);
EXPECT_EQ(sf.rule_num, 3);
EXPECT_EQ(sf.priority, 2);
rule_info = &sf.matched_rule_infos[0];
- EXPECT_EQ(rule_info->id, 2);
- EXPECT_EQ(rule_info->primary.id, 2);
+ char uuid_str[UUID_STR_LEN];
+ uuid_unparse(rule_info->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str);
+ uuid_unparse(rule_info->primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str);
EXPECT_EQ(rule_info->primary.priority, 2);
EXPECT_EQ(rule_info->borrowing_num, 1);
- EXPECT_EQ(rule_info->borrowing[0].id, 3);
+ uuid_unparse(rule_info->borrowing[0].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str);
EXPECT_EQ(rule_info->borrowing[0].priority, 3);
rule_info = &sf.matched_rule_infos[1];
- EXPECT_EQ(rule_info->id, 3);
- EXPECT_EQ(rule_info->primary.id, 4);
+ uuid_unparse(rule_info->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str);
+ uuid_unparse(rule_info->primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000004", uuid_str);
EXPECT_EQ(rule_info->primary.priority, 2);
EXPECT_EQ(rule_info->borrowing_num, 2);
- EXPECT_EQ(rule_info->borrowing[0].id, 5);
+ uuid_unparse(rule_info->borrowing[0].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000005", uuid_str);
EXPECT_EQ(rule_info->borrowing[0].priority, 3);
- EXPECT_EQ(rule_info->borrowing[1].id, 6);
+ uuid_unparse(rule_info->borrowing[1].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000006", uuid_str);
EXPECT_EQ(rule_info->borrowing[1].priority, 4);
rule_info = &sf.matched_rule_infos[2];
- EXPECT_EQ(rule_info->id, 1);
- EXPECT_EQ(rule_info->primary.id, 1);
+ uuid_unparse(rule_info->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str);
+ uuid_unparse(rule_info->primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str);
EXPECT_EQ(rule_info->primary.priority, 2);
EXPECT_EQ(rule_info->borrowing_num, 0);
shaping_engine_destroy(ctx);
+ stub_clear_resource();
}
TEST(shaping_flow, update_rule_dscp)
@@ -245,71 +336,90 @@ TEST(shaping_flow, update_rule_dscp)
struct shaping_ctx *ctx = NULL;
struct shaping_flow sf;
struct shaping_rule_info *rule_info;
- long long rule_ids[] = {1, 2, 3, 4};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003", "00000000-0000-0000-0000-000000000004"};
int prioritys[] = {1, 2, 3, 4};
int profile_nums[] = {1, 1, 1, 1};
- int profile_ids[][MAX_REF_PROFILE] = {{1}, {2}, {3}, {4}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"},
+ {"00000000-0000-0000-0000-000000000002"},
+ {"00000000-0000-0000-0000-000000000003"},
+ {"00000000-0000-0000-0000-000000000004"}};
stub_init();
ctx = shaping_engine_init();
- stub_set_matched_shaping_rules(4, rule_ids, prioritys, profile_nums, profile_ids);
- stub_set_shaping_rule_dscp_value(1, 10);//AF11
- stub_set_shaping_rule_dscp_value(2, 12);//AF12
- stub_set_shaping_rule_dscp_value(3, 14);//AF13
- stub_set_shaping_rule_dscp_value(4, 40);//CS5
+ stub_set_matched_shaping_rules(4, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000001", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000002", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000003", PROFILE_TYPE_GENERIC);
+ stub_set_profile_type("00000000-0000-0000-0000-000000000004", PROFILE_TYPE_GENERIC);
- ctx->maat_info->rule_table_id = STUB_MAAT_SHAPING_RULE_TABLE_ID;
- ctx->maat_info->profile_table_id = STUB_MAAT_SHAPING_PROFILE_TABLE_ID;
+ stub_set_shaping_rule_dscp_value(rule_uuid_strs[0], 10);//AF11
+ stub_set_shaping_rule_dscp_value(rule_uuid_strs[1], 12);//AF12
+ stub_set_shaping_rule_dscp_value(rule_uuid_strs[2], 14);//AF13
+ stub_set_shaping_rule_dscp_value(rule_uuid_strs[3], 40);//CS5
memset(&sf, 0, sizeof(sf));
sf.priority = SHAPING_PRIORITY_NUM_MAX;
- long long rule_id1[] = {1, 2};
- shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_id1, 2);
+ uuid_t rule_uuids1[2];
+ uuid_parse("00000000-0000-0000-0000-000000000001", rule_uuids1[0]);
+ uuid_parse("00000000-0000-0000-0000-000000000002", rule_uuids1[1]);
+ shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_uuids1, 2);
EXPECT_EQ(sf.rule_num, 2);
EXPECT_EQ(sf.dscp_enable, 1);
EXPECT_EQ(sf.dscp_value, 10);
rule_info = &sf.matched_rule_infos[0];
- EXPECT_EQ(rule_info->id, 1);
- EXPECT_EQ(rule_info->primary.id, 1);
+ char uuid_str[UUID_STR_LEN];
+ uuid_unparse(rule_info->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str);
+ uuid_unparse(rule_info->primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str);
EXPECT_EQ(rule_info->primary.priority, 1);
EXPECT_EQ(rule_info->borrowing_num, 0);
rule_info = &sf.matched_rule_infos[1];
- EXPECT_EQ(rule_info->id, 2);
- EXPECT_EQ(rule_info->primary.id, 2);
+ uuid_unparse(rule_info->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str);
+ uuid_unparse(rule_info->primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str);
EXPECT_EQ(rule_info->primary.priority, 1);
EXPECT_EQ(rule_info->borrowing_num, 0);
- long long rule_id2[] = {3};
- shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_id2, 1);
+ uuid_t rule_uuid2;
+ uuid_parse("00000000-0000-0000-0000-000000000003", rule_uuid2);
+ shaper_rules_update(&ctx->thread_ctx[0], &sf, &rule_uuid2, 1);
EXPECT_EQ(sf.rule_num, 3);
EXPECT_EQ(sf.dscp_enable, 1);
EXPECT_EQ(sf.dscp_value, 10);
rule_info = &sf.matched_rule_infos[2];
- EXPECT_EQ(rule_info->id, 3);
- EXPECT_EQ(rule_info->primary.id, 3);
+ uuid_unparse(rule_info->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str);
+ uuid_unparse(rule_info->primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str);
EXPECT_EQ(rule_info->primary.priority, 1);
EXPECT_EQ(rule_info->borrowing_num, 0);
- long long rule_id3[] = {4};
- shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_id3, 3);
+ uuid_t rule_uuid3;
+ uuid_parse("00000000-0000-0000-0000-000000000004", rule_uuid3);
+ shaper_rules_update(&ctx->thread_ctx[0], &sf, &rule_uuid3, 1);
EXPECT_EQ(sf.rule_num, 4);
EXPECT_EQ(sf.dscp_enable, 1);
EXPECT_EQ(sf.dscp_value, 40);
rule_info = &sf.matched_rule_infos[3];
- EXPECT_EQ(rule_info->id, 4);
- EXPECT_EQ(rule_info->primary.id, 4);
+ uuid_unparse(rule_info->uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000004", uuid_str);
+ uuid_unparse(rule_info->primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000004", uuid_str);
EXPECT_EQ(rule_info->primary.priority, 1);
EXPECT_EQ(rule_info->borrowing_num, 0);
shaping_engine_destroy(ctx);
+ stub_clear_resource();
}
int main(int argc, char **argv)
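gtest_shaper_maat.cpp now repeats the same unparse-then-compare pair for every UUID field it checks. A convenience macro along these lines could shrink those blocks; it is illustrative only (this patch does not add it) and relies solely on UUID_STR_LEN and the gtest macros already used here.

#include <uuid/uuid.h>
#include <gtest/gtest.h>

// Unparse a uuid_t into its canonical string form and compare with EXPECT_STREQ.
#define EXPECT_UUID_STREQ(expected_str, uu)        \
    do {                                           \
        char uuid_buf_[UUID_STR_LEN];              \
        uuid_unparse((uu), uuid_buf_);             \
        EXPECT_STREQ((expected_str), uuid_buf_);   \
    } while (0)

For example, EXPECT_UUID_STREQ("00000000-0000-0000-0000-000000000001", rule_info->primary.uuid) replaces two lines at each check site.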
diff --git a/shaping/test/gtest_shaper_send_log.cpp b/shaping/test/gtest_shaper_send_log.cpp
index effd7bf..897120d 100644
--- a/shaping/test/gtest_shaper_send_log.cpp
+++ b/shaping/test/gtest_shaper_send_log.cpp
@@ -56,19 +56,19 @@ static void gtest_shaper_log_parse(struct shaping_flow *sf, const char *data, si
for (int i = 0; i < sf->rule_num; i++) {
ASSERT_EQ(mpack_type_map, mpack_node_type(mpack_node_array_at(tmp_node, i)) );
tmp_rule_node = mpack_node_map_cstr(mpack_node_array_at(tmp_node, i), "rule_id");
- EXPECT_EQ(mpack_type_uint, mpack_node_type(tmp_rule_node));
- sf->matched_rule_infos[i].id = mpack_node_u64(tmp_rule_node);
+ EXPECT_EQ(mpack_type_bin, mpack_node_type(tmp_rule_node));
+ uuid_copy(sf->matched_rule_infos[i].uuid, *(uuid_t*)mpack_node_bin_data(tmp_rule_node));
tmp_profile_node = mpack_node_map_cstr(mpack_node_array_at(tmp_node, i), "profile_ids");
ASSERT_EQ(mpack_type_array, mpack_node_type(tmp_profile_node));
int profile_array_len = mpack_node_array_length(tmp_profile_node);
sf->matched_rule_infos[i].borrowing_num = profile_array_len - 1;
for (int j = 0; j < profile_array_len; j++) {
- ASSERT_EQ(mpack_type_uint, mpack_node_type(mpack_node_array_at(tmp_profile_node, j)) );
+ ASSERT_EQ(mpack_type_bin, mpack_node_type(mpack_node_array_at(tmp_profile_node, j)) );
if (j == 0) {
- sf->matched_rule_infos[i].primary.id = mpack_node_u64(mpack_node_array_at(tmp_profile_node, j));
+ uuid_copy(sf->matched_rule_infos[i].primary.uuid, *(uuid_t*)mpack_node_bin_data(mpack_node_array_at(tmp_profile_node, j)));
} else {
- sf->matched_rule_infos[i].borrowing[j - 1].id = mpack_node_u64(mpack_node_array_at(tmp_profile_node, j));
+ uuid_copy(sf->matched_rule_infos[i].borrowing[j - 1].uuid, *(uuid_t*)mpack_node_bin_data(mpack_node_array_at(tmp_profile_node, j)));
}
}
}
@@ -88,26 +88,26 @@ TEST(MPACK_LOG, PARSE)
sf_in.ctrl_meta.session_id = 12345678;
sf_in.rule_num = 3;
- //rule_id 0, primary profile id 0, borrow profile id 1
- sf_in.matched_rule_infos[0].id = 0;
- sf_in.matched_rule_infos[0].primary.id = 0;
+ //rule_id 1, primary profile id 1, borrow profile id 2
+ uuid_parse("00000000-0000-0000-0000-000000000001", sf_in.matched_rule_infos[0].uuid);
+ uuid_parse("00000000-0000-0000-0000-000000000001", sf_in.matched_rule_infos[0].primary.uuid);
sf_in.matched_rule_infos[0].borrowing_num = 1;
- sf_in.matched_rule_infos[0].borrowing[0].id = 1;
+ uuid_parse("00000000-0000-0000-0000-000000000002", sf_in.matched_rule_infos[0].borrowing[0].uuid);
- //rule id 1, primary profile id 2, borrow profile id 3,4
- sf_in.matched_rule_infos[1].id = 1;
- sf_in.matched_rule_infos[1].primary.id = 2;
+ //rule id 2, primary profile id 3, borrow profile id 4,5
+ uuid_parse("00000000-0000-0000-0000-000000000002", sf_in.matched_rule_infos[1].uuid);
+ uuid_parse("00000000-0000-0000-0000-000000000003", sf_in.matched_rule_infos[1].primary.uuid);
sf_in.matched_rule_infos[1].borrowing_num = 2;
- sf_in.matched_rule_infos[1].borrowing[0].id = 3;
- sf_in.matched_rule_infos[1].borrowing[1].id = 4;
+ uuid_parse("00000000-0000-0000-0000-000000000004", sf_in.matched_rule_infos[1].borrowing[0].uuid);
+ uuid_parse("00000000-0000-0000-0000-000000000005", sf_in.matched_rule_infos[1].borrowing[1].uuid);
- //rule id 2, primary profile id 5, borrow profile id 6,7,8
- sf_in.matched_rule_infos[2].id = 2;
- sf_in.matched_rule_infos[2].primary.id = 5;
+ //rule id 3, primary profile id 6, borrow profile id 7,8,9
+ uuid_parse("00000000-0000-0000-0000-000000000003", sf_in.matched_rule_infos[2].uuid);
+ uuid_parse("00000000-0000-0000-0000-000000000006", sf_in.matched_rule_infos[2].primary.uuid);
sf_in.matched_rule_infos[2].borrowing_num = 3;
- sf_in.matched_rule_infos[2].borrowing[0].id = 6;
- sf_in.matched_rule_infos[2].borrowing[1].id = 7;
- sf_in.matched_rule_infos[2].borrowing[2].id = 8;
+ uuid_parse("00000000-0000-0000-0000-000000000007", sf_in.matched_rule_infos[2].borrowing[0].uuid);
+ uuid_parse("00000000-0000-0000-0000-000000000008", sf_in.matched_rule_infos[2].borrowing[1].uuid);
+ uuid_parse("00000000-0000-0000-0000-000000000009", sf_in.matched_rule_infos[2].borrowing[2].uuid);
shaper_session_log_prepare(&sf_in, &mpack_data, &mpack_size);
gtest_shaper_log_parse(&sf_out, mpack_data, mpack_size);
@@ -115,26 +115,39 @@ TEST(MPACK_LOG, PARSE)
EXPECT_EQ(sf_out.ctrl_meta.session_id, 12345678);
EXPECT_EQ(sf_out.rule_num, 3);
- //rule_id 0, primary profile id 0, borrow profile id 1
- EXPECT_EQ(sf_out.matched_rule_infos[0].id, 0);
- EXPECT_EQ(sf_out.matched_rule_infos[0].primary.id, 0);
+ //rule_id 1, primary profile id 1, borrow profile id 2
+ char uuid_str[UUID_STR_LEN];
+ uuid_unparse(sf_out.matched_rule_infos[0].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str);
+ uuid_unparse(sf_out.matched_rule_infos[0].primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str);
EXPECT_EQ(sf_out.matched_rule_infos[0].borrowing_num, 1);
- EXPECT_EQ(sf_out.matched_rule_infos[0].borrowing[0].id, 1);
-
- //rule id 1, primary profile id 2, borrow profile id 3,4
- EXPECT_EQ(sf_out.matched_rule_infos[1].id, 1);
- EXPECT_EQ(sf_out.matched_rule_infos[1].primary.id, 2);
+ uuid_unparse(sf_out.matched_rule_infos[0].borrowing[0].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str);
+
+ //rule id 2, primary profile id 3, borrow profile id 4,5
+ uuid_unparse(sf_out.matched_rule_infos[1].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str);
+ uuid_unparse(sf_out.matched_rule_infos[1].primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str);
EXPECT_EQ(sf_out.matched_rule_infos[1].borrowing_num, 2);
- EXPECT_EQ(sf_out.matched_rule_infos[1].borrowing[0].id, 3);
- EXPECT_EQ(sf_out.matched_rule_infos[1].borrowing[1].id, 4);
-
- //rule id 2, primary profile id 5, borrow profile id 6,7,8
- EXPECT_EQ(sf_out.matched_rule_infos[2].id, 2);
- EXPECT_EQ(sf_out.matched_rule_infos[2].primary.id, 5);
+ uuid_unparse(sf_out.matched_rule_infos[1].borrowing[0].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000004", uuid_str);
+ uuid_unparse(sf_out.matched_rule_infos[1].borrowing[1].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000005", uuid_str);
+
+ //rule id 3, primary profile id 6, borrow profile id 7,8,9
+ uuid_unparse(sf_out.matched_rule_infos[2].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str);
+ uuid_unparse(sf_out.matched_rule_infos[2].primary.uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000006", uuid_str);
EXPECT_EQ(sf_out.matched_rule_infos[2].borrowing_num, 3);
- EXPECT_EQ(sf_out.matched_rule_infos[2].borrowing[0].id, 6);
- EXPECT_EQ(sf_out.matched_rule_infos[2].borrowing[1].id, 7);
- EXPECT_EQ(sf_out.matched_rule_infos[2].borrowing[2].id, 8);
+ uuid_unparse(sf_out.matched_rule_infos[2].borrowing[0].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000007", uuid_str);
+ uuid_unparse(sf_out.matched_rule_infos[2].borrowing[1].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000008", uuid_str);
+ uuid_unparse(sf_out.matched_rule_infos[2].borrowing[2].uuid, uuid_str);
+ EXPECT_STREQ("00000000-0000-0000-0000-000000000009", uuid_str);
if (mpack_data) {
free(mpack_data);
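gtest_shaper_send_log.cpp now expects mpack_type_bin for "rule_id" and for each entry of "profile_ids", so the writer side must emit raw 16-byte UUIDs as msgpack bin values. A sketch of that encoding with the mpack writer API follows; the helper name and the exact map layout inside shaper_session_log_prepare are assumptions, only the key names and the bin typing come from the parser changes above.

#include <uuid/uuid.h>
#include "mpack.h"

// Encode one matched rule: its UUID plus primary/borrow profile UUIDs, all as bin.
static void write_rule_entry(mpack_writer_t *w, const uuid_t rule_uuid,
                             const uuid_t profile_uuids[], int profile_num)
{
    mpack_start_map(w, 2);
    mpack_write_cstr(w, "rule_id");
    mpack_write_bin(w, (const char *)rule_uuid, sizeof(uuid_t));
    mpack_write_cstr(w, "profile_ids");
    mpack_start_array(w, profile_num);
    for (int i = 0; i < profile_num; i++)
        mpack_write_bin(w, (const char *)profile_uuids[i], sizeof(uuid_t));
    mpack_finish_array(w);
    mpack_finish_map(w);
}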
diff --git a/shaping/test/gtest_shaper_with_swarmkv.cpp b/shaping/test/gtest_shaper_with_swarmkv.cpp
index 403aed2..bffe917 100644
--- a/shaping/test/gtest_shaper_with_swarmkv.cpp
+++ b/shaping/test/gtest_shaper_with_swarmkv.cpp
@@ -183,10 +183,10 @@ TEST(generic_profile, single_session)
struct stub_pkt_queue *actual_tx_queue;
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf = NULL;
- long long rule_id[] = {0};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"};
int priority[] = {1};
int profile_num[] = {1};
- int profile_id[][MAX_REF_PROFILE] = {{0}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{ "00000000-0000-0000-0000-000000000001"}};
struct cmd_exec_arg* reply_arg=NULL;
char result[2048]={0};
@@ -196,17 +196,20 @@ TEST(generic_profile, single_session)
sf = shaping_flow_new(&ctx->thread_ctx[0]);
ASSERT_TRUE(sf != NULL);
- stub_set_profile_limit_direction(0, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
- stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id);
- shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1);
+ stub_set_profile_limit_direction(profile_uuid_strs[0][0], PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs);
+
+ uuid_t rule_uuid;
+ uuid_parse(rule_uuid_strs[0], rule_uuid);
+ shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1);
//set swarmkv key
swarmkv_cli_set_db("swarmkv-shaping-nodes");
reply_arg=cmd_exec_arg_new();
cmd_exec_arg_expect_OK(reply_arg);
- swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "tcfg tsg-shaping-0-incoming 1000000 1000000");
- swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "tcfg tsg-shaping-0-outgoing 1000000 1000000");
+ swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "tcfg tsg-shaping-00000000-0000-0000-0000-000000000001-incoming 1000000 1000000");
+ swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "tcfg tsg-shaping-00000000-0000-0000-0000-000000000001-outgoing 1000000 1000000");
cmd_exec_arg_clear(reply_arg);
actual_tx_queue = stub_get_tx_queue();
@@ -246,6 +249,7 @@ TEST(generic_profile, single_session)
shaping_flow_free(&ctx->thread_ctx[0], sf);
shaper_thread_resource_clear();
shaping_engine_destroy(ctx);
+ stub_clear_resource();
}
TEST(fair_share_profile, two_members)
@@ -254,12 +258,10 @@ TEST(fair_share_profile, two_members)
struct shaping_ctx *ctx = NULL;
struct shaping_flow *sf1 = NULL;
struct shaping_flow *sf2 = NULL;
- long long rule_id[] = {0, 1};
- long long rule_id1[] = {0};
- long long rule_id2[] = {1};
+ const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"};
int priority[] = {1, 1};
int profile_num[] = {1, 1};
- int profile_id[][MAX_REF_PROFILE] = {{0}, {0}};
+ const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{ "00000000-0000-0000-0000-000000000001"}, { "00000000-0000-0000-0000-000000000001"}};
struct cmd_exec_arg* reply_arg=NULL;
char result[2048]={0};
@@ -271,20 +273,25 @@ TEST(fair_share_profile, two_members)
sf2 = shaping_flow_new(&ctx->thread_ctx[0]);
ASSERT_TRUE(sf2 != NULL);
- stub_set_profile_limit_direction(0, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
- stub_set_profile_type(0, PROFILE_TYPE_MAX_MIN_HOST_FAIRNESS);
- stub_set_shaping_rule_fair_factor(0, 1);
- stub_set_shaping_rule_fair_factor(1, 3);
- stub_set_matched_shaping_rules(2, rule_id, priority, profile_num, profile_id);
- shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 1);
- shaper_rules_update(&ctx->thread_ctx[0], sf2, rule_id2, 1);
+ stub_set_matched_shaping_rules(2, rule_uuid_strs, priority, profile_num, profile_uuid_strs);
+ stub_set_profile_limit_direction(profile_uuid_strs[0][0], PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
+ stub_set_profile_type(profile_uuid_strs[0][0], PROFILE_TYPE_MAX_MIN_HOST_FAIRNESS);
+ stub_set_shaping_rule_fair_factor(rule_uuid_strs[0], 1);
+ stub_set_shaping_rule_fair_factor(rule_uuid_strs[1], 3);
+
+ uuid_t rule_uuid1;
+ uuid_t rule_uuid2;
+ uuid_parse(rule_uuid_strs[0], rule_uuid1);
+ uuid_parse(rule_uuid_strs[1], rule_uuid2);
+ shaper_rules_update(&ctx->thread_ctx[0], sf1, &rule_uuid1, 1);
+ shaper_rules_update(&ctx->thread_ctx[0], sf2, &rule_uuid2, 1);
sf1->src_ip_str = (char *)calloc(1, 16);
- sf1->src_ip_str_len = strlen(sf1->src_ip_str);
+ sf1->src_ip_str_len = strlen("1.1.1.1");
memcpy(sf1->src_ip_str, "1.1.1.1", sf1->src_ip_str_len);
sf2->src_ip_str = (char *)calloc(1, 16);
- sf2->src_ip_str_len = strlen(sf2->src_ip_str);
+ sf2->src_ip_str_len = strlen("2.2.2.2");
memcpy(sf2->src_ip_str, "2.2.2.2", sf2->src_ip_str_len);
//set swarmkv key
@@ -292,8 +299,8 @@ TEST(fair_share_profile, two_members)
reply_arg=cmd_exec_arg_new();
cmd_exec_arg_expect_OK(reply_arg);
- swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "ftcfg tsg-shaping-0-incoming 1000000 1000000 256");
- swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "ftcfg tsg-shaping-0-outgoing 1000000 1000000 256");
+ swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "ftcfg tsg-shaping-00000000-0000-0000-0000-000000000001-incoming 1000000 1000000 256");
+ swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "ftcfg tsg-shaping-00000000-0000-0000-0000-000000000001-outgoing 1000000 1000000 256");
cmd_exec_arg_clear(reply_arg);
actual_tx_queue = stub_get_tx_queue();
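The swarmkv bucket configuration commands above show the new key scheme: tsg-shaping-&lt;profile uuid&gt;-&lt;direction&gt;. A minimal sketch of composing such a key; the helper itself is hypothetical, while the prefix and direction suffixes are taken from the tcfg/ftcfg commands in these tests.

#include <cstdio>
#include <uuid/uuid.h>

// Build "tsg-shaping-<uuid>-incoming" / "...-outgoing" into buf.
static void build_token_bucket_key(char *buf, size_t len,
                                   const uuid_t profile_uuid, const char *direction)
{
    char uuid_str[UUID_STR_LEN];
    uuid_unparse(profile_uuid, uuid_str);
    snprintf(buf, len, "tsg-shaping-%s-%s", uuid_str, direction);
}

For example, build_token_bucket_key(key, sizeof(key), profile->uuid, "incoming") yields the key the single_session test configures with tcfg.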
diff --git a/shaping/test/stub.cpp b/shaping/test/stub.cpp
index 24b3ac3..8e9140f 100644
--- a/shaping/test/stub.cpp
+++ b/shaping/test/stub.cpp
@@ -13,78 +13,126 @@
#include "shaper.h"
#include "shaper_stat.h"
#include "stub.h"
-#include "shaper_maat.h"
#include "log.h"
+#include "uthash.h"
#define MAX_STUB_TEST_SESSION_NUM 2
-#define MAX_STUB_RULE_NUM 8
-#define MAX_STUB_PROFILE_NUM 8
-
-struct stub_matched_rules {
- struct shaping_rule rules[MAX_STUB_RULE_NUM];
- int rule_num;
-};
-
struct stub_pkt_queue tx_queue;
-struct stub_matched_rules matched_rules;
-struct shaping_profile pf_array[MAX_STUB_PROFILE_NUM];
+struct stub_matched_rule *rules_hash = NULL;
+struct stub_shaping_profile *profiles_hash = NULL;
-void stub_set_profile_type(int profile_id, enum shaping_profile_type type)
+void stub_set_profile_type(const char *profile_uuid_str, enum shaping_profile_type type)
{
- pf_array[profile_id].type = type;
+ uuid_t profile_uuid;
+ struct stub_shaping_profile *stub_profile = NULL;
+
+ uuid_parse(profile_uuid_str, profile_uuid);
+ HASH_FIND(hh, profiles_hash, profile_uuid, sizeof(uuid_t), stub_profile);
+ if (!stub_profile) {
+ stub_profile = (struct stub_shaping_profile*)calloc(1, sizeof(struct stub_shaping_profile));
+ uuid_copy(stub_profile->profile.uuid, profile_uuid);
+ HASH_ADD(hh, profiles_hash, profile.uuid, sizeof(uuid_t), stub_profile);
+ }
+
+ stub_profile->profile.type = type;
+
return;
}
-void stub_set_profile_limit_direction(int profile_id, enum shaping_profile_limit_direction limit_direction)
+void stub_set_profile_limit_direction(const char *profile_uuid_str, enum shaping_profile_limit_direction limit_direction)
{
- pf_array[profile_id].limit_direction = limit_direction;
+ uuid_t profile_uuid;
+ struct stub_shaping_profile *stub_profile = NULL;
+
+ uuid_parse(profile_uuid_str, profile_uuid);
+ HASH_FIND(hh, profiles_hash, profile_uuid, sizeof(uuid_t), stub_profile);
+ if (!stub_profile) {
+ stub_profile = (struct stub_shaping_profile*)calloc(1, sizeof(struct stub_shaping_profile));
+ uuid_copy(stub_profile->profile.uuid, profile_uuid);
+ HASH_ADD(hh, profiles_hash, profile.uuid, sizeof(uuid_t), stub_profile);
+ }
+
+ stub_profile->profile.limit_direction = limit_direction;
+
return;
}
-void stub_set_matched_shaping_rules(int rule_num, long long *rule_id, const int *priority, const int *profile_num, int profile_id[][MAX_REF_PROFILE])
+void stub_set_matched_shaping_rules(int rule_num, const char *rule_uuid_str[], const int *priority, const int *profile_num, const char *profile_uuid_str[][MAX_REF_PROFILE])
{
- struct shaping_rule *rules;
int i, j;
- int id;
-
- rules = matched_rules.rules;
for (i = 0; i < rule_num; i++) {
- id = rule_id[i];
- assert(id < MAX_STUB_RULE_NUM);
-
- rules[id].vsys_id = STUB_TEST_VSYS_ID;
- rules[id].id = id;
- rules[id].primary_pf_id = profile_id[i][0];
- rules[id].borrow_pf_num = profile_num[i] - 1;
- rules[id].priority = priority[i];
+ struct stub_matched_rule *stub_matched_rule = NULL;
+ uuid_t rule_uuid;
+ uuid_parse(rule_uuid_str[i], rule_uuid);
+
+ HASH_FIND(hh, rules_hash, rule_uuid, sizeof(uuid_t), stub_matched_rule);
+ if (stub_matched_rule) {
+ continue;
+ }
+
+ stub_matched_rule = (struct stub_matched_rule*)calloc(1, sizeof(struct stub_matched_rule));
+
+ stub_matched_rule->rule.vsys_id = STUB_TEST_VSYS_ID;
+ uuid_copy(stub_matched_rule->rule.uuid, rule_uuid);
+ uuid_parse(profile_uuid_str[i][0], stub_matched_rule->rule.primary_pf_uuid);
+ stub_matched_rule->rule.borrow_pf_num = profile_num[i] - 1;
+ stub_matched_rule->rule.priority = priority[i];
for (j = 1; j < profile_num[i]; j++) {
- rules[id].borrow_pf_id_array[j - 1] = profile_id[i][j];
+ uuid_parse(profile_uuid_str[i][j], stub_matched_rule->rule.borrow_pf_uuid_array[j - 1]);
}
- }
- matched_rules.rule_num = rule_num;
+ HASH_ADD(hh, rules_hash, rule.uuid, sizeof(uuid_t), stub_matched_rule);
+ }
return;
}
-void stub_set_shaping_rule_dscp_value(int rule_id, int dscp_value)
+void stub_set_shaping_rule_dscp_value(const char *rule_uuid_str, int dscp_value)
{
- matched_rules.rules[rule_id].dscp_enable = 1;
- matched_rules.rules[rule_id].dscp_value = dscp_value;
+ uuid_t rule_uuid;
+ struct stub_matched_rule *stub_matched_rule = NULL;
+
+ uuid_parse(rule_uuid_str, rule_uuid);
+ HASH_FIND(hh, rules_hash, rule_uuid, sizeof(uuid_t), stub_matched_rule);
+ if (stub_matched_rule) {
+ stub_matched_rule->rule.dscp_enable = 1;
+ stub_matched_rule->rule.dscp_value = dscp_value;
+ }
+
return;
}
-void stub_set_shaping_rule_fair_factor(int rule_id, int fair_factor)
+void stub_set_shaping_rule_fair_factor(const char *rule_uuid_str, int fair_factor)
{
- matched_rules.rules[rule_id].fair_factor = fair_factor;
+ uuid_t rule_uuid;
+ struct stub_matched_rule *stub_matched_rule = NULL;
+
+ uuid_parse(rule_uuid_str, rule_uuid);
+ HASH_FIND(hh, rules_hash, rule_uuid, sizeof(uuid_t), stub_matched_rule);
+ if (stub_matched_rule) {
+ stub_matched_rule->rule.fair_factor = fair_factor;
+ }
+
return;
}
-void stub_clear_matched_shaping_rules()
+void stub_clear_resource()
{
- memset(&matched_rules, 0, sizeof(struct stub_matched_rules));
+ struct stub_matched_rule *stub_matched_rule, *tmp = NULL;
+
+ HASH_ITER(hh, rules_hash, stub_matched_rule, tmp) {
+ HASH_DEL(rules_hash, stub_matched_rule);
+ free(stub_matched_rule);
+ }
+
+ struct stub_shaping_profile *stub_profile, *tmp_profile = NULL;
+
+ HASH_ITER(hh, profiles_hash, stub_profile, tmp_profile) {
+ HASH_DEL(profiles_hash, stub_profile);
+ free(stub_profile);
+ }
return;
}
@@ -114,7 +162,6 @@ void stub_init()
LOG_INIT("./conf/zlog.conf");
TAILQ_INIT(&tx_queue);
- memset(&matched_rules, 0, sizeof(struct stub_matched_rules));
return;
}
@@ -160,15 +207,6 @@ void maat_options_free(struct maat_options *opts)
return;
}
-int maat_get_table_id(struct maat *instance, const char *table_name)
-{
- if (strcmp(table_name, "TRAFFIC_SHAPING_COMPILE") == 0) {
- return STUB_MAAT_SHAPING_RULE_TABLE_ID;
- } else {
- return STUB_MAAT_SHAPING_PROFILE_TABLE_ID;
- }
-}
-
int maat_plugin_table_ex_schema_register(struct maat *instance, const char *table_name,
maat_ex_new_func_t *new_func,
maat_ex_free_func_t *free_func,
@@ -186,17 +224,21 @@ void maat_free(struct maat *instance)
return;
}
-void *maat_plugin_table_get_ex_data(struct maat *instance, int table_id, const char *key, size_t key_len)
+void *maat_plugin_table_get_ex_data(struct maat *instance, const char *table_name, const char *key, size_t key_len)
{
- int rule_id;
- int profile_id;
+ uuid_t rule_uuid;
+ uuid_t profile_uuid;
- if (table_id == STUB_MAAT_SHAPING_RULE_TABLE_ID) {
- rule_id = *(int*)key;
- return &matched_rules.rules[rule_id];
+ if (strcmp(table_name, "TRAFFIC_SHAPING_RULE") == 0) {
+ struct stub_matched_rule *matched_rule = NULL;
+ uuid_parse(key, rule_uuid);
+ HASH_FIND(hh, rules_hash, rule_uuid, sizeof(uuid_t), matched_rule);
+ return matched_rule ? &matched_rule->rule : NULL;
} else {
- profile_id = atoi(key);
- return &pf_array[profile_id];
+ struct stub_shaping_profile *stub_profile = NULL;
+ uuid_parse(key, profile_uuid);
+ HASH_FIND(hh, profiles_hash, profile_uuid, sizeof(uuid_t), stub_profile);
+ return stub_profile ? &stub_profile->profile : NULL;
}
}
/**********************************************/
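
stub_set_profile_type(), stub_set_profile_limit_direction() and stub_set_token_bucket_avl_per_sec() above all repeat the same uthash find-or-create sequence on profiles_hash. A minimal sketch of a shared helper that would capture that pattern; stub_profile_find_or_create() is a hypothetical name, everything else mirrors the code in this patch:

    #include <stdlib.h>
    #include <uuid/uuid.h>
    #include "uthash.h"
    #include "stub.h"

    extern struct stub_shaping_profile *profiles_hash;

    /* Look up a stub profile by UUID string, creating a zeroed entry on first use. */
    static struct stub_shaping_profile *stub_profile_find_or_create(const char *profile_uuid_str)
    {
        uuid_t profile_uuid;
        struct stub_shaping_profile *stub_profile = NULL;

        uuid_parse(profile_uuid_str, profile_uuid);
        HASH_FIND(hh, profiles_hash, profile_uuid, sizeof(uuid_t), stub_profile);
        if (!stub_profile) {
            stub_profile = (struct stub_shaping_profile *)calloc(1, sizeof(*stub_profile));
            uuid_copy(stub_profile->profile.uuid, profile_uuid);
            HASH_ADD(hh, profiles_hash, profile.uuid, sizeof(uuid_t), stub_profile);
        }
        return stub_profile;
    }
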
diff --git a/shaping/test/stub.h b/shaping/test/stub.h
index 7581a98..e5c73b5 100644
--- a/shaping/test/stub.h
+++ b/shaping/test/stub.h
@@ -1,19 +1,36 @@
#include <sys/queue.h>
#include "shaper.h"
+#include "shaper_maat.h"
#define OUT_ARG
#define MAX_REF_PROFILE 8
#define STUB_APP_STATE_HOLD_PACKET 0x04
-#define STUB_MAAT_SHAPING_RULE_TABLE_ID 0
-#define STUB_MAAT_SHAPING_PROFILE_TABLE_ID 1
-
#define STUB_TIME_INC_FOR_PACKET 1000000
#define STUB_TIME_INC_FOR_HMGET 10000000
#define STUB_TEST_VSYS_ID 2333
+#define AVALIABLE_TOKEN_UNLIMITED -1
+
+struct stub_matched_rule {
+ UT_hash_handle hh;
+ struct shaping_rule rule;
+};
+
+struct stub_avaliable_token {
+ int in_limit_bandwidth;
+ int out_limit_bandwidth;
+ int bidirection_limit_bandwidth;
+};
+
+struct stub_shaping_profile {
+ struct shaping_profile profile;
+ struct stub_avaliable_token avaliable_token;
+ UT_hash_handle hh;
+};
+
struct stub_packet {
unsigned char direction;
unsigned char pure_control;
@@ -28,16 +45,16 @@ struct stub_packet_node {
TAILQ_HEAD(stub_pkt_queue, stub_packet_node);
-void stub_set_token_bucket_avl_per_sec(int profile_id, unsigned int tokens, unsigned char direction, enum shaping_profile_limit_direction limit_direction);
-void stub_refresh_token_bucket(int profile_id);
-void stub_set_profile_type(int profile_id, enum shaping_profile_type type);
-void stub_set_profile_limit_direction(int profile_id, enum shaping_profile_limit_direction limit_direction);
-void stub_set_async_token_get_times(int profile_id, int times);
+void stub_set_token_bucket_avl_per_sec(const char *profile_uuid_str, unsigned int tokens, unsigned char direction, enum shaping_profile_limit_direction limit_direction);
+void stub_refresh_token_bucket(const char *profile_uuid_str);
+void stub_set_profile_type(const char *profile_uuid_str, enum shaping_profile_type type);
+void stub_set_profile_limit_direction(const char *profile_uuid_str, enum shaping_profile_limit_direction limit_direction);
-void stub_set_matched_shaping_rules(int rule_num, long long *rule_id, const int *priority, const int *profile_num, int profile_id[][MAX_REF_PROFILE]);
-void stub_set_shaping_rule_dscp_value(int rule_id, int dscp_value);
-void stub_set_shaping_rule_fair_factor(int rule_id, int fair_factor);
-void stub_clear_matched_shaping_rules();
+void stub_set_matched_shaping_rules(int rule_num, const char *rule_uuid_str[], const int *priority, const int *profile_num, const char *profile_uuid_str[][MAX_REF_PROFILE]);
+void stub_set_shaping_rule_dscp_value(const char *rule_uuid_str, int dscp_value);
+void stub_set_shaping_rule_fair_factor(const char *rule_uuid_str, int fair_factor);
+void stub_clear_resource();
+void stub_swarmkv_clear_resource();
void stub_send_packet(struct stub_packet *packet);
struct stub_pkt_queue* stub_get_tx_queue();
@@ -49,7 +66,6 @@ void stub_curr_time_s_inc(int time_s);
unsigned long long stub_curr_time_ns_get();
void stub_init();
-void dummy_swarmkv_init();
/*******************temporary for test******************************/
void stub_shaper_stat_send(int thread_seq);
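
Taken together, the declarations above replace integer rule/profile ids with UUID strings throughout the stub API. A minimal setup/teardown sketch under the assumption that it runs inside a test body; the two UUID literals are placeholders, not values from this patch:

    #include "stub.h"

    static void example_uuid_stub_setup(void)
    {
        /* Placeholder UUIDs for illustration only. */
        const char *profile_uuid = "11111111-1111-1111-1111-111111111111";
        const char *rule_uuids[1] = { "22222222-2222-2222-2222-222222222222" };
        const char *profile_chain[1][MAX_REF_PROFILE] = { { profile_uuid } };
        const int priority[1] = { 1 };
        const int profile_num[1] = { 1 };

        stub_set_profile_type(profile_uuid, PROFILE_TYPE_MAX_MIN_HOST_FAIRNESS);
        stub_set_profile_limit_direction(profile_uuid, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING);
        stub_set_matched_shaping_rules(1, rule_uuids, priority, profile_num, profile_chain);
        stub_set_shaping_rule_fair_factor(rule_uuids[0], 1);

        /* ... exercise the shaper under test ... */

        stub_clear_resource();          /* frees every rule and profile added above */
        stub_swarmkv_clear_resource();  /* releases the dummy swarmkv state */
    }
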
diff --git a/shaping/test/test_conf/shaping_maat.json b/shaping/test/test_conf/shaping_maat.json
index 9d518f9..d8efe2e 100644
--- a/shaping/test/test_conf/shaping_maat.json
+++ b/shaping/test/test_conf/shaping_maat.json
@@ -1,75 +1,72 @@
{
- "compile_table": "TRAFFIC_SHAPING_COMPILE",
- "group2compile_table": "GROUP_SHAPING_COMPILE_RELATION",
- "group2group_table": "GROUP_GROUP_RELATION",
+ "compile_table": "TRAFFIC_SHAPING_RULE",
"rules": [
{
- "compile_id": 182,
+ "uuid": "00000000-0000-0000-0000-000000000182",
"service": 2,
"action": 32,
"do_blacklist": 0,
"do_log": 1,
"effective_rage": 0,
- "user_region": "{\"priority\":1,\"fair_factor\":10,\"profile_chain\":[1]}",
+ "action_parameter": {
+ "priority": 1,
+ "fair_factor": 10,
+ "dscp_marking": {
+ "enabled": 1,
+ "dscp_type": "Assured Forwarding (AF)",
+ "dscp_name": "af11",
+ "dscp_value": 10
+ },
+ "profile_chain": [
+ "00000000-0000-0000-0000-000000000001"
+ ]
+ },
"is_valid": "yes",
- "groups": [
- {
- "group_name":"OBJ_SRC_IP_ADDR",
- "virtual_table":"TSG_SECURITY_SOURCE_ADDR",
- "not_flag" : 0,
- "regions": [
- {
- "table_name": "TSG_OBJ_IP_ADDR",
- "table_type": "ip_plus",
- "table_content": {
- "saddr_format": "range",
- "addr_type": "ipv4",
- "src_ip1": "192.168.50.67",
- "src_ip2": "192.168.50.67",
- "sport_format": "range",
- "src_port1": "0",
- "src_port2": "65535",
- "protocol": 0,
- "direction": "double"
- }
- }
- ]
- },
- {
- "group_name":"OBJ_DST_IP_ADDR",
- "virtual_table":"TSG_SECURITY_DESTINATION_ADDR",
- "not_flag" : 0,
- "regions": [
- {
- "table_name": "TSG_OBJ_IP_ADDR",
- "table_type": "ip_plus",
- "table_content": {
- "saddr_format": "range",
- "addr_type": "ipv4",
- "src_ip1": "192.168.42.43",
- "src_ip2": "192.168.42.43",
- "sport_format": "range",
- "src_port1": "5678",
- "src_port2": "5678",
- "protocol": 0,
- "direction": "double"
- }
- }
- ]
- }
-
- ]
+ "and_conditions": [
+ {
+ "attribute_name": "ATTRIBUTE_IP_PLUS_SOURCE",
+ "objects": [
+ {
+ "object_name": "ExcludeLogicObject203_1",
+ "uuid": "00000000-0000-0000-0000-000000000198",
+ "items": [
+ {
+ "table_name": "IP_PLUS_CONFIG",
+ "table_type": "ip",
+ "table_content": {
+ "ip": "192.168.50.43-192.168.50.43"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "attribute_name": "ATTRIBUTE_IP_PLUS_DESTINATION",
+ "objects": [
+ {
+ "object_name": "ExcludeLogicObject203_2",
+ "uuid": "00000000-0000-0000-0000-000000000199",
+ "items": [
+ {
+ "table_name": "IP_PLUS_CONFIG",
+ "table_type": "ip",
+ "table_content": {
+ "ip": "47.92.108.93-47.92.108.93"
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
}
],
"plugin_table": [
{
"table_name": "TRAFFIC_SHAPING_PROFILE",
"table_content": [
- "1\t{\"value\":\"local_host\",\"host_fairness\":1}\t[{\"direction\":\"incoming\",\"bandwidth\":102400},{\"direction\":\"outcoming\",\"bandwidth\":102400}]\t{\"enabled\":1,\"algorithm\":\"codel\"}\tnull\t{}\t1",
- "3\t{\"value\":\"local_host\",\"host_fairness\":1}\t[{\"direction\":\"incoming\",\"bandwidth\":10240},{\"direction\":\"outcoming\",\"bandwidth\":10240}]\t{\"enabled\":1,\"algorithm\":\"codel\"}\tnull\t{}\t1",
- "4\t{\"value\":\"local_host\",\"host_fairness\":1}\t[{\"direction\":\"incoming\",\"bandwidth\":10240},{\"direction\":\"outcoming\",\"bandwidth\":10240}]\t{\"enabled\":1,\"algorithm\":\"codel\"}\tnull\t{}\t1",
- "5\t{\"value\":\"local_host\",\"host_fairness\":1}\t[{\"direction\":\"incoming\",\"bandwidth\":10240},{\"direction\":\"outcoming\",\"bandwidth\":10240}]\t{\"enabled\":1,\"algorithm\":\"codel\"}\tnull\t{}\t1",
- "6\t{\"value\":\"local_host\",\"host_fairness\":1}\t[{\"direction\":\"incoming\",\"bandwidth\":10240},{\"direction\":\"outcoming\",\"bandwidth\":10240}]\t{\"enabled\":1,\"algorithm\":\"codel\"}\tnull\t{}\t1"
+ {"uuid":"00000000-0000-0000-0000-000000000001", "type": "generic", "type_argument": "none", "limits": [{"direction":"incoming","bandwidth":102400},{"direction":"outcoming","bandwidth":102400}], "aqm_options": {"algorithm":"codel"}, "is_valid":"yes"}
]
}
]
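
Profile rows in TRAFFIC_SHAPING_PROFILE are now single JSON objects keyed by uuid, and the stub resolves them by table name and UUID string. A minimal sketch of looking up the profile configured above through the rewritten maat_plugin_table_get_ex_data(); it assumes the maat declarations are already in scope, and lookup_example_profile() is a hypothetical wrapper:

    #include <string.h>
    #include "shaper.h"

    /* Illustrative only: resolve the profile defined in shaping_maat.json above. */
    static struct shaping_profile *lookup_example_profile(struct maat *instance)
    {
        const char *key = "00000000-0000-0000-0000-000000000001";  /* uuid from the config above */

        return (struct shaping_profile *)maat_plugin_table_get_ex_data(
                instance, "TRAFFIC_SHAPING_PROFILE", key, strlen(key));
    }
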
diff --git a/shaping/test/test_conf/table_info.json b/shaping/test/test_conf/table_info.json
index 692f4f6..2d98ad4 100644
--- a/shaping/test/test_conf/table_info.json
+++ b/shaping/test/test_conf/table_info.json
@@ -1,23 +1,22 @@
[
{
"table_id": 0,
- "table_name": "TRAFFIC_SHAPING_COMPILE",
+ "table_name": "TRAFFIC_SHAPING_RULE",
"table_type": "plugin",
- "valid_column": 8,
"custom": {
- "key": 1,
- "key_type": "integer",
- "key_len": 8
+ "gc_timeout_s":2,
+ "key_name": "uuid",
+ "key_type": "uuid"
}
},
{
"table_id": 1,
"table_name": "TRAFFIC_SHAPING_PROFILE",
"table_type": "plugin",
- "valid_column": 8,
"custom": {
- "key": 1,
- "key_type": "pointer"
+ "gc_timeout_s":2,
+ "key_name": "uuid",
+ "key_type": "uuid"
}
}