28 files changed, 1140 insertions, 952 deletions
@@ -1 +1,2 @@
-build/
\ No newline at end of file
+build/
+.cache/
\ No newline at end of file
diff --git a/ci/travis.sh b/ci/travis.sh
index 2df9b40..413f024 100644
--- a/ci/travis.sh
+++ b/ci/travis.sh
@@ -49,6 +49,7 @@ yum install -y libibverbs # required by mrzcpd
 yum install -y libbreakpad_mini-devel
 yum install -y msgpack-devel
 yum install -y librdkafka-devel
+yum install -y libuuid-devel
 
 source /etc/profile.d/framework.sh
 source /etc/profile.d/mrzcpd.sh
diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt
index 622884b..bbb5a46 100644
--- a/common/CMakeLists.txt
+++ b/common/CMakeLists.txt
@@ -1,6 +1,6 @@
 add_library(common src/log.cpp src/session_table.cpp src/utils.cpp src/addr_tuple4.cpp src/raw_packet.cpp src/mpack.c)
 
-target_link_libraries(common PUBLIC MESA_handle_logger)
+target_link_libraries(common PUBLIC MESA_handle_logger uuid)
 target_link_libraries(common PUBLIC mrzcpd)
 
 target_include_directories(common PUBLIC ${CMAKE_CURRENT_LIST_DIR}/include)
diff --git a/common/include/utils.h b/common/include/utils.h
index 38ad5d3..3f880f9 100644
--- a/common/include/utils.h
+++ b/common/include/utils.h
@@ -1,5 +1,6 @@
 #ifndef _UTILS_H
 #define _UTILS_H
+#include <uuid/uuid.h>
 
 #ifdef __cpluscplus
 extern "C"
@@ -112,6 +113,11 @@ int get_ip_by_device_name(const char *dev_name, char *ip_buff);
 int get_mac_by_device_name(const char *dev_name, char *mac_buff);
 int str_to_mac(const char *str, char *mac_buff);
 
+/******************************************************************************
+ * uuid
+ ******************************************************************************/
+const char *uuid_print_str(uuid_t uuid);
+
 #ifdef __cpluscplus
 }
 #endif
diff --git a/common/src/utils.cpp b/common/src/utils.cpp
index ecd856f..bbf0dca 100644
--- a/common/src/utils.cpp
+++ b/common/src/utils.cpp
@@ -292,3 +292,10 @@ int str_to_mac(const char *str, char *mac_buff)
         return -1;
     }
 }
+
+const char *uuid_print_str(uuid_t uuid)
+{
+    static thread_local char uuid_str[UUID_STR_LEN] = {0};
+    uuid_unparse(uuid, uuid_str);
+    return uuid_str;
+}
\ No newline at end of file
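The new uuid_print_str() helper formats a UUID into a thread_local static buffer and returns a pointer to it, so the returned string is only valid until the next call on the same thread. A minimal usage sketch (the caller and uuid_generate() are illustrative, not part of this change):

    #include <uuid/uuid.h>
    #include <cstdio>
    #include "utils.h"

    // Illustrative caller: log a freshly generated UUID.
    void log_new_uuid(void)
    {
        uuid_t id;
        uuid_generate(id);                        // libuuid: fill 'id' with a random UUID
        printf("uuid=%s\n", uuid_print_str(id));  // fine: one use per statement

        // Not safe: for two different UUIDs id_a and id_b, both arguments would
        // alias the same thread_local buffer, so both %s would print the value
        // formatted by the second call.
        // printf("%s %s\n", uuid_print_str(id_a), uuid_print_str(id_b));
    }

This single-buffer behavior is presumably why call sites later in this diff (for example shaper_datapath_telemetry_info_append) emit each UUID with its own separate snprintf call.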
diff --git a/common/test/gtest_ctrl_packet.cpp b/common/test/gtest_ctrl_packet.cpp
index bb16363..7423baa 100644
--- a/common/test/gtest_ctrl_packet.cpp
+++ b/common/test/gtest_ctrl_packet.cpp
@@ -3,7 +3,7 @@
 #include "shaper_marsio.h"
 
-static int construct_shaping_cmsg(const char *tsync, unsigned long long session_id, const char *state, long long *shaping_rule_ids, int shaping_rule_id_num, char **mpack_data, size_t *mpack_size)
+static int construct_shaping_cmsg(const char *tsync, unsigned long long session_id, const char *state, uuid_t *shaping_rule_ids, int shaping_rule_id_num, char **mpack_data, size_t *mpack_size)
 {
     mpack_writer_t writer;
 
@@ -34,7 +34,7 @@ static int construct_shaping_cmsg(const char *tsync, unsigned long long session_
     mpack_write_cstr(&writer, "rule_ids");
     mpack_build_array(&writer);
     for (int i = 0; i < shaping_rule_id_num; i++) {
-        mpack_write_i64(&writer, shaping_rule_ids[i]);
+        mpack_write_bin(&writer, (char*)shaping_rule_ids[i], sizeof(uuid_t));
     }
     mpack_complete_array(&writer);//build array rule_ids
     mpack_complete_map(&writer);//build map shaper
@@ -52,10 +52,14 @@ TEST(CTRL_PACKET, PARSE)
     const char *tsync = "2.0";
     unsigned long long session_id = 123456789;
     const char *state = "active";
-    long long shaping_rule_ids[] = {4, 5, 6};
+    uuid_t shaping_rule_ids[3];
     char *mpack_data = NULL;
     size_t mpack_size = 0;
 
+    uuid_parse("00000000-0000-0000-0000-000000000001", shaping_rule_ids[0]);
+    uuid_parse("00000000-0000-0000-0000-000000000002", shaping_rule_ids[1]);
+    uuid_parse("00000000-0000-0000-0000-000000000003", shaping_rule_ids[2]);
+
     construct_shaping_cmsg(tsync, session_id, state, shaping_rule_ids, 3, &mpack_data, &mpack_size);
 
     struct ctrl_pkt_data ctrl_data;
@@ -65,9 +69,14 @@ TEST(CTRL_PACKET, PARSE)
     EXPECT_EQ(ctrl_data.session_id, 123456789);
     EXPECT_EQ(ctrl_data.state, SESSION_STATE_ACTIVE);
     EXPECT_EQ(ctrl_data.shaping_rule_num, 3);
-    EXPECT_EQ(ctrl_data.shaping_rule_ids[0], 4);
-    EXPECT_EQ(ctrl_data.shaping_rule_ids[1], 5);
-    EXPECT_EQ(ctrl_data.shaping_rule_ids[2], 6);
+
+    char uuid_str[UUID_STR_LEN];
+    uuid_unparse(ctrl_data.shaping_rule_uuids[0], uuid_str);
+    EXPECT_STREQ(uuid_str, "00000000-0000-0000-0000-000000000001");
+    uuid_unparse(ctrl_data.shaping_rule_uuids[1], uuid_str);
+    EXPECT_STREQ(uuid_str, "00000000-0000-0000-0000-000000000002");
+    uuid_unparse(ctrl_data.shaping_rule_uuids[2], uuid_str);
+    EXPECT_STREQ(uuid_str, "00000000-0000-0000-0000-000000000003");
 
     if (mpack_data) {
         free(mpack_data);
diff --git a/conf/shaping_maat.json b/conf/shaping_maat.json
index ad01d58..7c31824 100644
--- a/conf/shaping_maat.json
+++ b/conf/shaping_maat.json
@@ -1,76 +1,71 @@
 {
- "compile_table": "TRAFFIC_SHAPING_COMPILE",
- "group2compile_table": "GROUP_SHAPING_COMPILE_RELATION",
- "group2group_table": "GROUP_GROUP_RELATION",
+ "compile_table": "TRAFFIC_SHAPING_RULE",
"rules": [
{
- "compile_id": 182,
+ "uuid": "00000000-0000-0000-0000-000000000182",
"service": 2,
"action": 32,
"do_blacklist": 0,
"do_log": 1,
"effective_rage": 0,
"user_region": "{\"priority\":1,\"fair_factor\":10,\"profile_chain\":[1]}",
- "group_num" : 0,
+ "priority": 1,
+ "fair_factor": 10,
+ "dscp_marking": {
+ "enabled": 1,
+ "dscp_type": "Assured Forwarding (AF)",
+ "dscp_name": "af11",
+ "dscp_value": 10
+ },
+ "profile_chain": [
+ "00000000-0000-0000-0000-000000000001"
+ ],
"is_valid": "yes",
- "groups": [
- {
- "group_name":"OBJ_SRC_IP_ADDR",
- "virtual_table":"TSG_SECURITY_SOURCE_ADDR",
- "not_flag" : 0,
- "regions": [
- {
- "table_name": "TSG_OBJ_IP_ADDR",
- "table_type": "ip_plus",
- "table_content": {
- "saddr_format": "range",
- "addr_type": "ipv4",
- "src_ip1": "192.168.50.67",
- "src_ip2": "192.168.50.67",
- "sport_format": "range",
- "src_port1": "0",
- "src_port2": "65535",
- "protocol": 0,
- "direction": "double"
- }
- }
- ]
- },
- {
- "group_name":"OBJ_DST_IP_ADDR",
- "virtual_table":"TSG_SECURITY_DESTINATION_ADDR",
- "not_flag" : 0,
- "regions": [
- {
- "table_name": "TSG_OBJ_IP_ADDR",
- "table_type": "ip_plus",
- "table_content": {
- "saddr_format": "range",
- "addr_type": "ipv4",
- "src_ip1": "192.168.42.43",
- "src_ip2": "192.168.42.43",
- "sport_format": "range",
- "src_port1": "5678",
- "src_port2": "5678",
- "protocol": 0,
- "direction": "double"
- }
- }
- ]
- }
-
- ]
+ "and_conditions": [
+ {
+ "attribute_name": "ATTRIBUTE_IP_PLUS_SOURCE",
+ "objects": [
+ {
+ "object_name": "ExcludeLogicObject203_1",
+ "uuid": "00000000-0000-0000-0000-000000000198",
+ "items": [
+ {
+ "table_name": "IP_PLUS_CONFIG",
+ "table_type": "ip",
+ "table_content": {
+ "ip": "192.168.50.43-192.168.50.43"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "attribute_name": "ATTRIBUTE_IP_PLUS_DESTINATION",
+ "objects": [
+ {
+ "object_name": "ExcludeLogicObject203_2",
+ "uuid": "00000000-0000-0000-0000-000000000199",
+ "items": [
+ {
+ "table_name": "IP_PLUS_CONFIG",
+ "table_type": "ip",
+ "table_content": {
+ "ip": "47.92.108.93-47.92.108.93"
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
}
],
"plugin_table": [
{
"table_name": "TRAFFIC_SHAPING_PROFILE",
"table_content": [
- "1\t{\"value\":\"local_host\",\"host_fairness\":1}\t[{\"direction\":\"incoming\",\"bandwidth\":102400},{\"direction\":\"outcoming\",\"bandwidth\":102400}]\t{\"enabled\":1,\"algorithm\":\"codel\"}\tnull\t{}\t1",
- "3\t{\"value\":\"local_host\",\"host_fairness\":1}\t[{\"direction\":\"incoming\",\"bandwidth\":10240},{\"direction\":\"outcoming\",\"bandwidth\":10240}]\t{\"enabled\":1,\"algorithm\":\"codel\"}\tnull\t{}\t1",
- "4\t{\"value\":\"local_host\",\"host_fairness\":1}\t[{\"direction\":\"incoming\",\"bandwidth\":10240},{\"direction\":\"outcoming\",\"bandwidth\":10240}]\t{\"enabled\":1,\"algorithm\":\"codel\"}\tnull\t{}\t1",
- "5\t{\"value\":\"local_host\",\"host_fairness\":1}\t[{\"direction\":\"incoming\",\"bandwidth\":10240},{\"direction\":\"outcoming\",\"bandwidth\":10240}]\t{\"enabled\":1,\"algorithm\":\"codel\"}\tnull\t{}\t1",
- "6\t{\"value\":\"local_host\",\"host_fairness\":1}\t[{\"direction\":\"incoming\",\"bandwidth\":10240},{\"direction\":\"outcoming\",\"bandwidth\":10240}]\t{\"enabled\":1,\"algorithm\":\"codel\"}\tnull\t{}\t1"
+ {"uuid":"00000000-0000-0000-0000-000000000001", "type": "generic", "type_argument": "none", "limits": [{"direction":"incoming","bandwidth":102400},{"direction":"outcoming","bandwidth":102400}], "aqm_options": {"algorithm":"codel"}, "is_valid":"yes"}
]
}
]
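With this change, both the TRAFFIC_SHAPING_RULE and TRAFFIC_SHAPING_PROFILE tables carry one JSON object per line, keyed by a "uuid" string, instead of the old tab-separated columns with integer IDs. A minimal sketch of reading one profile line, using cJSON and libuuid as elsewhere in this commit (the field names follow the sample above; the function itself and the printf are illustrative, the real parsing lives in shaper_maat.cpp further down):

    #include <stdio.h>
    #include <uuid/uuid.h>
    #include "cJSON.h"

    // Illustrative: pull "uuid" and "limits" out of one TRAFFIC_SHAPING_PROFILE line.
    static int parse_profile_line(const char *table_line, uuid_t out_uuid)
    {
        cJSON *json = cJSON_Parse(table_line);
        if (!json) {
            return -1;
        }

        cJSON *uuid_obj = cJSON_GetObjectItem(json, "uuid");
        if (!uuid_obj || uuid_obj->type != cJSON_String ||
            uuid_parse(uuid_obj->valuestring, out_uuid) != 0) {
            cJSON_Delete(json);
            return -1;
        }

        cJSON *limit = NULL;
        cJSON_ArrayForEach(limit, cJSON_GetObjectItem(json, "limits")) {
            cJSON *dir = cJSON_GetObjectItem(limit, "direction");
            cJSON *bw  = cJSON_GetObjectItem(limit, "bandwidth");
            if (dir && bw) {
                printf("direction=%s bandwidth=%d\n", dir->valuestring, bw->valueint);
            }
        }

        cJSON_Delete(json);
        return 0;
    }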
diff --git a/conf/table_info.json b/conf/table_info.json index b99eca1..30e7f5b 100644 --- a/conf/table_info.json +++ b/conf/table_info.json @@ -1,24 +1,21 @@ [ { "table_id": 0, - "table_name": "TRAFFIC_SHAPING_COMPILE", + "table_name": "TRAFFIC_SHAPING_RULE", "table_type": "plugin", - "valid_column": 9, "custom": { "gc_timeout_s":2, - "key": 1, - "key_type": "integer", - "key_len": 8 + "key_name": "uuid", + "key_type": "pointer" } }, { "table_id": 1, "table_name": "TRAFFIC_SHAPING_PROFILE", "table_type": "plugin", - "valid_column": 7, "custom": { "gc_timeout_s":2, - "key": 1, + "key_name": "uuid", "key_type": "pointer" } } diff --git a/shaping/include/shaper.h b/shaping/include/shaper.h index a438242..c5d3dd9 100644 --- a/shaping/include/shaper.h +++ b/shaping/include/shaper.h @@ -8,6 +8,7 @@ #include "shaper_stat.h" #include "shaper_global_stat.h" #include "shaper_aqm.h" +#include <uuid/uuid.h> extern "C" { #include "timeout.h" } @@ -59,7 +60,6 @@ struct shaping_thread_ctx { struct shaping_global_stat *global_stat; struct shaping_marsio_info *marsio_info; struct swarmkv *swarmkv_db;//handle of swarmkv - struct shaping_maat_info *maat_info; struct session_table *session_table; struct timeouts *expires; time_t last_update_timeout_sec; @@ -70,7 +70,6 @@ struct shaping_thread_ctx { struct shaping_ctx { int thread_num; struct swarmkv *swarmkv_db;//handle of swarmkv - struct shaping_maat_info *maat_info; struct shaping_marsio_info *marsio_info; struct shaping_stat *stat; struct shaping_global_stat *global_stat; @@ -108,7 +107,7 @@ struct shaper_token_multiple { }; struct shaping_profile_hash_node { - int id; + uuid_t uuid; enum shaper_aqm_type aqm_type; enum shaping_profile_limit_direction limit_direction; long long in_deposit_token_bits[SHAPING_PRIORITY_NUM_MAX]; @@ -133,7 +132,7 @@ struct shaping_profile_hash_node { }; struct shaping_profile_info { - int id;//profile_id + uuid_t uuid;//profile_id enum shaping_profile_type type; int priority; unsigned char async_pass[SHAPING_DIR_MAX]; @@ -147,8 +146,8 @@ struct shaping_profile_info { }; struct shaping_rule_info { + uuid_t uuid;//rule_id int vsys_id; - int id;//rule_id int fair_factor; struct shaping_profile_info primary; struct shaping_profile_info borrowing[SHAPING_REF_PROFILE_NUM_MAX - 1]; @@ -162,7 +161,7 @@ struct shaping_packet_wrapper { unsigned long long enqueue_time_us;//first enqueue time unsigned int length; int rule_anchor; - int aqm_processed_pf_ids[SHAPING_REF_PROFILE_NUM_MAX]; + uuid_t aqm_processed_pf_uuids[SHAPING_REF_PROFILE_NUM_MAX]; TAILQ_ENTRY(shaping_packet_wrapper) node; }; TAILQ_HEAD(delay_queue, shaping_packet_wrapper); @@ -225,7 +224,7 @@ struct shaping_hincrby_cb_arg { long long start_time_us; long long queue_len; enum shaping_packet_dir dir; - int profile_id; + uuid_t profile_uuid; int priority; int retry_cnt; }; diff --git a/shaping/include/shaper_aqm.h b/shaping/include/shaper_aqm.h index 2367d92..3436eae 100644 --- a/shaping/include/shaper_aqm.h +++ b/shaping/include/shaper_aqm.h @@ -1,5 +1,6 @@ #pragma once #include <time.h> +#include <uuid/uuid.h> #include "shaper_stat.h" @@ -37,5 +38,5 @@ struct shaper_aqm_codel_para { }; int shaper_aqm_need_drop(struct shaping_profile_info *profile, struct shaping_packet_wrapper *pkt_wrapper, enum shaping_packet_dir dir, struct timespec *curr_time, unsigned long long latency_us); -int shaper_aqm_blue_need_drop(int profile_id, struct shaper_aqm_blue_para *para, int curr_queue_len); -int shaper_aqm_codel_need_drop(int profile_id, struct shaper_aqm_codel_para *para, unsigned 
long long curr_time_ms, unsigned long long latency_ms);
\ No newline at end of file +int shaper_aqm_blue_need_drop(uuid_t profile_uuid, struct shaper_aqm_blue_para *para, int curr_queue_len); +int shaper_aqm_codel_need_drop(uuid_t profile_uuid, struct shaper_aqm_codel_para *para, unsigned long long curr_time_ms, unsigned long long latency_ms);
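A note on the uuid_t parameters introduced in these prototypes: in libuuid, uuid_t is typedef'd as unsigned char[16], so such parameters decay to a pointer to the 16 raw bytes, and the IDs must be handled with the libuuid helpers rather than with '=' or '=='. A small sketch of the idioms this commit relies on (function and variable names are illustrative):

    #include <uuid/uuid.h>   // uuid_t is: typedef unsigned char uuid_t[16];

    void uuid_idioms(uuid_t a, uuid_t b)   // both decay to unsigned char *
    {
        if (uuid_compare(a, b) == 0) {
            // equal: byte-wise comparison, like memcmp
        }
        if (uuid_is_null(a)) {
            // all-zero UUID; used as the empty-slot marker for
            // aqm_processed_pf_uuids[] later in this diff
        }
        uuid_copy(b, a);   // dst, src: copies the 16 bytes (arrays cannot be assigned)
        uuid_clear(b);     // reset to the null UUID
    }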
\ No newline at end of file diff --git a/shaping/include/shaper_maat.h b/shaping/include/shaper_maat.h index 3ec3259..78363fd 100644 --- a/shaping/include/shaper_maat.h +++ b/shaping/include/shaper_maat.h @@ -1,24 +1,20 @@ +#pragma once #include "shaper.h" -struct shaping_maat_info { - int rule_table_id; - int profile_table_id; -}; - struct shaping_rule { + uuid_t uuid; + uuid_t primary_pf_uuid; + uuid_t borrow_pf_uuid_array[SHAPING_REF_PROFILE_NUM_MAX]; + int borrow_pf_num; int vsys_id; - int id; int priority; - int primary_pf_id; - int borrow_pf_id_array[SHAPING_REF_PROFILE_NUM_MAX]; - int borrow_pf_num; int fair_factor; unsigned char dscp_enable; unsigned char dscp_value; }; struct shaping_profile { - int id; + uuid_t uuid; enum shaping_profile_type type; enum shaper_aqm_type aqm_type; enum shaping_profile_limit_direction limit_direction; @@ -28,17 +24,17 @@ struct shaping_profile { int valid; }; -void shaper_rule_ex_new(const char *table_name, int table_id, const char *key, const char *table_line, void **ad, long argl, void *argp); -void shaper_rule_ex_dup(int table_id, void **to, void **from, long argl, void *argp); -void shaper_rule_ex_free(int table_id, void **ad, long argl, void *argp); -void shaper_profile_ex_new(const char *table_name, int table_id, const char *key, const char *table_line, void **ad, long argl, void *argp); -void shaper_profile_ex_dup(int table_id, void **to, void **from, long argl, void *argp); -void shaper_profile_ex_free(int table_id, void **ad, long argl, void *argp); +void shaper_rule_ex_new(const char *table_name, const char *key, const char *table_line, void **ad, long argl, void *argp); +void shaper_rule_ex_dup(const char *table_name, void **to, void **from, long argl, void *argp); +void shaper_rule_ex_free(const char *table_name, void **ad, long argl, void *argp); +void shaper_profile_ex_new(const char *table_name, const char *key, const char *table_line, void **ad, long argl, void *argp); +void shaper_profile_ex_dup(const char *table_name, void **to, void **from, long argl, void *argp); +void shaper_profile_ex_free(const char *table_name, void **ad, long argl, void *argp); -int shaper_rule_is_enabled(struct shaping_thread_ctx *ctx, long long rule_id); +int shaper_rule_is_enabled(struct shaping_thread_ctx *ctx, uuid_t rule_uuid); -struct shaping_profile *shaper_maat_profile_get(struct shaping_thread_ctx *ctx, int profile_id); +struct shaping_profile *shaper_maat_profile_get(struct shaping_thread_ctx *ctx, uuid_t profile_uuid); -void shaper_rules_update(struct shaping_thread_ctx *ctx, struct shaping_flow *sf, long long *rule_compile_ids, int rule_num); -struct shaping_maat_info* shaper_maat_init(const char *instance_name); -void shaper_maat_destroy(struct shaping_maat_info *maat_info);
\ No newline at end of file +void shaper_rules_update(struct shaping_thread_ctx *ctx, struct shaping_flow *sf, uuid_t *rule_uuids, int rule_num); +int shaper_maat_init(const char *instance_name); +void shaper_maat_destroy();
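shaper_maat.h now refers to tables by name instead of the numeric table ids that shaping_maat_info used to cache, and the lookup key is the UUID in its 36-character string form, matching the "key_name": "uuid", "key_type": "pointer" entries in table_info.json. A sketch of a lookup through the new interface, built from the calls that appear in shaper_maat.cpp below (the wrapper function is illustrative). One thing worth double-checking in the diff itself: shaper_rule_is_enabled passes sizeof(uuid_str) (37, including the NUL) as the key length while shaper_maat_profile_get passes strlen(uuid_str) (36); the two should presumably agree.

    // Illustrative wrapper: fetch the ex-data for one rule by its UUID string key.
    static struct shaping_rule *shaper_rule_lookup(uuid_t rule_uuid)
    {
        char key[UUID_STR_LEN] = {0};

        uuid_unparse(rule_uuid, key);   // "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
        return (struct shaping_rule *)maat_plugin_table_get_ex_data(
                g_maat_instance, "TRAFFIC_SHAPING_RULE", key, strlen(key));
    }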
\ No newline at end of file diff --git a/shaping/include/shaper_marsio.h b/shaping/include/shaper_marsio.h index fdc9c86..c530ee2 100644 --- a/shaping/include/shaper_marsio.h +++ b/shaping/include/shaper_marsio.h @@ -41,7 +41,7 @@ struct ctrl_pkt_data { uint64_t session_id; enum session_state state; - long long shaping_rule_ids[SHAPING_RULE_NUM_MAX]; + uuid_t shaping_rule_uuids[SHAPING_RULE_NUM_MAX]; int shaping_rule_num; }; diff --git a/shaping/src/shaper.cpp b/shaping/src/shaper.cpp index 9c2a49f..f7a81fb 100644 --- a/shaping/src/shaper.cpp +++ b/shaping/src/shaper.cpp @@ -35,8 +35,8 @@ extern "C" { #define SWARMKV_CALLER_LOOP_DIVISOR_MIN 1 #define SWARMKV_CALLER_LOOP_DIVISOR_MAX 10 -#define SWARMKV_IN_QUEUE_LEN_GET_CMD "HMGET tsg-shaping-%d priority-0-in priority-1-in priority-2-in priority-3-in priority-4-in priority-5-in priority-6-in priority-7-in priority-8-in priority-9-in" -#define SWARMKV_OUT_QUEUE_LEN_GET_CMD "HMGET tsg-shaping-%d priority-0-out priority-1-out priority-2-out priority-3-out priority-4-out priority-5-out priority-6-out priority-7-out priority-8-out priority-9-out" +#define SWARMKV_IN_QUEUE_LEN_GET_CMD "HMGET tsg-shaping-%s priority-0-in priority-1-in priority-2-in priority-3-in priority-4-in priority-5-in priority-6-in priority-7-in priority-8-in priority-9-in" +#define SWARMKV_OUT_QUEUE_LEN_GET_CMD "HMGET tsg-shaping-%s priority-0-out priority-1-out priority-2-out priority-3-out priority-4-out priority-5-out priority-6-out priority-7-out priority-8-out priority-9-out" struct shaper {//trees in one thread struct avl_tree *priority_trees[SHAPING_PRIORITY_NUM_MAX][SHAPING_DIR_MAX];//represent 10 avl tree corresponding to 10 priority @@ -442,7 +442,7 @@ static void shaper_token_multiple_update(struct shaping_thread_ctx *ctx, struct } END: - LOG_INFO("%s: profile id %d, token_get_multiple %d, has_failed_get_token %d, token_not_enough %d", LOG_TAG_SHAPING, profile->id, token_multiple->token_get_multiple, token_multiple->has_failed_get_token, token_multiple->token_not_enough); + LOG_INFO("%s: profile id %s, token_get_multiple %d, has_failed_get_token %d, token_not_enough %d", LOG_TAG_SHAPING, uuid_print_str(profile->uuid), token_multiple->token_get_multiple, token_multiple->has_failed_get_token, token_multiple->token_not_enough); token_multiple->has_failed_get_token = 0; token_multiple->token_not_enough = 0; @@ -476,7 +476,7 @@ static void shaper_deposit_token_add(struct shaping_thread_ctx *ctx, struct shap } break; default: - LOG_ERROR("%s: invalid profile type %d, profile id %d", LOG_TAG_SHAPING, profile->type, profile->id); + LOG_ERROR("%s: invalid profile type %d, profile id %s", LOG_TAG_SHAPING, profile->type, uuid_print_str(profile->uuid)); return; } @@ -508,7 +508,7 @@ static void shaper_token_get_cb(const struct swarmkv_reply *reply, void * cb_arg shaper_global_stat_async_callback_inc(&ctx->thread_global_stat); shaper_global_stat_tconsume_callback_inc(&ctx->thread_global_stat); - LOG_DEBUG("Swarmkv reply type =%d, profile_id %d, direction =%d, integer =%llu",reply->type, profile->id, arg->direction, reply->integer); + LOG_DEBUG("Swarmkv reply type =%d, profile_id %s, direction =%d, integer =%llu",reply->type, uuid_print_str(profile->uuid), arg->direction, reply->integer); if (reply->type != SWARMKV_REPLY_INTEGER) { shaper_global_stat_async_tconsume_failed_inc(&ctx->thread_global_stat); @@ -599,7 +599,7 @@ static int shaper_deposit_token_get(struct shaping_profile_info *profile, int re token_multiple = TOKEN_MULTIPLE_DEFAULT; break; default: - LOG_ERROR("%s: 
invalid profile type %d, profile id %d", LOG_TAG_SHAPING, profile->type, profile->id); + LOG_ERROR("%s: invalid profile type %d, profile id %s", LOG_TAG_SHAPING, profile->type, uuid_print_str(profile->uuid)); return 0; } @@ -627,7 +627,7 @@ static void shaper_profile_hash_node_refresh(struct shaping_thread_ctx *ctx, str return; } - struct shaping_profile *profile = shaper_maat_profile_get(ctx, pf_hash_node->id); + struct shaping_profile *profile = shaper_maat_profile_get(ctx, pf_hash_node->uuid); if (profile) { pf_hash_node->limit_direction = profile->limit_direction; pf_hash_node->aqm_type = profile->aqm_type; @@ -641,7 +641,7 @@ static void shaper_token_get_from_profile(struct shaping_thread_ctx *ctx, struct { struct shaping_tconsume_cb_arg *arg = NULL; struct shaping_profile_hash_node *pf_hash_node = pf_info->hash_node; - char key[32] = {0}; + char key[64] = {0}; if (pf_hash_node->tconsume_ref_cnt > 0) { return; @@ -650,9 +650,9 @@ static void shaper_token_get_from_profile(struct shaping_thread_ctx *ctx, struct shaper_profile_hash_node_refresh(ctx, pf_hash_node, curr_timespec); if (pf_hash_node->limit_direction == PROFILE_LIMIT_DIRECTION_BIDIRECTION) { - snprintf(key, sizeof(key), "tsg-shaping-%d-bidirectional", pf_info->id); + snprintf(key, sizeof(key), "tsg-shaping-%s-bidirectional", uuid_print_str(pf_info->uuid)); } else { - snprintf(key, sizeof(key), "tsg-shaping-%d-%s", pf_info->id, dir == SHAPING_DIR_OUT ? "outgoing" : "incoming"); + snprintf(key, sizeof(key), "tsg-shaping-%s-%s", uuid_print_str(pf_info->uuid), dir == SHAPING_DIR_OUT ? "outgoing" : "incoming"); } arg = (struct shaping_tconsume_cb_arg *)calloc(1, sizeof(struct shaping_tconsume_cb_arg)); @@ -769,9 +769,9 @@ static int shaper_profile_is_priority_blocked(struct shaping_thread_ctx *ctx, st shaper_global_stat_async_invoke_inc(&ctx->thread_global_stat); shaper_global_stat_hmget_invoke_inc(&ctx->thread_global_stat); if (direction == SHAPING_DIR_IN) { - swarmkv_async_command(ctx->swarmkv_db, shaper_queue_len_get_cb, arg, SWARMKV_IN_QUEUE_LEN_GET_CMD, profile->id); + swarmkv_async_command(ctx->swarmkv_db, shaper_queue_len_get_cb, arg, SWARMKV_IN_QUEUE_LEN_GET_CMD, uuid_print_str(profile->uuid)); } else { - swarmkv_async_command(ctx->swarmkv_db, shaper_queue_len_get_cb, arg, SWARMKV_OUT_QUEUE_LEN_GET_CMD, profile->id); + swarmkv_async_command(ctx->swarmkv_db, shaper_queue_len_get_cb, arg, SWARMKV_OUT_QUEUE_LEN_GET_CMD, uuid_print_str(profile->uuid)); } for (int i = 0; i < priority; i++) { @@ -794,14 +794,15 @@ void shaper_profile_hash_node_set(struct shaping_thread_ctx *ctx, struct shaping { if (profile->hash_node == NULL) { struct shaping_profile_hash_node *hash_node = NULL; - HASH_FIND_INT(thread_sp_hashtbl, &profile->id, hash_node); + + HASH_FIND(hh, thread_sp_hashtbl, &(profile->uuid), sizeof(uuid_t), hash_node); if (hash_node) { profile->hash_node = hash_node; } else { profile->hash_node = (struct shaping_profile_hash_node*)calloc(1, sizeof(struct shaping_profile_hash_node)); - profile->hash_node->id = profile->id; + uuid_copy(profile->hash_node->uuid, profile->uuid); profile->hash_node->token_multiple.token_get_multiple = TOKEN_MULTIPLE_DEFAULT; - HASH_ADD_INT(thread_sp_hashtbl, id, profile->hash_node); + HASH_ADD(hh, thread_sp_hashtbl, uuid, sizeof(uuid_t), profile->hash_node); timeout_init(&profile->hash_node->timeout_handle, TIMEOUT_ABS); timeouts_add(ctx->expires, &profile->hash_node->timeout_handle, time(NULL) + SHAPING_STAT_REFRESH_INTERVAL_SEC); } @@ -841,7 +842,7 @@ static int shaper_token_consume(struct 
shaping_thread_ctx *ctx, struct shaping_f time_t curr_time = time(NULL); if (curr_time - sf->check_rule_time >= ctx->conf.check_rule_enable_interval_sec) { sf->check_rule_time = curr_time; - if (shaper_rule_is_enabled(ctx, rule->id) != 1) { + if (shaper_rule_is_enabled(ctx, rule->uuid) != 1) { rule->is_enabled = 0; return SHAPER_TOKEN_GET_PASS;//rule is disabled, don't need to get token and forward packet } else { @@ -1381,7 +1382,8 @@ static void shaper_datapath_telemetry_info_append(struct shaping_marsio_info *ma for (int i= 0; i < sf->rule_num; i++) { rule = &sf->matched_rule_infos[i]; - len += snprintf(datapath_telemetry_str + len, sizeof(datapath_telemetry_str) - len, "rule_id=%d, primary_pf_id=%d", rule->id, rule->primary.id); + len += snprintf(datapath_telemetry_str + len, sizeof(datapath_telemetry_str) - len, "rule_id=%s, ", uuid_print_str(rule->uuid)); + len += snprintf(datapath_telemetry_str + len, sizeof(datapath_telemetry_str) - len, "primary_pf_id=%s", uuid_print_str(rule->primary.uuid)); if (rule->borrowing_num > 0) { len += snprintf(datapath_telemetry_str + len, sizeof(datapath_telemetry_str) - len, ", borrow_pf_ids:["); @@ -1390,7 +1392,7 @@ static void shaper_datapath_telemetry_info_append(struct shaping_marsio_info *ma if (j != 0) { len += snprintf(datapath_telemetry_str + len, sizeof(datapath_telemetry_str) - len, ","); } - len += snprintf(datapath_telemetry_str + len, sizeof(datapath_telemetry_str) - len, "%d", rule->borrowing[j].id); + len += snprintf(datapath_telemetry_str + len, sizeof(datapath_telemetry_str) - len, "%s", rule->borrowing[j].uuid); } if (rule->borrowing_num > 0) { len += snprintf(datapath_telemetry_str + len, sizeof(datapath_telemetry_str) - len, "]"); @@ -1556,7 +1558,7 @@ void shaping_engine_destroy(struct shaping_ctx *ctx) if (ctx) { shaper_swarmkv_destroy(ctx->swarmkv_db); - shaper_maat_destroy(ctx->maat_info); + shaper_maat_destroy(); shaper_marsio_destroy(ctx->marsio_info); shaper_stat_destroy(ctx->stat); shaper_global_stat_destroy(ctx->global_stat); @@ -1598,8 +1600,7 @@ struct shaping_ctx *shaping_engine_init() } /*init maat*/ - ctx->maat_info = shaper_maat_init("SHAPING"); - if (ctx->maat_info == NULL) { + if (shaper_maat_init("SHAPING") < 0) { goto ERROR; } @@ -1627,7 +1628,6 @@ struct shaping_ctx *shaping_engine_init() ctx->thread_ctx[i].stat = ctx->stat; ctx->thread_ctx[i].global_stat = ctx->global_stat; ctx->thread_ctx[i].session_table = session_table_create(); - ctx->thread_ctx[i].maat_info = ctx->maat_info; ctx->thread_ctx[i].marsio_info = ctx->marsio_info; ctx->thread_ctx[i].swarmkv_db = ctx->swarmkv_db; ctx->thread_ctx[i].expires = timeouts_open(0, &error); diff --git a/shaping/src/shaper_aqm.cpp b/shaping/src/shaper_aqm.cpp index 1de33e9..bdfa443 100644 --- a/shaping/src/shaper_aqm.cpp +++ b/shaping/src/shaper_aqm.cpp @@ -5,7 +5,7 @@ #include "shaper_aqm.h" thread_local unsigned int seed = 0; -int shaper_aqm_blue_need_drop(int profile_id, struct shaper_aqm_blue_para *para, int curr_queue_len) +int shaper_aqm_blue_need_drop(uuid_t profile_uuid, struct shaper_aqm_blue_para *para, int curr_queue_len) { time_t curr_time; @@ -17,7 +17,7 @@ int shaper_aqm_blue_need_drop(int profile_id, struct shaper_aqm_blue_para *para, para->probability = (para->probability - BLUE_DECREMENT) >= 0 ? 
(para->probability - BLUE_DECREMENT) : 0; } - LOG_INFO("%s: profile id: %d blue probability update to %d", LOG_TAG_SHAPING, profile_id, para->probability); + LOG_INFO("%s: profile id: %s blue probability update to %d", LOG_TAG_SHAPING, uuid_print_str(profile_uuid), para->probability); } if (rand_r(&seed) % BLUE_PROBABILITY_MAX < para->probability) { @@ -27,12 +27,12 @@ int shaper_aqm_blue_need_drop(int profile_id, struct shaper_aqm_blue_para *para, return 0; } -int shaper_aqm_codel_need_drop(int profile_id, struct shaper_aqm_codel_para *para, unsigned long long curr_time_ms, unsigned long long latency_ms) +int shaper_aqm_codel_need_drop(uuid_t profile_uuid, struct shaper_aqm_codel_para *para, unsigned long long curr_time_ms, unsigned long long latency_ms) { if (latency_ms < CODEL_MAX_LATENCY) { if (para->state != CODEL_STATE_NORMAL) { para->state = CODEL_STATE_NORMAL; - LOG_INFO("%s: profile id: %d codel enter state CODEL_STATE_NORMAL, last DROPPING_PHASE drop count %d", LOG_TAG_SHAPING, profile_id, para->drop_count); + LOG_INFO("%s: profile id: %s codel enter state CODEL_STATE_NORMAL, last DROPPING_PHASE drop count %d", LOG_TAG_SHAPING, uuid_print_str(profile_uuid), para->drop_count); } return 0; @@ -43,7 +43,7 @@ int shaper_aqm_codel_need_drop(int profile_id, struct shaper_aqm_codel_para *par case CODEL_STATE_NORMAL: para->start_drop_time_ms = curr_time_ms + CODEL_DROP_INTERVAL; para->state = CODEL_STATE_DROPPING_TIMER; - LOG_INFO("%s: profile id: %d codel enter state CODEL_STATE_DROPPING_TIMER", LOG_TAG_SHAPING, profile_id); + LOG_INFO("%s: profile id: %s codel enter state CODEL_STATE_DROPPING_TIMER", LOG_TAG_SHAPING, uuid_print_str(profile_uuid)); break; case CODEL_STATE_DROPPING_TIMER: if (curr_time_ms >= para->start_drop_time_ms) { @@ -51,7 +51,7 @@ int shaper_aqm_codel_need_drop(int profile_id, struct shaper_aqm_codel_para *par para->drop_count = 1; para->next_drop_time_ms = curr_time_ms + CODEL_DROP_INTERVAL / sqrt(para->drop_count); ret = 1; - LOG_INFO("%s: profile id: %d codel enter state CODEL_STATE_DROPPING_PHASE", LOG_TAG_SHAPING, profile_id); + LOG_INFO("%s: profile id: %s codel enter state CODEL_STATE_DROPPING_PHASE", LOG_TAG_SHAPING, uuid_print_str(profile_uuid)); } break; case CODEL_STATE_DROPPING_PHASE: @@ -69,14 +69,14 @@ int shaper_aqm_codel_need_drop(int profile_id, struct shaper_aqm_codel_para *par } -static int shaper_aqm_have_processed(struct shaping_packet_wrapper *pkt_wrapper, int profile_id) +static int shaper_aqm_have_processed(struct shaping_packet_wrapper *pkt_wrapper, uuid_t profile_uuid) { int i = 0; for (i = 0; i < SHAPING_REF_PROFILE_NUM_MAX; i++) { - if (pkt_wrapper->aqm_processed_pf_ids[i] == profile_id) { + if (uuid_compare(profile_uuid, pkt_wrapper->aqm_processed_pf_uuids[i]) == 0) { return 1; - } else if (pkt_wrapper->aqm_processed_pf_ids[i] == 0) { + } else if (uuid_is_null(pkt_wrapper->aqm_processed_pf_uuids[i])) { break; } } @@ -84,13 +84,13 @@ static int shaper_aqm_have_processed(struct shaping_packet_wrapper *pkt_wrapper, return 0; } -static void shaper_aqm_mark_processed(struct shaping_packet_wrapper *pkt_wrapper, int profile_id) +static void shaper_aqm_mark_processed(struct shaping_packet_wrapper *pkt_wrapper, uuid_t profile_uuid) { int i = 0; for (i = 0; i < SHAPING_REF_PROFILE_NUM_MAX; i++) { - if (pkt_wrapper->aqm_processed_pf_ids[i] == 0) { - pkt_wrapper->aqm_processed_pf_ids[i] = profile_id; + if (uuid_is_null(pkt_wrapper->aqm_processed_pf_uuids[i])) { + uuid_copy(pkt_wrapper->aqm_processed_pf_uuids[i], profile_uuid); break; } } @@ -105,23 
+105,23 @@ int shaper_aqm_need_drop(struct shaping_profile_info *profile, struct shaping_pa return 0; } - if (shaper_aqm_have_processed(pkt_wrapper, profile->id)) { + if (shaper_aqm_have_processed(pkt_wrapper, profile->uuid)) { return 0; } switch (profile->hash_node->aqm_type) { case AQM_TYPE_BLUE: - ret = shaper_aqm_blue_need_drop(profile->id, &profile->hash_node->aqm_blue_para, profile->hash_node->queue_len[profile->priority][dir]); + ret = shaper_aqm_blue_need_drop(profile->uuid, &profile->hash_node->aqm_blue_para, profile->hash_node->queue_len[profile->priority][dir]); break; case AQM_TYPE_CODEL: curr_time_ms = curr_time->tv_sec * MILLI_SECONDS_PER_SEC + curr_time->tv_nsec / NANO_SECONDS_PER_MILLI_SEC; - ret = shaper_aqm_codel_need_drop(profile->id, &profile->hash_node->aqm_codel_para, curr_time_ms, latency_us / 1000); + ret = shaper_aqm_codel_need_drop(profile->uuid, &profile->hash_node->aqm_codel_para, curr_time_ms, latency_us / 1000); break; default: break; } - shaper_aqm_mark_processed(pkt_wrapper, profile->id); + shaper_aqm_mark_processed(pkt_wrapper, profile->uuid); return ret; }
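Related to the AQM bookkeeping above: elsewhere in this commit (shaper_profile_hash_node_set in shaper.cpp earlier in the diff, and the dummy_swarmkv.cpp test stub below) the per-profile hash tables switch from HASH_FIND_INT/HASH_ADD_INT to the generic uthash macros keyed by the raw 16-byte uuid_t. A condensed sketch of that idiom (struct and function names are illustrative):

    #include <stdlib.h>
    #include <uuid/uuid.h>
    #include "uthash.h"

    struct profile_node {
        uuid_t uuid;            // key: the 16 raw bytes, not the string form
        int queue_len;
        UT_hash_handle hh;
    };

    static struct profile_node *g_profiles = NULL;

    static struct profile_node *profile_node_get_or_add(uuid_t uuid)
    {
        struct profile_node *node = NULL;

        HASH_FIND(hh, g_profiles, uuid, sizeof(uuid_t), node);     // lookup by key bytes
        if (!node) {
            node = (struct profile_node *)calloc(1, sizeof(*node));
            uuid_copy(node->uuid, uuid);
            HASH_ADD(hh, g_profiles, uuid, sizeof(uuid_t), node);  // 'uuid' names the key field
        }
        return node;
    }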
\ No newline at end of file diff --git a/shaping/src/shaper_maat.cpp b/shaping/src/shaper_maat.cpp index ea91a1d..e74c280 100644 --- a/shaping/src/shaper_maat.cpp +++ b/shaping/src/shaper_maat.cpp @@ -15,7 +15,7 @@ #include "utils.h" #define SHAPING_STREAM_TIMEOUT 3600 -#define SHAPING_RULE_TABLE_NAME "TRAFFIC_SHAPING_COMPILE" +#define SHAPING_RULE_TABLE_NAME "TRAFFIC_SHAPING_RULE" #define SHAPING_PROFILE_TABLE_NAME "TRAFFIC_SHAPING_PROFILE" enum input_mode @@ -78,16 +78,16 @@ static int dscp_value_to_priority[DSCP_VALUE_MAX] = {DSCP_CLASS_DF, DSCP_CLASS_M struct maat *g_maat_instance = NULL; -void shaper_rule_ex_new(const char *table_name, int table_id, const char *key, const char *table_line, void **ad, long argl, void *argp) +void shaper_rule_ex_new(const char *table_name, const char *key, const char *table_line, void **ad, long argl, void *argp) { struct shaping_rule *s_rule; cJSON *json=NULL; cJSON *tmp_obj = NULL; + cJSON *action_para_obj = NULL; cJSON *tmp_array_obj = NULL; cJSON *dscp_obj = NULL; int array_size; - char user_region[1024] = {0}; - int i, ret; + int i; if (strncmp(table_name, SHAPING_RULE_TABLE_NAME, strlen(table_name)) != 0) { return; @@ -95,77 +95,80 @@ void shaper_rule_ex_new(const char *table_name, int table_id, const char *key, c s_rule = (struct shaping_rule*)calloc(1, sizeof(struct shaping_rule)); - ret = sscanf(table_line, "%d\t%*d\t%*d\t%*d\t%*d\t%*s\t%s\t", &s_rule->id, user_region); - if (ret != 2) { - LOG_ERROR("%s: sscanf parse rule failed for table line %s", LOG_TAG_MAAT, table_line); + json = cJSON_Parse(table_line); + if (!json) { + LOG_ERROR("%s: json parse rule failed for table line %s", LOG_TAG_MAAT, table_line); goto END; } - json = cJSON_Parse(user_region); - if (!json) {//required - LOG_ERROR("%s: json parse rule failed for table line %s", LOG_TAG_MAAT, table_line); + tmp_obj = cJSON_GetObjectItem(json, "uuid"); + if (!tmp_obj) { + LOG_ERROR("%s: json parse uuid failed for table line %s", LOG_TAG_MAAT, table_line); + goto END; + } + uuid_parse(tmp_obj->valuestring, s_rule->uuid); + + action_para_obj = cJSON_GetObjectItem(json, "action_parameter"); + if (!action_para_obj) { + LOG_ERROR("%s: json parse action_parameter failed for table line %s", LOG_TAG_MAAT, table_line); goto END; } - tmp_obj = cJSON_GetObjectItem(json, "vsys_id"); + tmp_obj = cJSON_GetObjectItem(action_para_obj, "vsys_id"); if (!tmp_obj) { LOG_ERROR("%s: json parse vsys_id failed for table line %s", LOG_TAG_MAAT, table_line); goto END; } s_rule->vsys_id = tmp_obj->valueint; - tmp_obj = cJSON_GetObjectItem(json, "priority"); + tmp_obj = cJSON_GetObjectItem(action_para_obj, "priority"); if (!tmp_obj) { LOG_ERROR("%s: json parse priority failed for table line %s", LOG_TAG_MAAT, table_line); goto END; } s_rule->priority = tmp_obj->valueint; - tmp_obj = cJSON_GetObjectItem(json, "fair_factor"); + tmp_obj = cJSON_GetObjectItem(action_para_obj, "fair_factor"); if (!tmp_obj) { - LOG_ERROR("%s: json parse fair-factor failed for table line %s", LOG_TAG_MAAT, table_line); + LOG_ERROR("%s: json parse fair_factor failed for table line %s", LOG_TAG_MAAT, table_line); goto END; } s_rule->fair_factor = tmp_obj->valueint; - //dscp_marking - tmp_obj = cJSON_GetObjectItem(json, "dscp_marking"); - if (!tmp_obj) { + dscp_obj = cJSON_GetObjectItem(action_para_obj, "dscp_marking"); + if (!dscp_obj) { LOG_ERROR("%s: json parse dscp_marking failed for table line %s", LOG_TAG_MAAT, table_line); goto END; } - dscp_obj = cJSON_GetObjectItem(tmp_obj, "enabled"); - if (dscp_obj && dscp_obj->valueint == 1) { - 
dscp_obj = cJSON_GetObjectItem(tmp_obj, "dscp_value"); - if (dscp_obj && dscp_obj->valueint < DSCP_VALUE_MAX && dscp_value_to_priority[dscp_obj->valueint] != DSCP_CLASS_MAX) { - s_rule->dscp_enable = 1; - s_rule->dscp_value = dscp_obj->valueint; + tmp_obj = cJSON_GetObjectItem(dscp_obj, "enabled"); + if (tmp_obj && tmp_obj->valueint == 1) { + s_rule->dscp_enable = 1; + tmp_obj = cJSON_GetObjectItem(dscp_obj, "dscp_value"); + if (tmp_obj && tmp_obj->valueint < DSCP_VALUE_MAX && dscp_value_to_priority[tmp_obj->valueint] != DSCP_CLASS_MAX) { + s_rule->dscp_value = tmp_obj->valueint; } else { LOG_ERROR("%s: json parse dscp_value wrong for table line %s", LOG_TAG_MAAT, table_line); goto END; } } - //profile_chain - tmp_obj = cJSON_GetObjectItem(json, "profile_chain"); - if (!tmp_obj) {//required + tmp_array_obj = cJSON_GetObjectItem(action_para_obj, "profile_chain"); + if (!tmp_array_obj) { LOG_ERROR("%s: json parse profile_chain failed for table line %s", LOG_TAG_MAAT, table_line); goto END; } - - array_size = cJSON_GetArraySize(tmp_obj); + array_size = cJSON_GetArraySize(tmp_array_obj); if (array_size < 1) { LOG_ERROR("%s: json parse profile_chain empty for table line %s", LOG_TAG_MAAT, table_line); goto END; } - - tmp_array_obj = cJSON_GetArrayItem(tmp_obj, 0); - s_rule->primary_pf_id = tmp_array_obj->valueint; + tmp_obj = cJSON_GetArrayItem(tmp_array_obj, 0); + uuid_parse(tmp_obj->valuestring, s_rule->primary_pf_uuid); s_rule->borrow_pf_num = array_size - 1; for (i = 1; i < array_size; i++) { - tmp_array_obj = cJSON_GetArrayItem(tmp_obj, i); - s_rule->borrow_pf_id_array[i - 1] = tmp_array_obj->valueint; + tmp_obj = cJSON_GetArrayItem(tmp_array_obj, i); + uuid_parse(tmp_obj->valuestring, s_rule->borrow_pf_uuid_array[i - 1]); } END: @@ -176,7 +179,7 @@ END: return; } -void shaper_rule_ex_dup(int table_id, void **to, void **from, long argl, void *argp) +void shaper_rule_ex_dup(const char *table_name, void **to, void **from, long argl, void *argp) { if (*from == NULL) { return; @@ -187,7 +190,7 @@ void shaper_rule_ex_dup(int table_id, void **to, void **from, long argl, void *a return; } -void shaper_rule_ex_free(int table_id, void **ad, long argl, void *argp) +void shaper_rule_ex_free(const char *table_name, void **ad, long argl, void *argp) { if (*ad == NULL) { return; @@ -199,20 +202,16 @@ void shaper_rule_ex_free(int table_id, void **ad, long argl, void *argp) return; } -void shaper_profile_ex_new(const char *table_name, int table_id, const char *key, const char *table_line, void **ad, long argl, void *argp) +void shaper_profile_ex_new(const char *table_name, const char *key, const char *table_line, void **ad, long argl, void *argp) { struct shaping_profile *s_pf; cJSON *json=NULL; cJSON *tmp_array_obj = NULL; cJSON *tmp_obj = NULL; - char profile_type[64] = {0}; - char type_arg[64] = {0}; - char limits[128] = {0}; - char aqm_options[64] = {0}; - char volume_based_shaping[64] = {0}; + cJSON *type_arg_obj = NULL; + cJSON *aqm_options_obj = NULL; + cJSON *limits_obj = NULL; int limit_bandwidth; - int array_size, i; - int ret; if (strncmp(table_name, SHAPING_PROFILE_TABLE_NAME, strlen(table_name)) != 0) { return; @@ -220,26 +219,43 @@ void shaper_profile_ex_new(const char *table_name, int table_id, const char *key s_pf = (struct shaping_profile*)calloc(1, sizeof(struct shaping_profile)); - ret = sscanf(table_line, "%d\t%63s\t%63s\t%127s\t%63s\t%63s\t%d", - &s_pf->id, profile_type, type_arg, limits, aqm_options, volume_based_shaping, &s_pf->valid); - if (ret != 7) { - LOG_ERROR("%s: sscanf parse 
failed for profile line %s", LOG_TAG_MAAT, table_line); + json = cJSON_Parse(table_line); + if (!json) { + LOG_ERROR("%s: json parse profile failed for table line %s", LOG_TAG_MAAT, table_line); goto END; } - if (strcmp(profile_type, "generic") == 0) { + tmp_obj = cJSON_GetObjectItem(json, "uuid"); + if (!tmp_obj) { + LOG_ERROR("%s: json parse uuid failed for table line %s", LOG_TAG_MAAT, table_line); + goto END; + } + uuid_parse(tmp_obj->valuestring, s_pf->uuid); + + //parse profile type + tmp_obj = cJSON_GetObjectItem(json, "type"); + if (!tmp_obj) { + LOG_ERROR("%s: json parse type failed for table line %s", LOG_TAG_MAAT, table_line); + goto END; + } + type_arg_obj = cJSON_GetObjectItem(json, "type_argument"); + if (!type_arg_obj) { + LOG_ERROR("%s: json parse type_argument failed for table line %s", LOG_TAG_MAAT, table_line); + goto END; + } + if (strcmp(tmp_obj->valuestring, "generic") == 0) { s_pf->type = PROFILE_TYPE_GENERIC; - } else if (strcmp(profile_type, "fair_share") == 0) { - if (strcmp(type_arg, "host_fairness") == 0) { + } else if (strcmp(tmp_obj->valuestring, "fair_share") == 0) { + if (strcmp(type_arg_obj->valuestring, "host_fairness") == 0) { s_pf->type = PROFILE_TYPE_HOST_FARINESS; - } else if (strcmp(type_arg, "max_min_host_fairness") == 0) { + } else if (strcmp(type_arg_obj->valuestring, "max_min_host_fairness") == 0) { s_pf->type = PROFILE_TYPE_MAX_MIN_HOST_FAIRNESS; } else { LOG_ERROR("%s: profile type argument wrong for profile line %s", LOG_TAG_MAAT, table_line); goto END; } - } else if (strcmp(profile_type, "split_by") == 0) { - if (strcmp(type_arg, "local_host") == 0) { + } else if (strcmp(tmp_obj->valuestring, "split_by") == 0) { + if (strcmp(type_arg_obj->valuestring, "local_host") == 0) { s_pf->type = PROFILE_TYPE_SPLIT_BY_LOCAL_HOST; } else { LOG_ERROR("%s: profile type argument wrong for profile line %s", LOG_TAG_MAAT, table_line); @@ -251,16 +267,14 @@ void shaper_profile_ex_new(const char *table_name, int table_id, const char *key } //parse aqm options - json = cJSON_Parse(aqm_options); - if (!json) { - LOG_ERROR("%s: json parse profile aqm options failed for profile id %d, line %s", LOG_TAG_MAAT, s_pf->id, table_line); + aqm_options_obj = cJSON_GetObjectItem(json, "aqm_options"); + if (!aqm_options_obj) { + LOG_ERROR("%s: json parse aqm options failed for line %s", LOG_TAG_MAAT, table_line); goto END; } - - - tmp_obj = cJSON_GetObjectItem(json, "algorithm"); + tmp_obj = cJSON_GetObjectItem(aqm_options_obj, "algorithm"); if (!tmp_obj || tmp_obj->type != cJSON_String || !tmp_obj->valuestring) { - LOG_ERROR("%s: json parse algorithm failed for profile id %d, line %s", LOG_TAG_MAAT, s_pf->id, table_line); + LOG_ERROR("%s: json parse aqm algorithm failed for line %s", LOG_TAG_MAAT, table_line); goto END; } if (strncmp(tmp_obj->valuestring, "none", strlen(tmp_obj->valuestring)) == 0) { @@ -270,32 +284,27 @@ void shaper_profile_ex_new(const char *table_name, int table_id, const char *key } else if (strncmp(tmp_obj->valuestring, "codel", strlen(tmp_obj->valuestring)) == 0) { s_pf->aqm_type = AQM_TYPE_CODEL; } else { - LOG_ERROR("%s: json parse aqm type wrong for profile id %d, line %s", LOG_TAG_MAAT, s_pf->id, table_line); + LOG_ERROR("%s: json parse aqm type wrong for line %s", LOG_TAG_MAAT, table_line); goto END; } - - cJSON_Delete(json); //parse limits of profile - json = cJSON_Parse(limits); - if (!json) { - LOG_ERROR("%s: json parse profile limits failed for profile id %d, line %s", LOG_TAG_MAAT, s_pf->id, table_line); + limits_obj = 
cJSON_GetObjectItem(json, "limits"); + if (!limits_obj) { + LOG_ERROR("%s: json parse limits failed for line %s", LOG_TAG_MAAT, table_line); goto END; } - - array_size = cJSON_GetArraySize(json); - for (i = 0; i < array_size; i++) { - tmp_array_obj = cJSON_GetArrayItem(json, i); + cJSON_ArrayForEach(tmp_array_obj, limits_obj) { tmp_obj = cJSON_GetObjectItem(tmp_array_obj, "bandwidth"); if (!tmp_obj) { - LOG_ERROR("%s: json parse limit bandwidth failed for profile id %d, line %s", LOG_TAG_MAAT, s_pf->id, table_line); + LOG_ERROR("%s: json parse limit bandwidth failed for line %s", LOG_TAG_MAAT, table_line); goto END; } limit_bandwidth = tmp_obj->valueint; tmp_obj = cJSON_GetObjectItem(tmp_array_obj, "direction"); if (!tmp_obj) { - LOG_ERROR("%s: json parse limit direction failed for profile id %d, line %s", LOG_TAG_MAAT, s_pf->id, table_line); + LOG_ERROR("%s: json parse limit direction failed for line %s", LOG_TAG_MAAT, table_line); goto END; } @@ -322,7 +331,7 @@ END: return; } -void shaper_profile_ex_dup(int table_id, void **to, void **from, long argl, void *argp) +void shaper_profile_ex_dup(const char *table_name, void **to, void **from, long argl, void *argp) { if (*from == NULL) { return; @@ -333,7 +342,7 @@ void shaper_profile_ex_dup(int table_id, void **to, void **from, long argl, void return; } -void shaper_profile_ex_free(int table_id, void **ad, long argl, void *argp) +void shaper_profile_ex_free(const char *table_name, void **ad, long argl, void *argp) { if (*ad == NULL) { return; @@ -347,7 +356,7 @@ void shaper_profile_ex_free(int table_id, void **ad, long argl, void *argp) void shaper_profile_update(struct shaping_thread_ctx *ctx, struct shaping_profile_info *s_pf_info, struct shaping_profile *s_pf_ex) { - s_pf_info->id = s_pf_ex->id; + uuid_copy(s_pf_info->uuid, s_pf_ex->uuid); s_pf_info->type = s_pf_ex->type; shaper_profile_hash_node_set(ctx, s_pf_info); s_pf_info->hash_node->aqm_type = s_pf_ex->aqm_type; @@ -356,37 +365,40 @@ void shaper_profile_update(struct shaping_thread_ctx *ctx, struct shaping_profil return; } -struct shaping_profile *shaper_maat_profile_get(struct shaping_thread_ctx *ctx, int profile_id) +struct shaping_profile *shaper_maat_profile_get(struct shaping_thread_ctx *ctx, uuid_t profile_uuid) { struct shaping_profile *s_pf = NULL; - char pf_id_key[8] = {0}; + char uuid_str[UUID_STR_LEN] = {0}; - snprintf(pf_id_key, sizeof(pf_id_key), "%d", profile_id); - s_pf = (struct shaping_profile *)maat_plugin_table_get_ex_data(g_maat_instance, ctx->maat_info->profile_table_id, pf_id_key, strlen(pf_id_key)); + uuid_unparse(profile_uuid, uuid_str); + s_pf = (struct shaping_profile *)maat_plugin_table_get_ex_data(g_maat_instance, SHAPING_PROFILE_TABLE_NAME, uuid_str, strlen(uuid_str)); if (!s_pf) { - LOG_ERROR("%s maat_plugin_table_get_ex_data get profile failed for key %s", LOG_TAG_MAAT, pf_id_key); + LOG_ERROR("%s maat_plugin_table_get_ex_data get profile failed for key %s", LOG_TAG_MAAT, uuid_str); } return s_pf; } -static int shaper_rule_update(struct shaping_thread_ctx *ctx, struct shaping_flow *sf, struct shaping_rule_info *s_rule_info, long long rule_compile_id, int *priority_changed) +static int shaper_rule_update(struct shaping_thread_ctx *ctx, struct shaping_flow *sf, struct shaping_rule_info *s_rule_info, uuid_t rule_uuid, int *priority_changed) { struct shaping_rule *s_rule = NULL; struct shaping_profile *s_pf = NULL; + char uuid_str[UUID_STR_LEN] = {0}; - s_rule = (struct shaping_rule*)maat_plugin_table_get_ex_data(g_maat_instance, 
ctx->maat_info->rule_table_id, (char *)&rule_compile_id, sizeof(rule_compile_id)); + uuid_unparse(rule_uuid, uuid_str); + s_rule = (struct shaping_rule*)maat_plugin_table_get_ex_data(g_maat_instance, SHAPING_RULE_TABLE_NAME, uuid_str, strlen(uuid_str)); if (!s_rule) { - LOG_ERROR("%s maat_plugin_table_get_ex_data get rule failed for compile id %lld", LOG_TAG_MAAT, rule_compile_id); + LOG_ERROR("%s maat_plugin_table_get_ex_data get rule failed for rule id %s", LOG_TAG_MAAT, uuid_str); return -1; } - s_rule_info->id = s_rule->id; + + uuid_copy(s_rule_info->uuid, s_rule->uuid); s_rule_info->fair_factor = s_rule->fair_factor; s_rule_info->vsys_id = s_rule->vsys_id; s_rule_info->is_enabled = 1; - s_pf = shaper_maat_profile_get(ctx, s_rule->primary_pf_id); + s_pf = shaper_maat_profile_get(ctx, s_rule->primary_pf_uuid); if (!s_pf) { return -1; } @@ -411,7 +423,7 @@ static int shaper_rule_update(struct shaping_thread_ctx *ctx, struct shaping_flo } for (int i = 0; i < s_rule->borrow_pf_num; i++) { - s_pf = shaper_maat_profile_get(ctx, s_rule->borrow_pf_id_array[i]); + s_pf = shaper_maat_profile_get(ctx, s_rule->borrow_pf_uuid_array[i]); if (!s_pf) { return -1; } @@ -436,9 +448,11 @@ static void shaper_profiles_priority_update(struct shaping_flow *sf) return; } -int shaper_rule_is_enabled(struct shaping_thread_ctx *ctx, long long rule_id) +int shaper_rule_is_enabled(struct shaping_thread_ctx *ctx, uuid_t rule_uuid) { - struct shaping_rule *s_rule = (struct shaping_rule*)maat_plugin_table_get_ex_data(g_maat_instance, ctx->maat_info->rule_table_id, (char *)&rule_id, sizeof(rule_id)); + char uuid_str[UUID_STR_LEN] = {0}; + uuid_unparse(rule_uuid, uuid_str); + struct shaping_rule *s_rule = (struct shaping_rule*)maat_plugin_table_get_ex_data(g_maat_instance, SHAPING_RULE_TABLE_NAME, uuid_str, sizeof(uuid_str)); if (s_rule) { return 1; @@ -447,19 +461,19 @@ int shaper_rule_is_enabled(struct shaping_thread_ctx *ctx, long long rule_id) return 0; } -static int shaper_rules_dup_remove(struct shaping_flow *sf, long long *rule_compile_ids, int rule_num, long long *rule_ids_remove_dup) +static int shaper_rules_dup_remove(struct shaping_flow *sf, uuid_t *rule_uuids, int rule_num, uuid_t *rule_uuids_remove_dup) { int i, j; int rule_num_remove_dup = 0; for (i = 0; i < rule_num; i++) { for (j = 0; j < sf->rule_num; j++) { - if (rule_compile_ids[i] == sf->matched_rule_infos[j].id) { + if (uuid_compare(rule_uuids[i], sf->matched_rule_infos[j].uuid) == 0) { break; } } if (j == sf->rule_num) { - rule_ids_remove_dup[rule_num_remove_dup] = rule_compile_ids[i]; + uuid_copy(rule_uuids_remove_dup[rule_num_remove_dup], rule_uuids[i]); rule_num_remove_dup++; } } @@ -467,10 +481,10 @@ static int shaper_rules_dup_remove(struct shaping_flow *sf, long long *rule_comp return rule_num_remove_dup; } -void shaper_rules_update(struct shaping_thread_ctx *ctx, struct shaping_flow *sf, long long *rule_compile_ids, int rule_num) +void shaper_rules_update(struct shaping_thread_ctx *ctx, struct shaping_flow *sf, uuid_t *rule_uuids, int rule_num) { int priority_changed = 0; - long long rule_ids_remove_dup[SHAPING_RULE_NUM_MAX] = {0}; + uuid_t rule_uuids_remove_dup[SHAPING_RULE_NUM_MAX]; int rule_num_remove_dup = 0; int old_rule_num = sf->rule_num; @@ -483,7 +497,7 @@ void shaper_rules_update(struct shaping_thread_ctx *ctx, struct shaping_flow *sf return; } - rule_num_remove_dup = shaper_rules_dup_remove(sf, rule_compile_ids, rule_num, rule_ids_remove_dup); + rule_num_remove_dup = shaper_rules_dup_remove(sf, rule_uuids, rule_num, 
rule_uuids_remove_dup); if (rule_num_remove_dup == 0) { return; } @@ -498,7 +512,7 @@ void shaper_rules_update(struct shaping_thread_ctx *ctx, struct shaping_flow *sf } for (int i = 0; i < rule_num_remove_dup; i++) { - if (shaper_rule_update(ctx, sf, &sf->matched_rule_infos[sf->rule_num], rule_ids_remove_dup[i], &priority_changed) == 0) { + if (shaper_rule_update(ctx, sf, &sf->matched_rule_infos[sf->rule_num], rule_uuids_remove_dup[i], &priority_changed) == 0) { sf->rule_num++; } } @@ -526,18 +540,15 @@ static int shaper_maat_config_load(struct shaper_maat_config *conf) return 0; } -struct shaping_maat_info* shaper_maat_init(const char *instance_name) +int shaper_maat_init(const char *instance_name) { struct maat_options *opts; - struct shaping_maat_info *maat_info; struct shaper_maat_config conf; int ret; if (shaper_maat_config_load(&conf) < 0) { - return NULL; + return -1; } - - maat_info = (struct shaping_maat_info *)calloc(1, sizeof(struct shaping_maat_info)); opts = maat_options_new(); @@ -578,17 +589,6 @@ struct shaping_maat_info* shaper_maat_init(const char *instance_name) goto ERROR; } - maat_info->rule_table_id = maat_get_table_id(g_maat_instance, SHAPING_RULE_TABLE_NAME); - if (maat_info->rule_table_id < 0) { - LOG_ERROR("%s: shaping maat register table %s failed", LOG_TAG_MAAT, SHAPING_RULE_TABLE_NAME); - goto ERROR; - } - maat_info->profile_table_id = maat_get_table_id(g_maat_instance, SHAPING_PROFILE_TABLE_NAME); - if (maat_info->profile_table_id < 0) { - LOG_ERROR("%s: shaping maat register table %s failed", LOG_TAG_MAAT, SHAPING_PROFILE_TABLE_NAME); - goto ERROR; - } - ret = maat_plugin_table_ex_schema_register(g_maat_instance, SHAPING_RULE_TABLE_NAME, shaper_rule_ex_new, shaper_rule_ex_free, shaper_rule_ex_dup, 0, NULL); if (ret < 0) { LOG_ERROR("%s: shaping maat register callback funcs for table %s failed", LOG_TAG_MAAT, SHAPING_RULE_TABLE_NAME); @@ -603,17 +603,14 @@ struct shaping_maat_info* shaper_maat_init(const char *instance_name) LOG_DEBUG("%s: shaping maat init complete", LOG_TAG_MAAT); - return maat_info; + return 0; ERROR: - shaper_maat_destroy(maat_info); - return NULL; + shaper_maat_destroy(); + return -1; } -void shaper_maat_destroy(struct shaping_maat_info *maat_info) +void shaper_maat_destroy() { - if (maat_info) { - free(maat_info); - } if (g_maat_instance) { maat_free(g_maat_instance); } diff --git a/shaping/src/shaper_marsio.cpp b/shaping/src/shaper_marsio.cpp index 357e818..a2c1097 100644 --- a/shaping/src/shaper_marsio.cpp +++ b/shaping/src/shaper_marsio.cpp @@ -265,11 +265,11 @@ int shaper_marsio_ctrl_pkt_data_parse(struct ctrl_pkt_data *ctrl_data, const cha shaping_rule_id_array_size = mpack_node_array_length(tmp_node); ctrl_data->shaping_rule_num = MIN(shaping_rule_id_array_size, SHAPING_RULE_NUM_MAX); for (int i = 0; i < ctrl_data->shaping_rule_num; i++) { - if (mpack_type_uint != mpack_node_type(mpack_node_array_at(tmp_node, i))) { + if (mpack_type_bin != mpack_node_type(mpack_node_array_at(tmp_node, i))) { LOG_ERROR("%s: shaping marsio msgpack shaping rule id type wrong at index %d, type is %d", LOG_TAG_MARSIO, i, mpack_node_type(mpack_node_array_at(tmp_node, i))); goto ERROR; } - ctrl_data->shaping_rule_ids[i] = mpack_node_i64(mpack_node_array_at(tmp_node, i)); + uuid_copy(ctrl_data->shaping_rule_uuids[i], *(uuid_t*)mpack_node_bin_data(mpack_node_array_at(tmp_node, i))); } SUCCESS: diff --git a/shaping/src/shaper_session.cpp b/shaping/src/shaper_session.cpp index f58d55f..8fbb65d 100644 --- a/shaping/src/shaper_session.cpp +++ 
b/shaping/src/shaper_session.cpp @@ -85,17 +85,17 @@ void shaper_session_log_prepare(struct shaping_flow *sf, char **mpack_data, size mpack_build_map(&writer); mpack_write_cstr(&writer, "rule_id"); - mpack_write_i64(&writer, rule_info->id); - LOG_DEBUG("%s: rule id %d", LOG_TAG_SHAPING, rule_info->id); + mpack_write_bin(&writer, (char*)rule_info->uuid, sizeof(uuid_t)); + LOG_DEBUG("%s: rule id %s", LOG_TAG_SHAPING, uuid_print_str(rule_info->uuid)); mpack_write_cstr(&writer, "profile_ids"); mpack_build_array(&writer); - mpack_write_i64(&writer, rule_info->primary.id); - LOG_DEBUG("%s: primary_profile id %d", LOG_TAG_SHAPING, rule_info->primary.id); + mpack_write_bin(&writer, (char*)rule_info->primary.uuid, sizeof(uuid_t)); + LOG_DEBUG("%s: primary_profile id %s", LOG_TAG_SHAPING, uuid_print_str(rule_info->primary.uuid)); for (int j = 0; j < rule_info->borrowing_num; j++) { - mpack_write_i64(&writer, rule_info->borrowing[j].id); - LOG_DEBUG("%s: borrow_profile id %d", LOG_TAG_SHAPING, rule_info->borrowing[j].id); + mpack_write_bin(&writer, (char*)rule_info->borrowing[j].uuid, sizeof(uuid_t)); + LOG_DEBUG("%s: borrow_profile id %s", LOG_TAG_SHAPING, uuid_print_str(rule_info->borrowing[j].uuid)); } mpack_complete_array(&writer);//end build array for profile_ids mpack_complete_map(&writer); @@ -221,7 +221,7 @@ struct shaping_flow* shaper_session_active(struct shaping_thread_ctx *ctx, struc sf = (struct shaping_flow *)node->val_data; } - shaper_rules_update(ctx, sf, ctrl_data->shaping_rule_ids, ctrl_data->shaping_rule_num); + shaper_rules_update(ctx, sf, ctrl_data->shaping_rule_uuids, ctrl_data->shaping_rule_num); shaper_session_log_send(ctx, sf);//send log of rules and profiles when receive new matched rules return sf; diff --git a/shaping/src/shaper_stat.cpp b/shaping/src/shaper_stat.cpp index 6275d93..bf88f41 100644 --- a/shaping/src/shaper_stat.cpp +++ b/shaping/src/shaper_stat.cpp @@ -27,8 +27,8 @@ struct shaper_stat_conf { thread_local struct field tags[TAG_IDX_MAX] = { [TAG_VSYS_ID_IDX] = {.key = "vsys_id", .type = FIELD_VALUE_INTEGER}, - [TAG_RULE_ID_IDX] = {.key = "rule_id", .type = FIELD_VALUE_INTEGER}, - [TAG_PROFILE_ID_IDX] = {.key = "profile_id", .type = FIELD_VALUE_INTEGER}, + [TAG_RULE_ID_IDX] = {.key = "rule_uuid", .type = FIELD_VALUE_CSTRING}, + [TAG_PROFILE_ID_IDX] = {.key = "profile_uuid", .type = FIELD_VALUE_CSTRING}, [TAG_PRIORITY_IDX] = {.key = "priority", .type = FIELD_VALUE_INTEGER}, [TAG_PROFILE_TYPE_IDX] = {.key = "profile_type", .type = FIELD_VALUE_CSTRING} }; @@ -223,13 +223,18 @@ ERROR: return NULL; } -static void shaper_stat_tags_build(int vsys_id, int rule_id, int profile_id, int priority, int profile_type) +static void shaper_stat_tags_build(int vsys_id, uuid_t rule_uuid, uuid_t profile_uuid, int priority, int profile_type) { + static thread_local char rule_uuid_str[UUID_STR_LEN] = {0}; + static thread_local char profile_uuid_str[UUID_STR_LEN] = {0}; + tags[TAG_VSYS_ID_IDX].value_longlong = vsys_id; - tags[TAG_RULE_ID_IDX].value_longlong = rule_id; + uuid_unparse(rule_uuid, rule_uuid_str); + tags[TAG_RULE_ID_IDX].value_str = rule_uuid_str; - tags[TAG_PROFILE_ID_IDX].value_longlong = profile_id; + uuid_unparse(profile_uuid, profile_uuid_str); + tags[TAG_PROFILE_ID_IDX].value_str = profile_uuid_str; tags[TAG_PRIORITY_IDX].value_longlong = priority; @@ -261,8 +266,8 @@ static void shaper_stat_swarmkv_hincrby_cb(const struct swarmkv_reply *reply, vo shaper_global_stat_async_hincrby_failed_inc(&ctx->thread_global_stat); if (arg->retry_cnt >= HINCRBY_RETRY_MAX) { - 
LOG_ERROR("%s: shaping stat hincrby failed after retry %d times for profile id %d priority %d, operate %s queue_len %lld", - LOG_TAG_STAT, arg->retry_cnt, arg->profile_id, arg->priority, arg->dir == SHAPING_DIR_IN ? "in" : "out", arg->queue_len); + LOG_ERROR("%s: shaping stat hincrby failed after retry %d times for profile id %s priority %d, operate %s queue_len %lld", + LOG_TAG_STAT, arg->retry_cnt, uuid_print_str(arg->profile_uuid), arg->priority, arg->dir == SHAPING_DIR_IN ? "in" : "out", arg->queue_len); goto END; } @@ -271,12 +276,12 @@ static void shaper_stat_swarmkv_hincrby_cb(const struct swarmkv_reply *reply, vo shaper_global_stat_async_invoke_inc(&ctx->thread_global_stat);//hincrby failed, retry shaper_global_stat_hincrby_invoke_inc(&ctx->thread_global_stat); - LOG_DEBUG("%s: shaping stat hincrby failed, retry for profile id %d priority %d, operate %s queue_len %lld", LOG_TAG_STAT, arg->profile_id, arg->priority, arg->dir == SHAPING_DIR_IN ? "in" : "out", arg->queue_len); + LOG_DEBUG("%s: shaping stat hincrby failed, retry for profile id %s priority %d, operate %s queue_len %lld", LOG_TAG_STAT, uuid_print_str(arg->profile_uuid), arg->priority, arg->dir == SHAPING_DIR_IN ? "in" : "out", arg->queue_len); if (arg->dir == SHAPING_DIR_IN) { - swarmkv_async_command(ctx->swarmkv_db, shaper_stat_swarmkv_hincrby_cb, arg, "HINCRBY tsg-shaping-%d priority-%d-in %lld", arg->profile_id, arg->priority, arg->queue_len); + swarmkv_async_command(ctx->swarmkv_db, shaper_stat_swarmkv_hincrby_cb, arg, "HINCRBY tsg-shaping-%s priority-%d-in %lld", uuid_print_str(arg->profile_uuid), arg->priority, arg->queue_len); } else { - swarmkv_async_command(ctx->swarmkv_db, shaper_stat_swarmkv_hincrby_cb, arg, "HINCRBY tsg-shaping-%d priority-%d-out %lld", arg->profile_id, arg->priority, arg->queue_len); + swarmkv_async_command(ctx->swarmkv_db, shaper_stat_swarmkv_hincrby_cb, arg, "HINCRBY tsg-shaping-%s priority-%d-out %lld", uuid_print_str(arg->profile_uuid), arg->priority, arg->queue_len); } return; @@ -297,16 +302,16 @@ static void shaper_stat_priority_queue_len_refresh_dir(struct shaping_thread_ctx arg->ctx = ctx; arg->start_time_us = curr_time_us; - arg->profile_id = profile_hash_node->id; + uuid_copy(arg->profile_uuid, profile_hash_node->uuid); arg->priority = priority; arg->dir = direction; arg->queue_len = profile_hash_node->local_queue_len[priority][direction]; shaper_global_stat_async_invoke_inc(&ctx->thread_global_stat); shaper_global_stat_hincrby_invoke_inc(&ctx->thread_global_stat); if (direction == SHAPING_DIR_IN) { - swarmkv_async_command(ctx->swarmkv_db, shaper_stat_swarmkv_hincrby_cb, arg, "HINCRBY tsg-shaping-%d priority-%d-in %lld", arg->profile_id, arg->priority, arg->queue_len); + swarmkv_async_command(ctx->swarmkv_db, shaper_stat_swarmkv_hincrby_cb, arg, "HINCRBY tsg-shaping-%s priority-%d-in %lld", uuid_print_str(arg->profile_uuid), arg->priority, arg->queue_len); } else { - swarmkv_async_command(ctx->swarmkv_db, shaper_stat_swarmkv_hincrby_cb, arg, "HINCRBY tsg-shaping-%d priority-%d-out %lld", arg->profile_id, arg->priority, arg->queue_len); + swarmkv_async_command(ctx->swarmkv_db, shaper_stat_swarmkv_hincrby_cb, arg, "HINCRBY tsg-shaping-%s priority-%d-out %lld", uuid_print_str(arg->profile_uuid), arg->priority, arg->queue_len); } profile_hash_node->local_queue_len[priority][direction] = 0; @@ -362,7 +367,7 @@ static void shaper_stat_profile_metirc_refresh(struct shaping_thread_ctx *ctx, s return; } - shaper_stat_tags_build(rule->vsys_id, rule->id, profile->id, priority, profile_type); 
+ shaper_stat_tags_build(rule->vsys_id, rule->uuid, profile->uuid, priority, profile_type); fieldstat_easy_counter_incrby(stat->counter_instance, thread_id, stat->column_ids[IN_DROP_PKTS_IDX], tags, TAG_IDX_MAX, profile_stat->in.drop_pkts); fieldstat_easy_counter_incrby(stat->counter_instance, thread_id, stat->column_ids[IN_PKTS_IDX], tags, TAG_IDX_MAX, profile_stat->in.pkts); diff --git a/shaping/test/dummy_swarmkv.cpp b/shaping/test/dummy_swarmkv.cpp index 85251b8..7671a52 100644 --- a/shaping/test/dummy_swarmkv.cpp +++ b/shaping/test/dummy_swarmkv.cpp @@ -9,95 +9,81 @@ using namespace std; -#define MAX_STUB_RULE_NUM 8 -#define MAX_STUB_PROFILE_NUM 8 - -#define DEFAULT_AVALIABLE_TOKEN_PER_SEC -1 - -struct stub_token_thread_arg { - int profile_id; - struct swarmkv_reply reply; - swarmkv_on_reply_callback_t *cb; - void *cb_arg; -}; - -struct stub_avaliable_token { - int in_limit_bandwidth; - int out_limit_bandwidth; - int bidirection_limit_bandwidth; +struct profile_priority_queue_len { + uuid_t profile_uuid; + int priority_queue_len[SHAPING_PRIORITY_NUM_MAX][SHAPING_DIR_MAX]; + UT_hash_handle hh; }; -static int profile_priority_len[MAX_STUB_PROFILE_NUM][SHAPING_PRIORITY_NUM_MAX][SHAPING_DIR_MAX]; -static struct stub_avaliable_token pf_curr_avl_token[MAX_STUB_PROFILE_NUM]; -static int pf_async_times[MAX_STUB_PROFILE_NUM]; -vector<struct stub_token_thread_arg> pf_async_thread[MAX_STUB_PROFILE_NUM]; -extern struct shaping_profile pf_array[MAX_STUB_PROFILE_NUM]; +struct profile_priority_queue_len *profile_priority_queue_len_hash = NULL; +extern struct stub_shaping_profile *profiles_hash; -void dummy_swarmkv_init() +void stub_set_token_bucket_avl_per_sec(const char *profile_uuid_str, unsigned int tokens, unsigned char direction, enum shaping_profile_limit_direction limit_direction) { - memset(&pf_array, 0, MAX_STUB_PROFILE_NUM * sizeof(struct shaping_profile)); - memset(&profile_priority_len, 0, MAX_STUB_PROFILE_NUM * SHAPING_PRIORITY_NUM_MAX * SHAPING_DIR_MAX * sizeof(int)); - - for (int i = 0; i < MAX_STUB_PROFILE_NUM; i++) { - pf_curr_avl_token[i].in_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC; - pf_curr_avl_token[i].out_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC; - pf_curr_avl_token[i].bidirection_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC; - pf_array[i].id = i; - pf_array[i].in_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC; - pf_array[i].out_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC; - pf_async_times[i] = 0; - memset(profile_priority_len[i], 0, 10 * sizeof(int)); + uuid_t profile_uuid; + struct stub_shaping_profile *stub_profile = NULL; + unsigned token_bits; + + uuid_parse(profile_uuid_str, profile_uuid); + HASH_FIND(hh, profiles_hash, profile_uuid, sizeof(uuid_t), stub_profile); + if (!stub_profile) { + stub_profile = (struct stub_shaping_profile*)calloc(1, sizeof(struct stub_shaping_profile)); + uuid_copy(stub_profile->profile.uuid, profile_uuid); + HASH_ADD(hh, profiles_hash, profile.uuid, sizeof(uuid_t), stub_profile); } -} - -void * stub_get_token_thread_func(void *data) -{ - struct stub_token_thread_arg *thread_arg; - thread_arg = (struct stub_token_thread_arg*)data; + stub_profile->profile.limit_direction = limit_direction; - thread_arg->cb(&thread_arg->reply, thread_arg->cb_arg); - - return NULL; -} - -void stub_set_token_bucket_avl_per_sec(int profile_id, unsigned int tokens, unsigned char direction, enum shaping_profile_limit_direction limit_direction) -{ - pf_array[profile_id].limit_direction = limit_direction; + if (tokens == 
AVALIABLE_TOKEN_UNLIMITED) { + token_bits = tokens; + } else { + token_bits = tokens * 8; + } if (limit_direction == PROFILE_LIMIT_DIRECTION_BIDIRECTION) { - pf_array[profile_id].bidirection_limit_bandwidth = tokens * 8; - pf_curr_avl_token[profile_id].bidirection_limit_bandwidth = tokens * 8; + stub_profile->profile.bidirection_limit_bandwidth = token_bits; + stub_profile->avaliable_token.bidirection_limit_bandwidth = token_bits; } else { if (direction == SHAPING_DIR_IN) { - pf_array[profile_id].in_limit_bandwidth = tokens * 8; - pf_curr_avl_token[profile_id].in_limit_bandwidth = tokens * 8; + stub_profile->profile.in_limit_bandwidth = token_bits; + stub_profile->avaliable_token.in_limit_bandwidth = token_bits; } else { - pf_array[profile_id].out_limit_bandwidth = tokens * 8; - pf_curr_avl_token[profile_id].out_limit_bandwidth = tokens * 8; + stub_profile->profile.out_limit_bandwidth = token_bits; + stub_profile->avaliable_token.out_limit_bandwidth = token_bits; } } return; } -void stub_refresh_token_bucket(int profile_id) +void stub_refresh_token_bucket(const char *profile_uuid_str) { - pf_curr_avl_token[profile_id].bidirection_limit_bandwidth = pf_array[profile_id].bidirection_limit_bandwidth; - pf_curr_avl_token[profile_id].in_limit_bandwidth = pf_array[profile_id].in_limit_bandwidth; - pf_curr_avl_token[profile_id].out_limit_bandwidth = pf_array[profile_id].out_limit_bandwidth; + uuid_t profile_uuid; + struct stub_shaping_profile *stub_profile = NULL; + + uuid_parse(profile_uuid_str, profile_uuid); + HASH_FIND(hh, profiles_hash, profile_uuid, sizeof(uuid_t), stub_profile); + if (!stub_profile) { + return; + } + + if (stub_profile->profile.limit_direction == PROFILE_LIMIT_DIRECTION_BIDIRECTION) { + stub_profile->avaliable_token.bidirection_limit_bandwidth = stub_profile->profile.bidirection_limit_bandwidth; + } else { + stub_profile->avaliable_token.in_limit_bandwidth = stub_profile->profile.in_limit_bandwidth; + stub_profile->avaliable_token.out_limit_bandwidth = stub_profile->profile.out_limit_bandwidth; + } + return; } -void stub_set_async_token_get_times(int profile_id, int times) +void stub_swarmkv_clear_resource() { - pf_async_times[profile_id] = times; - - if (pf_async_times[profile_id] == 0) { - for (unsigned int i = 0; i < pf_async_thread[profile_id].size(); i++) { - stub_get_token_thread_func(&pf_async_thread[profile_id][i]); - } - pf_async_thread[profile_id].clear(); + struct profile_priority_queue_len *node, *tmp = NULL; + + HASH_ITER(hh, profile_priority_queue_len_hash, node, tmp) { + HASH_DEL(profile_priority_queue_len_hash, node); + free(node); } return; @@ -185,21 +171,30 @@ int swarmkv_options_set_log_level(struct swarmkv_options *opts, int loglevel) static void swarmkv_hincrby_cmd_func(char *cmd_str, swarmkv_on_reply_callback_t * cb, void *cb_arg) { - int profile_id; + uuid_t profile_uuid; + char uuid_str[UUID_STR_LEN] = {0}; int priority; int value; char direction[5] = {0}; enum shaping_packet_dir dir; struct swarmkv_reply *reply = (struct swarmkv_reply*)calloc(1, sizeof(struct swarmkv_reply)); - sscanf(cmd_str, "HINCRBY tsg-shaping-%d priority-%d-%s %d", &profile_id, &priority, direction, &value); + sscanf(cmd_str, "HINCRBY tsg-shaping-%s priority-%d-%s %d", uuid_str, &priority, direction, &value); + uuid_parse(uuid_str, profile_uuid); if (strncmp(direction, "in", 2) == 0) { dir = SHAPING_DIR_IN; } else { dir = SHAPING_DIR_OUT; } - profile_priority_len[profile_id][priority][dir] += value; + struct profile_priority_queue_len *node = NULL; + HASH_FIND(hh, 
profile_priority_queue_len_hash, profile_uuid, sizeof(uuid_t), node); + if (!node) { + node = (struct profile_priority_queue_len*)calloc(1, sizeof(struct profile_priority_queue_len)); + uuid_copy(node->profile_uuid, profile_uuid); + HASH_ADD(hh, profile_priority_queue_len_hash, profile_uuid, sizeof(uuid_t), node); + } + node->priority_queue_len[priority][dir] += value; reply->type = SWARMKV_REPLY_INTEGER; cb(reply, cb_arg); @@ -211,7 +206,8 @@ static void swarmkv_hincrby_cmd_func(char *cmd_str, swarmkv_on_reply_callback_t static void swarmkv_hmget_cmd_func(char *cmd_str, swarmkv_on_reply_callback_t * cb, void *cb_arg) { - int profile_id; + uuid_t profile_uuid; + char uuid_str[UUID_STR_LEN] = {0}; int priority[10]; int ret; int priority_num; @@ -219,10 +215,19 @@ static void swarmkv_hmget_cmd_func(char *cmd_str, swarmkv_on_reply_callback_t * enum shaping_packet_dir dir; struct swarmkv_reply *reply = (struct swarmkv_reply*)calloc(1, sizeof(struct swarmkv_reply)); - ret = sscanf(cmd_str, "HMGET tsg-shaping-%d priority-%d-%s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s", - &profile_id, &priority[0], direction, &priority[1], &priority[2], &priority[3], &priority[4], &priority[5], &priority[6], &priority[7], &priority[8]); + ret = sscanf(cmd_str, "HMGET tsg-shaping-%s priority-%d-%s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s", + uuid_str, &priority[0], direction, &priority[1], &priority[2], &priority[3], &priority[4], &priority[5], &priority[6], &priority[7], &priority[8]); priority_num = ret - 1; + uuid_parse(uuid_str, profile_uuid); + struct profile_priority_queue_len *node = NULL; + HASH_FIND(hh, profile_priority_queue_len_hash, profile_uuid, sizeof(uuid_t), node); + if (!node) { + node = (struct profile_priority_queue_len*)calloc(1, sizeof(struct profile_priority_queue_len)); + uuid_copy(node->profile_uuid, profile_uuid); + HASH_ADD(hh, profile_priority_queue_len_hash, profile_uuid, sizeof(uuid_t), node); + } + if (strncmp(direction, "in", 2) == 0) { dir = SHAPING_DIR_IN; } else { @@ -237,7 +242,7 @@ static void swarmkv_hmget_cmd_func(char *cmd_str, swarmkv_on_reply_callback_t * reply->elements[i]->type = SWARMKV_REPLY_STRING; char tmp_str[128] = {0}; - sprintf(tmp_str, "%d", profile_priority_len[profile_id][priority[i]][dir]); + sprintf(tmp_str, "%d", node->priority_queue_len[priority[i]][dir]); reply->elements[i]->str = (char *)calloc(1, strlen(tmp_str)); memcpy(reply->elements[i]->str, tmp_str, strlen(tmp_str)); reply->elements[i]->len = strlen(tmp_str); @@ -281,59 +286,46 @@ void swarmkv_async_command(struct swarmkv *db, swarmkv_on_reply_callback_t * cb, void swarmkv_tconsume(struct swarmkv * db, const char * key, size_t keylen, long long tokens, swarmkv_on_reply_callback_t *cb, void *cb_arg) { int actual_tokens; - struct stub_token_thread_arg thread_arg; struct swarmkv_reply reply; - int profile_id; char direction[16] = {0}; - - sscanf(key, "tsg-shaping-%d-%15s", &profile_id, direction); + char uuid_str[UUID_STR_LEN] = {0}; + uuid_t profile_uuid; + struct stub_shaping_profile *stub_profile = NULL; + + sscanf(key, "tsg-shaping-%36s-%15s", uuid_str, direction); + uuid_parse(uuid_str, profile_uuid); + HASH_FIND(hh, profiles_hash, profile_uuid, sizeof(uuid_t), stub_profile); + if (!stub_profile) { + return; + } if (strncmp("bidirectional", direction, sizeof(direction)) == 0) { - if 
(pf_curr_avl_token[profile_id].bidirection_limit_bandwidth == DEFAULT_AVALIABLE_TOKEN_PER_SEC) { + if (stub_profile->avaliable_token.bidirection_limit_bandwidth == AVALIABLE_TOKEN_UNLIMITED) { actual_tokens = tokens; } else { - actual_tokens = pf_curr_avl_token[profile_id].bidirection_limit_bandwidth >= tokens ? tokens : 0; - pf_curr_avl_token[profile_id].bidirection_limit_bandwidth -= actual_tokens; + actual_tokens = stub_profile->avaliable_token.bidirection_limit_bandwidth >= tokens ? tokens : 0; + stub_profile->avaliable_token.bidirection_limit_bandwidth -= actual_tokens; } } else if (strncmp("incoming", direction, sizeof(direction)) == 0) { - if (pf_curr_avl_token[profile_id].in_limit_bandwidth == DEFAULT_AVALIABLE_TOKEN_PER_SEC) { + if (stub_profile->avaliable_token.in_limit_bandwidth == AVALIABLE_TOKEN_UNLIMITED) { actual_tokens = tokens; } else { - actual_tokens = pf_curr_avl_token[profile_id].in_limit_bandwidth >= tokens ? tokens : 0; - pf_curr_avl_token[profile_id].in_limit_bandwidth -= actual_tokens; + actual_tokens = stub_profile->avaliable_token.in_limit_bandwidth >= tokens ? tokens : 0; + stub_profile->avaliable_token.in_limit_bandwidth -= actual_tokens; } } else { - if (pf_curr_avl_token[profile_id].out_limit_bandwidth == DEFAULT_AVALIABLE_TOKEN_PER_SEC) { + if (stub_profile->avaliable_token.out_limit_bandwidth == AVALIABLE_TOKEN_UNLIMITED) { actual_tokens = tokens; } else { - actual_tokens = pf_curr_avl_token[profile_id].out_limit_bandwidth >= tokens ? tokens : 0; - pf_curr_avl_token[profile_id].out_limit_bandwidth -= actual_tokens; + actual_tokens = stub_profile->avaliable_token.out_limit_bandwidth >= tokens ? tokens : 0; + stub_profile->avaliable_token.out_limit_bandwidth -= actual_tokens; } } - if (pf_async_times[profile_id] == 0) { - for (unsigned int i = 0; i < pf_async_thread[profile_id].size(); i++) { - stub_get_token_thread_func(&pf_async_thread[profile_id][i]); - } - pf_async_thread[profile_id].clear(); - } - reply.integer = actual_tokens; reply.type = SWARMKV_REPLY_INTEGER; - if (pf_async_times[profile_id] > 0) { - pf_async_times[profile_id]--; - - thread_arg.profile_id = profile_id; - thread_arg.reply = reply; - thread_arg.cb = cb; - thread_arg.cb_arg = cb_arg; - - pf_async_thread[profile_id].push_back(thread_arg); - - } else { - cb(&reply, cb_arg); - } + cb(&reply, cb_arg); return; } diff --git a/shaping/test/gtest_shaper.cpp b/shaping/test/gtest_shaper.cpp index 2f9a1b1..e79bd2d 100644 --- a/shaping/test/gtest_shaper.cpp +++ b/shaping/test/gtest_shaper.cpp @@ -99,7 +99,7 @@ static int judge_packet_eq(struct stub_pkt_queue *expec_queue, struct stub_pkt_q } static void shaping_stat_judge(char *counter_file_line, char *guage_file_line, int counter_json_array_idx, - int guage_json_array_idx, int rule_id, int profile_id, int priority, + int guage_json_array_idx, const char *rule_uuid, const char *profile_uuid, int priority, unsigned long long tx_pkts, unsigned long long tx_bytes, unsigned long long drop_pkts, long long queue_len, long long max_latency, unsigned char direction, char profile_type[]) @@ -127,13 +127,13 @@ static void shaping_stat_judge(char *counter_file_line, char *guage_file_line, i ASSERT_TRUE(tmp_obj != NULL); EXPECT_EQ(tmp_obj->valueint, STUB_TEST_VSYS_ID); - tmp_obj = cJSON_GetObjectItem(json_array_element, "rule_id"); + tmp_obj = cJSON_GetObjectItem(json_array_element, "rule_uuid"); ASSERT_TRUE(tmp_obj != NULL); - EXPECT_EQ(rule_id, tmp_obj->valueint); + EXPECT_STREQ(rule_uuid, tmp_obj->valuestring); - tmp_obj = 
cJSON_GetObjectItem(json_array_element, "profile_id"); + tmp_obj = cJSON_GetObjectItem(json_array_element, "profile_uuid"); ASSERT_TRUE(tmp_obj != NULL); - EXPECT_EQ(profile_id, tmp_obj->valueint); + EXPECT_STREQ(profile_uuid, tmp_obj->valuestring); tmp_obj = cJSON_GetObjectItem(json_array_element, "priority"); ASSERT_TRUE(tmp_obj != NULL); @@ -237,23 +237,24 @@ TEST(single_session, udp_tx_in_order) struct stub_pkt_queue *actual_tx_queue; struct shaping_ctx *ctx = NULL; struct shaping_flow *sf = NULL; - long long rule_id[] = {0}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"}; int priority[] = {1}; int profile_num[] = {1}; - int profile_id[][MAX_REF_PROFILE] = {{0}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}}; TAILQ_INIT(&expec_tx_queue); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); sf = shaping_flow_new(&ctx->thread_ctx[0]); ASSERT_TRUE(sf != NULL); - stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id); - stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1); + uuid_t rule_uuid; + uuid_parse(rule_uuid_strs[0], rule_uuid); + shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1); /**********send packets*********************/ @@ -265,7 +266,7 @@ TEST(single_session, udp_tx_in_order) ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue)); while (!TAILQ_EMPTY(&expec_tx_queue)) {//last 90 delay packets - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); for (int i = 0; i < 20; i++) {//even though invoke polling more than 10 times, there should be only 10 pkts be sent polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -292,11 +293,11 @@ TEST(single_session, udp_tx_in_order) shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_swarmkv_clear_resource(); /*******test statistics***********/ //judge shaping metric - shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 100, 10000, 0, 0, 171000, SHAPING_DIR_OUT, profile_type_primary);//max latency is last 10 pkts + shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 100, 10000, 0, 0, 171000, SHAPING_DIR_OUT, profile_type_primary);//max latency is last 10 pkts //judge shaping global metric shaping_global_stat_judge(global_stat_str, 100, 10000, 0, 0, 0, 0); @@ -316,24 +317,25 @@ TEST(bidirectional, udp_tx_in_order) struct stub_pkt_queue *actual_tx_queue; struct shaping_ctx *ctx = NULL; struct shaping_flow *sf = NULL; - long long rule_id[] = {0}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"}; int priority[] = {1}; int profile_num[] = {1}; - int profile_id[][MAX_REF_PROFILE] = {{0}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}}; TAILQ_INIT(&expec_tx_queue_in); TAILQ_INIT(&expec_tx_queue_out); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); sf = shaping_flow_new(&ctx->thread_ctx[0]); 
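/* Illustrative sketch, not part of the patch: the tests above keep rule and profile
 * identifiers as canonical UUID strings and convert them at the call boundary with
 * libuuid. uuid_parse() returns 0 on success and fills a uuid_t (a 16-byte array),
 * which is why the stub hash tables key on sizeof(uuid_t); uuid_unparse_lower() turns
 * it back into the canonical string that the EXPECT_STREQ-style checks compare. */
#include <assert.h>
#include <string.h>
#include <uuid/uuid.h>

static void uuid_round_trip(void)
{
    uuid_t id;
    char out[UUID_STR_LEN];                       /* 37 bytes: 36 chars + NUL */

    assert(uuid_parse("00000000-0000-0000-0000-000000000001", id) == 0);
    uuid_unparse_lower(id, out);
    assert(strcmp(out, "00000000-0000-0000-0000-000000000001") == 0);
}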
ASSERT_TRUE(sf != NULL); - stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id); - stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_BIDIRECTION); + stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_BIDIRECTION); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1); + uuid_t rule_uuid; + uuid_parse(rule_uuid_strs[0], rule_uuid); + shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1); /*******send packets***********/ send_packets(&ctx->thread_ctx[0], sf, 10, 100, SHAPING_DIR_OUT, &expec_tx_queue_out, 1, 0); @@ -345,12 +347,12 @@ TEST(bidirectional, udp_tx_in_order) ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue_out, actual_tx_queue, 10)); ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue)); - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue_out, actual_tx_queue, 1)); while(!TAILQ_EMPTY(&expec_tx_queue_out)) { - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue_in, actual_tx_queue, 1)); @@ -367,7 +369,7 @@ TEST(bidirectional, udp_tx_in_order) shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_swarmkv_clear_resource(); } @@ -380,24 +382,25 @@ TEST(max_min_host_fairness_profile, udp_tx_in_order) struct stub_pkt_queue *actual_tx_queue; struct shaping_ctx *ctx = NULL; struct shaping_flow *sf = NULL; - long long rule_id[] = {0}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"}; int priority[] = {1}; int profile_num[] = {1}; - int profile_id[][MAX_REF_PROFILE] = {{0}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}}; TAILQ_INIT(&expec_tx_queue); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); sf = shaping_flow_new(&ctx->thread_ctx[0]); ASSERT_TRUE(sf != NULL); - stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id); - stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); - stub_set_profile_type(0, PROFILE_TYPE_MAX_MIN_HOST_FAIRNESS); + stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_profile_type(profile_uuid_strs[0][0], PROFILE_TYPE_MAX_MIN_HOST_FAIRNESS); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1); + uuid_t rule_uuid; + uuid_parse(rule_uuid_strs[0], rule_uuid); + shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1); /**********send packets*********************/ @@ -409,7 +412,7 @@ TEST(max_min_host_fairness_profile, udp_tx_in_order) ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue)); while (!TAILQ_EMPTY(&expec_tx_queue)) {//last 90 delay packets - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); for (int i = 0; i < 20; i++) {//even though invoke polling more than 10 times, there should be only 10 pkts be sent 
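/* (One stub_refresh_token_bucket() call restores a single interval's token budget in the
 * stub, so however many extra polling iterations run in this inner loop, only the 10
 * packets that budget covers can leave the queue per outer-loop pass.) */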
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -436,11 +439,12 @@ TEST(max_min_host_fairness_profile, udp_tx_in_order) shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_clear_resource(); + stub_swarmkv_clear_resource(); /*******test statistics***********/ //judge shaping metric - shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 100, 10000, 0, 0, 171000, SHAPING_DIR_OUT, profile_type_primary);//max latency is last 10 pkts + shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 100, 10000, 0, 0, 171000, SHAPING_DIR_OUT, profile_type_primary);//max latency is last 10 pkts //judge shaping global metric shaping_global_stat_judge(global_stat_str, 100, 10000, 0, 0, 0, 0); @@ -460,24 +464,25 @@ TEST(single_session, tcp_tx_in_order) struct stub_pkt_queue *actual_tx_queue; struct shaping_ctx *ctx = NULL; struct shaping_flow *sf = NULL; - long long rule_id[] = {0}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"}; int priority[] = {1}; int profile_num[] = {1}; - int profile_id[][MAX_REF_PROFILE] = {{0}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}}; TAILQ_INIT(&expec_tx_queue); TAILQ_INIT(&expec_pure_ctl_tx_queue); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); sf = shaping_flow_new(&ctx->thread_ctx[0]); ASSERT_TRUE(sf != NULL); - stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id); - stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1); + uuid_t rule_uuid; + uuid_parse(rule_uuid_strs[0], rule_uuid); + shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1); /*******send packets***********/ send_packets(&ctx->thread_ctx[0], sf, 20, 100, SHAPING_DIR_OUT, &expec_tx_queue, 1, 0); @@ -498,18 +503,18 @@ TEST(single_session, tcp_tx_in_order) shaper_stat_refresh(&ctx->thread_ctx[0], sf, 1); fieldstat_easy_output(ctx->thread_ctx[0].stat->counter_instance, &counter_stat_str, &stat_str_len); fieldstat_easy_output(ctx->thread_ctx[0].stat->guage_instance, &guage_stat_str, &stat_str_len); - shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 20, 2000, 0, 10, 0, SHAPING_DIR_OUT, profile_type_primary);//*test statistics + shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 20, 2000, 0, 10, 0, SHAPING_DIR_OUT, profile_type_primary);//*test statistics free(counter_stat_str); free(guage_stat_str); - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); for (int i = 0; i < 10; i++) { polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); } ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue));//pure ctrl pkts force consume 1000 tokens, current token: -1000--->0, so no pkt can be sent - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); for (int i = 0; i < 11; i++) {//10 pkts which is not pure 
control, first polling request 10 times token, then 10 loops send 10 pkts polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -524,11 +529,12 @@ TEST(single_session, tcp_tx_in_order) shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_clear_resource(); + stub_swarmkv_clear_resource(); /*******test statistics***********/ - shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 30, 3000, 0, 0, 31000, SHAPING_DIR_OUT, profile_type_primary); + shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 30, 3000, 0, 0, 31000, SHAPING_DIR_OUT, profile_type_primary); free(counter_stat_str); free(guage_stat_str); } @@ -544,24 +550,26 @@ TEST(single_session, udp_diff_direction) struct stub_pkt_queue *actual_tx_queue; struct shaping_ctx *ctx = NULL; struct shaping_flow *sf = NULL; - long long rule_id[] = {0}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"}; int priority[] = {1}; int profile_num[] = {1}; - int profile_id[][MAX_REF_PROFILE] = {{0}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}}; TAILQ_INIT(&expec_tx_queue_in); TAILQ_INIT(&expec_tx_queue_out); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); sf = shaping_flow_new(&ctx->thread_ctx[0]); ASSERT_TRUE(sf != NULL); - stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id); - stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], AVALIABLE_TOKEN_UNLIMITED, SHAPING_DIR_IN, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1); + uuid_t rule_uuid; + uuid_parse(rule_uuid_strs[0], rule_uuid); + shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1); /*******send packets***********/ send_packets(&ctx->thread_ctx[0], sf, 10, 100, SHAPING_DIR_OUT, &expec_tx_queue_out, 1, 0); @@ -576,7 +584,7 @@ TEST(single_session, udp_diff_direction) ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue_in, actual_tx_queue, 20)); ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue)); - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); for (int i = 0; i < 22; i++) {//first polling just request token and don't send pkt polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -597,12 +605,13 @@ TEST(single_session, udp_diff_direction) shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_clear_resource(); + stub_swarmkv_clear_resource(); /*******test statistics***********/ - shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 20, 2000, 0, 0, 21000, SHAPING_DIR_OUT, profile_type_primary); - shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 20, 2000, 0, 0, 0, SHAPING_DIR_IN, profile_type_primary); + shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 20, 2000, 0, 0, 21000, SHAPING_DIR_OUT, 
profile_type_primary); + shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 20, 2000, 0, 0, 0, SHAPING_DIR_IN, profile_type_primary); free(counter_stat_str); free(guage_stat_str); } @@ -620,25 +629,28 @@ TEST(single_session, udp_multi_rules) struct stub_pkt_queue *actual_tx_queue; struct shaping_ctx *ctx = NULL; struct shaping_flow *sf = NULL; - long long rule_id[] = {0, 1, 2}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"}; int priority[] = {1, 2, 3}; int profile_num[] = {1, 1, 1}; - int profile_id[][MAX_REF_PROFILE] = {{0}, {1}, {2}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, {"00000000-0000-0000-0000-000000000002"}, {"00000000-0000-0000-0000-000000000003"}}; TAILQ_INIT(&expec_tx_queue); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); sf = shaping_flow_new(&ctx->thread_ctx[0]); ASSERT_TRUE(sf != NULL); - stub_set_matched_shaping_rules(3, rule_id, priority, profile_num, profile_id); - stub_set_token_bucket_avl_per_sec(0, 3000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); - stub_set_token_bucket_avl_per_sec(1, 2000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); - stub_set_token_bucket_avl_per_sec(2, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_matched_shaping_rules(3, rule_uuid_strs, priority, profile_num, profile_uuid_strs); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 3000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[1][0], 2000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[2][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 3); + uuid_t rule_uuids[3]; + for (int i = 0; i < 3; i++) { + uuid_parse(rule_uuid_strs[i], rule_uuids[i]); + } + shaper_rules_update(&ctx->thread_ctx[0], sf, rule_uuids, 3); /*******send packets***********/ send_packets(&ctx->thread_ctx[0], sf, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue, 5, 0); @@ -648,9 +660,9 @@ TEST(single_session, udp_multi_rules) ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue, actual_tx_queue, 10)); while (!TAILQ_EMPTY(&expec_tx_queue)) {//last 90 delay packets - stub_refresh_token_bucket(0); - stub_refresh_token_bucket(1); - stub_refresh_token_bucket(2); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); + stub_refresh_token_bucket(profile_uuid_strs[1][0]); + stub_refresh_token_bucket(profile_uuid_strs[2][0]); for (int i = 0; i < 60; i++) { //there are 3 rules, send one packet need 3 polling process, so 10 packets need 30 polling //even though invoke polling more than 30 times, there should be only 10 pkts be sent @@ -673,17 +685,18 @@ TEST(single_session, udp_multi_rules) shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_clear_resource(); + stub_swarmkv_clear_resource(); /*******test statistics***********/ //profile_id 0 - shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 100, 10000, 0, 0, 507000, SHAPING_DIR_OUT, profile_type_primary); + shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 100, 10000, 0, 0, 507000, 
SHAPING_DIR_OUT, profile_type_primary); //profile_id 1 - shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 1, 1, 1, 1, 100, 10000, 0, 0, 1000, SHAPING_DIR_OUT, profile_type_primary); + shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 1, rule_uuid_strs[1], profile_uuid_strs[1][0], 1, 100, 10000, 0, 0, 1000, SHAPING_DIR_OUT, profile_type_primary); //profile_id 2 - shaping_stat_judge(counter_stat_str, guage_stat_str, 2, 2, 2, 2, 1, 100, 10000, 0, 0, 91000, SHAPING_DIR_OUT, profile_type_primary);//max latency is first queued pkt + shaping_stat_judge(counter_stat_str, guage_stat_str, 2, 2, rule_uuid_strs[2], profile_uuid_strs[2][0], 1, 100, 10000, 0, 0, 91000, SHAPING_DIR_OUT, profile_type_primary);//max latency is first queued pkt free(counter_stat_str); free(guage_stat_str); @@ -699,24 +712,25 @@ TEST(single_session, udp_borrow) struct stub_pkt_queue *actual_tx_queue; struct shaping_ctx *ctx = NULL; struct shaping_flow *sf = NULL; - long long rule_id[] = {1}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"}; int priority[] = {1}; int profile_num[] = {2}; - int profile_id[][MAX_REF_PROFILE] = {{1, 2}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"}}; TAILQ_INIT(&expec_tx_queue); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); sf = shaping_flow_new(&ctx->thread_ctx[0]); ASSERT_TRUE(sf != NULL); - stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id); - stub_set_token_bucket_avl_per_sec(1, 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); - stub_set_token_bucket_avl_per_sec(2, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][1], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1); + uuid_t rule_uuid; + uuid_parse(rule_uuid_strs[0], rule_uuid); + shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1); /*******send packets***********/ send_packets(&ctx->thread_ctx[0], sf, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue, 1, 0); @@ -726,7 +740,7 @@ TEST(single_session, udp_borrow) ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue, actual_tx_queue, 10)); while (!TAILQ_EMPTY(&expec_tx_queue)) {//last 90 delay packets - stub_refresh_token_bucket(2); + stub_refresh_token_bucket(profile_uuid_strs[0][1]); for (int i = 0; i < 20; i++) {//even though invoke polling more than 10 times, there should be only 10 pkts be sent polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -747,14 +761,15 @@ TEST(single_session, udp_borrow) shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_clear_resource(); + stub_swarmkv_clear_resource(); /*******test statistics***********/ //profile_id 1, primary - shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 1, 1, 1, 0, 0, 0, 0, 171000, SHAPING_DIR_OUT, profile_type_primary); + shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 0, 0, 0, 0, 171000, SHAPING_DIR_OUT, 
profile_type_primary); //profile_id 2, borrow - shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 0, 1, 2, 2, 100, 10000, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow); + shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 0, rule_uuid_strs[0], profile_uuid_strs[0][1], 2, 100, 10000, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow); free(counter_stat_str); free(guage_stat_str); @@ -772,25 +787,26 @@ TEST(single_session, udp_borrow_same_priority_9) struct stub_pkt_queue *actual_tx_queue; struct shaping_ctx *ctx = NULL; struct shaping_flow *sf = NULL; - long long rule_id[] = {1}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"}; int priority[] = {9}; int profile_num[] = {3}; - int profile_id[][MAX_REF_PROFILE] = {{1, 2, 3}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"}}; TAILQ_INIT(&expec_tx_queue); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); sf = shaping_flow_new(&ctx->thread_ctx[0]); ASSERT_TRUE(sf != NULL); - stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id); - stub_set_token_bucket_avl_per_sec(1, 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); - stub_set_token_bucket_avl_per_sec(2, 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); - stub_set_token_bucket_avl_per_sec(3, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][1], 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][2], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1); + uuid_t rule_uuid; + uuid_parse(rule_uuid_strs[0], rule_uuid); + shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1); /*******send packets***********/ send_packets(&ctx->thread_ctx[0], sf, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue, 1, 0); @@ -800,7 +816,7 @@ TEST(single_session, udp_borrow_same_priority_9) ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue, actual_tx_queue, 10)); while (!TAILQ_EMPTY(&expec_tx_queue)) {//last 90 delay packets - stub_refresh_token_bucket(3); + stub_refresh_token_bucket(profile_uuid_strs[0][2]); for (int i = 0; i < 20; i++) {//even though invoke polling more than 10 times, there should be only 10 pkts be sent polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -821,72 +837,23 @@ TEST(single_session, udp_borrow_same_priority_9) shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_clear_resource(); + stub_swarmkv_clear_resource(); /*******test statistics***********/ //profile_id 1, primary - shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 1, 1, 9, 0, 0, 0, 0, 171000, SHAPING_DIR_OUT, profile_type_primary); + shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 9, 0, 0, 0, 0, 171000, SHAPING_DIR_OUT, profile_type_primary); //profile_id 2, borrow - shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 0, 
1, 2, 9, 0, 0, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow); + shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 0, rule_uuid_strs[0], profile_uuid_strs[0][1], 9, 0, 0, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow); //profile_id 3, borrow - shaping_stat_judge(counter_stat_str, guage_stat_str, 2, 0, 1, 3, 9, 100, 10000, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow); + shaping_stat_judge(counter_stat_str, guage_stat_str, 2, 0, rule_uuid_strs[0], profile_uuid_strs[0][2], 9, 100, 10000, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow); free(counter_stat_str); free(guage_stat_str); } -/*session1 match rule1 - rule1: - priority:1 - profile1: limit 1000, first 20 pkts async, then sync -*/ -TEST(single_session_async, udp_close_before_async_exec) -{ - struct stub_pkt_queue expec_tx_queue; - struct stub_pkt_queue *actual_tx_queue; - struct shaping_ctx *ctx = NULL; - struct shaping_flow *sf = NULL; - long long rule_id[] = {0}; - int priority[] = {1}; - int profile_num[] = {1}; - int profile_id[][MAX_REF_PROFILE] = {{0}}; - - TAILQ_INIT(&expec_tx_queue); - stub_init(); - dummy_swarmkv_init(); - ctx = shaping_engine_init(); - ASSERT_TRUE(ctx != NULL); - sf = shaping_flow_new(&ctx->thread_ctx[0]); - ASSERT_TRUE(sf != NULL); - - stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id); - stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); - stub_set_async_token_get_times(0, 20); - actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1); - - - /*******send packets***********/ - send_packets(&ctx->thread_ctx[0], sf, 10, 100, SHAPING_DIR_OUT, &expec_tx_queue, 1, 0); - ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue));//async callback haven't been called, no token, no packet be sent - sf->flag |= SESSION_CLOSE;// receive close ctrlbuf - - stub_set_async_token_get_times(0, 0);//refresh async count, async thread will be executed - sleep(1);//ensure async thread exec complete - - for (int i = 0; i < 10; i++) { - polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); - } - - ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue, actual_tx_queue, 10)); - - shaper_thread_resource_clear(); - shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); -} - /*session1 match rule1; session2 match rule2 rule1: priority:1 @@ -906,18 +873,15 @@ TEST(two_session_diff_priority_same_profile, udp_borrow_in_order) struct shaping_ctx *ctx = NULL; struct shaping_flow *sf1 = NULL; struct shaping_flow *sf2 = NULL; - long long rule_ids[] = {1, 2}; - long long rule_id1[] = {1}; - long long rule_id2[] = {2}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"}; int profile_nums[] = {2, 1}; int prioritys[] = {1, 1}; - int profile_ids[][MAX_REF_PROFILE] = {{1, 2}, {2}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"}, {"00000000-0000-0000-0000-000000000002"}}; TAILQ_INIT(&expec_tx_queue1); TAILQ_INIT(&expec_tx_queue2); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); @@ -927,13 +891,17 @@ TEST(two_session_diff_priority_same_profile, udp_borrow_in_order) ASSERT_TRUE(sf2 != NULL); - stub_set_matched_shaping_rules(2, rule_ids, prioritys, profile_nums, profile_ids); + stub_set_matched_shaping_rules(2, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs); - stub_set_token_bucket_avl_per_sec(1, 0, 
SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); - stub_set_token_bucket_avl_per_sec(2, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][1], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 1); - shaper_rules_update(&ctx->thread_ctx[0], sf2, rule_id2, 1); + uuid_t rule_uuid1; + uuid_t rule_uuid2; + uuid_parse(rule_uuid_strs[0], rule_uuid1); + uuid_parse(rule_uuid_strs[1], rule_uuid2); + shaper_rules_update(&ctx->thread_ctx[0], sf1, &rule_uuid1, 1); + shaper_rules_update(&ctx->thread_ctx[0], sf2, &rule_uuid2, 1); /*******send packets***********/ @@ -945,7 +913,7 @@ TEST(two_session_diff_priority_same_profile, udp_borrow_in_order) ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue1, actual_tx_queue, 10)); ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue)); - stub_refresh_token_bucket(2); + stub_refresh_token_bucket(profile_uuid_strs[0][1]); for (int i = 0; i < 20; i++) { polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -954,7 +922,7 @@ TEST(two_session_diff_priority_same_profile, udp_borrow_in_order) ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue)); while (!TAILQ_EMPTY(&expec_tx_queue2)) { - stub_refresh_token_bucket(2); + stub_refresh_token_bucket(profile_uuid_strs[0][1]); for (int i = 0; i < 20; i++) {//even though invoke polling more than 10 times, there should be only 10 pkts be sent polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -966,7 +934,7 @@ TEST(two_session_diff_priority_same_profile, udp_borrow_in_order) stub_curr_time_s_inc(1);//inc time to refresh hmget interval while (!TAILQ_EMPTY(&expec_tx_queue1)) {//last 90 delay packets - stub_refresh_token_bucket(2); + stub_refresh_token_bucket(profile_uuid_strs[0][1]); for (int i = 0; i < 20; i++) {//even though invoke polling more than 10 times, there should be only 10 pkts be sent polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -988,17 +956,18 @@ TEST(two_session_diff_priority_same_profile, udp_borrow_in_order) shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_clear_resource(); + stub_swarmkv_clear_resource(); /*******test statistics***********/ //profile_id 1, primary - shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1471000, SHAPING_DIR_OUT, profile_type_primary); + shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 0, 0, 0, 0, 1471000, SHAPING_DIR_OUT, profile_type_primary); //profile_id 2, borrow - shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 0, 1, 2, 2, 100, 10000, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow); + shaping_stat_judge(counter_stat_str, guage_stat_str, 1, 0, rule_uuid_strs[0], profile_uuid_strs[0][1], 2, 100, 10000, 0, 0, 0, SHAPING_DIR_OUT, profile_type_borrow); //profile_id 2, primary - shaping_stat_judge(counter_stat_str, guage_stat_str, 2, 1, 2, 2, 1, 100, 10000, 0, 0, 191000, SHAPING_DIR_OUT, profile_type_primary); + shaping_stat_judge(counter_stat_str, guage_stat_str, 2, 1, rule_uuid_strs[1], 
profile_uuid_strs[1][0], 1, 100, 10000, 0, 0, 191000, SHAPING_DIR_OUT, profile_type_primary); free(counter_stat_str); free(guage_stat_str); @@ -1022,18 +991,15 @@ TEST(two_session_diff_priority_same_profile, two_thread_udp_tx_in_order) struct shaping_ctx *ctx = NULL; struct shaping_flow *sf1 = NULL; struct shaping_flow *sf2 = NULL; - long long rule_ids[] = {1, 2}; - long long rule_id1[] = {1}; - long long rule_id2[] = {2}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"}; int profile_nums[] = {1, 1}; int prioritys[] = {1, 2}; - int profile_id[][MAX_REF_PROFILE] = {{0}, {0}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, {"00000000-0000-0000-0000-000000000001"}}; TAILQ_INIT(&expec_tx_queue1); TAILQ_INIT(&expec_tx_queue2); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); @@ -1042,12 +1008,16 @@ TEST(two_session_diff_priority_same_profile, two_thread_udp_tx_in_order) sf2 = shaping_flow_new(&ctx->thread_ctx[1]); ASSERT_TRUE(sf2 != NULL); - stub_set_matched_shaping_rules(2, rule_ids, prioritys, profile_nums, profile_id); + stub_set_matched_shaping_rules(2, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs); - stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 1); - shaper_rules_update(&ctx->thread_ctx[1], sf2, rule_id2, 1); + uuid_t rule_uuid1; + uuid_t rule_uuid2; + uuid_parse(rule_uuid_strs[0], rule_uuid1); + uuid_parse(rule_uuid_strs[1], rule_uuid2); + shaper_rules_update(&ctx->thread_ctx[0], sf1, &rule_uuid1, 1); + shaper_rules_update(&ctx->thread_ctx[1], sf2, &rule_uuid2, 1); /*******send packets***********/ send_packets(&ctx->thread_ctx[0], sf1, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue1, 1, 0); @@ -1058,13 +1028,13 @@ TEST(two_session_diff_priority_same_profile, two_thread_udp_tx_in_order) shaper_stat_refresh(&ctx->thread_ctx[0], sf1, 1);//刷新线程0中的优先级队列长度到swarmkv中 stub_curr_time_s_inc(1);//inc time to refresh hmget interval for (int i = 0; i < 10; i++) {//线程1中的session优先级为2,被线程0中优先级为1的session阻断 - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]); ASSERT_EQ(-1, judge_packet_eq(&expec_tx_queue2, actual_tx_queue, 1));//优先级低,不能发出报文 } while (!TAILQ_EMPTY(&expec_tx_queue1)) { - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);//require tokens polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);//send pkt @@ -1074,7 +1044,7 @@ TEST(two_session_diff_priority_same_profile, two_thread_udp_tx_in_order) stub_curr_time_s_inc(1);//inc time to refresh hmget interval shaper_stat_refresh(&ctx->thread_ctx[0], sf1, 1);//刷新线程0中的优先级队列长度到swarmkv中 while (!TAILQ_EMPTY(&expec_tx_queue2)) { - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]); polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]); @@ -1087,7 +1057,8 @@ TEST(two_session_diff_priority_same_profile, 
two_thread_udp_tx_in_order) shaping_flow_free(&ctx->thread_ctx[1], sf2); shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_clear_resource(); + stub_swarmkv_clear_resource(); } /*session1 match rule1; session2 match rule2 @@ -1108,18 +1079,15 @@ TEST(two_session_diff_priority_same_profile, profile_timer_test) struct shaping_ctx *ctx = NULL; struct shaping_flow *sf1 = NULL; struct shaping_flow *sf2 = NULL; - long long rule_ids[] = {1, 2}; - long long rule_id1[] = {1}; - long long rule_id2[] = {2}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"}; int profile_nums[] = {1, 1}; int prioritys[] = {1, 2}; - int profile_id[][MAX_REF_PROFILE] = {{0}, {0}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, {"00000000-0000-0000-0000-000000000001"}}; TAILQ_INIT(&expec_tx_queue1); TAILQ_INIT(&expec_tx_queue2); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); @@ -1128,12 +1096,16 @@ TEST(two_session_diff_priority_same_profile, profile_timer_test) sf2 = shaping_flow_new(&ctx->thread_ctx[1]); ASSERT_TRUE(sf2 != NULL); - stub_set_matched_shaping_rules(2, rule_ids, prioritys, profile_nums, profile_id); + stub_set_matched_shaping_rules(2, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs); - stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 1); - shaper_rules_update(&ctx->thread_ctx[1], sf2, rule_id2, 1); + uuid_t rule_uuid1; + uuid_t rule_uuid2; + uuid_parse(rule_uuid_strs[0], rule_uuid1); + uuid_parse(rule_uuid_strs[1], rule_uuid2); + shaper_rules_update(&ctx->thread_ctx[0], sf1, &rule_uuid1, 1); + shaper_rules_update(&ctx->thread_ctx[1], sf2, &rule_uuid2, 1); /*******send packets***********/ send_packets(&ctx->thread_ctx[0], sf1, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue1, 1, 0); @@ -1150,7 +1122,7 @@ TEST(two_session_diff_priority_same_profile, profile_timer_test) stub_curr_time_s_inc(1);//inc time to refresh hmget interval for (int i = 0; i < 10; i++) {//线程1中的session优先级为2,被线程0中优先级为1的session阻断 - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]); stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -1158,7 +1130,7 @@ TEST(two_session_diff_priority_same_profile, profile_timer_test) } while (!TAILQ_EMPTY(&expec_tx_queue1)) { - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);//first polling request token polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);//then send pkt stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -1175,7 +1147,7 @@ TEST(two_session_diff_priority_same_profile, profile_timer_test) stub_curr_time_s_inc(1);//inc time to refresh hmget interval while (!TAILQ_EMPTY(&expec_tx_queue2)) { - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);//first polling request token polling_entry(ctx->thread_ctx[1].sp, 
ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);//then send pkt stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -1189,7 +1161,8 @@ TEST(two_session_diff_priority_same_profile, profile_timer_test) shaping_flow_free(&ctx->thread_ctx[1], sf2); shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_clear_resource(); + stub_swarmkv_clear_resource(); } /*session1 match rule1; session2 match rule2 @@ -1210,18 +1183,15 @@ TEST(two_session_diff_priority_same_profile, one_direction_dont_block_another) struct shaping_ctx *ctx = NULL; struct shaping_flow *sf1 = NULL; struct shaping_flow *sf2 = NULL; - long long rule_ids[] = {1, 2}; - long long rule_id1[] = {1}; - long long rule_id2[] = {2}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"}; int profile_nums[] = {1, 1}; int prioritys[] = {1, 2}; - int profile_id[][MAX_REF_PROFILE] = {{0}, {0}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, {"00000000-0000-0000-0000-000000000001"}}; TAILQ_INIT(&expec_tx_queue1); TAILQ_INIT(&expec_tx_queue2); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); @@ -1230,13 +1200,17 @@ TEST(two_session_diff_priority_same_profile, one_direction_dont_block_another) sf2 = shaping_flow_new(&ctx->thread_ctx[1]); ASSERT_TRUE(sf2 != NULL); - stub_set_matched_shaping_rules(2, rule_ids, prioritys, profile_nums, profile_id); + stub_set_matched_shaping_rules(2, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs); - stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); - stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_IN, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_IN, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 1); - shaper_rules_update(&ctx->thread_ctx[1], sf2, rule_id2, 1); + uuid_t rule_uuid1; + uuid_t rule_uuid2; + uuid_parse(rule_uuid_strs[0], rule_uuid1); + uuid_parse(rule_uuid_strs[1], rule_uuid2); + shaper_rules_update(&ctx->thread_ctx[0], sf1, &rule_uuid1, 1); + shaper_rules_update(&ctx->thread_ctx[1], sf2, &rule_uuid2, 1); /*******send packets***********/ send_packets(&ctx->thread_ctx[0], sf1, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue1, 1, 0); @@ -1254,7 +1228,7 @@ TEST(two_session_diff_priority_same_profile, one_direction_dont_block_another) stub_curr_time_s_inc(1);//inc time to refresh hmget interval while (!TAILQ_EMPTY(&expec_tx_queue2)) {//线程0中优先级为1的session阻断OUT方向,线程1中的session优先级为2,但是IN方向不受影响 - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);//first polling request token polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]); stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -1263,7 +1237,7 @@ TEST(two_session_diff_priority_same_profile, one_direction_dont_block_another) } while (!TAILQ_EMPTY(&expec_tx_queue1)) { - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);//first polling request token 
polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);//then send pkt stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -1277,7 +1251,8 @@ TEST(two_session_diff_priority_same_profile, one_direction_dont_block_another) shaping_flow_free(&ctx->thread_ctx[1], sf2); shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_clear_resource(); + stub_swarmkv_clear_resource(); } /*session1 match rule1 & rule2; session2 match rule3 @@ -1302,18 +1277,15 @@ TEST(two_sessions, priority_non_block) struct shaping_ctx *ctx = NULL; struct shaping_flow *sf1 = NULL; struct shaping_flow *sf2 = NULL; - long long rule_ids[] = {1, 2, 3}; - long long rule_id1[] = {1, 2}; - long long rule_id2[] = {3}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"}; int profile_nums[] = {1, 1, 1}; int prioritys[] = {1, 2, 3}; - int profile_id[][MAX_REF_PROFILE] = {{0}, {1}, {0}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, {"00000000-0000-0000-0000-000000000002"}, {"00000000-0000-0000-0000-000000000001"}}; TAILQ_INIT(&expec_tx_queue1); TAILQ_INIT(&expec_tx_queue2); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); @@ -1322,13 +1294,18 @@ TEST(two_sessions, priority_non_block) sf2 = shaping_flow_new(&ctx->thread_ctx[1]); ASSERT_TRUE(sf2 != NULL); - stub_set_matched_shaping_rules(3, rule_ids, prioritys, profile_nums, profile_id); + stub_set_matched_shaping_rules(3, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs); - stub_set_token_bucket_avl_per_sec(0, 3000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); - stub_set_token_bucket_avl_per_sec(1, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 3000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[1][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 2); - shaper_rules_update(&ctx->thread_ctx[1], sf2, rule_id2, 1); + uuid_t rule_uuid1[2]; + uuid_t rule_uuid2; + uuid_parse(rule_uuid_strs[0], rule_uuid1[0]); + uuid_parse(rule_uuid_strs[1], rule_uuid1[1]); + uuid_parse(rule_uuid_strs[2], rule_uuid2); + shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_uuid1, 2); + shaper_rules_update(&ctx->thread_ctx[1], sf2, &rule_uuid2, 1); /*******send packets***********/ send_packets(&ctx->thread_ctx[0], sf1, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue1, 3, 0);//sf1 blocked by rule2(profile id 1), while rule3(profile id 0) still has 1000 token @@ -1339,8 +1316,8 @@ TEST(two_sessions, priority_non_block) ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue)); while (!TAILQ_EMPTY(&expec_tx_queue1)) { - stub_refresh_token_bucket(0); - stub_refresh_token_bucket(1); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); + stub_refresh_token_bucket(profile_uuid_strs[1][0]); for (int i = 0; i < 4; i++) { polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]);//two rules, one rule need two polling, request token and send pkt @@ -1353,7 +1330,8 @@ TEST(two_sessions, priority_non_block) shaping_flow_free(&ctx->thread_ctx[1], sf2); shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + 
stub_clear_resource(); + stub_swarmkv_clear_resource(); } /*session1 match rule1; session2 match rule2 @@ -1376,18 +1354,15 @@ TEST(two_sessions, borrow_when_primary_profile_priority_blocked) struct shaping_ctx *ctx = NULL; struct shaping_flow *sf1 = NULL; struct shaping_flow *sf2 = NULL; - long long rule_ids[] = {1, 2}; - long long rule_id1[] = {1}; - long long rule_id2[] = {2}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"}; int profile_nums[] = {1, 2}; int prioritys[] = {1, 2}; - int profile_id[][MAX_REF_PROFILE] = {{0}, {0, 1}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"}}; TAILQ_INIT(&expec_tx_queue1); TAILQ_INIT(&expec_tx_queue2); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); @@ -1396,13 +1371,17 @@ TEST(two_sessions, borrow_when_primary_profile_priority_blocked) sf2 = shaping_flow_new(&ctx->thread_ctx[1]); ASSERT_TRUE(sf2 != NULL); - stub_set_matched_shaping_rules(2, rule_ids, prioritys, profile_nums, profile_id); + stub_set_matched_shaping_rules(2, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs); - stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); - stub_set_token_bucket_avl_per_sec(1, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[1][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[1][1], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 1); - shaper_rules_update(&ctx->thread_ctx[1], sf2, rule_id2, 1); + uuid_t rule_uuid1; + uuid_t rule_uuid2; + uuid_parse(rule_uuid_strs[0], rule_uuid1); + uuid_parse(rule_uuid_strs[1], rule_uuid2); + shaper_rules_update(&ctx->thread_ctx[0], sf1, &rule_uuid1, 1); + shaper_rules_update(&ctx->thread_ctx[1], sf2, &rule_uuid2, 1); /*******send packets***********/ send_packets(&ctx->thread_ctx[0], sf1, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue1, 1, 0); @@ -1410,14 +1389,14 @@ TEST(two_sessions, borrow_when_primary_profile_priority_blocked) ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue1, actual_tx_queue, 10)); while (!TAILQ_EMPTY(&expec_tx_queue2)) { - stub_refresh_token_bucket(1); + stub_refresh_token_bucket(profile_uuid_strs[1][1]); polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);//primary profile blocked by priority, send by borrow profile ASSERT_EQ(0, judge_packet_eq(&expec_tx_queue2, actual_tx_queue, 1)); } while (!TAILQ_EMPTY(&expec_tx_queue1)) { - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[1][0]); polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); @@ -1428,7 +1407,8 @@ TEST(two_sessions, borrow_when_primary_profile_priority_blocked) shaping_flow_free(&ctx->thread_ctx[1], sf2); shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_clear_resource(); + stub_swarmkv_clear_resource(); } /*session1 match rule1; session2 match rule2 @@ -1451,18 +1431,15 @@ TEST(two_sessions, primary_profile_priority_blocked_by_borrow_profile) struct shaping_ctx *ctx = NULL; 
struct shaping_flow *sf1 = NULL; struct shaping_flow *sf2 = NULL; - long long rule_ids[] = {1, 2}; - long long rule_id1[] = {1}; - long long rule_id2[] = {2}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"}; int profile_nums[] = {2, 1}; int prioritys[] = {1, 5}; - int profile_id[][MAX_REF_PROFILE] = {{0, 1}, {1}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"}, {"00000000-0000-0000-0000-000000000002"}}; TAILQ_INIT(&expec_tx_queue1); TAILQ_INIT(&expec_tx_queue2); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); @@ -1471,13 +1448,17 @@ TEST(two_sessions, primary_profile_priority_blocked_by_borrow_profile) sf2 = shaping_flow_new(&ctx->thread_ctx[1]); ASSERT_TRUE(sf2 != NULL); - stub_set_matched_shaping_rules(2, rule_ids, prioritys, profile_nums, profile_id); + stub_set_matched_shaping_rules(2, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs); - stub_set_token_bucket_avl_per_sec(0, 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); - stub_set_token_bucket_avl_per_sec(1, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 0, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][1], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 1); - shaper_rules_update(&ctx->thread_ctx[1], sf2, rule_id2, 1); + uuid_t rule_uuid1; + uuid_t rule_uuid2; + uuid_parse(rule_uuid_strs[0], rule_uuid1); + uuid_parse(rule_uuid_strs[1], rule_uuid2); + shaper_rules_update(&ctx->thread_ctx[0], sf1, &rule_uuid1, 1); + shaper_rules_update(&ctx->thread_ctx[1], sf2, &rule_uuid2, 1); /*******send packets***********/ send_packets(&ctx->thread_ctx[0], sf1, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue1, 1, 0); @@ -1487,7 +1468,7 @@ TEST(two_sessions, primary_profile_priority_blocked_by_borrow_profile) ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue)); while (!TAILQ_EMPTY(&expec_tx_queue1)) { - stub_refresh_token_bucket(1); + stub_refresh_token_bucket(profile_uuid_strs[0][1]); polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]);//blocked by priority, sf1 has priority 2 for profile_b(id 1) @@ -1498,7 +1479,7 @@ TEST(two_sessions, primary_profile_priority_blocked_by_borrow_profile) stub_curr_time_s_inc(1);//inc time to refresh hmget interval while (!TAILQ_EMPTY(&expec_tx_queue2)) { - stub_refresh_token_bucket(1); + stub_refresh_token_bucket(profile_uuid_strs[0][1]); polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]); polling_entry(ctx->thread_ctx[1].sp, ctx->thread_ctx[1].stat, &ctx->thread_ctx[1]); @@ -1509,7 +1490,8 @@ TEST(two_sessions, primary_profile_priority_blocked_by_borrow_profile) shaping_flow_free(&ctx->thread_ctx[1], sf2); shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_clear_resource(); + stub_swarmkv_clear_resource(); } /*session1 match rule1 @@ -1521,23 +1503,24 @@ TEST(statistics, udp_drop_pkt) struct stub_pkt_queue *actual_tx_queue; struct shaping_ctx *ctx = NULL; struct 
shaping_flow *sf = NULL; - long long rule_id[] = {0}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"}; int priority[] = {1}; int profile_num[] = {1}; - int profile_id[][MAX_REF_PROFILE] = {{0}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}}; TAILQ_INIT(&expec_tx_queue); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); sf = shaping_flow_new(&ctx->thread_ctx[0]); ASSERT_TRUE(sf != NULL); - stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id); - stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1); + uuid_t rule_uuid; + uuid_parse(rule_uuid_strs[0], rule_uuid); + shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1); /*******send packets***********/ send_packets(&ctx->thread_ctx[0], sf, SHAPING_SESSION_QUEUE_LEN + 10, 100, SHAPING_DIR_OUT, &expec_tx_queue, 1, 0); @@ -1548,7 +1531,7 @@ TEST(statistics, udp_drop_pkt) ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue)); while (!TAILQ_EMPTY(&expec_tx_queue)) { - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -1572,11 +1555,12 @@ TEST(statistics, udp_drop_pkt) shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_clear_resource(); + stub_swarmkv_clear_resource(); /*******test statistics***********/ //judge shaping metric - shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, SHAPING_SESSION_QUEUE_LEN+10, (SHAPING_SESSION_QUEUE_LEN+10)*100, 100, 0, 228000, SHAPING_DIR_OUT, profile_type_primary);//every queued pkt's latency is max + shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, SHAPING_SESSION_QUEUE_LEN+10, (SHAPING_SESSION_QUEUE_LEN+10)*100, 100, 0, 228000, SHAPING_DIR_OUT, profile_type_primary);//every queued pkt's latency is max //judge shaping global metric shaping_global_stat_judge(global_stat_str, SHAPING_SESSION_QUEUE_LEN+10, (SHAPING_SESSION_QUEUE_LEN+10)*100, 100, 10000, 0, 0); @@ -1595,24 +1579,25 @@ TEST(statistics, udp_queueing_pkt) struct stub_pkt_queue *actual_tx_queue; struct shaping_ctx *ctx = NULL; struct shaping_flow *sf = NULL; - long long rule_id[] = {0}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"}; int priority[] = {1}; int profile_num[] = {1}; - int profile_id[][MAX_REF_PROFILE] = {{0}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{ "00000000-0000-0000-0000-000000000001"}}; TAILQ_INIT(&expec_tx_queue); stub_init(); - dummy_swarmkv_init(); ctx = shaping_engine_init(); ASSERT_TRUE(ctx != NULL); sf = shaping_flow_new(&ctx->thread_ctx[0]); ASSERT_TRUE(sf != NULL); - stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id); - stub_set_token_bucket_avl_per_sec(0, 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, 
profile_num, profile_uuid_strs); + stub_set_token_bucket_avl_per_sec(profile_uuid_strs[0][0], 1000, SHAPING_DIR_OUT, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); actual_tx_queue = stub_get_tx_queue(); - shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1); + uuid_t rule_uuid; + uuid_parse(rule_uuid_strs[0], rule_uuid); + shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1); /*******send packets***********/ send_packets(&ctx->thread_ctx[0], sf, 100, 100, SHAPING_DIR_OUT, &expec_tx_queue, 1, 0); @@ -1633,7 +1618,7 @@ TEST(statistics, udp_queueing_pkt) fieldstat_easy_output(ctx->thread_ctx[0].stat->guage_instance, &guage_stat_str, &stat_str_len); /*******judge metric********/ - shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 10, 1000, 0, 90, 0, SHAPING_DIR_OUT, profile_type_primary); + shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 10, 1000, 0, 90, 0, SHAPING_DIR_OUT, profile_type_primary); shaping_global_stat_judge(global_stat_str, 10, 1000, 0, 0, 90, 9000); free(global_stat_str); @@ -1645,7 +1630,7 @@ TEST(statistics, udp_queueing_pkt) ASSERT_TRUE(TAILQ_EMPTY(actual_tx_queue)); while (!TAILQ_EMPTY(&expec_tx_queue)) {//last 90 delay packets - stub_refresh_token_bucket(0); + stub_refresh_token_bucket(profile_uuid_strs[0][0]); polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); polling_entry(ctx->thread_ctx[0].sp, ctx->thread_ctx[0].stat, &ctx->thread_ctx[0]); stub_curr_time_ns_inc(STUB_TIME_INC_FOR_PACKET); @@ -1662,11 +1647,12 @@ TEST(statistics, udp_queueing_pkt) shaper_thread_resource_clear(); shaping_engine_destroy(ctx); - stub_clear_matched_shaping_rules(); + stub_clear_resource(); + stub_swarmkv_clear_resource(); /*******test statistics***********/ //judge shaping metric - shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, 0, 0, 1, 100, 10000, 0, 0, 90000, SHAPING_DIR_OUT, profile_type_primary); + shaping_stat_judge(counter_stat_str, guage_stat_str, 0, 0, rule_uuid_strs[0], profile_uuid_strs[0][0], 1, 100, 10000, 0, 0, 90000, SHAPING_DIR_OUT, profile_type_primary); //judge global metric shaping_global_stat_judge(global_stat_str, 100, 10000, 0, 0, 0, 0); diff --git a/shaping/test/gtest_shaper_aqm.cpp b/shaping/test/gtest_shaper_aqm.cpp index 6acb1f3..fd12dfe 100644 --- a/shaping/test/gtest_shaper_aqm.cpp +++ b/shaping/test/gtest_shaper_aqm.cpp @@ -8,12 +8,15 @@ TEST(aqm_bule, need_drop) { struct shaper_aqm_blue_para para; int drop_cnt = 0; + uuid_t uuid; + + uuid_parse("00000000-0000-0000-0000-000000000001", uuid); para.update_time = 0; para.probability = 0; for (int i = 0; i < 10000; i++) { - if (shaper_aqm_blue_need_drop(0, ¶, BLUE_QUEUE_LEN_MAX + 1)) { + if (shaper_aqm_blue_need_drop(uuid, ¶, BLUE_QUEUE_LEN_MAX + 1)) { drop_cnt++; } } @@ -24,7 +27,7 @@ TEST(aqm_bule, need_drop) EXPECT_LT(drop_cnt, 1100); sleep(3); - shaper_aqm_blue_need_drop(0, ¶, 0); + shaper_aqm_blue_need_drop(uuid, ¶, 0); EXPECT_EQ(para.probability, BLUE_INCREMENT - BLUE_DECREMENT); } @@ -32,12 +35,15 @@ TEST(aqm_blue, no_drop) { struct shaper_aqm_blue_para para; int drop_cnt = 0; + uuid_t uuid; + + uuid_parse("00000000-0000-0000-0000-000000000001", uuid); para.update_time = 0; para.probability = 0; for (int i = 0; i < 10000; i++) { - if (shaper_aqm_blue_need_drop(0, ¶, BLUE_QUEUE_LEN_MAX - 1)) { + if (shaper_aqm_blue_need_drop(uuid, ¶, BLUE_QUEUE_LEN_MAX - 1)) { drop_cnt++; } } @@ -46,7 +52,7 @@ TEST(aqm_blue, no_drop) EXPECT_EQ(drop_cnt, 0); sleep(3); - shaper_aqm_blue_need_drop(0, ¶, 
BLUE_QUEUE_LEN_MAX - 1); + shaper_aqm_blue_need_drop(uuid, ¶, BLUE_QUEUE_LEN_MAX - 1); EXPECT_EQ(para.probability, 0); } @@ -54,26 +60,29 @@ TEST(aqm_codel, need_drop) { struct shaper_aqm_codel_para para; int curr_time_ms = 0; + uuid_t uuid; + + uuid_parse("00000000-0000-0000-0000-000000000001", uuid); memset(¶, 0, sizeof(para)); - shaper_aqm_codel_need_drop(0, ¶, curr_time_ms, CODEL_MAX_LATENCY + 1); + shaper_aqm_codel_need_drop(uuid, ¶, curr_time_ms, CODEL_MAX_LATENCY + 1); EXPECT_EQ(para.state, CODEL_STATE_DROPPING_TIMER); EXPECT_EQ(para.start_drop_time_ms, curr_time_ms + CODEL_DROP_INTERVAL); curr_time_ms = para.start_drop_time_ms + 1; - shaper_aqm_codel_need_drop(0, ¶, curr_time_ms, CODEL_MAX_LATENCY + 1); + shaper_aqm_codel_need_drop(uuid, ¶, curr_time_ms, CODEL_MAX_LATENCY + 1); EXPECT_EQ(para.state, CODEL_STATE_DROPPING_PHASE); EXPECT_EQ(para.drop_count, 1); EXPECT_EQ(para.next_drop_time_ms, int(curr_time_ms + CODEL_DROP_INTERVAL / sqrt(para.drop_count))); curr_time_ms = para.next_drop_time_ms + 1; - shaper_aqm_codel_need_drop(0, ¶, curr_time_ms, CODEL_MAX_LATENCY + 1); + shaper_aqm_codel_need_drop(uuid, ¶, curr_time_ms, CODEL_MAX_LATENCY + 1); EXPECT_EQ(para.state, CODEL_STATE_DROPPING_PHASE); EXPECT_EQ(para.drop_count, 2); EXPECT_EQ(para.next_drop_time_ms, int(curr_time_ms + CODEL_DROP_INTERVAL / sqrt(para.drop_count))); - shaper_aqm_codel_need_drop(0, ¶, curr_time_ms, CODEL_MAX_LATENCY - 1); + shaper_aqm_codel_need_drop(uuid, ¶, curr_time_ms, CODEL_MAX_LATENCY - 1); EXPECT_EQ(para.state, CODEL_STATE_NORMAL); } @@ -81,10 +90,13 @@ TEST(aqm_codel, no_drop) { struct shaper_aqm_codel_para para; int curr_time_ms = 0; + uuid_t uuid; + + uuid_parse("00000000-0000-0000-0000-000000000001", uuid); memset(¶, 0, sizeof(para)); - shaper_aqm_codel_need_drop(0, ¶, curr_time_ms, CODEL_MAX_LATENCY - 1); + shaper_aqm_codel_need_drop(uuid, ¶, curr_time_ms, CODEL_MAX_LATENCY - 1); EXPECT_EQ(para.state, CODEL_STATE_NORMAL); EXPECT_EQ(para.drop_count, 0); } diff --git a/shaping/test/gtest_shaper_maat.cpp b/shaping/test/gtest_shaper_maat.cpp index 3b619fc..59381ce 100644 --- a/shaping/test/gtest_shaper_maat.cpp +++ b/shaping/test/gtest_shaper_maat.cpp @@ -7,27 +7,46 @@ TEST(shaping_rule, parse) { - const char *data = "182\t\ - 2\t\ - 32\t\ - 0\t\ - 1\t\ - {}\t\ - {\"vsys_id\":2333,\"priority\":1,\"fair_factor\":10,\"dscp_marking\":{\"enabled\":1,\"dscp_value\":10},\"profile_chain\":[1,2,3]}\t\ - 0\t\ - 1\t\ - }"; + const char *data = "{\ + \"uuid\": \"00000000-0000-0000-0000-000000000182\",\ + \"service\": 2,\ + \"action\": 32,\ + \"do_blacklist\": 0,\ + \"do_log\": 1,\ + \"effective_rage\": 0,\ + \"action_parameter\": {\ + \"vsys_id\": 2333,\ + \"priority\": 1,\ + \"fair_factor\": 10,\ + \"dscp_marking\": {\ + \"enabled\": 1,\ + \"dscp_type\": \"Assured Forwarding (AF)\",\ + \"dscp_name\": \"af11\",\ + \"dscp_value\": 10\ + },\ + \"profile_chain\": [\ + \"00000000-0000-0000-0000-000000000001\",\ + \"00000000-0000-0000-0000-000000000002\",\ + \"00000000-0000-0000-0000-000000000003\"\ + ]\ + }\ + }"; struct shaping_rule *s_rule = NULL; struct shaping_rule *s_rule_dup = NULL; - shaper_rule_ex_new("TRAFFIC_SHAPING_COMPILE", 0, NULL, data, (void**)&s_rule, 0, NULL); + shaper_rule_ex_new("TRAFFIC_SHAPING_RULE", NULL, data, (void**)&s_rule, 0, NULL); EXPECT_EQ(s_rule->vsys_id, 2333); - EXPECT_EQ(s_rule->id, 182); - EXPECT_EQ(s_rule->primary_pf_id, 1); + char uuid_str[UUID_STR_LEN]; + uuid_unparse(s_rule->uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000182", uuid_str); + 
uuid_unparse(s_rule->primary_pf_uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str); EXPECT_EQ(s_rule->borrow_pf_num, 2); - EXPECT_EQ(s_rule->borrow_pf_id_array[0], 2); - EXPECT_EQ(s_rule->borrow_pf_id_array[1], 3); + uuid_unparse(s_rule->borrow_pf_uuid_array[0], uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str); + uuid_unparse(s_rule->borrow_pf_uuid_array[1], uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str); EXPECT_EQ(s_rule->priority, 1); EXPECT_EQ(s_rule->dscp_enable, 1); EXPECT_EQ(s_rule->dscp_value, 10); @@ -40,18 +59,24 @@ TEST(shaping_rule, parse) } TEST(shaping_profile, parse) -{ const char *data = "1\t\ - fair_share\t\ - max_min_host_fairness\t\ - [{\"direction\":\"incoming\",\"bandwidth\":1024},{\"direction\":\"outgoing\",\"bandwidth\":2048}]\t\ - {\"algorithm\":\"codel\"}\t\ - {}\t\ - 1"; +{ + const char *data = "{\ + \"uuid\":\"00000000-0000-0000-0000-000000000001\",\ + \"type\": \"fair_share\",\ + \"type_argument\": \"max_min_host_fairness\",\ + \"limits\": [\ + {\"direction\":\"incoming\",\"bandwidth\":1024},\ + {\"direction\":\"outcoming\",\"bandwidth\":2048}\ + ],\ + \"aqm_options\": {\"algorithm\":\"codel\"},\ + \"is_valid\":\"yes\"}"; struct shaping_profile *s_pf = NULL; struct shaping_profile *s_pf_dup = NULL; - shaper_profile_ex_new("TRAFFIC_SHAPING_PROFILE", 0, NULL, data, (void**)&s_pf, 0, NULL); - EXPECT_EQ(s_pf->id, 1); + shaper_profile_ex_new("TRAFFIC_SHAPING_PROFILE", NULL, data, (void**)&s_pf, 0, NULL); + char uuid_str[UUID_STR_LEN]; + uuid_unparse(s_pf->uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str); EXPECT_EQ(s_pf->in_limit_bandwidth, 1024); EXPECT_EQ(s_pf->out_limit_bandwidth, 2048); EXPECT_EQ(s_pf->type, PROFILE_TYPE_MAX_MIN_HOST_FAIRNESS); @@ -69,51 +94,71 @@ TEST(shaping_flow, update_rule) struct shaping_ctx *ctx = NULL; struct shaping_flow sf; struct shaping_rule_info *rule_info; - long long rule_ids[] = {1, 2, 3}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"}; int prioritys[] = {1, 2, 3}; int profile_nums[] = {1, 2, 3}; - int profile_ids[][MAX_REF_PROFILE] = {{1}, {2, 3}, {4, 5, 6}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, + {"00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"}, + {"00000000-0000-0000-0000-000000000004", "00000000-0000-0000-0000-000000000005", "00000000-0000-0000-0000-000000000006"}}; stub_init(); ctx = shaping_engine_init(); - stub_set_matched_shaping_rules(3, rule_ids, prioritys, profile_nums, profile_ids); - - ctx->maat_info->rule_table_id = STUB_MAAT_SHAPING_RULE_TABLE_ID; - ctx->maat_info->profile_table_id = STUB_MAAT_SHAPING_PROFILE_TABLE_ID; + stub_set_matched_shaping_rules(3, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs); + stub_set_profile_type("00000000-0000-0000-0000-000000000001", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000002", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000003", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000004", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000005", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000006", PROFILE_TYPE_GENERIC); memset(&sf, 0, sizeof(sf)); sf.priority = SHAPING_PRIORITY_NUM_MAX; - 
shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_ids, 3); + uuid_t rule_uuids[3]; + uuid_parse("00000000-0000-0000-0000-000000000001", rule_uuids[0]); + uuid_parse("00000000-0000-0000-0000-000000000002", rule_uuids[1]); + uuid_parse("00000000-0000-0000-0000-000000000003", rule_uuids[2]); + shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_uuids, 3); EXPECT_EQ(sf.rule_num, 3); rule_info = &sf.matched_rule_infos[0]; - EXPECT_EQ(rule_info->id, 1); - EXPECT_EQ(rule_info->primary.id, 1); + char uuid_str[UUID_STR_LEN]; + uuid_unparse(rule_info->uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str); + uuid_unparse(rule_info->primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str); EXPECT_EQ(rule_info->primary.priority, 1); EXPECT_EQ(rule_info->borrowing_num, 0); rule_info = &sf.matched_rule_infos[1]; - EXPECT_EQ(rule_info->id, 2); - EXPECT_EQ(rule_info->primary.id, 2); + uuid_unparse(rule_info->uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str); + uuid_unparse(rule_info->primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str); EXPECT_EQ(rule_info->primary.priority, 1); EXPECT_EQ(rule_info->borrowing_num, 1); - EXPECT_EQ(rule_info->borrowing[0].id, 3); + uuid_unparse(rule_info->borrowing[0].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str); EXPECT_EQ(rule_info->borrowing[0].priority, 2); rule_info = &sf.matched_rule_infos[2]; - EXPECT_EQ(rule_info->id, 3); - EXPECT_EQ(rule_info->primary.id, 4); + uuid_unparse(rule_info->uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str); + uuid_unparse(rule_info->primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000004", uuid_str); EXPECT_EQ(rule_info->primary.priority, 1); EXPECT_EQ(rule_info->borrowing_num, 2); - EXPECT_EQ(rule_info->borrowing[0].id, 5); + uuid_unparse(rule_info->borrowing[0].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000005", uuid_str); EXPECT_EQ(rule_info->borrowing[0].priority, 2); - EXPECT_EQ(rule_info->borrowing[1].id, 6); + uuid_unparse(rule_info->borrowing[1].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000006", uuid_str); EXPECT_EQ(rule_info->borrowing[1].priority, 3); shaping_engine_destroy(ctx); + stub_clear_resource(); } TEST(shaping_flow, update_rule_dup) @@ -121,65 +166,92 @@ TEST(shaping_flow, update_rule_dup) struct shaping_ctx *ctx = NULL; struct shaping_flow sf; struct shaping_rule_info *rule_info; - long long rule_ids[] = {1, 2, 3, 4}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003", "00000000-0000-0000-0000-000000000004"}; int prioritys[] = {1, 2, 3, 4}; int profile_nums[] = {1, 2, 3, 1}; - int profile_ids[][MAX_REF_PROFILE] = {{1}, {2, 3}, {4, 5, 6}, {7}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, + {"00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"}, + {"00000000-0000-0000-0000-000000000004", "00000000-0000-0000-0000-000000000005", "00000000-0000-0000-0000-000000000006"}, + {"00000000-0000-0000-0000-000000000007"}}; stub_init(); ctx = shaping_engine_init(); - stub_set_matched_shaping_rules(4, rule_ids, prioritys, profile_nums, profile_ids); - - ctx->maat_info->rule_table_id = STUB_MAAT_SHAPING_RULE_TABLE_ID; - ctx->maat_info->profile_table_id = STUB_MAAT_SHAPING_PROFILE_TABLE_ID; + 
stub_set_matched_shaping_rules(4, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs); + stub_set_profile_type("00000000-0000-0000-0000-000000000001", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000002", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000003", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000004", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000005", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000006", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000007", PROFILE_TYPE_GENERIC); memset(&sf, 0, sizeof(sf)); sf.priority = SHAPING_PRIORITY_NUM_MAX; - long long rule_id1[] = {1, 2, 3}; - shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_id1, 3); + uuid_t rule_uuid1[3]; + uuid_parse("00000000-0000-0000-0000-000000000001", rule_uuid1[0]); + uuid_parse("00000000-0000-0000-0000-000000000002", rule_uuid1[1]); + uuid_parse("00000000-0000-0000-0000-000000000003", rule_uuid1[2]); + shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_uuid1, 3); EXPECT_EQ(sf.rule_num, 3); rule_info = &sf.matched_rule_infos[0]; - EXPECT_EQ(rule_info->id, 1); - EXPECT_EQ(rule_info->primary.id, 1); + char uuid_str[UUID_STR_LEN]; + uuid_unparse(rule_info->uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str); + uuid_unparse(rule_info->primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str); EXPECT_EQ(rule_info->primary.priority, 1); EXPECT_EQ(rule_info->borrowing_num, 0); rule_info = &sf.matched_rule_infos[1]; - EXPECT_EQ(rule_info->id, 2); - EXPECT_EQ(rule_info->primary.id, 2); + uuid_unparse(rule_info->uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str); + uuid_unparse(rule_info->primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str); EXPECT_EQ(rule_info->primary.priority, 1); EXPECT_EQ(rule_info->borrowing_num, 1); - EXPECT_EQ(rule_info->borrowing[0].id, 3); + uuid_unparse(rule_info->borrowing[0].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str); EXPECT_EQ(rule_info->borrowing[0].priority, 2); rule_info = &sf.matched_rule_infos[2]; - EXPECT_EQ(rule_info->id, 3); - EXPECT_EQ(rule_info->primary.id, 4); + uuid_unparse(rule_info->uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str); + uuid_unparse(rule_info->primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000004", uuid_str); EXPECT_EQ(rule_info->primary.priority, 1); EXPECT_EQ(rule_info->borrowing_num, 2); - EXPECT_EQ(rule_info->borrowing[0].id, 5); + uuid_unparse(rule_info->borrowing[0].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000005", uuid_str); EXPECT_EQ(rule_info->borrowing[0].priority, 2); - EXPECT_EQ(rule_info->borrowing[1].id, 6); + uuid_unparse(rule_info->borrowing[1].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000006", uuid_str); EXPECT_EQ(rule_info->borrowing[1].priority, 3); - long long rule_id2[] = {1}; - shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_id2, 1); + uuid_t rule_uuid2; + uuid_parse("00000000-0000-0000-0000-000000000001", rule_uuid2); + shaper_rules_update(&ctx->thread_ctx[0], &sf, &rule_uuid2, 1); EXPECT_EQ(sf.rule_num, 3); - long long rule_id3[] = {2, 3, 4}; - shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_id3, 3); + uuid_t rule_uuid3[3]; + 
uuid_parse("00000000-0000-0000-0000-000000000002", rule_uuid3[0]); + uuid_parse("00000000-0000-0000-0000-000000000003", rule_uuid3[1]); + uuid_parse("00000000-0000-0000-0000-000000000004", rule_uuid3[2]); + shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_uuid3, 3); EXPECT_EQ(sf.rule_num, 4); rule_info = &sf.matched_rule_infos[3]; - EXPECT_EQ(rule_info->id, 4); - EXPECT_EQ(rule_info->primary.id, 7); + uuid_unparse(rule_info->uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000004", uuid_str); + uuid_unparse(rule_info->primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000007", uuid_str); EXPECT_EQ(rule_info->primary.priority, 1); shaping_engine_destroy(ctx); + stub_clear_resource(); } TEST(shaping_flow, update_rule_after_priority_confirmed) @@ -187,57 +259,76 @@ TEST(shaping_flow, update_rule_after_priority_confirmed) struct shaping_ctx *ctx = NULL; struct shaping_flow sf; struct shaping_rule_info *rule_info; - long long rule_ids[] = {1, 2, 3}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"}; int prioritys[] = {1, 2, 3}; int profile_nums[] = {1, 2, 3}; - int profile_ids[][MAX_REF_PROFILE] = {{1}, {2, 3}, {4, 5, 6}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, + {"00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003"}, + {"00000000-0000-0000-0000-000000000004", "00000000-0000-0000-0000-000000000005", "00000000-0000-0000-0000-000000000006"}}; stub_init(); ctx = shaping_engine_init(); - stub_set_matched_shaping_rules(3, rule_ids, prioritys, profile_nums, profile_ids); - - ctx->maat_info->rule_table_id = STUB_MAAT_SHAPING_RULE_TABLE_ID; - ctx->maat_info->profile_table_id = STUB_MAAT_SHAPING_PROFILE_TABLE_ID; + stub_set_matched_shaping_rules(3, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs); + stub_set_profile_type("00000000-0000-0000-0000-000000000001", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000002", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000003", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000004", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000005", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000006", PROFILE_TYPE_GENERIC); memset(&sf, 0, sizeof(sf)); sf.priority = SHAPING_PRIORITY_NUM_MAX; - long long first_rule_ids[] = {2, 3}; - shaper_rules_update(&ctx->thread_ctx[0], &sf, first_rule_ids, 2); + uuid_t first_rule_uuids[2]; + uuid_parse("00000000-0000-0000-0000-000000000002", first_rule_uuids[0]); + uuid_parse("00000000-0000-0000-0000-000000000003", first_rule_uuids[1]); + shaper_rules_update(&ctx->thread_ctx[0], &sf, first_rule_uuids, 2); sf.processed_pkts = CONFIRM_PRIORITY_PKTS + 1; - long long after_confirm_priority_rule_ids[] = {1}; - shaper_rules_update(&ctx->thread_ctx[0], &sf, after_confirm_priority_rule_ids, 1); + uuid_t after_confirm_priority_rule_uuids; + uuid_parse("00000000-0000-0000-0000-000000000001", after_confirm_priority_rule_uuids); + shaper_rules_update(&ctx->thread_ctx[0], &sf, &after_confirm_priority_rule_uuids, 1); EXPECT_EQ(sf.rule_num, 3); EXPECT_EQ(sf.priority, 2); rule_info = &sf.matched_rule_infos[0]; - EXPECT_EQ(rule_info->id, 2); - EXPECT_EQ(rule_info->primary.id, 2); + char uuid_str[UUID_STR_LEN]; + uuid_unparse(rule_info->uuid, uuid_str); + 
EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str); + uuid_unparse(rule_info->primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str); EXPECT_EQ(rule_info->primary.priority, 2); EXPECT_EQ(rule_info->borrowing_num, 1); - EXPECT_EQ(rule_info->borrowing[0].id, 3); + uuid_unparse(rule_info->borrowing[0].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str); EXPECT_EQ(rule_info->borrowing[0].priority, 3); rule_info = &sf.matched_rule_infos[1]; - EXPECT_EQ(rule_info->id, 3); - EXPECT_EQ(rule_info->primary.id, 4); + uuid_unparse(rule_info->uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str); + uuid_unparse(rule_info->primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000004", uuid_str); EXPECT_EQ(rule_info->primary.priority, 2); EXPECT_EQ(rule_info->borrowing_num, 2); - EXPECT_EQ(rule_info->borrowing[0].id, 5); + uuid_unparse(rule_info->borrowing[0].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000005", uuid_str); EXPECT_EQ(rule_info->borrowing[0].priority, 3); - EXPECT_EQ(rule_info->borrowing[1].id, 6); + uuid_unparse(rule_info->borrowing[1].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000006", uuid_str); EXPECT_EQ(rule_info->borrowing[1].priority, 4); rule_info = &sf.matched_rule_infos[2]; - EXPECT_EQ(rule_info->id, 1); - EXPECT_EQ(rule_info->primary.id, 1); + uuid_unparse(rule_info->uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str); + uuid_unparse(rule_info->primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str); EXPECT_EQ(rule_info->primary.priority, 2); EXPECT_EQ(rule_info->borrowing_num, 0); shaping_engine_destroy(ctx); + stub_clear_resource(); } TEST(shaping_flow, update_rule_dscp) @@ -245,71 +336,90 @@ TEST(shaping_flow, update_rule_dscp) struct shaping_ctx *ctx = NULL; struct shaping_flow sf; struct shaping_rule_info *rule_info; - long long rule_ids[] = {1, 2, 3, 4}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002", "00000000-0000-0000-0000-000000000003", "00000000-0000-0000-0000-000000000004"}; int prioritys[] = {1, 2, 3, 4}; int profile_nums[] = {1, 1, 1, 1}; - int profile_ids[][MAX_REF_PROFILE] = {{1}, {2}, {3}, {4}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{"00000000-0000-0000-0000-000000000001"}, + {"00000000-0000-0000-0000-000000000002"}, + {"00000000-0000-0000-0000-000000000003"}, + {"00000000-0000-0000-0000-000000000004"}}; stub_init(); ctx = shaping_engine_init(); - stub_set_matched_shaping_rules(4, rule_ids, prioritys, profile_nums, profile_ids); - stub_set_shaping_rule_dscp_value(1, 10);//AF11 - stub_set_shaping_rule_dscp_value(2, 12);//AF12 - stub_set_shaping_rule_dscp_value(3, 14);//AF13 - stub_set_shaping_rule_dscp_value(4, 40);//CS5 + stub_set_matched_shaping_rules(4, rule_uuid_strs, prioritys, profile_nums, profile_uuid_strs); + stub_set_profile_type("00000000-0000-0000-0000-000000000001", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000002", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000003", PROFILE_TYPE_GENERIC); + stub_set_profile_type("00000000-0000-0000-0000-000000000004", PROFILE_TYPE_GENERIC); - ctx->maat_info->rule_table_id = STUB_MAAT_SHAPING_RULE_TABLE_ID; - ctx->maat_info->profile_table_id = STUB_MAAT_SHAPING_PROFILE_TABLE_ID; + stub_set_shaping_rule_dscp_value(rule_uuid_strs[0], 10);//AF11 + 
stub_set_shaping_rule_dscp_value(rule_uuid_strs[1], 12);//AF12 + stub_set_shaping_rule_dscp_value(rule_uuid_strs[2], 14);//AF13 + stub_set_shaping_rule_dscp_value(rule_uuid_strs[3], 40);//CS5 memset(&sf, 0, sizeof(sf)); sf.priority = SHAPING_PRIORITY_NUM_MAX; - long long rule_id1[] = {1, 2}; - shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_id1, 2); + uuid_t rule_uuids1[2]; + uuid_parse("00000000-0000-0000-0000-000000000001", rule_uuids1[0]); + uuid_parse("00000000-0000-0000-0000-000000000002", rule_uuids1[1]); + shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_uuids1, 2); EXPECT_EQ(sf.rule_num, 2); EXPECT_EQ(sf.dscp_enable, 1); EXPECT_EQ(sf.dscp_value, 10); rule_info = &sf.matched_rule_infos[0]; - EXPECT_EQ(rule_info->id, 1); - EXPECT_EQ(rule_info->primary.id, 1); + char uuid_str[UUID_STR_LEN]; + uuid_unparse(rule_info->uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str); + uuid_unparse(rule_info->primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str); EXPECT_EQ(rule_info->primary.priority, 1); EXPECT_EQ(rule_info->borrowing_num, 0); rule_info = &sf.matched_rule_infos[1]; - EXPECT_EQ(rule_info->id, 2); - EXPECT_EQ(rule_info->primary.id, 2); + uuid_unparse(rule_info->uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str); + uuid_unparse(rule_info->primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str); EXPECT_EQ(rule_info->primary.priority, 1); EXPECT_EQ(rule_info->borrowing_num, 0); - long long rule_id2[] = {3}; - shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_id2, 1); + uuid_t rule_uuid2; + uuid_parse("00000000-0000-0000-0000-000000000003", rule_uuid2); + shaper_rules_update(&ctx->thread_ctx[0], &sf, &rule_uuid2, 1); EXPECT_EQ(sf.rule_num, 3); EXPECT_EQ(sf.dscp_enable, 1); EXPECT_EQ(sf.dscp_value, 10); rule_info = &sf.matched_rule_infos[2]; - EXPECT_EQ(rule_info->id, 3); - EXPECT_EQ(rule_info->primary.id, 3); + uuid_unparse(rule_info->uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str); + uuid_unparse(rule_info->primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str); EXPECT_EQ(rule_info->primary.priority, 1); EXPECT_EQ(rule_info->borrowing_num, 0); - long long rule_id3[] = {4}; - shaper_rules_update(&ctx->thread_ctx[0], &sf, rule_id3, 3); + uuid_t rule_uuid3; + uuid_parse("00000000-0000-0000-0000-000000000004", rule_uuid3); + shaper_rules_update(&ctx->thread_ctx[0], &sf, &rule_uuid3, 1); EXPECT_EQ(sf.rule_num, 4); EXPECT_EQ(sf.dscp_enable, 1); EXPECT_EQ(sf.dscp_value, 40); rule_info = &sf.matched_rule_infos[3]; - EXPECT_EQ(rule_info->id, 4); - EXPECT_EQ(rule_info->primary.id, 4); + uuid_unparse(rule_info->uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000004", uuid_str); + uuid_unparse(rule_info->primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000004", uuid_str); EXPECT_EQ(rule_info->primary.priority, 1); EXPECT_EQ(rule_info->borrowing_num, 0); shaping_engine_destroy(ctx); + stub_clear_resource(); } int main(int argc, char **argv) diff --git a/shaping/test/gtest_shaper_send_log.cpp b/shaping/test/gtest_shaper_send_log.cpp index effd7bf..897120d 100644 --- a/shaping/test/gtest_shaper_send_log.cpp +++ b/shaping/test/gtest_shaper_send_log.cpp @@ -56,19 +56,19 @@ static void gtest_shaper_log_parse(struct shaping_flow *sf, const char *data, si for (int i = 0; i < sf->rule_num; i++) { ASSERT_EQ(mpack_type_map, mpack_node_type(mpack_node_array_at(tmp_node, 
i)) ); tmp_rule_node = mpack_node_map_cstr(mpack_node_array_at(tmp_node, i), "rule_id"); - EXPECT_EQ(mpack_type_uint, mpack_node_type(tmp_rule_node)); - sf->matched_rule_infos[i].id = mpack_node_u64(tmp_rule_node); + EXPECT_EQ(mpack_type_bin, mpack_node_type(tmp_rule_node)); + uuid_copy(sf->matched_rule_infos[i].uuid, *(uuid_t*)mpack_node_bin_data(tmp_rule_node)); tmp_profile_node = mpack_node_map_cstr(mpack_node_array_at(tmp_node, i), "profile_ids"); ASSERT_EQ(mpack_type_array, mpack_node_type(tmp_profile_node)); int profile_array_len = mpack_node_array_length(tmp_profile_node); sf->matched_rule_infos[i].borrowing_num = profile_array_len - 1; for (int j = 0; j < profile_array_len; j++) { - ASSERT_EQ(mpack_type_uint, mpack_node_type(mpack_node_array_at(tmp_profile_node, j)) ); + ASSERT_EQ(mpack_type_bin, mpack_node_type(mpack_node_array_at(tmp_profile_node, j)) ); if (j == 0) { - sf->matched_rule_infos[i].primary.id = mpack_node_u64(mpack_node_array_at(tmp_profile_node, j)); + uuid_copy(sf->matched_rule_infos[i].primary.uuid, *(uuid_t*)mpack_node_bin_data(mpack_node_array_at(tmp_profile_node, j))); } else { - sf->matched_rule_infos[i].borrowing[j - 1].id = mpack_node_u64(mpack_node_array_at(tmp_profile_node, j)); + uuid_copy(sf->matched_rule_infos[i].borrowing[j - 1].uuid, *(uuid_t*)mpack_node_bin_data(mpack_node_array_at(tmp_profile_node, j))); } } } @@ -88,26 +88,26 @@ TEST(MPACK_LOG, PARSE) sf_in.ctrl_meta.session_id = 12345678; sf_in.rule_num = 3; - //rule_id 0, primary profile id 0, borrow profile id 1 - sf_in.matched_rule_infos[0].id = 0; - sf_in.matched_rule_infos[0].primary.id = 0; + //rule_id 1, primary profile id 1, borrow profile id 2 + uuid_parse("00000000-0000-0000-0000-000000000001", sf_in.matched_rule_infos[0].uuid); + uuid_parse("00000000-0000-0000-0000-000000000001", sf_in.matched_rule_infos[0].primary.uuid); sf_in.matched_rule_infos[0].borrowing_num = 1; - sf_in.matched_rule_infos[0].borrowing[0].id = 1; + uuid_parse("00000000-0000-0000-0000-000000000002", sf_in.matched_rule_infos[0].borrowing[0].uuid); - //rule id 1, primary profile id 2, borrow profile id 3,4 - sf_in.matched_rule_infos[1].id = 1; - sf_in.matched_rule_infos[1].primary.id = 2; + //rule id 2, primary profile id 3, borrow profile id 4,5 + uuid_parse("00000000-0000-0000-0000-000000000002", sf_in.matched_rule_infos[1].uuid); + uuid_parse("00000000-0000-0000-0000-000000000003", sf_in.matched_rule_infos[1].primary.uuid); sf_in.matched_rule_infos[1].borrowing_num = 2; - sf_in.matched_rule_infos[1].borrowing[0].id = 3; - sf_in.matched_rule_infos[1].borrowing[1].id = 4; + uuid_parse("00000000-0000-0000-0000-000000000004", sf_in.matched_rule_infos[1].borrowing[0].uuid); + uuid_parse("00000000-0000-0000-0000-000000000005", sf_in.matched_rule_infos[1].borrowing[1].uuid); - //rule id 2, primary profile id 5, borrow profile id 6,7,8 - sf_in.matched_rule_infos[2].id = 2; - sf_in.matched_rule_infos[2].primary.id = 5; + //rule id 3, primary profile id 6, borrow profile id 7,8,9 + uuid_parse("00000000-0000-0000-0000-000000000003", sf_in.matched_rule_infos[2].uuid); + uuid_parse("00000000-0000-0000-0000-000000000006", sf_in.matched_rule_infos[2].primary.uuid); sf_in.matched_rule_infos[2].borrowing_num = 3; - sf_in.matched_rule_infos[2].borrowing[0].id = 6; - sf_in.matched_rule_infos[2].borrowing[1].id = 7; - sf_in.matched_rule_infos[2].borrowing[2].id = 8; + uuid_parse("00000000-0000-0000-0000-000000000007", sf_in.matched_rule_infos[2].borrowing[0].uuid); + uuid_parse("00000000-0000-0000-0000-000000000008", 
sf_in.matched_rule_infos[2].borrowing[1].uuid); + uuid_parse("00000000-0000-0000-0000-000000000009", sf_in.matched_rule_infos[2].borrowing[2].uuid); shaper_session_log_prepare(&sf_in, &mpack_data, &mpack_size); gtest_shaper_log_parse(&sf_out, mpack_data, mpack_size); @@ -115,26 +115,39 @@ TEST(MPACK_LOG, PARSE) EXPECT_EQ(sf_out.ctrl_meta.session_id, 12345678); EXPECT_EQ(sf_out.rule_num, 3); - //rule_id 0, primary profile id 0, borrow profile id 1 - EXPECT_EQ(sf_out.matched_rule_infos[0].id, 0); - EXPECT_EQ(sf_out.matched_rule_infos[0].primary.id, 0); + //rule_id 1, primary profile id 1, borrow profile id 2 + char uuid_str[UUID_STR_LEN]; + uuid_unparse(sf_out.matched_rule_infos[0].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str); + uuid_unparse(sf_out.matched_rule_infos[0].primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000001", uuid_str); EXPECT_EQ(sf_out.matched_rule_infos[0].borrowing_num, 1); - EXPECT_EQ(sf_out.matched_rule_infos[0].borrowing[0].id, 1); - - //rule id 1, primary profile id 2, borrow profile id 3,4 - EXPECT_EQ(sf_out.matched_rule_infos[1].id, 1); - EXPECT_EQ(sf_out.matched_rule_infos[1].primary.id, 2); + uuid_unparse(sf_out.matched_rule_infos[0].borrowing[0].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str); + + //rule id 2, primary profile id 3, borrow profile id 4,5 + uuid_unparse(sf_out.matched_rule_infos[1].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000002", uuid_str); + uuid_unparse(sf_out.matched_rule_infos[1].primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str); EXPECT_EQ(sf_out.matched_rule_infos[1].borrowing_num, 2); - EXPECT_EQ(sf_out.matched_rule_infos[1].borrowing[0].id, 3); - EXPECT_EQ(sf_out.matched_rule_infos[1].borrowing[1].id, 4); - - //rule id 2, primary profile id 5, borrow profile id 6,7,8 - EXPECT_EQ(sf_out.matched_rule_infos[2].id, 2); - EXPECT_EQ(sf_out.matched_rule_infos[2].primary.id, 5); + uuid_unparse(sf_out.matched_rule_infos[1].borrowing[0].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000004", uuid_str); + uuid_unparse(sf_out.matched_rule_infos[1].borrowing[1].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000005", uuid_str); + + //rule id 3, primary profile id 6, borrow profile id 7,8,9 + uuid_unparse(sf_out.matched_rule_infos[2].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000003", uuid_str); + uuid_unparse(sf_out.matched_rule_infos[2].primary.uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000006", uuid_str); EXPECT_EQ(sf_out.matched_rule_infos[2].borrowing_num, 3); - EXPECT_EQ(sf_out.matched_rule_infos[2].borrowing[0].id, 6); - EXPECT_EQ(sf_out.matched_rule_infos[2].borrowing[1].id, 7); - EXPECT_EQ(sf_out.matched_rule_infos[2].borrowing[2].id, 8); + uuid_unparse(sf_out.matched_rule_infos[2].borrowing[0].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000007", uuid_str); + uuid_unparse(sf_out.matched_rule_infos[2].borrowing[1].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000008", uuid_str); + uuid_unparse(sf_out.matched_rule_infos[2].borrowing[2].uuid, uuid_str); + EXPECT_STREQ("00000000-0000-0000-0000-000000000009", uuid_str); if (mpack_data) { free(mpack_data); diff --git a/shaping/test/gtest_shaper_with_swarmkv.cpp b/shaping/test/gtest_shaper_with_swarmkv.cpp index 403aed2..bffe917 100644 --- a/shaping/test/gtest_shaper_with_swarmkv.cpp +++ b/shaping/test/gtest_shaper_with_swarmkv.cpp @@ -183,10 
+183,10 @@ TEST(generic_profile, single_session) struct stub_pkt_queue *actual_tx_queue; struct shaping_ctx *ctx = NULL; struct shaping_flow *sf = NULL; - long long rule_id[] = {0}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001"}; int priority[] = {1}; int profile_num[] = {1}; - int profile_id[][MAX_REF_PROFILE] = {{0}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{ "00000000-0000-0000-0000-000000000001"}}; struct cmd_exec_arg* reply_arg=NULL; char result[2048]={0}; @@ -196,17 +196,20 @@ TEST(generic_profile, single_session) sf = shaping_flow_new(&ctx->thread_ctx[0]); ASSERT_TRUE(sf != NULL); - stub_set_profile_limit_direction(0, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); - stub_set_matched_shaping_rules(1, rule_id, priority, profile_num, profile_id); - shaper_rules_update(&ctx->thread_ctx[0], sf, rule_id, 1); + stub_set_profile_limit_direction(profile_uuid_strs[0][0], PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_matched_shaping_rules(1, rule_uuid_strs, priority, profile_num, profile_uuid_strs); + + uuid_t rule_uuid; + uuid_parse(rule_uuid_strs[0], rule_uuid); + shaper_rules_update(&ctx->thread_ctx[0], sf, &rule_uuid, 1); //set swarmkv key swarmkv_cli_set_db("swarmkv-shaping-nodes"); reply_arg=cmd_exec_arg_new(); cmd_exec_arg_expect_OK(reply_arg); - swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "tcfg tsg-shaping-0-incoming 1000000 1000000"); - swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "tcfg tsg-shaping-0-outgoing 1000000 1000000"); + swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "tcfg tsg-shaping-00000000-0000-0000-0000-000000000001-incoming 1000000 1000000"); + swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "tcfg tsg-shaping-00000000-0000-0000-0000-000000000001-outgoing 1000000 1000000"); cmd_exec_arg_clear(reply_arg); actual_tx_queue = stub_get_tx_queue(); @@ -246,6 +249,7 @@ TEST(generic_profile, single_session) shaping_flow_free(&ctx->thread_ctx[0], sf); shaper_thread_resource_clear(); shaping_engine_destroy(ctx); + stub_clear_resource(); } TEST(fair_share_profile, two_members) @@ -254,12 +258,10 @@ TEST(fair_share_profile, two_members) struct shaping_ctx *ctx = NULL; struct shaping_flow *sf1 = NULL; struct shaping_flow *sf2 = NULL; - long long rule_id[] = {0, 1}; - long long rule_id1[] = {0}; - long long rule_id2[] = {1}; + const char *rule_uuid_strs[] = {"00000000-0000-0000-0000-000000000001", "00000000-0000-0000-0000-000000000002"}; int priority[] = {1, 1}; int profile_num[] = {1, 1}; - int profile_id[][MAX_REF_PROFILE] = {{0}, {0}}; + const char *profile_uuid_strs[][MAX_REF_PROFILE] = {{ "00000000-0000-0000-0000-000000000001"}, { "00000000-0000-0000-0000-000000000001"}}; struct cmd_exec_arg* reply_arg=NULL; char result[2048]={0}; @@ -271,20 +273,25 @@ TEST(fair_share_profile, two_members) sf2 = shaping_flow_new(&ctx->thread_ctx[0]); ASSERT_TRUE(sf2 != NULL); - stub_set_profile_limit_direction(0, PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); - stub_set_profile_type(0, PROFILE_TYPE_MAX_MIN_HOST_FAIRNESS); - stub_set_shaping_rule_fair_factor(0, 1); - stub_set_shaping_rule_fair_factor(1, 3); - stub_set_matched_shaping_rules(2, rule_id, priority, profile_num, profile_id); - shaper_rules_update(&ctx->thread_ctx[0], sf1, rule_id1, 1); - shaper_rules_update(&ctx->thread_ctx[0], sf2, rule_id2, 1); + stub_set_matched_shaping_rules(2, rule_uuid_strs, priority, profile_num, 
profile_uuid_strs); + stub_set_profile_limit_direction(profile_uuid_strs[0][0], PROFILE_LIMIT_DIRECTION_INCOMING_OUTGOING); + stub_set_profile_type(profile_uuid_strs[0][0], PROFILE_TYPE_MAX_MIN_HOST_FAIRNESS); + stub_set_shaping_rule_fair_factor(rule_uuid_strs[0], 1); + stub_set_shaping_rule_fair_factor(rule_uuid_strs[1], 3); + + uuid_t rule_uuid1; + uuid_t rule_uuid2; + uuid_parse(rule_uuid_strs[0], rule_uuid1); + uuid_parse(rule_uuid_strs[1], rule_uuid2); + shaper_rules_update(&ctx->thread_ctx[0], sf1, &rule_uuid1, 1); + shaper_rules_update(&ctx->thread_ctx[0], sf2, &rule_uuid2, 1); sf1->src_ip_str = (char *)calloc(1, 16); - sf1->src_ip_str_len = strlen(sf1->src_ip_str); + sf1->src_ip_str_len = strlen("1.1.1.1"); memcpy(sf1->src_ip_str, "1.1.1.1", sf1->src_ip_str_len); sf2->src_ip_str = (char *)calloc(1, 16); - sf2->src_ip_str_len = strlen(sf2->src_ip_str); + sf2->src_ip_str_len = strlen("2.2.2.2"); memcpy(sf2->src_ip_str, "2.2.2.2", sf2->src_ip_str_len); //set swarmkv key @@ -292,8 +299,8 @@ TEST(fair_share_profile, two_members) reply_arg=cmd_exec_arg_new(); cmd_exec_arg_expect_OK(reply_arg); - swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "ftcfg tsg-shaping-0-incoming 1000000 1000000 256"); - swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "ftcfg tsg-shaping-0-outgoing 1000000 1000000 256"); + swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "ftcfg tsg-shaping-00000000-0000-0000-0000-000000000001-incoming 1000000 1000000 256"); + swarmkv_cli_system_cmd(reply_arg, result, sizeof(result), swarmkv_expect_reply_string, "ftcfg tsg-shaping-00000000-0000-0000-0000-000000000001-outgoing 1000000 1000000 256"); cmd_exec_arg_clear(reply_arg); actual_tx_queue = stub_get_tx_queue(); diff --git a/shaping/test/stub.cpp b/shaping/test/stub.cpp index 24b3ac3..8e9140f 100644 --- a/shaping/test/stub.cpp +++ b/shaping/test/stub.cpp @@ -13,78 +13,126 @@ #include "shaper.h" #include "shaper_stat.h" #include "stub.h" -#include "shaper_maat.h" #include "log.h" +#include "uthash.h" #define MAX_STUB_TEST_SESSION_NUM 2 -#define MAX_STUB_RULE_NUM 8 -#define MAX_STUB_PROFILE_NUM 8 - -struct stub_matched_rules { - struct shaping_rule rules[MAX_STUB_RULE_NUM]; - int rule_num; -}; - struct stub_pkt_queue tx_queue; -struct stub_matched_rules matched_rules; -struct shaping_profile pf_array[MAX_STUB_PROFILE_NUM]; +struct stub_matched_rule *rules_hash = NULL; +struct stub_shaping_profile *profiles_hash = NULL; -void stub_set_profile_type(int profile_id, enum shaping_profile_type type) +void stub_set_profile_type(const char *profile_uuid_str, enum shaping_profile_type type) { - pf_array[profile_id].type = type; + uuid_t profile_uuid; + struct stub_shaping_profile *stub_profile = NULL; + + uuid_parse(profile_uuid_str, profile_uuid); + HASH_FIND(hh, profiles_hash, profile_uuid, sizeof(uuid_t), stub_profile); + if (!stub_profile) { + stub_profile = (struct stub_shaping_profile*)calloc(1, sizeof(struct stub_shaping_profile)); + uuid_copy(stub_profile->profile.uuid, profile_uuid); + HASH_ADD(hh, profiles_hash, profile.uuid, sizeof(uuid_t), stub_profile); + } + + stub_profile->profile.type = type; + return; } -void stub_set_profile_limit_direction(int profile_id, enum shaping_profile_limit_direction limit_direction) +void stub_set_profile_limit_direction(const char *profile_uuid_str, enum shaping_profile_limit_direction limit_direction) { - pf_array[profile_id].limit_direction = limit_direction; + uuid_t 
profile_uuid;
+    struct stub_shaping_profile *stub_profile = NULL;
+
+    uuid_parse(profile_uuid_str, profile_uuid);
+    HASH_FIND(hh, profiles_hash, profile_uuid, sizeof(uuid_t), stub_profile);
+    if (!stub_profile) {
+        stub_profile = (struct stub_shaping_profile*)calloc(1, sizeof(struct stub_shaping_profile));
+        uuid_copy(stub_profile->profile.uuid, profile_uuid);
+        HASH_ADD(hh, profiles_hash, profile.uuid, sizeof(uuid_t), stub_profile);
+    }
+
+    stub_profile->profile.limit_direction = limit_direction;
+
     return;
 }
 
-void stub_set_matched_shaping_rules(int rule_num, long long *rule_id, const int *priority, const int *profile_num, int profile_id[][MAX_REF_PROFILE])
+void stub_set_matched_shaping_rules(int rule_num, const char *rule_uuid_str[], const int *priority, const int *profile_num, const char *profile_uuid_str[][MAX_REF_PROFILE])
 {
-    struct shaping_rule *rules;
     int i, j;
-    int id;
-
-    rules = matched_rules.rules;
 
     for (i = 0; i < rule_num; i++) {
-        id = rule_id[i];
-        assert(id < MAX_STUB_RULE_NUM);
-
-        rules[id].vsys_id = STUB_TEST_VSYS_ID;
-        rules[id].id = id;
-        rules[id].primary_pf_id = profile_id[i][0];
-        rules[id].borrow_pf_num = profile_num[i] - 1;
-        rules[id].priority = priority[i];
+        struct stub_matched_rule *stub_matched_rule = NULL;
+        uuid_t rule_uuid;
+        uuid_parse(rule_uuid_str[i], rule_uuid);
+
+        HASH_FIND(hh, rules_hash, rule_uuid, sizeof(uuid_t), stub_matched_rule);
+        if (stub_matched_rule) {
+            continue;
+        }
+
+        stub_matched_rule = (struct stub_matched_rule*)calloc(1, sizeof(struct stub_matched_rule));
+
+        stub_matched_rule->rule.vsys_id = STUB_TEST_VSYS_ID;
+        uuid_copy(stub_matched_rule->rule.uuid, rule_uuid);
+        uuid_parse(profile_uuid_str[i][0], stub_matched_rule->rule.primary_pf_uuid);
+        stub_matched_rule->rule.borrow_pf_num = profile_num[i] - 1;
+        stub_matched_rule->rule.priority = priority[i];
 
         for (j = 1; j < profile_num[i]; j++) {
-            rules[id].borrow_pf_id_array[j - 1] = profile_id[i][j];
+            uuid_parse(profile_uuid_str[i][j], stub_matched_rule->rule.borrow_pf_uuid_array[j - 1]);
         }
-    }
 
-    matched_rules.rule_num = rule_num;
+        HASH_ADD(hh, rules_hash, rule.uuid, sizeof(uuid_t), stub_matched_rule);
+    }
 
     return;
 }
 
-void stub_set_shaping_rule_dscp_value(int rule_id, int dscp_value)
+void stub_set_shaping_rule_dscp_value(const char *rule_uuid_str, int dscp_value)
 {
-    matched_rules.rules[rule_id].dscp_enable = 1;
-    matched_rules.rules[rule_id].dscp_value = dscp_value;
+    uuid_t rule_uuid;
+    struct stub_matched_rule *stub_matched_rule = NULL;
+
+    uuid_parse(rule_uuid_str, rule_uuid);
+    HASH_FIND(hh, rules_hash, rule_uuid, sizeof(uuid_t), stub_matched_rule);
+    if (stub_matched_rule) {
+        stub_matched_rule->rule.dscp_enable = 1;
+        stub_matched_rule->rule.dscp_value = dscp_value;
+    }
+
     return;
 }
 
-void stub_set_shaping_rule_fair_factor(int rule_id, int fair_factor)
+void stub_set_shaping_rule_fair_factor(const char *rule_uuid_str, int fair_factor)
 {
-    matched_rules.rules[rule_id].fair_factor = fair_factor;
+    uuid_t rule_uuid;
+    struct stub_matched_rule *stub_matched_rule = NULL;
+
+    uuid_parse(rule_uuid_str, rule_uuid);
+    HASH_FIND(hh, rules_hash, rule_uuid, sizeof(uuid_t), stub_matched_rule);
+    if (stub_matched_rule) {
+        stub_matched_rule->rule.fair_factor = fair_factor;
+    }
+
     return;
 }
 
-void stub_clear_matched_shaping_rules()
+void stub_clear_resource()
 {
-    memset(&matched_rules, 0, sizeof(struct stub_matched_rules));
+    struct stub_matched_rule *stub_matched_rule, *tmp = NULL;
+
+    HASH_ITER(hh, rules_hash, stub_matched_rule, tmp) {
+        HASH_DEL(rules_hash, stub_matched_rule);
+        free(stub_matched_rule);
+    }
+
+    struct stub_shaping_profile *stub_profile, *tmp_profile = NULL;
+
+    HASH_ITER(hh, profiles_hash, stub_profile, tmp_profile) {
+        HASH_DEL(profiles_hash, stub_profile);
+        free(stub_profile);
+    }
 
     return;
 }
 
@@ -114,7 +162,6 @@ void stub_init()
     LOG_INIT("./conf/zlog.conf");
 
     TAILQ_INIT(&tx_queue);
-    memset(&matched_rules, 0, sizeof(struct stub_matched_rules));
 
     return;
 }
@@ -160,15 +207,6 @@ void maat_options_free(struct maat_options *opts)
     return;
 }
 
-int maat_get_table_id(struct maat *instance, const char *table_name)
-{
-    if (strcmp(table_name, "TRAFFIC_SHAPING_COMPILE") == 0) {
-        return STUB_MAAT_SHAPING_RULE_TABLE_ID;
-    } else {
-        return STUB_MAAT_SHAPING_PROFILE_TABLE_ID;
-    }
-}
-
 int maat_plugin_table_ex_schema_register(struct maat *instance, const char *table_name,
                                          maat_ex_new_func_t *new_func,
                                          maat_ex_free_func_t *free_func,
@@ -186,17 +224,21 @@ void maat_free(struct maat *instance)
     return;
 }
 
-void *maat_plugin_table_get_ex_data(struct maat *instance, int table_id, const char *key, size_t key_len)
+void *maat_plugin_table_get_ex_data(struct maat *instance, const char *table_name, const char *key, size_t key_len)
 {
-    int rule_id;
-    int profile_id;
+    uuid_t rule_uuid;
+    uuid_t profile_uuid;
 
-    if (table_id == STUB_MAAT_SHAPING_RULE_TABLE_ID) {
-        rule_id = *(int*)key;
-        return &matched_rules.rules[rule_id];
+    if (strcmp(table_name, "TRAFFIC_SHAPING_RULE") == 0) {
+        struct stub_matched_rule *matched_rule = NULL;
+        uuid_parse(key, rule_uuid);
+        HASH_FIND(hh, rules_hash, rule_uuid, sizeof(uuid_t), matched_rule);
+        return &matched_rule->rule;
     } else {
-        profile_id = atoi(key);
-        return &pf_array[profile_id];
+        struct stub_shaping_profile *stub_profile = NULL;
+        uuid_parse(key, profile_uuid);
+        HASH_FIND(hh, profiles_hash, profile_uuid, sizeof(uuid_t), stub_profile);
+        return &stub_profile->profile;
     }
 }
 /**********************************************/
diff --git a/shaping/test/stub.h b/shaping/test/stub.h
index 7581a98..e5c73b5 100644
--- a/shaping/test/stub.h
+++ b/shaping/test/stub.h
@@ -1,19 +1,36 @@
 #include <sys/queue.h>
 
 #include "shaper.h"
+#include "shaper_maat.h"
 
 #define OUT_ARG
 #define MAX_REF_PROFILE 8
 #define STUB_APP_STATE_HOLD_PACKET 0x04
 
-#define STUB_MAAT_SHAPING_RULE_TABLE_ID 0
-#define STUB_MAAT_SHAPING_PROFILE_TABLE_ID 1
-
 #define STUB_TIME_INC_FOR_PACKET 1000000
 #define STUB_TIME_INC_FOR_HMGET 10000000
 
 #define STUB_TEST_VSYS_ID 2333
 
+#define AVALIABLE_TOKEN_UNLIMITED -1
+
+struct stub_matched_rule {
+    UT_hash_handle hh;
+    struct shaping_rule rule;
+};
+
+struct stub_avaliable_token {
+    int in_limit_bandwidth;
+    int out_limit_bandwidth;
+    int bidirection_limit_bandwidth;
+};
+
+struct stub_shaping_profile {
+    struct shaping_profile profile;
+    struct stub_avaliable_token avaliable_token;
+    UT_hash_handle hh;
+};
+
 struct stub_packet {
     unsigned char direction;
     unsigned char pure_control;
@@ -28,16 +45,16 @@ struct stub_packet_node {
 
 TAILQ_HEAD(stub_pkt_queue, stub_packet_node);
 
-void stub_set_token_bucket_avl_per_sec(int profile_id, unsigned int tokens, unsigned char direction, enum shaping_profile_limit_direction limit_direction);
-void stub_refresh_token_bucket(int profile_id);
-void stub_set_profile_type(int profile_id, enum shaping_profile_type type);
-void stub_set_profile_limit_direction(int profile_id, enum shaping_profile_limit_direction limit_direction);
-void stub_set_async_token_get_times(int profile_id, int times);
+void stub_set_token_bucket_avl_per_sec(const char *profile_uuid_str, unsigned int tokens, unsigned char direction, enum shaping_profile_limit_direction limit_direction);
+void stub_refresh_token_bucket(const char *profile_uuid_str);
+void stub_set_profile_type(const char *profile_uuid_str, enum shaping_profile_type type);
+void stub_set_profile_limit_direction(const char *profile_uuid_str, enum shaping_profile_limit_direction limit_direction);
 
-void stub_set_matched_shaping_rules(int rule_num, long long *rule_id, const int *priority, const int *profile_num, int profile_id[][MAX_REF_PROFILE]);
-void stub_set_shaping_rule_dscp_value(int rule_id, int dscp_value);
-void stub_set_shaping_rule_fair_factor(int rule_id, int fair_factor);
-void stub_clear_matched_shaping_rules();
+void stub_set_matched_shaping_rules(int rule_num, const char *rule_uuid_str[], const int *priority, const int *profile_num, const char *profile_uuid_str[][MAX_REF_PROFILE]);
+void stub_set_shaping_rule_dscp_value(const char *rule_uuid_str, int dscp_value);
+void stub_set_shaping_rule_fair_factor(const char *rule_uuid_str, int fair_factor);
+void stub_clear_resource();
+void stub_swarmkv_clear_resource();
 
 void stub_send_packet(struct stub_packet *packet);
 struct stub_pkt_queue* stub_get_tx_queue();
@@ -49,7 +66,6 @@ void stub_curr_time_s_inc(int time_s);
 unsigned long long stub_curr_time_ns_get();
 
 void stub_init();
-void dummy_swarmkv_init();
 
 /*******************temporary for test******************************/
 void stub_shaper_stat_send(int thread_seq);
diff --git a/shaping/test/test_conf/shaping_maat.json b/shaping/test/test_conf/shaping_maat.json
index 9d518f9..d8efe2e 100644
--- a/shaping/test/test_conf/shaping_maat.json
+++ b/shaping/test/test_conf/shaping_maat.json
@@ -1,75 +1,72 @@
 {
- "compile_table": "TRAFFIC_SHAPING_COMPILE",
- "group2compile_table": "GROUP_SHAPING_COMPILE_RELATION",
- "group2group_table": "GROUP_GROUP_RELATION",
+ "compile_table": "TRAFFIC_SHAPING_RULE",
"rules": [
{
- "compile_id": 182,
+ "uuid": "00000000-0000-0000-0000-000000000182",
"service": 2,
"action": 32,
"do_blacklist": 0,
"do_log": 1,
"effective_rage": 0,
- "user_region": "{\"priority\":1,\"fair_factor\":10,\"profile_chain\":[1]}",
+ "action_parameter": {
+ "priority": 1,
+ "fair_factor": 10,
+ "dscp_marking": {
+ "enabled": 1,
+ "dscp_type": "Assured Forwarding (AF)",
+ "dscp_name": "af11",
+ "dscp_value": 10
+ },
+ "profile_chain": [
+ "00000000-0000-0000-0000-000000000001"
+ ]
+ },
"is_valid": "yes",
- "groups": [
- {
- "group_name":"OBJ_SRC_IP_ADDR",
- "virtual_table":"TSG_SECURITY_SOURCE_ADDR",
- "not_flag" : 0,
- "regions": [
- {
- "table_name": "TSG_OBJ_IP_ADDR",
- "table_type": "ip_plus",
- "table_content": {
- "saddr_format": "range",
- "addr_type": "ipv4",
- "src_ip1": "192.168.50.67",
- "src_ip2": "192.168.50.67",
- "sport_format": "range",
- "src_port1": "0",
- "src_port2": "65535",
- "protocol": 0,
- "direction": "double"
- }
- }
- ]
- },
- {
- "group_name":"OBJ_DST_IP_ADDR",
- "virtual_table":"TSG_SECURITY_DESTINATION_ADDR",
- "not_flag" : 0,
- "regions": [
- {
- "table_name": "TSG_OBJ_IP_ADDR",
- "table_type": "ip_plus",
- "table_content": {
- "saddr_format": "range",
- "addr_type": "ipv4",
- "src_ip1": "192.168.42.43",
- "src_ip2": "192.168.42.43",
- "sport_format": "range",
- "src_port1": "5678",
- "src_port2": "5678",
- "protocol": 0,
- "direction": "double"
- }
- }
- ]
- }
-
- ]
+ "and_conditions": [
+ {
+ "attribute_name": "ATTRIBUTE_IP_PLUS_SOURCE",
+ "objects": [
+ {
+ "object_name": "ExcludeLogicObject203_1",
+ "uuid": "00000000-0000-0000-0000-000000000198",
+ "items": [
+ {
+ "table_name": "IP_PLUS_CONFIG",
+ "table_type": "ip",
+ "table_content": {
+ "ip": "192.168.50.43-192.168.50.43"
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "attribute_name": "ATTRIBUTE_IP_PLUS_DESTINATION",
+ "objects": [
+ {
+ "object_name": "ExcludeLogicObject203_2",
+ "uuid": "00000000-0000-0000-0000-000000000199",
+ "items": [
+ {
+ "table_name": "IP_PLUS_CONFIG",
+ "table_type": "ip",
+ "table_content": {
+ "ip": "47.92.108.93-47.92.108.93"
+ }
+ }
+ ]
+ }
+ ]
+ }
+ ]
}
],
"plugin_table": [
{
"table_name": "TRAFFIC_SHAPING_PROFILE",
"table_content": [
- "1\t{\"value\":\"local_host\",\"host_fairness\":1}\t[{\"direction\":\"incoming\",\"bandwidth\":102400},{\"direction\":\"outcoming\",\"bandwidth\":102400}]\t{\"enabled\":1,\"algorithm\":\"codel\"}\tnull\t{}\t1",
- "3\t{\"value\":\"local_host\",\"host_fairness\":1}\t[{\"direction\":\"incoming\",\"bandwidth\":10240},{\"direction\":\"outcoming\",\"bandwidth\":10240}]\t{\"enabled\":1,\"algorithm\":\"codel\"}\tnull\t{}\t1",
- "4\t{\"value\":\"local_host\",\"host_fairness\":1}\t[{\"direction\":\"incoming\",\"bandwidth\":10240},{\"direction\":\"outcoming\",\"bandwidth\":10240}]\t{\"enabled\":1,\"algorithm\":\"codel\"}\tnull\t{}\t1",
- "5\t{\"value\":\"local_host\",\"host_fairness\":1}\t[{\"direction\":\"incoming\",\"bandwidth\":10240},{\"direction\":\"outcoming\",\"bandwidth\":10240}]\t{\"enabled\":1,\"algorithm\":\"codel\"}\tnull\t{}\t1",
- "6\t{\"value\":\"local_host\",\"host_fairness\":1}\t[{\"direction\":\"incoming\",\"bandwidth\":10240},{\"direction\":\"outcoming\",\"bandwidth\":10240}]\t{\"enabled\":1,\"algorithm\":\"codel\"}\tnull\t{}\t1"
+ {"uuid":"00000000-0000-0000-0000-000000000001", "type": "generic", "type_argument": "none", "limits": [{"direction":"incoming","bandwidth":102400},{"direction":"outcoming","bandwidth":102400}], "aqm_options": {"algorithm":"codel"}, "is_valid":"yes"}
]
}
]
diff --git a/shaping/test/test_conf/table_info.json b/shaping/test/test_conf/table_info.json
index 692f4f6..2d98ad4 100644
--- a/shaping/test/test_conf/table_info.json
+++ b/shaping/test/test_conf/table_info.json
@@ -1,23 +1,22 @@
 [
     {
         "table_id": 0,
-        "table_name": "TRAFFIC_SHAPING_COMPILE",
+        "table_name": "TRAFFIC_SHAPING_RULE",
         "table_type": "plugin",
-        "valid_column": 8,
         "custom": {
-            "key": 1,
-            "key_type": "integer",
-            "key_len": 8
+            "gc_timeout_s":2,
+            "key_name": "uuid",
+            "key_type": "uuid"
         }
     },
     {
         "table_id": 1,
         "table_name": "TRAFFIC_SHAPING_PROFILE",
        "table_type": "plugin",
-        "valid_column": 8,
         "custom": {
-            "key": 1,
-            "key_type": "pointer"
+            "gc_timeout_s":2,
+            "key_name": "uuid",
+            "key_type": "uuid"
         }
     }
 ]
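For reference, a minimal standalone sketch of the uuid-keyed uthash pattern the reworked stubs rely on: a raw uuid_t embedded in the struct is used as the hash key with sizeof(uuid_t) as the key length, and uuid strings are converted with uuid_parse before lookup. It assumes uthash.h and libuuid are available; the names demo_rule, demo_add_rule and rules_hash are illustrative only, not part of this tree.

/* uuid-keyed uthash sketch; names are hypothetical, pattern mirrors the stubs above */
#include <stdio.h>
#include <stdlib.h>
#include <uuid/uuid.h>
#include "uthash.h"

struct demo_rule {
    uuid_t uuid;            /* key: raw 16-byte uuid, not the string form */
    int priority;
    UT_hash_handle hh;      /* makes the struct hashable by uthash */
};

static struct demo_rule *rules_hash = NULL;

static void demo_add_rule(const char *uuid_str, int priority)
{
    uuid_t uuid;
    struct demo_rule *rule = NULL;

    if (uuid_parse(uuid_str, uuid) != 0)
        return;                                   /* malformed uuid string */

    HASH_FIND(hh, rules_hash, uuid, sizeof(uuid_t), rule);
    if (rule)
        return;                                   /* already present */

    rule = calloc(1, sizeof(*rule));
    uuid_copy(rule->uuid, uuid);
    rule->priority = priority;
    HASH_ADD(hh, rules_hash, uuid, sizeof(uuid_t), rule);
}

int main(void)
{
    demo_add_rule("00000000-0000-0000-0000-000000000182", 1);

    uuid_t key;
    struct demo_rule *rule = NULL;
    uuid_parse("00000000-0000-0000-0000-000000000182", key);
    HASH_FIND(hh, rules_hash, key, sizeof(uuid_t), rule);
    if (rule)
        printf("found rule, priority=%d\n", rule->priority);
    return 0;
}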
