From f234a3888717e448c3b9821ea82d44d16dbaee78 Mon Sep 17 00:00:00 2001
From: root
Date: Tue, 16 Jul 2024 02:07:45 +0000
Subject: add temp code

---
 shaping/src/shaper.cpp                     |   3 +-
 shaping/test/CMakeLists.txt                |   6 +-
 shaping/test/dummy_swarmkv.cpp             | 374 +++++++++++++++++++++++++++
 shaping/test/dummy_time.cpp                |  35 +++
 shaping/test/gtest_shaper.cpp              |   2 -
 shaping/test/gtest_shaper_with_swarmkv.cpp |  11 +
 shaping/test/stub.cpp                      | 398 +----------------------
 shaping/test/stub.h                        |   1 +
 8 files changed, 428 insertions(+), 402 deletions(-)
 create mode 100644 shaping/test/dummy_swarmkv.cpp
 create mode 100644 shaping/test/dummy_time.cpp
 create mode 100644 shaping/test/gtest_shaper_with_swarmkv.cpp

diff --git a/shaping/src/shaper.cpp b/shaping/src/shaper.cpp
index 89e5d1b..7938acf 100644
--- a/shaping/src/shaper.cpp
+++ b/shaping/src/shaper.cpp
@@ -676,7 +676,7 @@ static void shaper_token_get_from_profile(struct shaping_thread_ctx *ctx, struct
     case PROFILE_TYPE_MAX_MIN_HOST_FAIRNESS:
         swarmkv_ftconsume(ctx->swarmkv_db, key, strlen(key), sf->src_ip_str, sf->src_ip_str_len, sf->matched_rule_infos[sf->anchor[dir]].fair_factor, req_token_bits, shaper_token_get_cb, arg);
         //TODO: ftconsume with flexiable
-        //swarmkv_async_command(ctx->swarmkv_db, shaper_token_get_cb, arg, "FTCONSUME %s %s %d %d %s", key, sf->src_ip_str, sf->matched_rule_infos[sf->anchor].fair_factor, req_token_bits, "FLEXIBLE");
+        //swarmkv_async_command(ctx->swarmkv_db, shaper_token_get_cb, arg, "FTCONSUME %s %s %d %d %s", key, sf->src_ip_str, sf->matched_rule_infos[sf->anchor[dir]].fair_factor, req_token_bits, "FLEXIBLE");
         break;
     case PROFILE_TYPE_SPLIT_BY_LOCAL_HOST:
         swarmkv_btconsume(ctx->swarmkv_db, key, strlen(key), sf->src_ip_str, sf->src_ip_str_len, req_token_bits * TOKEN_MULTIPLE_DEFAULT, shaper_token_get_cb, arg);
@@ -813,6 +813,7 @@ void shaper_profile_hash_node_set(struct shaping_thread_ctx *ctx, struct shaping
 static int shaping_swarmkv_is_too_short_interval(long long curr_time_ms, struct shaping_profile_info *profile, unsigned char direction)
 {
     long long last_failed_ms = 0;
+    return 0;
 
     switch (profile->type) {
     case PROFILE_TYPE_GENERIC:
diff --git a/shaping/test/CMakeLists.txt b/shaping/test/CMakeLists.txt
index 1c75bd6..643c222 100644
--- a/shaping/test/CMakeLists.txt
+++ b/shaping/test/CMakeLists.txt
@@ -3,7 +3,7 @@
 # gtest_shaper_maat
 ###############################################################################
 
-add_executable(gtest_shaper_maat gtest_shaper_maat.cpp stub.cpp)
+add_executable(gtest_shaper_maat gtest_shaper_maat.cpp stub.cpp dummy_swarmkv.cpp)
 target_include_directories(gtest_shaper_maat PUBLIC ${CMAKE_SOURCE_DIR}/common/include)
 target_include_directories(gtest_shaper_maat PUBLIC ${CMAKE_SOURCE_DIR}/shaping/include)
 target_link_libraries(gtest_shaper_maat common shaper pthread gtest)
@@ -12,7 +12,7 @@ target_link_libraries(gtest_shaper_maat common shaper pthread gtest)
 # gtest_shaper_maat
 ###############################################################################
 
-add_executable(gtest_shaper_send_log gtest_shaper_send_log.cpp stub.cpp)
+add_executable(gtest_shaper_send_log gtest_shaper_send_log.cpp stub.cpp dummy_swarmkv.cpp)
 target_include_directories(gtest_shaper_send_log PUBLIC ${CMAKE_SOURCE_DIR}/common/include)
 target_include_directories(gtest_shaper_send_log PUBLIC ${CMAKE_SOURCE_DIR}/shaping/include)
 target_link_libraries(gtest_shaper_send_log common shaper pthread gtest)
@@ -21,7 +21,7 @@ target_link_libraries(gtest_shaper_send_log common shaper pthread gtest)
 # gtest_shaper
 ###############################################################################
 
-add_executable(gtest_shaper gtest_shaper.cpp stub.cpp)
+add_executable(gtest_shaper gtest_shaper.cpp stub.cpp dummy_swarmkv.cpp dummy_time.cpp)
 target_include_directories(gtest_shaper PUBLIC ${CMAKE_SOURCE_DIR}/common/include)
 target_include_directories(gtest_shaper PUBLIC ${CMAKE_SOURCE_DIR}/shaping/include)
 target_link_libraries(gtest_shaper common shaper pthread gtest)
diff --git a/shaping/test/dummy_swarmkv.cpp b/shaping/test/dummy_swarmkv.cpp
new file mode 100644
index 0000000..c86e524
--- /dev/null
+++ b/shaping/test/dummy_swarmkv.cpp
@@ -0,0 +1,374 @@
+#include
+#include
+#include
+#include
+
+#include "shaper.h"
+#include "shaper_maat.h"
+#include "stub.h"
+
+using namespace std;
+
+#define MAX_STUB_RULE_NUM 8
+#define MAX_STUB_PROFILE_NUM 8
+
+#define DEFAULT_AVALIABLE_TOKEN_PER_SEC -1
+
+struct stub_token_thread_arg {
+    int profile_id;
+    struct swarmkv_reply reply;
+    swarmkv_on_reply_callback_t *cb;
+    void *cb_arg;
+};
+
+struct stub_avaliable_token {
+    int in_limit_bandwidth;
+    int out_limit_bandwidth;
+    int bidirection_limit_bandwidth;
+};
+
+static int profile_priority_len[MAX_STUB_PROFILE_NUM][SHAPING_PRIORITY_NUM_MAX][SHAPING_DIR_MAX];
+static struct stub_avaliable_token pf_curr_avl_token[MAX_STUB_PROFILE_NUM];
+static int pf_async_times[MAX_STUB_PROFILE_NUM];
+vector<struct stub_token_thread_arg> pf_async_thread[MAX_STUB_PROFILE_NUM];
+struct shaping_profile pf_array[MAX_STUB_PROFILE_NUM];
+
+void init_dummy_swarmkv()
+{
+    memset(&pf_array, 0, MAX_STUB_PROFILE_NUM * sizeof(struct shaping_profile));
+    memset(&profile_priority_len, 0, MAX_STUB_PROFILE_NUM * SHAPING_PRIORITY_NUM_MAX * SHAPING_DIR_MAX * sizeof(int));
+
+    for (int i = 0; i < MAX_STUB_PROFILE_NUM; i++) {
+        pf_curr_avl_token[i].in_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC;
+        pf_curr_avl_token[i].out_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC;
+        pf_curr_avl_token[i].bidirection_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC;
+        pf_array[i].id = i;
+        pf_array[i].in_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC;
+        pf_array[i].out_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC;
+        pf_async_times[i] = 0;
+        memset(profile_priority_len[i], 0, 10 * sizeof(int));
+    }
+}
+
+void * stub_get_token_thread_func(void *data)
+{
+    struct stub_token_thread_arg *thread_arg;
+
+    thread_arg = (struct stub_token_thread_arg*)data;
+
+    thread_arg->cb(&thread_arg->reply, thread_arg->cb_arg);
+
+    return NULL;
+}
+
+void stub_set_token_bucket_avl_per_sec(int profile_id, unsigned int tokens, unsigned char direction, enum shaping_profile_limit_direction limit_direction)
+{
+    pf_array[profile_id].limit_direction = limit_direction;
+
+    if (limit_direction == PROFILE_LIMIT_DIRECTION_BIDIRECTION) {
+        pf_array[profile_id].bidirection_limit_bandwidth = tokens * 8;
+        pf_curr_avl_token[profile_id].bidirection_limit_bandwidth = tokens * 8;
+    } else {
+        if (direction == SHAPING_DIR_IN) {
+            pf_array[profile_id].in_limit_bandwidth = tokens * 8;
+            pf_curr_avl_token[profile_id].in_limit_bandwidth = tokens * 8;
+        } else {
+            pf_array[profile_id].out_limit_bandwidth = tokens * 8;
+            pf_curr_avl_token[profile_id].out_limit_bandwidth = tokens * 8;
+        }
+    }
+
+    return;
+}
+
+void stub_refresh_token_bucket(int profile_id)
+{
+    pf_curr_avl_token[profile_id].bidirection_limit_bandwidth = pf_array[profile_id].bidirection_limit_bandwidth;
+    pf_curr_avl_token[profile_id].in_limit_bandwidth = pf_array[profile_id].in_limit_bandwidth;
+    pf_curr_avl_token[profile_id].out_limit_bandwidth = pf_array[profile_id].out_limit_bandwidth;
+    return;
+}
+
+void stub_set_async_token_get_times(int profile_id, int times)
+{
+    pf_async_times[profile_id] = times;
+
+    if (pf_async_times[profile_id] == 0) {
+        for (unsigned int i = 0; i < pf_async_thread[profile_id].size(); i++) {
+            stub_get_token_thread_func(&pf_async_thread[profile_id][i]);
+        }
+        pf_async_thread[profile_id].clear();
+    }
+
+    return;
+}
+
+/**************stub of swarmkv*****************/
+void swarmkv_register_thread(struct swarmkv *db)
+{
+    return;
+}
+
+struct swarmkv_options* swarmkv_options_new(void)
+{
+    return NULL;
+}
+
+int swarmkv_options_set_logger(struct swarmkv_options *opts, void *logger)
+{
+    return 0;
+}
+
+int swarmkv_options_set_bind_address(struct swarmkv_options *opts, const char* ip_addr)
+{
+    return 0;
+}
+
+int swarmkv_options_set_cluster_port(struct swarmkv_options *opts, unsigned int cluster_port)
+{
+    return 0;
+}
+
+int swarmkv_options_set_consul_port(struct swarmkv_options *opts, unsigned int consul_port)
+{
+    return 0;
+}
+
+int swarmkv_options_set_consul_host(struct swarmkv_options *opts, const char* ip_addr)
+{
+    return 0;
+}
+
+int swarmkv_options_set_cluster_announce_ip(struct swarmkv_options *opts, const char *ip_addr)
+{
+    return 0;
+}
+
+int swarmkv_options_set_cluster_announce_port(struct swarmkv_options *opts, unsigned int cluster_announce_port)
+{
+    return 0;
+}
+
+int swarmkv_options_set_health_check_port(struct swarmkv_options *opts, unsigned int health_check_port)
+{
+    return 0;
+}
+
+int swarmkv_options_set_health_check_announce_port(struct swarmkv_options *opts, unsigned int health_check_announce_port)
+{
+    return 0;
+}
+
+struct swarmkv *swarmkv_open(struct swarmkv_options *opts, const char * cluster_name, char **err)
+{
+    struct swarmkv *db;
+
+    db = (struct swarmkv *)calloc(1, 1);
+
+    return db;
+}
+
+long long swarmkv_caller_get_pending_commands(struct swarmkv *db)
+{
+    return 0;
+}
+
+int swarmkv_options_set_log_path(struct swarmkv_options *opts, const char *logpath)
+{
+    return 0;
+}
+
+int swarmkv_options_set_log_level(struct swarmkv_options *opts, int loglevel)
+{
+    return 0;
+}
+
+static void swarmkv_hincrby_cmd_func(char *cmd_str, swarmkv_on_reply_callback_t * cb, void *cb_arg)
+{
+    int profile_id;
+    int priority;
+    int value;
+    char direction[5] = {0};
+    enum shaping_packet_dir dir;
+    struct swarmkv_reply *reply = (struct swarmkv_reply*)calloc(1, sizeof(struct swarmkv_reply));
+
+    sscanf(cmd_str, "HINCRBY tsg-shaping-%d priority-%d-%s %d", &profile_id, &priority, direction, &value);
+    if (strncmp(direction, "in", 2) == 0) {
+        dir = SHAPING_DIR_IN;
+    } else {
+        dir = SHAPING_DIR_OUT;
+    }
+
+    profile_priority_len[profile_id][priority][dir] += value;
+
+    reply->type = SWARMKV_REPLY_INTEGER;
+    cb(reply, cb_arg);
+
+    free(reply);
+
+    return;
+}
+
+static void swarmkv_hmget_cmd_func(char *cmd_str, swarmkv_on_reply_callback_t * cb, void *cb_arg)
+{
+    int profile_id;
+    int priority[10];
+    int ret;
+    int priority_num;
+    char direction[5] = {0};
+    enum shaping_packet_dir dir;
+    struct swarmkv_reply *reply = (struct swarmkv_reply*)calloc(1, sizeof(struct swarmkv_reply));
+
+    ret = sscanf(cmd_str, "HMGET tsg-shaping-%d priority-%d-%s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s",
+            &profile_id, &priority[0], direction, &priority[1], &priority[2], &priority[3], &priority[4], &priority[5], &priority[6], &priority[7], &priority[8]);
+    priority_num = ret - 1;
+
+    if (strncmp(direction, "in", 2) == 0) {
+        dir = SHAPING_DIR_IN;
+    } else {
+        dir = SHAPING_DIR_OUT;
+    }
+
+    reply->type = SWARMKV_REPLY_ARRAY;
+    reply->n_element = priority_num;
+    reply->elements = (struct swarmkv_reply**)calloc(priority_num, sizeof(struct swarmkv_reply*));
+    for (int i = 0; i < priority_num; i++) {
+        reply->elements[i] = (struct swarmkv_reply*)calloc(1, sizeof(struct swarmkv_reply));
+        reply->elements[i]->type = SWARMKV_REPLY_STRING;
+
+        char tmp_str[128] = {0};
+        sprintf(tmp_str, "%d", profile_priority_len[profile_id][priority[i]][dir]);
+        reply->elements[i]->str = (char *)calloc(1, strlen(tmp_str));
+        memcpy(reply->elements[i]->str, tmp_str, strlen(tmp_str));
+        reply->elements[i]->len = strlen(tmp_str);
+    }
+
+    cb(reply, cb_arg);
+
+    for(unsigned int i = 0; i < reply->n_element; i++) {
+        if (reply->elements[i]) {
+            free(reply->elements[i]->str);
+            free(reply->elements[i]);
+        }
+    }
+    free(reply->elements);
+    free(reply);
+
+}
+
+void swarmkv_async_command(struct swarmkv *db, swarmkv_on_reply_callback_t * cb, void *cb_arg, const char *format, ...)
+{
+    char *cmd_str = NULL;
+    char cmd_keyword[32] = {0};
+
+    va_list ap;
+    va_start(ap,format);
+    vasprintf(&cmd_str, format, ap);
+    va_end(ap);
+
+    sscanf(cmd_str, "%31s %*s", cmd_keyword);
+    if (strcmp(cmd_keyword, "HINCRBY") == 0) {
+        swarmkv_hincrby_cmd_func(cmd_str, cb, cb_arg);
+    } else if (strcmp(cmd_keyword, "HMGET") == 0) {
+        swarmkv_hmget_cmd_func(cmd_str, cb, cb_arg);
+    }
+
+    free(cmd_str);
+
+    return;
+}
+
+void swarmkv_tconsume(struct swarmkv * db, const char * key, size_t keylen, long long tokens, swarmkv_on_reply_callback_t *cb, void *cb_arg)
+{
+    int actual_tokens;
+    struct stub_token_thread_arg thread_arg;
+    struct swarmkv_reply reply;
+    int profile_id;
+    char direction[16] = {0};
+
+    sscanf(key, "tsg-shaping-%d-%15s", &profile_id, direction);
+
+    if (strncmp("bidirectional", direction, sizeof(direction)) == 0) {
+        if (pf_curr_avl_token[profile_id].bidirection_limit_bandwidth == DEFAULT_AVALIABLE_TOKEN_PER_SEC) {
+            actual_tokens = tokens;
+        } else {
+            actual_tokens = pf_curr_avl_token[profile_id].bidirection_limit_bandwidth >= tokens ? tokens : 0;
+            pf_curr_avl_token[profile_id].bidirection_limit_bandwidth -= actual_tokens;
+        }
+    } else if (strncmp("incoming", direction, sizeof(direction)) == 0) {
+        if (pf_curr_avl_token[profile_id].in_limit_bandwidth == DEFAULT_AVALIABLE_TOKEN_PER_SEC) {
+            actual_tokens = tokens;
+        } else {
+            actual_tokens = pf_curr_avl_token[profile_id].in_limit_bandwidth >= tokens ? tokens : 0;
+            pf_curr_avl_token[profile_id].in_limit_bandwidth -= actual_tokens;
+        }
+    } else {
+        if (pf_curr_avl_token[profile_id].out_limit_bandwidth == DEFAULT_AVALIABLE_TOKEN_PER_SEC) {
+            actual_tokens = tokens;
+        } else {
+            actual_tokens = pf_curr_avl_token[profile_id].out_limit_bandwidth >= tokens ? tokens : 0;
+            pf_curr_avl_token[profile_id].out_limit_bandwidth -= actual_tokens;
+        }
+    }
+
+    if (pf_async_times[profile_id] == 0) {
+        for (unsigned int i = 0; i < pf_async_thread[profile_id].size(); i++) {
+            stub_get_token_thread_func(&pf_async_thread[profile_id][i]);
+        }
+        pf_async_thread[profile_id].clear();
+    }
+
+    reply.integer = actual_tokens;
+    reply.type = SWARMKV_REPLY_INTEGER;
+
+    if (pf_async_times[profile_id] > 0) {
+        pf_async_times[profile_id]--;
+
+        thread_arg.profile_id = profile_id;
+        thread_arg.reply = reply;
+        thread_arg.cb = cb;
+        thread_arg.cb_arg = cb_arg;
+
+        pf_async_thread[profile_id].push_back(thread_arg);
+
+    } else {
+        cb(&reply, cb_arg);
+    }
+
+    return;
+}
+
+void swarmkv_ftconsume(struct swarmkv * db, const char * key, size_t keylen, const char * member, size_t member_len, long long weight, long long tokens, swarmkv_on_reply_callback_t *cb, void *cb_arg)
+{
+    swarmkv_tconsume(db, key, keylen, tokens, cb, cb_arg);
+    return;
+}
+
+void swarmkv_btconsume(struct swarmkv * db, const char * key, size_t keylen, const char * member, size_t member_len, long long tokens, swarmkv_on_reply_callback_t *cb, void *cb_arg)
+{
+    return;
+}
+
+void swarmkv_close(struct swarmkv * db)
+{
+    if (db) {
+        free(db);
+    }
+    return;
+}
+
+void swarmkv_caller_loop(struct swarmkv *db, int flags, struct timeval *tv)
+{
+    return;
+}
+
+int swarmkv_options_set_caller_thread_number(struct swarmkv_options *opts, int nr_caller_threads)
+{
+    return 0;
+}
+
+int swarmkv_options_set_worker_thread_number(struct swarmkv_options *opts, int nr_worker_threads)
+{
+    return 0;
+}
+/**********************************************/
\ No newline at end of file
diff --git a/shaping/test/dummy_time.cpp b/shaping/test/dummy_time.cpp
new file mode 100644
index 0000000..b97d988
--- /dev/null
+++ b/shaping/test/dummy_time.cpp
@@ -0,0 +1,35 @@
+#include <time.h>
+
+static unsigned long long curr_time_ns = 2000000000;//2s
+static unsigned int curr_time_s = 0;
+
+void stub_curr_time_ns_inc(unsigned long long time_ns)
+{
+    curr_time_ns += time_ns;
+
+    return;
+}
+
+void stub_curr_time_s_inc(int time_s)
+{
+    curr_time_s += time_s;
+
+    return;
+}
+
+unsigned long long stub_curr_time_ns_get()
+{
+    return curr_time_ns;
+}
+
+/****************stub of time.h****************/
+
+int clock_gettime (clockid_t __clock_id, struct timespec *__tp)
+{
+    __tp->tv_sec = curr_time_s;
+    __tp->tv_nsec = curr_time_ns;
+
+    return 0;
+}
+
+/**********************************************/
\ No newline at end of file
diff --git a/shaping/test/gtest_shaper.cpp b/shaping/test/gtest_shaper.cpp
index b342eda..49b9a06 100644
--- a/shaping/test/gtest_shaper.cpp
+++ b/shaping/test/gtest_shaper.cpp
@@ -1,5 +1,3 @@
-#include
-#include
 #include
 #include
 #include
diff --git a/shaping/test/gtest_shaper_with_swarmkv.cpp b/shaping/test/gtest_shaper_with_swarmkv.cpp
new file mode 100644
index 0000000..e3f2304
--- /dev/null
+++ b/shaping/test/gtest_shaper_with_swarmkv.cpp
@@ -0,0 +1,11 @@
+#include <gtest/gtest.h>
+
+TEST(single_session, generic_profile)
+{}
+
+int main(int argc, char **argv)
+{
+    testing::InitGoogleTest(&argc, argv);
+    //testing::GTEST_FLAG(filter) = "single_session.udp_tx_in_order";
+    return RUN_ALL_TESTS();
+}
\ No newline at end of file
diff --git a/shaping/test/stub.cpp b/shaping/test/stub.cpp
index be3f29d..4da686e 100644
--- a/shaping/test/stub.cpp
+++ b/shaping/test/stub.cpp
@@ -1,16 +1,12 @@
 #include
-#include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
-#include
-#include
 #include
 #include
 #include
 
@@ -20,9 +16,6 @@
"shaper_maat.h" #include "log.h" -using namespace std; - -#define DEFAULT_AVALIABLE_TOKEN_PER_SEC -1 #define MAX_STUB_TEST_SESSION_NUM 2 #define MAX_STUB_RULE_NUM 8 #define MAX_STUB_PROFILE_NUM 8 @@ -32,69 +25,10 @@ struct stub_matched_rules { int rule_num; }; -struct stub_token_thread_arg { - int profile_id; - struct swarmkv_reply reply; - swarmkv_on_reply_callback_t *cb; - void *cb_arg; -}; - -struct stub_avaliable_token { - int in_limit_bandwidth; - int out_limit_bandwidth; - int bidirection_limit_bandwidth; -}; struct stub_pkt_queue tx_queue; - -static struct stub_avaliable_token pf_curr_avl_token[MAX_STUB_PROFILE_NUM]; -static int pf_async_times[MAX_STUB_PROFILE_NUM]; -vector pf_async_thread[MAX_STUB_PROFILE_NUM]; struct stub_matched_rules matched_rules; -struct shaping_profile pf_array[MAX_STUB_PROFILE_NUM]; -static int profile_priority_len[MAX_STUB_PROFILE_NUM][SHAPING_PRIORITY_NUM_MAX][SHAPING_DIR_MAX]; - -static unsigned long long curr_time_ns = 2000000000;//2s -static unsigned int curr_time_s = 0; - -void * stub_get_token_thread_func(void *data) -{ - struct stub_token_thread_arg *thread_arg; - - thread_arg = (struct stub_token_thread_arg*)data; - - thread_arg->cb(&thread_arg->reply, thread_arg->cb_arg); - - return NULL; -} - -void stub_set_token_bucket_avl_per_sec(int profile_id, unsigned int tokens, unsigned char direction, enum shaping_profile_limit_direction limit_direction) -{ - pf_array[profile_id].limit_direction = limit_direction; - - if (limit_direction == PROFILE_LIMIT_DIRECTION_BIDIRECTION) { - pf_array[profile_id].bidirection_limit_bandwidth = tokens * 8; - pf_curr_avl_token[profile_id].bidirection_limit_bandwidth = tokens * 8; - } else { - if (direction == SHAPING_DIR_IN) { - pf_array[profile_id].in_limit_bandwidth = tokens * 8; - pf_curr_avl_token[profile_id].in_limit_bandwidth = tokens * 8; - } else { - pf_array[profile_id].out_limit_bandwidth = tokens * 8; - pf_curr_avl_token[profile_id].out_limit_bandwidth = tokens * 8; - } - } - - return; -} - -void stub_refresh_token_bucket(int profile_id) -{ - pf_curr_avl_token[profile_id].bidirection_limit_bandwidth = pf_array[profile_id].bidirection_limit_bandwidth; - pf_curr_avl_token[profile_id].in_limit_bandwidth = pf_array[profile_id].in_limit_bandwidth; - pf_curr_avl_token[profile_id].out_limit_bandwidth = pf_array[profile_id].out_limit_bandwidth; - return; -} +extern struct shaping_profile pf_array[MAX_STUB_PROFILE_NUM]; void stub_set_profile_type(int profile_id, enum shaping_profile_type type) { @@ -102,20 +36,6 @@ void stub_set_profile_type(int profile_id, enum shaping_profile_type type) return; } -void stub_set_async_token_get_times(int profile_id, int times) -{ - pf_async_times[profile_id] = times; - - if (pf_async_times[profile_id] == 0) { - for (unsigned int i = 0; i < pf_async_thread[profile_id].size(); i++) { - stub_get_token_thread_func(&pf_async_thread[profile_id][i]); - } - pf_async_thread[profile_id].clear(); - } - - return; -} - void stub_set_matched_shaping_rules(int rule_num, long long *rule_id, const int *priority, const int *profile_num, int profile_id[][MAX_REF_PROFILE]) { struct shaping_rule *rules; @@ -177,331 +97,17 @@ struct stub_pkt_queue* stub_get_tx_queue() return &tx_queue; } -void stub_curr_time_ns_inc(unsigned long long time_ns) -{ - curr_time_ns += time_ns; - - return; -} - -void stub_curr_time_s_inc(int time_s) -{ - curr_time_s += time_s; - - return; -} - -unsigned long long stub_curr_time_ns_get() -{ - return curr_time_ns; -} - void stub_init() { - int i; - LOG_INIT("./conf/zlog.conf"); 
 
     TAILQ_INIT(&tx_queue);
     memset(&matched_rules, 0, sizeof(struct stub_matched_rules));
-    memset(&pf_array, 0, MAX_STUB_PROFILE_NUM * sizeof(struct shaping_profile));
-    memset(&profile_priority_len, 0, MAX_STUB_PROFILE_NUM * SHAPING_PRIORITY_NUM_MAX * SHAPING_DIR_MAX * sizeof(int));
-
-    for (i = 0; i < MAX_STUB_PROFILE_NUM; i++) {
-        pf_curr_avl_token[i].in_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC;
-        pf_curr_avl_token[i].out_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC;
-        pf_curr_avl_token[i].bidirection_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC;
-        pf_array[i].id = i;
-        pf_array[i].in_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC;
-        pf_array[i].out_limit_bandwidth = DEFAULT_AVALIABLE_TOKEN_PER_SEC;
-        pf_async_times[i] = 0;
-        memset(profile_priority_len[i], 0, 10 * sizeof(int));
-    }
-    return;
-}
-
-/****************stub of time.h****************/
-
-int clock_gettime (clockid_t __clock_id, struct timespec *__tp)
-{
-    __tp->tv_sec = curr_time_s;
-    __tp->tv_nsec = curr_time_ns;
-
-    return 0;
-}
-
-/**********************************************/
-
-
-/**************stub of swarmkv*****************/
-void swarmkv_register_thread(struct swarmkv *db)
-{
-    return;
-}
-
-struct swarmkv_options* swarmkv_options_new(void)
-{
-    return NULL;
-}
-
-int swarmkv_options_set_logger(struct swarmkv_options *opts, void *logger)
-{
-    return 0;
-}
-
-int swarmkv_options_set_bind_address(struct swarmkv_options *opts, const char* ip_addr)
-{
-    return 0;
-}
-
-int swarmkv_options_set_cluster_port(struct swarmkv_options *opts, unsigned int cluster_port)
-{
-    return 0;
-}
-
-int swarmkv_options_set_consul_port(struct swarmkv_options *opts, unsigned int consul_port)
-{
-    return 0;
-}
-
-int swarmkv_options_set_consul_host(struct swarmkv_options *opts, const char* ip_addr)
-{
-    return 0;
-}
-
-int swarmkv_options_set_cluster_announce_ip(struct swarmkv_options *opts, const char *ip_addr)
-{
-    return 0;
-}
-
-int swarmkv_options_set_cluster_announce_port(struct swarmkv_options *opts, unsigned int cluster_announce_port)
-{
-    return 0;
-}
-
-int swarmkv_options_set_health_check_port(struct swarmkv_options *opts, unsigned int health_check_port)
-{
-    return 0;
-}
-
-int swarmkv_options_set_health_check_announce_port(struct swarmkv_options *opts, unsigned int health_check_announce_port)
-{
-    return 0;
-}
-
-struct swarmkv *swarmkv_open(struct swarmkv_options *opts, const char * cluster_name, char **err)
-{
-    struct swarmkv *db;
-
-    db = (struct swarmkv *)calloc(1, 1);
-
-    return db;
-}
-
-long long swarmkv_caller_get_pending_commands(struct swarmkv *db)
-{
-    return 0;
-}
-
-int swarmkv_options_set_log_path(struct swarmkv_options *opts, const char *logpath)
-{
-    return 0;
-}
-
-int swarmkv_options_set_log_level(struct swarmkv_options *opts, int loglevel)
-{
-    return 0;
-}
-
-static void swarmkv_hincrby_cmd_func(char *cmd_str, swarmkv_on_reply_callback_t * cb, void *cb_arg)
-{
-    int profile_id;
-    int priority;
-    int value;
-    char direction[5] = {0};
-    enum shaping_packet_dir dir;
-    struct swarmkv_reply *reply = (struct swarmkv_reply*)calloc(1, sizeof(struct swarmkv_reply));
-
-    sscanf(cmd_str, "HINCRBY tsg-shaping-%d priority-%d-%s %d", &profile_id, &priority, direction, &value);
-    if (strncmp(direction, "in", 2) == 0) {
-        dir = SHAPING_DIR_IN;
-    } else {
-        dir = SHAPING_DIR_OUT;
-    }
-
-    profile_priority_len[profile_id][priority][dir] += value;
-
-    reply->type = SWARMKV_REPLY_INTEGER;
-    cb(reply, cb_arg);
-
-    free(reply);
-
-    return;
-}
-
-static void swarmkv_hmget_cmd_func(char *cmd_str, swarmkv_on_reply_callback_t * cb, void *cb_arg)
-{
-    int profile_id;
-    int priority[10];
-    int ret;
-    int priority_num;
-    char direction[5] = {0};
-    enum shaping_packet_dir dir;
-    struct swarmkv_reply *reply = (struct swarmkv_reply*)calloc(1, sizeof(struct swarmkv_reply));
-
-    ret = sscanf(cmd_str, "HMGET tsg-shaping-%d priority-%d-%s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s priority-%d-%*s",
-            &profile_id, &priority[0], direction, &priority[1], &priority[2], &priority[3], &priority[4], &priority[5], &priority[6], &priority[7], &priority[8]);
-    priority_num = ret - 1;
-
-    if (strncmp(direction, "in", 2) == 0) {
-        dir = SHAPING_DIR_IN;
-    } else {
-        dir = SHAPING_DIR_OUT;
-    }
-
-    reply->type = SWARMKV_REPLY_ARRAY;
-    reply->n_element = priority_num;
-    reply->elements = (struct swarmkv_reply**)calloc(priority_num, sizeof(struct swarmkv_reply*));
-    for (int i = 0; i < priority_num; i++) {
-        reply->elements[i] = (struct swarmkv_reply*)calloc(1, sizeof(struct swarmkv_reply));
-        reply->elements[i]->type = SWARMKV_REPLY_STRING;
-
-        char tmp_str[128] = {0};
-        sprintf(tmp_str, "%d", profile_priority_len[profile_id][priority[i]][dir]);
-        reply->elements[i]->str = (char *)calloc(1, strlen(tmp_str));
-        memcpy(reply->elements[i]->str, tmp_str, strlen(tmp_str));
-        reply->elements[i]->len = strlen(tmp_str);
-    }
-
-    cb(reply, cb_arg);
-
-    for(unsigned int i = 0; i < reply->n_element; i++) {
-        if (reply->elements[i]) {
-            free(reply->elements[i]->str);
-            free(reply->elements[i]);
-        }
-    }
-    free(reply->elements);
-    free(reply);
-}
-
-void swarmkv_async_command(struct swarmkv *db, swarmkv_on_reply_callback_t * cb, void *cb_arg, const char *format, ...)
-{
-    char *cmd_str = NULL;
-    char cmd_keyword[32] = {0};
-
-    va_list ap;
-    va_start(ap,format);
-    vasprintf(&cmd_str, format, ap);
-    va_end(ap);
-
-    sscanf(cmd_str, "%31s %*s", cmd_keyword);
-    if (strcmp(cmd_keyword, "HINCRBY") == 0) {
-        swarmkv_hincrby_cmd_func(cmd_str, cb, cb_arg);
-    } else if (strcmp(cmd_keyword, "HMGET") == 0) {
-        swarmkv_hmget_cmd_func(cmd_str, cb, cb_arg);
-    }
-
-    free(cmd_str);
-
-    return;
-}
-
-void swarmkv_tconsume(struct swarmkv * db, const char * key, size_t keylen, long long tokens, swarmkv_on_reply_callback_t *cb, void *cb_arg)
-{
-    int actual_tokens;
-    struct stub_token_thread_arg thread_arg;
-    struct swarmkv_reply reply;
-    int profile_id;
-    char direction[16] = {0};
-
-    sscanf(key, "tsg-shaping-%d-%15s", &profile_id, direction);
-
-    if (strncmp("bidirectional", direction, sizeof(direction)) == 0) {
-        if (pf_curr_avl_token[profile_id].bidirection_limit_bandwidth == DEFAULT_AVALIABLE_TOKEN_PER_SEC) {
-            actual_tokens = tokens;
-        } else {
-            actual_tokens = pf_curr_avl_token[profile_id].bidirection_limit_bandwidth >= tokens ? tokens : 0;
-            pf_curr_avl_token[profile_id].bidirection_limit_bandwidth -= actual_tokens;
-        }
-    } else if (strncmp("incoming", direction, sizeof(direction)) == 0) {
-        if (pf_curr_avl_token[profile_id].in_limit_bandwidth == DEFAULT_AVALIABLE_TOKEN_PER_SEC) {
-            actual_tokens = tokens;
-        } else {
-            actual_tokens = pf_curr_avl_token[profile_id].in_limit_bandwidth >= tokens ? tokens : 0;
-            pf_curr_avl_token[profile_id].in_limit_bandwidth -= actual_tokens;
-        }
-    } else {
-        if (pf_curr_avl_token[profile_id].out_limit_bandwidth == DEFAULT_AVALIABLE_TOKEN_PER_SEC) {
-            actual_tokens = tokens;
-        } else {
-            actual_tokens = pf_curr_avl_token[profile_id].out_limit_bandwidth >= tokens ? tokens : 0;
-            pf_curr_avl_token[profile_id].out_limit_bandwidth -= actual_tokens;
-        }
-    }
-
-    if (pf_async_times[profile_id] == 0) {
-        for (unsigned int i = 0; i < pf_async_thread[profile_id].size(); i++) {
-            stub_get_token_thread_func(&pf_async_thread[profile_id][i]);
-        }
-        pf_async_thread[profile_id].clear();
-    }
-
-    reply.integer = actual_tokens;
-    reply.type = SWARMKV_REPLY_INTEGER;
-
-    if (pf_async_times[profile_id] > 0) {
-        pf_async_times[profile_id]--;
-
-        thread_arg.profile_id = profile_id;
-        thread_arg.reply = reply;
-        thread_arg.cb = cb;
-        thread_arg.cb_arg = cb_arg;
-
-        pf_async_thread[profile_id].push_back(thread_arg);
-
-    } else {
-        cb(&reply, cb_arg);
-    }
-
+    init_dummy_swarmkv();
     return;
 }
 
-void swarmkv_ftconsume(struct swarmkv * db, const char * key, size_t keylen, const char * member, size_t member_len, long long weight, long long tokens, swarmkv_on_reply_callback_t *cb, void *cb_arg)
-{
-    swarmkv_tconsume(db, key, keylen, tokens, cb, cb_arg);
-    return;
-}
-
-void swarmkv_btconsume(struct swarmkv * db, const char * key, size_t keylen, const char * member, size_t member_len, long long tokens, swarmkv_on_reply_callback_t *cb, void *cb_arg)
-{
-    return;
-}
-
-void swarmkv_close(struct swarmkv * db)
-{
-    if (db) {
-        free(db);
-    }
-    return;
-}
-
-void swarmkv_caller_loop(struct swarmkv *db, int flags, struct timeval *tv)
-{
-    return;
-}
-
-int swarmkv_options_set_caller_thread_number(struct swarmkv_options *opts, int nr_caller_threads)
-{
-    return 0;
-}
-
-int swarmkv_options_set_worker_thread_number(struct swarmkv_options *opts, int nr_worker_threads)
-{
-    return 0;
-}
-/**********************************************/
 
 /*************stub of maat*********************/
 struct maat_options *maat_options_new(void)
diff --git a/shaping/test/stub.h b/shaping/test/stub.h
index d466944..d791295 100644
--- a/shaping/test/stub.h
+++ b/shaping/test/stub.h
@@ -49,6 +49,7 @@ void stub_curr_time_s_inc(int time_s);
 unsigned long long stub_curr_time_ns_get();
 
 void stub_init();
+void init_dummy_swarmkv();
 
 /*******************temporary for test******************************/
 void stub_shaper_stat_send(int thread_seq);
-- 
cgit v1.2.3