Diffstat (limited to 'shaping/src/shaper_stat.cpp')
| -rw-r--r-- | shaping/src/shaper_stat.cpp | 101 |
1 file changed, 75 insertions, 26 deletions
diff --git a/shaping/src/shaper_stat.cpp b/shaping/src/shaper_stat.cpp
index 735e154..808abd1 100644
--- a/shaping/src/shaper_stat.cpp
+++ b/shaping/src/shaper_stat.cpp
@@ -14,7 +14,7 @@
 
 #define SHAPER_STAT_ROW_NAME "traffic_shaping_rule_hits"
 
-#define SHAPER_STAT_REFRESH_TIME_NS 10000000 //10 ms
+#define SHAPER_STAT_REFRESH_TIME_US 10000 //10 ms
 
 struct shaper_stat_conf {
 	int enable_backgroud_thread;
@@ -150,10 +150,11 @@ static void shaper_stat_swarmkv_hincrby_cb(const struct swarmkv_reply *reply, vo
 
 	if (reply->type != SWARMKV_REPLY_INTEGER) {
 		shaper_global_stat_async_hincrby_failed_inc(&ctx->thread_global_stat);
+		arg->start_time_us = curr_time_us;
 		shaper_global_stat_async_invoke_inc(&ctx->thread_global_stat);//hincrby failed, retry
 		shaper_global_stat_hincrby_invoke_inc(&ctx->thread_global_stat);
-		LOG_INFO("%s: shaping stat hincrby failed, retry for profile id %d priority %d, operate queue_len %lld", LOG_TAG_STAT, arg->profile_id, arg->priority, arg->queue_len);
+		LOG_DEBUG("%s: shaping stat hincrby failed, retry for profile id %d priority %d, operate queue_len %lld", LOG_TAG_STAT, arg->profile_id, arg->priority, arg->queue_len);
 		swarmkv_async_command(ctx->swarmkv_db, shaper_stat_swarmkv_hincrby_cb, arg, "HINCRBY tsg-shaping-%d priority-%d %lld", arg->profile_id, arg->priority, arg->queue_len);
 		return;
@@ -164,13 +165,67 @@ static void shaper_stat_swarmkv_hincrby_cb(const struct swarmkv_reply *reply, vo
 	return;
 }
 
-static void shaper_stat_profile_metirc_refresh(struct shaping_thread_ctx *ctx, int vsys_id, int thread_id, int rule_id, struct shaping_profile_info *profile, int profile_type, int need_update_guage)
+static void shaper_stat_priority_queue_len_refresh(struct shaping_thread_ctx *ctx, struct shaping_profile_hash_node *profile_hash_node, int priority, long long curr_time_us)
+{
+	if (profile_hash_node->local_queue_len[priority] == 0) {
+		return;
+	}
+
+	if (curr_time_us - profile_hash_node->local_queue_len_update_time_us[priority] < SHAPER_STAT_REFRESH_TIME_US) {
+		return;
+	}
+
+	struct shaping_hincrby_cb_arg *arg = (struct shaping_hincrby_cb_arg *)calloc(1, sizeof(struct shaping_hincrby_cb_arg));
+
+	arg->ctx = ctx;
+	arg->start_time_us = curr_time_us;
+	arg->profile_id = profile_hash_node->id;
+	arg->priority = priority;
+	arg->queue_len = profile_hash_node->local_queue_len[priority];
+	shaper_global_stat_async_invoke_inc(&ctx->thread_global_stat);
+	shaper_global_stat_hincrby_invoke_inc(&ctx->thread_global_stat);
+	swarmkv_async_command(ctx->swarmkv_db, shaper_stat_swarmkv_hincrby_cb, arg, "HINCRBY tsg-shaping-%d priority-%d %lld", arg->profile_id, arg->priority, arg->queue_len);
+
+	profile_hash_node->local_queue_len_update_time_us[priority] = curr_time_us;
+	profile_hash_node->local_queue_len[priority] = 0;
+
+	return;
+}
+
+void shaper_stat_priority_queue_len_refresh_all(struct shaping_thread_ctx *ctx, struct shaping_profile_hash_node *profile_hash_node)
+{
+	struct timespec curr_time;
+	long long curr_time_us;
+
+	clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_time);
+	curr_time_us = curr_time.tv_sec * MICRO_SECONDS_PER_SEC + curr_time.tv_nsec / NANO_SECONDS_PER_MICRO_SEC;
+
+	for (int i = 0; i < SHAPING_PRIORITY_NUM_MAX; i++) {
+		shaper_stat_priority_queue_len_refresh(ctx, profile_hash_node, i, curr_time_us);
+	}
+
+	return;
+}
+
+static void shaper_stat_profile_metirc_refresh(struct shaping_thread_ctx *ctx, struct shaping_rule_info *rule, struct shaping_profile_info *profile, int profile_type, int need_refresh_stat, int need_update_guage, long long curr_time_us)
 {
 	struct shaping_stat_for_profile *profile_stat = &profile->stat;
 	struct shaping_stat *stat = ctx->stat;
+	int priority = profile->priority;
+	int thread_id = ctx->thread_index;
 	unsigned long long old_latency;
+
+	if (need_update_guage) {
+		profile->hash_node->local_queue_len[priority] += profile_stat->priority_queue_len;
+		profile_stat->priority_queue_len = 0;
+		shaper_stat_priority_queue_len_refresh(ctx, profile->hash_node, priority, curr_time_us);
+	}
+
+	if (!need_refresh_stat) {
+		return;
+	}
 
-	shaper_stat_tags_build(vsys_id, rule_id, profile->id, profile->priority, profile_type);
+	shaper_stat_tags_build(rule->vsys_id, rule->id, profile->id, priority, profile_type);
 	fieldstat_dynamic_table_metric_value_incrby(stat->instance, stat->table_id, stat->column_ids[IN_DROP_PKTS_IDX], SHAPER_STAT_ROW_NAME, profile_stat->in.drop_pkts, tags, TAG_IDX_MAX, thread_id);
 	fieldstat_dynamic_table_metric_value_incrby(stat->instance, stat->table_id, stat->column_ids[IN_PKTS_IDX], SHAPER_STAT_ROW_NAME, profile_stat->in.pkts, tags, TAG_IDX_MAX, thread_id);
 	fieldstat_dynamic_table_metric_value_incrby(stat->instance, stat->table_id, stat->column_ids[IN_BYTES_IDX], SHAPER_STAT_ROW_NAME, profile_stat->in.bytes, tags, TAG_IDX_MAX, thread_id);
@@ -195,19 +250,6 @@ static void shaper_stat_profile_metirc_refresh(struct shaping_thread_ctx *ctx, i
 			fieldstat_dynamic_table_metric_value_incrby(stat->instance, stat->table_id, stat->column_ids[OUT_QUEUE_LEN_IDX], SHAPER_STAT_ROW_NAME, profile_stat->out.queue_len, tags, TAG_IDX_MAX, thread_id);
 		}
 
-		struct shaping_hincrby_cb_arg *arg = (struct shaping_hincrby_cb_arg *)calloc(1, sizeof(struct shaping_hincrby_cb_arg));
-		struct timespec curr_time;
-
-		clock_gettime(CLOCK_MONOTONIC, &curr_time);
-		arg->ctx = ctx;
-		arg->start_time_us = curr_time.tv_sec * MICRO_SECONDS_PER_SEC + curr_time.tv_nsec / NANO_SECONDS_PER_MICRO_SEC;
-		arg->profile_id = profile->id;
-		arg->priority = profile->priority;
-		arg->queue_len = profile_stat->in.queue_len + profile_stat->out.queue_len;
-		shaper_global_stat_async_invoke_inc(&ctx->thread_global_stat);
-		shaper_global_stat_hincrby_invoke_inc(&ctx->thread_global_stat);
-		swarmkv_async_command(ctx->swarmkv_db, shaper_stat_swarmkv_hincrby_cb, arg, "HINCRBY tsg-shaping-%d priority-%d %lld", arg->profile_id, arg->priority, arg->queue_len);
-
 		memset(profile_stat, 0, sizeof(struct shaping_stat_for_profile));
 	} else {
 		profile_stat->in.pkts = 0;
@@ -224,34 +266,37 @@ static void shaper_stat_profile_metirc_refresh(struct shaping_thread_ctx *ctx, i
 	return;
 }
 
-void shaper_stat_refresh(struct shaping_thread_ctx *ctx, struct shaping_flow *sf, int thread_id, int force)
+void shaper_stat_refresh(struct shaping_thread_ctx *ctx, struct shaping_flow *sf, int force)
 {
 	struct shaping_rule_info *rule;
 	struct timespec curr_time;
 	int need_refresh = 0;
+	long long curr_time_us;
+
+	clock_gettime(CLOCK_MONOTONIC_COARSE, &curr_time);
+	curr_time_us = curr_time.tv_sec * MICRO_SECONDS_PER_SEC + curr_time.tv_nsec / NANO_SECONDS_PER_MICRO_SEC;
 
 	if (force) {
 		need_refresh = 1;
 	} else {
-		clock_gettime(CLOCK_MONOTONIC, &curr_time);
-		if (curr_time.tv_sec - sf->stat_update_time.tv_sec > 0 || curr_time.tv_nsec - sf->stat_update_time.tv_nsec >= SHAPER_STAT_REFRESH_TIME_NS) {
+		if (curr_time_us - sf->stat_update_time_us >= SHAPER_STAT_REFRESH_TIME_US) {
 			need_refresh = 1;
-			memcpy(&sf->stat_update_time, &curr_time, sizeof(struct timespec));
+			sf->stat_update_time_us = curr_time_us;
 		}
 	}
 
-	if (!need_refresh) {
+	int need_update_guage = sf->processed_pkts > CONFIRM_PRIORITY_PKTS ? 1 : 0;
+
+	if (!need_refresh && !need_update_guage) {
 		return;
 	}
 
-	int need_update_guage = sf->processed_pkts > CONFIRM_PRIORITY_PKTS ? 1 : 0;
-
 	for (int i = 0; i < sf->rule_num; i++) {
 		rule = &sf->matched_rule_infos[i];
-		shaper_stat_profile_metirc_refresh(ctx, rule->vsys_id, thread_id, rule->id, &rule->primary, PROFILE_IN_RULE_TYPE_PRIMARY, need_update_guage);
+		shaper_stat_profile_metirc_refresh(ctx, rule, &rule->primary, PROFILE_IN_RULE_TYPE_PRIMARY, need_refresh, need_update_guage, curr_time_us);
 		for (int j = 0; j < rule->borrowing_num; j++) {
-			shaper_stat_profile_metirc_refresh(ctx, rule->vsys_id, thread_id, rule->id, &rule->borrowing[j], PROFILE_IN_RULE_TYPE_BORROW, need_update_guage);
+			shaper_stat_profile_metirc_refresh(ctx, rule, &rule->borrowing[j], PROFILE_IN_RULE_TYPE_BORROW, need_refresh, need_update_guage, curr_time_us);
		}
 	}
 
@@ -303,6 +348,8 @@ void shaper_stat_queueing_pkt_inc(struct shaping_stat_for_profile *profile_stat,
 		profile_stat->out.queue_len++;
 	}
 
+	profile_stat->priority_queue_len++;
+
 	return;
 }
 
@@ -314,6 +361,8 @@ void shaper_stat_queueing_pkt_dec(struct shaping_stat_for_profile *profile_stat,
 		profile_stat->out.queue_len--;
 	}
 
+	profile_stat->priority_queue_len--;
+
 	return;
 }
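
The change replaces the per-refresh HINCRBY (previously issued with the combined in/out queue length on every flow stat refresh) with per-priority queue-length deltas accumulated locally on the profile hash node and flushed to swarmkv at most once per SHAPER_STAT_REFRESH_TIME_US (10 ms), timestamped with CLOCK_MONOTONIC_COARSE in microseconds. The sketch below illustrates that throttled accumulate-and-flush pattern only; the names queue_len_acc and flush_to_store and the constants are hypothetical stand-ins, not code from this repository.

```c
/* Minimal sketch of the throttled per-priority flush pattern, assuming Linux
 * (CLOCK_MONOTONIC_COARSE). flush_to_store() stands in for the asynchronous
 * swarmkv HINCRBY; everything else here is illustrative only. */
#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>

#define PRIORITY_NUM_MAX    8
#define REFRESH_INTERVAL_US 10000LL   /* 10 ms, mirrors SHAPER_STAT_REFRESH_TIME_US */
#define USEC_PER_SEC        1000000LL
#define NSEC_PER_USEC       1000LL

struct queue_len_acc {
	long long pending[PRIORITY_NUM_MAX];        /* locally accumulated queue-length delta */
	long long last_flush_us[PRIORITY_NUM_MAX];  /* last time a delta was pushed out */
};

static long long now_us(void)
{
	struct timespec ts;

	/* A coarse clock is enough: the flush interval is 10 ms. */
	clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
	return ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC;
}

/* Hypothetical stand-in for the async HINCRBY against the shared store. */
static void flush_to_store(int priority, long long delta)
{
	printf("HINCRBY priority-%d %lld\n", priority, delta);
}

/* Fast path: only touch local memory. */
static void queue_len_add(struct queue_len_acc *acc, int priority, long long delta)
{
	acc->pending[priority] += delta;
}

/* Flush one priority if something is pending and the interval has elapsed. */
static void queue_len_maybe_flush(struct queue_len_acc *acc, int priority, long long curr_us)
{
	if (acc->pending[priority] == 0)
		return;
	if (curr_us - acc->last_flush_us[priority] < REFRESH_INTERVAL_US)
		return;

	flush_to_store(priority, acc->pending[priority]);
	acc->last_flush_us[priority] = curr_us;
	acc->pending[priority] = 0;
}

int main(void)
{
	struct queue_len_acc acc = {0};

	queue_len_add(&acc, 2, 5);
	queue_len_add(&acc, 2, -1);
	queue_len_maybe_flush(&acc, 2, now_us());  /* emits a single +4 delta */
	return 0;
}
```

The split between need_refresh and need_update_guage in shaper_stat_refresh() follows the same idea: the queue-length gauge can be accumulated and flushed even on calls where the per-flow fieldstat refresh is skipped, so the shared-store traffic is bounded by the 10 ms interval rather than by the packet rate.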
