author    songyanchao <[email protected]>    2024-04-17 06:22:48 +0000
committer songyanchao <[email protected]>    2024-04-17 06:22:48 +0000
commit    637d1ee965e8cba77a16170cecedce8e307daf28 (patch)
tree      55e9e091702ac762e72312f284e4bb2ff7dffbeb /service/src
parent    0d3e4c03ded5766e6a55e104abc7502329b89969 (diff)
🎈 perf: Refine counting structure in forwarder node.
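The refactor replaces the individual drop counters (drop_for_pop_sid_err, drop_for_invalid_sid) with a drop_reason[] array indexed by a new enum plus a parallel string table, so the trace string, the per-graph accumulation and the JSON export can all iterate over the reasons generically. A minimal sketch of the payoff, using the names introduced by this patch; the FORWARDER_DROP_RSN_TTL_EXCEEDED entry below is a hypothetical future reason, not part of this commit:

    /* Adding a drop reason after this refactor touches only two tables.
     * FORWARDER_DROP_RSN_TTL_EXCEEDED and its string are hypothetical. */
    enum forwarder_drop_reason
    {
        FORWARDER_DROP_RSN_POP_SID_ERR = 0,
        FORWARDER_DROP_RSN_INVALID_SID,
        FORWARDER_DROP_RSN_TTL_EXCEEDED,  /* new enum entry ...          */
        FORWARDER_DROP_RSN_MAX
    };

    static const char * forwarder_drop_reason_str[FORWARDER_DROP_RSN_MAX] = {
        "pop sid err",
        "invalid sid",
        "ttl exceeded",                   /* ... and one matching string */
    };

    /* The counter array, the trace record and the monit loop all iterate
     * 0..FORWARDER_DROP_RSN_MAX-1, so no further code changes are needed. */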
Diffstat (limited to 'service/src')
-rw-r--r--  service/src/node_forwarder.c  |  127
1 file changed, 90 insertions(+), 37 deletions(-)
diff --git a/service/src/node_forwarder.c b/service/src/node_forwarder.c
index da403b5..38a6b77 100644
--- a/service/src/node_forwarder.c
+++ b/service/src/node_forwarder.c
@@ -1,3 +1,4 @@
+#include <stdint.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>
@@ -19,16 +20,30 @@ enum
FORWARDER_NEXT_MAX
};
+/* Forwarder drop reason */
+enum forwarder_drop_reason
+{
+ FORWARDER_DROP_RSN_POP_SID_ERR = 0,
+ FORWARDER_DROP_RSN_INVALID_SID,
+ FORWARDER_DROP_RSN_MAX
+};
+
+/* Forwarder drop reason string */
+static const char * forwarder_drop_reason_str[FORWARDER_DROP_RSN_MAX] = {
+ "pop sid err",
+ "invalid sid",
+};
+
/* Forwarder stats */
struct forwarder_stats
{
- volatile uint64_t deal_pkts;
+ volatile uint64_t total_pkts;
+ volatile uint64_t pkts_per_batch;
volatile uint64_t to_load_balance;
volatile uint64_t to_vwire_egress;
volatile uint64_t to_ef_egress;
volatile uint64_t to_tera_egress;
- volatile uint64_t drop_for_pop_sid_err;
- volatile uint64_t drop_for_invalid_sid;
+ volatile uint64_t drop_reason[FORWARDER_DROP_RSN_MAX];
} __rte_cache_aligned;
struct forwarder_main
@@ -93,7 +108,7 @@ void forwarder_table_inserter(uint16_t sid, uint16_t type)
/* Generate and store the trace information */
static __rte_always_inline void gen_store_trace_info(struct rte_node * node, struct rte_mbuf * mbuf,
uint16_t next_node_index, struct forwarder_stats * stats,
- struct forwarder_stats * stats_for_trace)
+ enum forwarder_drop_reason drop_reason)
{
struct dp_trace_record_meta meta = {.appsym = MR_TRACE_APPSYM, .module = node->name};
@@ -107,16 +122,7 @@ static __rte_always_inline void gen_store_trace_info(struct rte_node * node, str
if (unlikely(next_node_index == FORWARDER_PKT_DROP))
{
- if (stats_for_trace->drop_for_pop_sid_err != stats->drop_for_pop_sid_err)
- {
- len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:pop sid err");
- stats_for_trace->drop_for_pop_sid_err = stats->drop_for_pop_sid_err;
- }
- else if (stats_for_trace->drop_for_invalid_sid != stats->drop_for_invalid_sid)
- {
- len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:invalid sid");
- stats_for_trace->drop_for_invalid_sid = stats->drop_for_invalid_sid;
- }
+ len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:%s", forwarder_drop_reason_str[drop_reason]);
}
else
{
@@ -144,7 +150,7 @@ static __rte_always_inline uint16_t forwarder_node_process(struct rte_graph * gr
struct rte_mbuf ** pkts = (struct rte_mbuf **)objs;
void ** batch_pkts = objs;
struct forwarder_stats stats = {};
- struct forwarder_stats stats_for_trace = {};
+ enum forwarder_drop_reason drop_reason = FORWARDER_DROP_RSN_MAX;
unsigned int next_node_index_counters[FORWARDER_NEXT_MAX] = {};
@@ -176,7 +182,8 @@ static __rte_always_inline uint16_t forwarder_node_process(struct rte_graph * gr
rte_pktmbuf_dump(stderr, mbuf, rte_pktmbuf_data_len(mbuf));
*/
- stats.drop_for_pop_sid_err++;
+ drop_reason = FORWARDER_DROP_RSN_POP_SID_ERR;
+ stats.drop_reason[drop_reason]++;
next_node_index = FORWARDER_PKT_DROP;
goto node_enqueue;
}
@@ -191,7 +198,8 @@ static __rte_always_inline uint16_t forwarder_node_process(struct rte_graph * gr
/* Deal invalid sid */
if (unlikely(next_node_index == 0))
{
- stats.drop_for_invalid_sid++;
+ drop_reason = FORWARDER_DROP_RSN_INVALID_SID;
+ stats.drop_reason[drop_reason]++;
next_node_index = FORWARDER_PKT_DROP;
goto node_enqueue;
}
@@ -203,7 +211,7 @@ static __rte_always_inline uint16_t forwarder_node_process(struct rte_graph * gr
/* Check if tracing is enabled for the current Mbuf */
if (unlikely(dp_trace_record_can_emit(mbuf)))
{
- gen_store_trace_info(node, mbuf, next_node_index, &stats, &stats_for_trace);
+ gen_store_trace_info(node, mbuf, next_node_index, &stats, drop_reason);
// gen_store_trace_info_sid_list(node, mbuf);
}
@@ -225,13 +233,17 @@ static __rte_always_inline uint16_t forwarder_node_process(struct rte_graph * gr
/* Update graph stats */
struct forwarder_stats * graph_stats = &stats_per_graph[graph->id];
- graph_stats->deal_pkts += cnt;
+ graph_stats->total_pkts += cnt;
+ graph_stats->pkts_per_batch = cnt;
graph_stats->to_load_balance += next_node_index_counters[FORWARDER_NEXT_LB];
graph_stats->to_vwire_egress += next_node_index_counters[FORWARDER_VWIRE_EGRESS];
graph_stats->to_ef_egress += next_node_index_counters[FORWARDER_EF_EGRESS];
graph_stats->to_tera_egress += next_node_index_counters[FORWARDER_TERA_EGRESS];
- graph_stats->drop_for_pop_sid_err += stats.drop_for_pop_sid_err;
- graph_stats->drop_for_invalid_sid += stats.drop_for_invalid_sid;
+
+ for (uint16_t i = 0; i < FORWARDER_DROP_RSN_MAX; i++)
+ {
+ graph_stats->drop_reason[i] += stats.drop_reason[i];
+ }
/* Process the remaining packets */
if (likely(last_spec > 0))
@@ -263,32 +275,73 @@ RTE_NODE_REGISTER(forwarder_node_base);
/************************************** Forwarder statistics **************************************/
cJSON * forwarder_node_monit_loop(struct sc_main * sc)
{
- unsigned int nr_graph_total = sc->nr_io_thread;
cJSON * json_root = cJSON_CreateObject();
+ unsigned int nr_graphs = sc->nr_io_thread;
+
+ uint64_t total_pkts[nr_graphs];
+ uint64_t pkts_per_batch[nr_graphs];
+ uint64_t to_load_balance[nr_graphs];
+ uint64_t to_vwire_egress[nr_graphs];
+ uint64_t to_ef_egress[nr_graphs];
+ uint64_t to_tera_egress[nr_graphs];
+ uint64_t drop_reason[FORWARDER_DROP_RSN_MAX][nr_graphs];
- for (uint32_t graph_id = 0; graph_id < nr_graph_total; graph_id++)
+ for (uint32_t graph_id = 0; graph_id < nr_graphs; graph_id++)
{
struct forwarder_stats * stats = &stats_per_graph[graph_id];
- if (stats->deal_pkts == 0)
+ if (stats->total_pkts == 0)
{
+ total_pkts[graph_id] = 0;
+ pkts_per_batch[graph_id] = 0;
+ to_load_balance[graph_id] = 0;
+ to_vwire_egress[graph_id] = 0;
+ to_ef_egress[graph_id] = 0;
+ to_tera_egress[graph_id] = 0;
+
+ for (int i = 0; i < FORWARDER_DROP_RSN_MAX; i++) drop_reason[i][graph_id] = 0;
continue;
}
- cJSON * graph_obj = cJSON_CreateObject();
- cJSON_AddNumberToObject(graph_obj, "deal_pkts", stats->deal_pkts);
- cJSON_AddNumberToObject(graph_obj, "to_load_balance", stats->to_load_balance);
- cJSON_AddNumberToObject(graph_obj, "to_vwire_egress", stats->to_vwire_egress);
- cJSON_AddNumberToObject(graph_obj, "to_ef_egress", stats->to_ef_egress);
- cJSON_AddNumberToObject(graph_obj, "to_tera_egress", stats->to_tera_egress);
- cJSON_AddNumberToObject(graph_obj, "drop_for_pop_sid_err", stats->drop_for_pop_sid_err);
- cJSON_AddNumberToObject(graph_obj, "drop_for_invalid_sid", stats->drop_for_invalid_sid);
-
- char str_graph_idx[MR_STRING_MAX] = {};
- snprintf(str_graph_idx, sizeof(str_graph_idx) - 1, "graph-%u", graph_id);
- cJSON_AddItemToObject(json_root, str_graph_idx, graph_obj);
+ total_pkts[graph_id] = stats->total_pkts;
+ pkts_per_batch[graph_id] = stats->pkts_per_batch;
+ to_load_balance[graph_id] = stats->to_load_balance;
+ to_vwire_egress[graph_id] = stats->to_vwire_egress;
+ to_ef_egress[graph_id] = stats->to_ef_egress;
+ to_tera_egress[graph_id] = stats->to_tera_egress;
+
+ for (int i = 0; i < FORWARDER_DROP_RSN_MAX; i++)
+ {
+ drop_reason[i][graph_id] = stats->drop_reason[i];
+ }
+ }
+
+ cJSON * json_total_pkts = create_uint64_array(total_pkts, nr_graphs);
+ cJSON_AddItemToObject(json_root, "forwarder, total pkts", json_total_pkts);
+
+ cJSON * json_pkts_per_batch = create_uint64_array(pkts_per_batch, nr_graphs);
+ cJSON_AddItemToObject(json_root, "forwarder, pkts per batch", json_pkts_per_batch);
+
+ cJSON * json_to_load_balance = create_uint64_array(to_load_balance, nr_graphs);
+ cJSON_AddItemToObject(json_root, "forwarder, to load balance", json_to_load_balance);
+
+ cJSON * json_to_vwire_egress = create_uint64_array(to_vwire_egress, nr_graphs);
+ cJSON_AddItemToObject(json_root, "forwarder, to vwire egress", json_to_vwire_egress);
+
+ cJSON * json_to_ef_egress = create_uint64_array(to_ef_egress, nr_graphs);
+ cJSON_AddItemToObject(json_root, "forwarder, to etherfabric egress", json_to_ef_egress);
+
+ cJSON * json_to_tera_egress = create_uint64_array(to_tera_egress, nr_graphs);
+ cJSON_AddItemToObject(json_root, "forwarder, to tera egress", json_to_tera_egress);
+
+ for (int i = 0; i < FORWARDER_DROP_RSN_MAX; i++)
+ {
+ char str_title[MR_STRING_MAX];
+ snprintf(str_title, sizeof(str_title), "forwarder, %s", forwarder_drop_reason_str[i]);
+
+ cJSON * json_drop_reason = create_uint64_array(drop_reason[i], nr_graphs);
+ cJSON_AddItemToObject(json_root, str_title, json_drop_reason);
}
- cJSON_AddNumberToObject(json_root, "nr_graph_total", nr_graph_total);
return json_root;
}
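forwarder_node_monit_loop() depends on a create_uint64_array() helper that is defined elsewhere in the tree and does not appear in this diff. A minimal sketch of what such a helper can look like with the public cJSON API, assuming it simply maps a uint64_t buffer onto a cJSON number array (cJSON stores numbers as double, so counters beyond 2^53 would lose precision; the project's real helper may differ):

    #include <stdint.h>
    #include "cJSON.h"

    /* Sketch only, not the project's actual implementation: build a cJSON
     * array with one number per element of a uint64_t buffer. */
    cJSON * create_uint64_array(const uint64_t * values, unsigned int count)
    {
        cJSON * array = cJSON_CreateArray();
        if (array == NULL)
            return NULL;

        for (unsigned int i = 0; i < count; i++)
            cJSON_AddItemToArray(array, cJSON_CreateNumber((double)values[i]));

        return array;
    }

With a helper of this shape, every statistic in the monit output becomes one JSON array holding one value per graph (one per I/O thread), e.g. "forwarder, total pkts": [1200, 0, 987] for three graphs; that sample output is illustrative only.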