| field     | value                                    |                           |
|-----------|------------------------------------------|---------------------------|
| author    | songyanchao <[email protected]>          | 2024-04-17 04:35:00 +0000 |
| committer | songyanchao <[email protected]>          | 2024-04-17 04:35:00 +0000 |
| commit    | 0d3e4c03ded5766e6a55e104abc7502329b89969 |                           |
| tree      | 6ed27e3d65c4f7dddb51a3bb7bb048264d8c75a3 | /service/src              |
| parent    | e30ad9587bb1a26e0696ab6fba14ec94f333a1ef |                           |
✨ feat: Refine counting structure in eth ingress node.
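At its core this is a mechanical refactor of the drop accounting in `node_eth_ingress.c`: six hand-named drop counters, plus the if/else chain that matched each counter to a trace string, collapse into one enum-indexed counter array with a parallel string table. Counting, per-graph aggregation, and reporting all become loops over `ETH_INGR_DROP_RSN_MAX`, and a new drop reason costs one enum entry and one string. A minimal standalone sketch of the pattern (simplified names, outside the DPDK graph context — not the actual service code):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* One enum entry per drop reason; the _MAX entry doubles as the array size. */
enum drop_reason
{
    DROP_RSN_INVALID_ROLE_TYPE = 0,
    DROP_RSN_INVALID_LAYERS,
    DROP_RSN_MAX,
};

/* Parallel string table, indexed by the same enum. */
static const char * drop_reason_str[DROP_RSN_MAX] = {
    "invalid role type",
    "invalid pkt layers",
};

struct stats
{
    uint64_t drop_reason[DROP_RSN_MAX]; /* was: one named field per reason */
};

int main(void)
{
    struct stats s = {0};

    /* Count a drop by indexing, instead of bumping a dedicated field. */
    s.drop_reason[DROP_RSN_INVALID_LAYERS]++;

    /* Report with a loop, instead of a per-field if/else chain. */
    for (int i = 0; i < DROP_RSN_MAX; i++)
    {
        printf("rsn:%s = %" PRIu64 "\n", drop_reason_str[i], s.drop_reason[i]);
    }
    return 0;
}
```

The diff below applies this move to `struct eth_ingress_stats`, threads the chosen reason out of `endpoint_dev_packet_handler` through a new `out_drop_reason` parameter, and reduces the drop-trace formatting to a single `", rsn:%s"` table lookup.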
Diffstat (limited to 'service/src')
| -rw-r--r-- | service/src/node_classifier.c  |   1 |
| -rw-r--r-- | service/src/node_eth_ingress.c | 219 |

2 files changed, 137 insertions(+), 83 deletions(-)
diff --git a/service/src/node_classifier.c b/service/src/node_classifier.c
index 19afcba..aad24ff 100644
--- a/service/src/node_classifier.c
+++ b/service/src/node_classifier.c
@@ -1,5 +1,4 @@
 #include <MESA_prof_load.h>
-#include <bits/stdint-uintn.h>
 #include <pkt_classifier_engine.h>
 #include <sc_node.h>
 #include <sc_node_common.h>
diff --git a/service/src/node_eth_ingress.c b/service/src/node_eth_ingress.c
index f26898e..29abe17 100644
--- a/service/src/node_eth_ingress.c
+++ b/service/src/node_eth_ingress.c
@@ -25,10 +25,29 @@ enum
     ETH_INGRESS_NEXT_MAX,
 };
 
+/* Eth ingress drop reason */
+enum eth_ingress_drop_reason
+{
+    ETH_INGR_DROP_RSN_INVALID_ROLE_TYPE = 0,
+    ETH_INGR_DROP_RSN_INVALID_LAYERS,
+    ETH_INGR_DROP_RSN_INVALID_ICMP_TYPE,
+    ETH_INGR_DROP_RSN_INVALID_ADAPTER_TYPE,
+    ETH_INGR_DROP_RSN_NONCOMPLIANT_EF,
+    ETH_INGR_DROP_RSN_NONCOMPLIANT_TERA,
+    ETH_INGR_DROP_RSN_MAX,
+};
+
+/* Eth ingress drop reason string */
+static const char * eth_ingress_drop_reason_str[ETH_INGR_DROP_RSN_MAX] = {
+    "invalid role type",    "invalid pkt layers",       "invalid icmp type",
+    "invalid adapter type", "pkt noncompliant with ef", "pkt noncompliant with tera",
+};
+
 /* Eth ingress statistics struct */
 struct eth_ingress_stats
 {
-    volatile uint64_t deal_pkts;
+    volatile uint64_t total_pkts;
+    volatile uint64_t pkts_per_batch;
     volatile uint64_t to_bridge;
     volatile uint64_t to_vwire;
     volatile uint64_t to_health_check;
@@ -37,12 +56,7 @@ struct eth_ingress_stats
     volatile uint64_t to_ef_ingress;
     volatile uint64_t to_eth_egress;
     volatile uint64_t to_tera_ingress;
-    volatile uint64_t drop_for_invalid_role_type;
-    volatile uint64_t drop_for_invalid_layers;
-    volatile uint64_t drop_for_invalid_icmp_type;
-    volatile uint64_t drop_for_invalid_adapter_type;
-    volatile uint64_t drop_for_noncompliant_ef;
-    volatile uint64_t drop_for_noncompliant_tera;
+    volatile uint64_t drop_reason[ETH_INGR_DROP_RSN_MAX];
 } __rte_cache_aligned;
 
 static struct eth_ingress_stats stats_per_graph[RTE_MAX_LCORE] = {};
@@ -258,7 +272,8 @@ static int kernel_resp_dev_filter(struct mr_dev_desc * dev_desc, struct rte_mbuf
 
 /* Endpoint device packet handler */
 static int endpoint_dev_packet_handler(struct mr_dev_desc * dev_desc, struct eth_ingress_stats * stats,
-                                       struct rte_mbuf * mbuf, struct mrb_metadata * mrb_meta)
+                                       struct rte_mbuf * mbuf, struct mrb_metadata * mrb_meta,
+                                       enum eth_ingress_drop_reason * out_drop_reason)
 {
     struct pkt_parser_result * parser_result = &mrb_meta->pkt_parser_result;
 
@@ -294,7 +309,8 @@ static int endpoint_dev_packet_handler(struct mr_dev_desc * dev_desc, struct eth
         }
         else
         {
-            stats->drop_for_invalid_layers++;
+            *out_drop_reason = ETH_INGR_DROP_RSN_INVALID_LAYERS;
+            stats->drop_reason[*out_drop_reason]++;
             return ETH_INGRESS_NEXT_PKT_DROP;
         }
     }
@@ -315,7 +331,8 @@ static int endpoint_dev_packet_handler(struct mr_dev_desc * dev_desc, struct eth
         }
         else
         {
-            stats->drop_for_invalid_icmp_type++;
+            *out_drop_reason = ETH_INGR_DROP_RSN_INVALID_ICMP_TYPE;
+            stats->drop_reason[*out_drop_reason]++;
             return ETH_INGRESS_NEXT_PKT_DROP;
         }
     }
@@ -344,19 +361,22 @@ static int endpoint_dev_packet_handler(struct mr_dev_desc * dev_desc, struct eth
                      parser_result->layers[1].type_id != LAYER_TYPE_ID_IPV4 ||
                      parser_result->layers[2].type_id != LAYER_TYPE_ID_UDP))
         {
-            stats->drop_for_noncompliant_ef++;
+            *out_drop_reason = ETH_INGR_DROP_RSN_NONCOMPLIANT_EF;
+            stats->drop_reason[*out_drop_reason]++;
             return ETH_INGRESS_NEXT_PKT_DROP;
         }
 
         if (unlikely(ipv4_hdr->dst_addr != dev_desc->in_addr.s_addr))
         {
-            stats->drop_for_noncompliant_ef++;
+            *out_drop_reason = ETH_INGR_DROP_RSN_NONCOMPLIANT_EF;
+            stats->drop_reason[*out_drop_reason]++;
             return ETH_INGRESS_NEXT_PKT_DROP;
         }
 
         if (unlikely(dst_port != rte_cpu_to_be_16(G_VXLAN_DPORT)))
         {
-            stats->drop_for_noncompliant_ef++;
+            *out_drop_reason = ETH_INGR_DROP_RSN_NONCOMPLIANT_EF;
+            stats->drop_reason[*out_drop_reason]++;
             return ETH_INGRESS_NEXT_PKT_DROP;
         }
 
@@ -369,7 +389,8 @@ static int endpoint_dev_packet_handler(struct mr_dev_desc * dev_desc, struct eth
         if (unlikely(parser_result->layers[0].type_id != LAYER_TYPE_ID_ETHER ||
                      parser_result->layers[1].type_id != LAYER_TYPE_ID_VLAN))
         {
-            stats->drop_for_noncompliant_tera++;
+            *out_drop_reason = ETH_INGR_DROP_RSN_NONCOMPLIANT_TERA;
+            stats->drop_reason[*out_drop_reason]++;
             return ETH_INGRESS_NEXT_PKT_DROP;
         }
 
@@ -377,15 +398,16 @@ static int endpoint_dev_packet_handler(struct mr_dev_desc * dev_desc, struct eth
         return ETH_INGRESS_NEXT_TERA_INGRESS;
     }
 
-    stats->drop_for_invalid_adapter_type++;
+    *out_drop_reason = ETH_INGR_DROP_RSN_INVALID_ADAPTER_TYPE;
+    stats->drop_reason[*out_drop_reason]++;
     return ETH_INGRESS_NEXT_PKT_DROP;
 }
 
 /* Generate and store the trace information */
 static __rte_always_inline void gen_store_trace_info_ingress(struct rte_node * node, struct rte_mbuf * mbuf,
                                                              uint16_t next_node_index, struct eth_ingress_stats * stats,
-                                                             struct eth_ingress_stats * stats_for_trace,
-                                                             struct mr_dev_desc * dev_desc)
+                                                             struct mr_dev_desc * dev_desc,
+                                                             enum eth_ingress_drop_reason drop_reason)
 {
     struct dp_trace_record_meta meta = {.appsym = MR_TRACE_APPSYM, .module = node->name};
 
@@ -398,36 +420,9 @@ static __rte_always_inline void gen_store_trace_info_ingress(struct rte_node * n
     switch (next_node_index)
     {
     case ETH_INGRESS_NEXT_PKT_DROP: {
-        if (stats_for_trace->drop_for_invalid_role_type != stats->drop_for_invalid_role_type)
-        {
-            len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:invalid role type");
-            stats_for_trace->drop_for_invalid_role_type = stats->drop_for_invalid_role_type;
-        }
-        else if (stats_for_trace->drop_for_invalid_layers != stats->drop_for_invalid_layers)
-        {
-            len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:invalid pkt layers");
-            stats_for_trace->drop_for_invalid_layers = stats->drop_for_invalid_layers;
-        }
-        else if (stats_for_trace->drop_for_invalid_icmp_type != stats->drop_for_invalid_icmp_type)
-        {
-            len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:invalid icmp type");
-            stats_for_trace->drop_for_invalid_icmp_type = stats->drop_for_invalid_icmp_type;
-        }
-        else if (stats_for_trace->drop_for_invalid_adapter_type != stats->drop_for_invalid_adapter_type)
-        {
-            len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:invalid adapter type");
-            stats_for_trace->drop_for_invalid_adapter_type = stats->drop_for_invalid_adapter_type;
-        }
-        else if (stats_for_trace->drop_for_noncompliant_ef != stats->drop_for_noncompliant_ef)
-        {
-            len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:pkt noncompliant with ef");
-            stats_for_trace->drop_for_noncompliant_ef = stats->drop_for_noncompliant_ef;
-        }
-        else if (stats_for_trace->drop_for_noncompliant_tera != stats->drop_for_noncompliant_tera)
-        {
-            len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:pkt noncompliant with tera");
-            stats_for_trace->drop_for_noncompliant_tera = stats->drop_for_noncompliant_tera;
-        }
+        len +=
+            snprintf(str_record + len, sizeof(str_record) - len, ", rsn:%s", eth_ingress_drop_reason_str[drop_reason]);
+        break;
     }
     break;
     case ETH_INGRESS_NEXT_BRIDGE:
@@ -478,7 +473,7 @@ static __rte_always_inline uint16_t eth_ingress_node_process(struct rte_graph *
     /* Single Packet Processing */
     uint16_t last_port_ingress = UINT16_MAX;
     struct eth_ingress_stats stats = {};
-    // struct eth_ingress_stats stats_for_trace = {};
+    enum eth_ingress_drop_reason drop_reason = ETH_INGR_DROP_RSN_MAX;
     struct mr_dev_desc * dev_desc = NULL;
     struct devmgr_main * devmgr_main = sc_main_get()->devmgr_main;
 
@@ -514,7 +509,7 @@ static __rte_always_inline uint16_t eth_ingress_node_process(struct rte_graph *
             break;
         case MR_DEV_ROLE_ENDPOINT_INTERFACE:
         case MR_DEV_ROLE_KERNEL_RESP_INTERFACE:
-            next_node_index = endpoint_dev_packet_handler(dev_desc, &stats, mbuf, mrb_meta);
+            next_node_index = endpoint_dev_packet_handler(dev_desc, &stats, mbuf, mrb_meta, &drop_reason);
             break;
         case MR_DEV_ROLE_NF_INTERFACE: {
            /* check health check pkt */
@@ -531,17 +526,18 @@ static __rte_always_inline uint16_t eth_ingress_node_process(struct rte_graph *
         }
         break;
         default:
-            stats.drop_for_invalid_role_type++;
+            drop_reason = ETH_INGR_DROP_RSN_INVALID_ROLE_TYPE;
+            stats.drop_reason[drop_reason]++;
             next_node_index = ETH_INGRESS_NEXT_PKT_DROP;
             break;
         }
 
    node_enqueue:
-#if 0
+#if 1
         /* Check if tracing is enabled for the current Mbuf */
         if (unlikely(dp_trace_record_can_emit(mbuf)))
         {
-            gen_store_trace_info_ingress(node, mbuf, next_node_index, &stats, &stats_for_trace, dev_desc);
+            gen_store_trace_info_ingress(node, mbuf, next_node_index, &stats, dev_desc, drop_reason);
         }
 #endif
         /* Check if the next index needs to be changed */
@@ -569,7 +565,8 @@ static __rte_always_inline uint16_t eth_ingress_node_process(struct rte_graph *
 
     /* Update graph stats */
     struct eth_ingress_stats * graph_stats = &stats_per_graph[graph->id];
-    graph_stats->deal_pkts += cnt;
+    graph_stats->total_pkts += cnt;
+    graph_stats->pkts_per_batch = cnt;
     graph_stats->to_bridge += stats.to_bridge;
     graph_stats->to_vwire += stats.to_vwire;
     graph_stats->to_health_check += stats.to_health_check;
@@ -578,12 +575,11 @@ static __rte_always_inline uint16_t eth_ingress_node_process(struct rte_graph *
     graph_stats->to_ef_ingress += stats.to_ef_ingress;
     graph_stats->to_eth_egress += stats.to_eth_egress;
     graph_stats->to_tera_ingress += stats.to_tera_ingress;
-    graph_stats->drop_for_invalid_role_type += stats.drop_for_invalid_role_type;
-    graph_stats->drop_for_invalid_layers += stats.drop_for_invalid_layers;
-    graph_stats->drop_for_invalid_icmp_type += stats.drop_for_invalid_icmp_type;
-    graph_stats->drop_for_invalid_adapter_type += stats.drop_for_invalid_adapter_type;
-    graph_stats->drop_for_noncompliant_ef += stats.drop_for_noncompliant_ef;
-    graph_stats->drop_for_noncompliant_tera += stats.drop_for_noncompliant_tera;
+
+    for (int i = 0; i < ETH_INGR_DROP_RSN_MAX; i++)
+    {
+        graph_stats->drop_reason[i] += stats.drop_reason[i];
+    }
 
     return cnt;
 }
@@ -622,36 +618,95 @@ RTE_NODE_REGISTER(eth_ingress_node_base);
 
 cJSON * eth_ingress_node_monit_loop(struct sc_main * sc)
 {
     cJSON * json_root = cJSON_CreateObject();
-    for (uint32_t graph_id = 0; graph_id < sc->nr_io_thread; graph_id++)
+    unsigned int nr_graphs = sc->nr_io_thread;
+
+    uint64_t total_pkts[nr_graphs];
+    uint64_t pkts_per_batch[nr_graphs];
+    uint64_t to_bridge[nr_graphs];
+    uint64_t to_vwire[nr_graphs];
+    uint64_t to_health_check[nr_graphs];
+    uint64_t to_forwarder[nr_graphs];
+    uint64_t to_bfd[nr_graphs];
+    uint64_t to_ef_ingress[nr_graphs];
+    uint64_t to_eth_egress[nr_graphs];
+    uint64_t to_tera_ingress[nr_graphs];
+    uint64_t drop_reason[nr_graphs][ETH_INGR_DROP_RSN_MAX];
+
+    for (uint32_t graph_id = 0; graph_id < nr_graphs; graph_id++)
     {
         struct eth_ingress_stats * stats = &stats_per_graph[graph_id];
-        if (stats->deal_pkts == 0)
+        if (stats->total_pkts == 0)
         {
+            total_pkts[graph_id] = 0;
+            pkts_per_batch[graph_id] = 0;
+            to_bridge[graph_id] = 0;
+            to_vwire[graph_id] = 0;
+            to_health_check[graph_id] = 0;
+            to_forwarder[graph_id] = 0;
+            to_bfd[graph_id] = 0;
+            to_ef_ingress[graph_id] = 0;
+            to_eth_egress[graph_id] = 0;
+            to_tera_ingress[graph_id] = 0;
+
+            memset(drop_reason[graph_id], 0, sizeof(drop_reason[graph_id]));
             continue;
         }
 
-        cJSON * graph_obj = cJSON_CreateObject();
-        cJSON_AddNumberToObject(graph_obj, "deal_pkts", stats->deal_pkts);
-        cJSON_AddNumberToObject(graph_obj, "to_bridge", stats->to_bridge);
-        cJSON_AddNumberToObject(graph_obj, "to_vwire", stats->to_vwire);
-        cJSON_AddNumberToObject(graph_obj, "to_health_check", stats->to_health_check);
-        cJSON_AddNumberToObject(graph_obj, "to_forwarder", stats->to_forwarder);
-        cJSON_AddNumberToObject(graph_obj, "to_bfd", stats->to_bfd);
-        cJSON_AddNumberToObject(graph_obj, "to_ef_ingress", stats->to_ef_ingress);
-        cJSON_AddNumberToObject(graph_obj, "to_eth_egress", stats->to_eth_egress);
-        cJSON_AddNumberToObject(graph_obj, "to_tera_ingress", stats->to_tera_ingress);
-        cJSON_AddNumberToObject(graph_obj, "drop_for_invalid_role_type", stats->drop_for_invalid_role_type);
-        cJSON_AddNumberToObject(graph_obj, "drop_for_invalid_layers", stats->drop_for_invalid_layers);
-        cJSON_AddNumberToObject(graph_obj, "drop_for_invalid_icmp_type", stats->drop_for_invalid_icmp_type);
-        cJSON_AddNumberToObject(graph_obj, "drop_for_invalid_adapter_type", stats->drop_for_invalid_adapter_type);
-        cJSON_AddNumberToObject(graph_obj, "drop_for_noncompliant_ef", stats->drop_for_noncompliant_ef);
-        cJSON_AddNumberToObject(graph_obj, "drop_for_noncompliant_tera", stats->drop_for_noncompliant_tera);
-
-        char str_graph_idx[MR_STRING_MAX];
-        snprintf(str_graph_idx, sizeof(str_graph_idx) - 1, "graph-%u", graph_id);
-        cJSON_AddItemToObject(json_root, str_graph_idx, graph_obj);
+        total_pkts[graph_id] = stats->total_pkts;
+        pkts_per_batch[graph_id] = stats->pkts_per_batch;
+        to_bridge[graph_id] = stats->to_bridge;
+        to_vwire[graph_id] = stats->to_vwire;
+        to_health_check[graph_id] = stats->to_health_check;
+        to_forwarder[graph_id] = stats->to_forwarder;
+        to_bfd[graph_id] = stats->to_bfd;
+        to_ef_ingress[graph_id] = stats->to_ef_ingress;
+        to_eth_egress[graph_id] = stats->to_eth_egress;
+        to_tera_ingress[graph_id] = stats->to_tera_ingress;
+
+        for (int i = 0; i < ETH_INGR_DROP_RSN_MAX; i++)
+        {
+            drop_reason[graph_id][i] = stats->drop_reason[i];
+        }
+    }
+
+    cJSON * json_total_pkts = create_uint64_array(total_pkts, nr_graphs);
+    cJSON_AddItemToObject(json_root, "eth_ingress, total pkts", json_total_pkts);
+
+    cJSON * json_pkts_per_batch = create_uint64_array(pkts_per_batch, nr_graphs);
+    cJSON_AddItemToObject(json_root, "eth_ingress, pkts per batch", json_pkts_per_batch);
+
+    cJSON * json_to_bridge = create_uint64_array(to_bridge, nr_graphs);
+    cJSON_AddItemToObject(json_root, "eth_ingress, to bridge", json_to_bridge);
+
+    cJSON * json_to_vwire = create_uint64_array(to_vwire, nr_graphs);
+    cJSON_AddItemToObject(json_root, "eth_ingress, to vwire", json_to_vwire);
+
+    cJSON * json_to_health_check = create_uint64_array(to_health_check, nr_graphs);
+    cJSON_AddItemToObject(json_root, "eth_ingress, to health check", json_to_health_check);
+
+    cJSON * json_to_forwarder = create_uint64_array(to_forwarder, nr_graphs);
+    cJSON_AddItemToObject(json_root, "eth_ingress, to forwarder", json_to_forwarder);
+
+    cJSON * json_to_bfd = create_uint64_array(to_bfd, nr_graphs);
+    cJSON_AddItemToObject(json_root, "eth_ingress, to bfd", json_to_bfd);
+
+    cJSON * json_to_ef_ingress = create_uint64_array(to_ef_ingress, nr_graphs);
+    cJSON_AddItemToObject(json_root, "eth_ingress, to ef ingress", json_to_ef_ingress);
+
+    cJSON * json_to_eth_egress = create_uint64_array(to_eth_egress, nr_graphs);
+    cJSON_AddItemToObject(json_root, "eth_ingress, to eth egress", json_to_eth_egress);
+
+    cJSON * json_to_tera_ingress = create_uint64_array(to_tera_ingress, nr_graphs);
+    cJSON_AddItemToObject(json_root, "eth_ingress, to tera ingress", json_to_tera_ingress);
+
+    for (int i = 0; i < ETH_INGR_DROP_RSN_MAX; i++)
+    {
+        char str_title[MR_STRING_MAX];
+        snprintf(str_title, sizeof(str_title), "eth_ingress, %s", eth_ingress_drop_reason_str[i]);
+
+        cJSON * json_drop_reason = create_uint64_array(drop_reason[i], nr_graphs);
+        cJSON_AddItemToObject(json_root, str_title, json_drop_reason);
     }
-    cJSON_AddNumberToObject(json_root, "nr_graph_total", sc->nr_io_thread);
 
     return json_root;
 }
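On the reporting side, `eth_ingress_node_monit_loop` now emits one JSON array per counter with one element per graph (under keys such as `"eth_ingress, total pkts"`), instead of the previous one-object-per-graph layout keyed `graph-%u`. The `create_uint64_array` helper is not defined in this diff and is not a stock cJSON call; a plausible implementation over the standard cJSON API might look like this:

```c
#include <stdint.h>
#include <cJSON.h>

/* Hypothetical helper -- not shown in this commit. Builds a JSON array
 * from `count` uint64 values using only standard cJSON calls. */
cJSON * create_uint64_array(const uint64_t * values, unsigned int count)
{
    cJSON * array = cJSON_CreateArray();
    if (array == NULL)
    {
        return NULL;
    }

    for (unsigned int i = 0; i < count; i++)
    {
        /* cJSON stores numbers as double; values above 2^53 lose precision. */
        cJSON_AddItemToArray(array, cJSON_CreateNumber((double)values[i]));
    }
    return array;
}
```

Two details of the new loop are worth noting. First, `pkts_per_batch` is assigned rather than accumulated in the fast path, so it reports the size of the most recent burst, not a running total. Second, the scratch matrix is filled graph-major (`drop_reason[graph_id][i]`) but emitted as `drop_reason[i]` with `i` iterating reasons, which hands `create_uint64_array` graph `i`'s reason row rather than reason `i`'s per-graph column — the indexing on one side likely needs transposing.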
