#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

/* project-local headers: devmgr, mrbuf metadata, dp_trace, sc_main, cJSON helpers */

/* Eth egress next nodes */
enum {
    ETH_EGRESS_NEXT_PKT_DROP = 0,
    ETH_EGRESS_NEXT_MAX,
};

/* Eth egress stats struct */
struct eth_egress_stats {
    volatile uint64_t total_pkts;
    volatile uint64_t pkts_per_batch;
} __rte_cache_aligned;

/* Eth egress main struct */
struct node_eth_egress_main {
    unsigned int tb_port_id_to_edge[MR_DEVICE_MAX];
};

/* Global eth egress main */
static struct node_eth_egress_main g_eth_egress_main = {};
static struct eth_egress_stats stats_per_graph[RTE_MAX_LCORE] = {};

static inline struct node_eth_egress_main *
node_eth_egress_main_get(void)
{
    return &g_eth_egress_main;
}

/* Add each device's tx node as an edge of the eth_egress node */
int
node_eth_egress_edges_update(void)
{
    unsigned int dev_desc_iterator = 0;
    struct mr_dev_desc * dev_desc = NULL;
    struct node_eth_egress_main * eth_egress_main = node_eth_egress_main_get();
    rte_node_t eth_egress_node_id = rte_node_from_name("eth_egress");
    unsigned int eth_egress_edge_id = rte_node_edge_count(eth_egress_node_id);

    /* For each device, add its tx node as an edge of the egress node */
    while ((dev_desc = mr_dev_desc_iterate(sc_main_get()->devmgr_main, &dev_desc_iterator)) != NULL) {
        rte_node_t tx_node_id = dev_desc->tx_node_id;

        /* Skip devices whose tx_node_id is invalid */
        if (tx_node_id == RTE_NODE_ID_INVALID) {
            continue;
        }

        /* Append the tx node to the eth_egress edge list */
        const char * tx_node_name = rte_node_id_to_name(tx_node_id);
        rte_node_edge_update(eth_egress_node_id, RTE_EDGE_ID_INVALID, &tx_node_name, 1);

        /* Record the port_id -> edge_id mapping */
        eth_egress_main->tb_port_id_to_edge[dev_desc->port_id] = eth_egress_edge_id;
        eth_egress_edge_id++;
    }

    return RT_SUCCESS;
}

/* Init eth egress */
int
node_eth_egress_init(struct node_manager_main * node_mgr_main)
{
    node_eth_egress_edges_update();
    return RT_SUCCESS;
}

/* Generate and store the trace information */
static __rte_always_inline void
gen_store_trace_info_egress(struct rte_node * node, struct rte_mbuf * mbuf, uint16_t next_node_index)
{
    /* Populate the next node information */
    char str_record[MR_STRING_MAX];
    struct mrb_metadata * mrb_meta = mrbuf_cz_data(mbuf, MR_NODE_CTRLZONE_ID);
    snprintf(str_record, sizeof(str_record), "next node:%s, tx:%u",
             node->nodes[next_node_index]->name, mrb_meta->port_egress);

    /* Emit the trace record */
    struct dp_trace_record_meta meta = {.appsym = MR_TRACE_APPSYM, .module = node->name};
    dp_trace_record_emit_str(sc_main_get()->trace, mbuf, rte_lcore_id(), &meta, str_record);
}

/* Eth egress node process function */
static __rte_always_inline uint16_t
eth_egress_node_process(struct rte_graph * graph, struct rte_node * node, void ** objs, uint16_t cnt)
{
    uint16_t n_left_from = cnt;
    uint16_t last_spec = 0;
    uint16_t next_node_index = 0;
    struct rte_mbuf ** pkts = (struct rte_mbuf **)objs;
    void ** batch_pkts = objs;
    uint16_t batch_next_node_index = ETH_EGRESS_NEXT_PKT_DROP;
    struct node_eth_egress_main * eth_egress_main = node_eth_egress_main_get();

    /* Single packet processing */
    while (n_left_from > 0) {
        struct rte_mbuf * mbuf = pkts[0];
        pkts += 1;
        n_left_from -= 1;

        /* Look up the edge (tx node) for this packet's egress port */
        struct mrb_metadata * mrb_meta = mrbuf_cz_data(mbuf, MR_NODE_CTRLZONE_ID);
        uint16_t port_egress = mrb_meta->port_egress;
        assert(port_egress < RTE_DIM(eth_egress_main->tb_port_id_to_edge));
        next_node_index = eth_egress_main->tb_port_id_to_edge[port_egress];

#if 0
        /* Check whether tracing is enabled for the current mbuf */
        if (unlikely(dp_trace_record_can_emit(mbuf, DP_TRACE_MEASUREMENT_TYPE_TRACE))) {
            gen_store_trace_info_egress(node, mbuf, next_node_index);
        }
#endif

        /* Check whether the next index has changed */
        if (unlikely(batch_next_node_index != next_node_index)) {
            /* The next index changed: enqueue the accumulated packets */
            rte_node_enqueue(graph, node, batch_next_node_index, batch_pkts, last_spec);
            batch_pkts += last_spec;
            last_spec = 1;
            batch_next_node_index = next_node_index;
        } else {
            /* The next index is unchanged: extend the current batch */
            last_spec++;
        }
    }

    /* Enqueue the remaining packets */
    if (likely(last_spec > 0))
        rte_node_enqueue(graph, node, batch_next_node_index, batch_pkts, last_spec);

    /* Update graph stats */
    stats_per_graph[graph->id].total_pkts += cnt;
    stats_per_graph[graph->id].pkts_per_batch = cnt;

    return cnt;
}

/* Eth egress node base */
static struct rte_node_register eth_egress_node_base = {
    .process = eth_egress_node_process,
    .name = "eth_egress",
    .init = NULL,
    .nb_edges = ETH_EGRESS_NEXT_MAX,
    .next_nodes = {
        [ETH_EGRESS_NEXT_PKT_DROP] = "pkt_drop_trap",
    },
};

RTE_NODE_REGISTER(eth_egress_node_base);

/**************************************
 * Eth egress statistics
 **************************************/
cJSON *
eth_egress_node_monit_loop(struct sc_main * sc)
{
    cJSON * json_root = cJSON_CreateObject();
    unsigned int nr_graphs = sc->nr_io_thread;
    uint64_t total_pkts[nr_graphs];
    uint64_t pkts_per_batch[nr_graphs];

    for (uint32_t graph_id = 0; graph_id < nr_graphs; graph_id++) {
        struct eth_egress_stats * stats = &stats_per_graph[graph_id];
        if (stats->total_pkts == 0) {
            total_pkts[graph_id] = 0;
            pkts_per_batch[graph_id] = 0;
            continue;
        }
        total_pkts[graph_id] = stats->total_pkts;
        pkts_per_batch[graph_id] = stats->pkts_per_batch;
    }

    cJSON * json_total_pkts = create_uint64_array(total_pkts, nr_graphs);
    cJSON_AddItemToObject(json_root, "eth egress, total_pkts", json_total_pkts);

    cJSON * json_pkts_per_batch = create_uint64_array(pkts_per_batch, nr_graphs);
    cJSON_AddItemToObject(json_root, "eth egress, pkts_per_batch", json_pkts_per_batch);

    return json_root;
}