#include <assert.h>
#include <stdint.h>

#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>

#include <cJSON.h>

/*
 * The project-local headers (vdev, mrbuf control zones, pkt_parser, dp_trace,
 * dev_node_ctx, sc_main, create_uint64_array) are assumed to be provided by
 * the surrounding tree; their exact header names are not recoverable from
 * this file alone.
 */

enum {
    SHMDEV_RX_NEXT_NODE_FORWARDER,
    SHMDEV_RX_NEXT_NODE_HEALTH_CHECK,
    SHMDEV_RX_NEXT_NODE_PKT_DROP,
    SHMDEV_RX_NEXT_NODE_MAX
};

struct shmdev_stat_per_lcore {
    volatile uint64_t total_tx_pkts;
    volatile uint64_t total_rx_pkts;
    volatile uint64_t rx_pkts_per_batch;
    volatile uint64_t tx_pkts_per_batch;
    volatile uint64_t original_pkts;
    volatile uint64_t nf_create_pkts;
    volatile uint64_t to_eth_ingress_pkts;
} __rte_cache_aligned;

/* Indexed by graph id (one graph per I/O thread); RTE_MAX_LCORE is an upper bound. */
struct shmdev_stat_per_lcore shmdev_graph_stat[RTE_MAX_LCORE];

enum packet_direction {
    PACKET_DIRECTION_TX,
    PACKET_DIRECTION_RX
};

uint16_t
shmdev_rx_node_process(struct rte_graph *graph, struct rte_node *node, void **objs, uint16_t cnt)
{
    struct dev_node_ctx *ctx = (struct dev_node_ctx *)node->ctx;
    struct mr_dev_desc *dev_desc = ctx->dev_desc;
    struct vdev *shm_dev_desc = dev_desc->shm_dev_desc;

    RTE_SET_USED(objs);
    RTE_SET_USED(cnt);

    struct mr_dev_desc_qid_map *qid_map = dev_desc->rx_qid_map;
    unsigned int core_id = rte_lcore_id();

    /* Check whether this core is allowed to receive for this device */
    if (qid_map->qid_enabled[core_id] == 0) {
        return 0;
    }

    unsigned int qid = qid_map->qid_map[core_id];
    unsigned int nr_mbufs = vdev_collect(shm_dev_desc, qid, (struct rte_mbuf **)node->objs,
                                         RTE_GRAPH_BURST_SIZE, VDEV_COLLECT_DATA);
    if (unlikely(nr_mbufs == 0))
        return 0;

    /* Save the number of objects used */
    node->idx = nr_mbufs;

    unsigned int mbuf_alloc_at_nf = 0;
    unsigned int mbuf_alloc_at_serv = 0;

    /* Process every received packet */
    for (unsigned int i = 0; i < node->idx; i++) {
        /* Prefetch the next mbuf and its control-zone metadata */
        if (i < node->idx - 1) {
            rte_prefetch0(node->objs[i + 1]);
            rte_prefetch0(mrbuf_cz_data(node->objs[i + 1], MR_NODE_CTRLZONE_ID));
        }

        struct rte_mbuf *mbuf0 = (struct rte_mbuf *)node->objs[i];
        struct mrb_metadata *mrb_meta = mrbuf_cz_data(mbuf0, MR_NODE_CTRLZONE_ID);

        assert(mbuf0 != NULL);
        assert(mrb_meta != NULL);
        __rte_mbuf_sanity_check(mbuf0, 1);

        /* Classify the packet by origin */
        if (likely(mrb_meta->packet_create_from_nf == 0)) {
            mbuf_alloc_at_serv++;
        } else {
            /* Parse the NF-created ingress packet */
            struct pkt_parser pkt_parser;

            pkt_parser_init(&pkt_parser, &mrb_meta->pkt_parser_result, LAYER_TYPE_ALL,
                            MR_PKT_PARSER_LAYERS_MAX);
            pkt_parser_exec(&pkt_parser, mbuf0);
            mbuf_alloc_at_nf++;
        }

        /* Fill in the ingress port */
        mrb_meta->port_ingress = dev_desc->port_id;

        /* Check if tracing is enabled for the current mbuf */
        if (unlikely(dp_trace_record_can_emit(mbuf0, DP_TRACE_MEASUREMENT_TYPE_TRACE))) {
            gen_store_trace_info_rx(node, mbuf0, dev_desc, qid);
            if (unlikely(mrb_meta->packet_create_from_nf == 1)) {
                gen_store_trace_info_sid_list(node, mbuf0);
                gen_store_trace_info_pkt_parser(node, mbuf0);
            }
        }
    }

    /* Update the rx statistics */
    struct shmdev_stat_per_lcore *graph_stats = &shmdev_graph_stat[graph->id];

    graph_stats->total_rx_pkts += nr_mbufs;
    graph_stats->rx_pkts_per_batch = nr_mbufs;
    graph_stats->original_pkts += mbuf_alloc_at_serv;
    graph_stats->nf_create_pkts += mbuf_alloc_at_nf;
    graph_stats->to_eth_ingress_pkts += mbuf_alloc_at_nf;

    /* Move the whole burst to the first next node */
    rte_node_next_stream_move(graph, node, 0);

    return nr_mbufs;
}

uint16_t
shmdev_tx_node_process(struct rte_graph *graph, struct rte_node *node, void **objs, uint16_t cnt)
{
    struct dev_node_ctx *ctx = (struct dev_node_ctx *)node->ctx;
    struct rte_mbuf **mbufs = (struct rte_mbuf **)node->objs;
    struct mr_dev_desc *dev_desc = ctx->dev_desc;
    struct vdev *shm_dev_desc = dev_desc->shm_dev_desc;
    unsigned int nr_pkts = cnt;

    for (unsigned int i = 0; i < nr_pkts; i++) {
        /* Check if tracing is enabled for the current mbuf */
        if (unlikely(dp_trace_record_can_emit(mbufs[i], DP_TRACE_MEASUREMENT_TYPE_TRACE))) {
            gen_store_trace_info_tx(node, mbufs[i], dev_desc, graph->id);
            // gen_store_trace_info_rte_mbuf(node, mbufs[i]);
        }
    }

    while (nr_pkts) {
        unsigned int nr_mbufs_this_batch =
            (nr_pkts > RTE_GRAPH_BURST_SIZE) ? RTE_GRAPH_BURST_SIZE : nr_pkts;

        int ret = vdev_dispatch(shm_dev_desc, graph->id, mbufs, nr_mbufs_this_batch, 0);
        if (unlikely(ret < 0)) {
            /* Dispatch failed: these mbufs should be dropped */
            rte_node_enqueue(graph, node, 0, (void **)mbufs, nr_mbufs_this_batch);
        }

        nr_pkts -= nr_mbufs_this_batch;
        mbufs += nr_mbufs_this_batch;

        /* Retrieve the backpressure packets */
        struct rte_mbuf *rt_mbufs[RTE_GRAPH_BURST_SIZE];

        ret = vdev_rt_pkts_retrieve(shm_dev_desc, graph->id, rt_mbufs, RTE_GRAPH_BURST_SIZE);
        /* Send the returned packets to the pkt-drop node */
        if (unlikely(ret > 0)) {
            rte_node_enqueue(graph, node, 0, (void **)rt_mbufs, ret);
        }
    }

    struct shmdev_stat_per_lcore *graph_stats = &shmdev_graph_stat[graph->id];

    graph_stats->total_tx_pkts += cnt;
    graph_stats->tx_pkts_per_batch = cnt;

    return cnt;
}

/**************************************
 *         Shmdev Statistics
 **************************************/

cJSON *
shmdev_tx_node_monit_loop(struct sc_main *sc)
{
    cJSON *json_root = cJSON_CreateObject();
    unsigned int nr_graphs = sc->nr_io_thread;
    uint64_t total_pkts[nr_graphs];
    uint64_t pkts_per_batch[nr_graphs];

    for (uint32_t graph_id = 0; graph_id < nr_graphs; graph_id++) {
        struct shmdev_stat_per_lcore *stat_item = &shmdev_graph_stat[graph_id];

        if (stat_item->total_tx_pkts == 0) {
            total_pkts[graph_id] = 0;
            pkts_per_batch[graph_id] = 0;
            continue;
        }

        total_pkts[graph_id] = stat_item->total_tx_pkts;
        pkts_per_batch[graph_id] = stat_item->tx_pkts_per_batch;
    }

    cJSON *json_total_pkts = create_uint64_array(total_pkts, nr_graphs);
    cJSON_AddItemToObject(json_root, "shmdev_tx, total_pkts", json_total_pkts);

    cJSON *json_pkts_per_batch = create_uint64_array(pkts_per_batch, nr_graphs);
    cJSON_AddItemToObject(json_root, "shmdev_tx, pkts_per_batch", json_pkts_per_batch);

    return json_root;
}

cJSON *
shmdev_rx_node_monit_loop(struct sc_main *sc)
{
    cJSON *json_root = cJSON_CreateObject();
    unsigned int nr_graphs = sc->nr_io_thread;
    uint64_t total_pkts[nr_graphs];
    uint64_t pkts_per_batch[nr_graphs];
    uint64_t original_pkts[nr_graphs];
    uint64_t nf_create_pkts[nr_graphs];
    uint64_t to_eth_ingress_pkts[nr_graphs];

    for (uint32_t graph_id = 0; graph_id < nr_graphs; graph_id++) {
        struct shmdev_stat_per_lcore *stat_item = &shmdev_graph_stat[graph_id];

        if (stat_item->total_rx_pkts == 0) {
            total_pkts[graph_id] = 0;
            pkts_per_batch[graph_id] = 0;
            original_pkts[graph_id] = 0;
            nf_create_pkts[graph_id] = 0;
            to_eth_ingress_pkts[graph_id] = 0;
            continue;
        }

        total_pkts[graph_id] = stat_item->total_rx_pkts;
        pkts_per_batch[graph_id] = stat_item->rx_pkts_per_batch;
        original_pkts[graph_id] = stat_item->original_pkts;
        nf_create_pkts[graph_id] = stat_item->nf_create_pkts;
        to_eth_ingress_pkts[graph_id] = stat_item->to_eth_ingress_pkts;
    }

    cJSON *json_total_pkts = create_uint64_array(total_pkts, nr_graphs);
    cJSON_AddItemToObject(json_root, "shmdev_rx, total_pkts", json_total_pkts);

    cJSON *json_pkts_per_batch = create_uint64_array(pkts_per_batch, nr_graphs);
    cJSON_AddItemToObject(json_root, "shmdev_rx, pkts_per_batch", json_pkts_per_batch);

    cJSON *json_original_pkts = create_uint64_array(original_pkts, nr_graphs);
    cJSON_AddItemToObject(json_root, "shmdev_rx, original_pkts", json_original_pkts);

    cJSON *json_nf_create_pkts = create_uint64_array(nf_create_pkts, nr_graphs);
    cJSON_AddItemToObject(json_root, "shmdev_rx, nf_create_pkts", json_nf_create_pkts);

    cJSON *json_to_eth_ingress_pkts = create_uint64_array(to_eth_ingress_pkts, nr_graphs);
    cJSON_AddItemToObject(json_root, "shmdev_rx, to_eth_ingress_pkts", json_to_eth_ingress_pkts);

    return json_root;
}
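/*
 * The registration of these process callbacks with the rte_graph framework is
 * not part of this file. The block below is a minimal sketch of how
 * shmdev_rx_node_process could be wired up through the standard DPDK
 * struct rte_node_register / RTE_NODE_REGISTER API. The next-node names
 * ("forwarder", "health_check", "pkt_drop") are assumptions chosen to match
 * the SHMDEV_RX_NEXT_NODE_* edge indices; the real names are defined wherever
 * the graph is actually assembled.
 */
#ifdef SHMDEV_REGISTRATION_SKETCH /* illustrative only; real registration lives elsewhere */
static struct rte_node_register shmdev_rx_node_sketch = {
    .name = "shmdev_rx",
    .process = shmdev_rx_node_process,
    .nb_edges = SHMDEV_RX_NEXT_NODE_MAX,
    .next_nodes = {
        [SHMDEV_RX_NEXT_NODE_FORWARDER] = "forwarder",       /* assumed node name */
        [SHMDEV_RX_NEXT_NODE_HEALTH_CHECK] = "health_check", /* assumed node name */
        [SHMDEV_RX_NEXT_NODE_PKT_DROP] = "pkt_drop",         /* assumed node name */
    },
};
RTE_NODE_REGISTER(shmdev_rx_node_sketch);
#endif

/*
 * create_uint64_array() is provided elsewhere in the tree. A minimal sketch
 * of what it is assumed to do (build a cJSON number array from a uint64_t
 * buffer) is given below, using only the public cJSON API; note that cJSON
 * stores numbers as double, so values above 2^53 lose precision. This is a
 * sketch under that assumption, not the original implementation.
 */
#ifdef SHMDEV_REGISTRATION_SKETCH
static cJSON *
create_uint64_array_sketch(const uint64_t *vals, unsigned int n)
{
    cJSON *arr = cJSON_CreateArray();

    for (unsigned int i = 0; i < n; i++)
        cJSON_AddItemToArray(arr, cJSON_CreateNumber((double)vals[i]));

    return arr;
}
#endif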