author      Lu Qiuwen <[email protected]>    2024-03-26 17:04:39 +0800
committer   陆秋文 <[email protected]>          2024-04-14 12:38:19 +0000
commit      7dfa9e3812f312e4b01c6be643313142d1fd53b7 (patch)
tree        3cc9c37276c499f2187fb386d522f3194fb52d72
parent      984ab961fbd6c64e7281f58e3f678bce0caaed9e (diff)
add rx spin time stat for i/o cores.
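The rx spin time of an I/O core is the fraction of rte_eth_rx_burst()
polls that returned zero packets since the previous monit read (the
iteration counters are cleared on every read, so each sample covers one
monit interval):

    rx_spin_time = rx_zero_iterations / (rx_zero_iterations + rx_non_zero_iterations)

A value near 1.0 means the core is mostly spinning on empty queues; a
value near 0.0 means almost every poll returned traffic.

The counters show up under a new "phydev" object in the monit output,
one array entry per I/O graph. Rough sketch of the shape (values are
illustrative only):

    "phydev": {
        "total_rx_pkts": [123456, 654321],
        "total_tx_pkts": [123000, 650000],
        "rx_spin_time":  [0.93, 0.15]
    }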
-rw-r--r--   service/src/monit.c         2
-rw-r--r--   service/src/node_phydev.c  78
2 files changed, 78 insertions, 2 deletions
diff --git a/service/src/monit.c b/service/src/monit.c
index 447bc15..4157392 100644
--- a/service/src/monit.c
+++ b/service/src/monit.c
@@ -223,6 +223,7 @@ extern cJSON * olp_manager_monit_loop(struct sc_main * sc);
extern cJSON * tera_ingress_node_monit_loop(struct sc_main * sc);
extern cJSON * tera_egress_node_monit_loop(struct sc_main * sc);
extern cJSON * devmgr_monit_loop(struct sc_main * sc);
+extern cJSON * phydev_rx_node_monit_loop(struct sc_main * sc);
static cJSON * monit_root(struct sc_main * sc)
{
@@ -239,6 +240,7 @@ static cJSON * monit_root(struct sc_main * sc)
cJSON_AddItemToObject(j_root, "pkt_latency", monit_pkt_latency_global(sc));
// cJSON_AddItemToObject(j_root, "offload", smartoffload_monit_loop(sc));
+ cJSON_AddItemToObject(j_root, "phydev", phydev_rx_node_monit_loop(sc));
cJSON_AddItemToObject(j_root, "eth-ingress", eth_ingress_node_monit_loop(sc));
cJSON_AddItemToObject(j_root, "bridge", bridge_node_monit_loop(sc));
cJSON_AddItemToObject(j_root, "vwire-ingress", vwire_ingress_node_monit_loop(sc));
diff --git a/service/src/node_phydev.c b/service/src/node_phydev.c
index 2c7ca84..51c80f9 100644
--- a/service/src/node_phydev.c
+++ b/service/src/node_phydev.c
@@ -10,6 +10,7 @@
#include <rte_memcpy.h>
#include <sc_node.h>
#include <sc_node_common.h>
+#include <cJSON.h>
struct dev_node_elem
{
@@ -24,6 +25,16 @@ struct dev_node_main
TAILQ_HEAD(dev_node_elem_head, dev_node_elem) elems_head;
};
+struct phydev_stat_per_core
+{
+ volatile uint64_t total_rx_pkts;
+ volatile uint64_t total_tx_pkts;
+ volatile uint32_t rx_zero_iterations;
+ volatile uint32_t rx_non_zero_iterations;
+} __rte_cache_aligned;
+
+static struct phydev_stat_per_core phydev_stat[RTE_MAX_LCORE];
+
static struct dev_node_main st_dev_node_main;
static struct dev_node_main * p_dev_node_main = &st_dev_node_main;
@@ -343,6 +354,7 @@ static __rte_always_inline uint16_t dpdk_dev_rx_node_process(struct rte_graph *
struct mr_dev_desc_qid_map * qid_map = dev_desc->rx_qid_map;
unsigned int core_id = rte_lcore_id();
+ struct phydev_stat_per_core * stat_per_core = &phydev_stat[graph->id];
/* check the core can do recv for this device or not */
if (qid_map->qid_enabled[core_id] == 0)
@@ -351,13 +363,18 @@ static __rte_always_inline uint16_t dpdk_dev_rx_node_process(struct rte_graph *
}
unsigned int qid = qid_map->qid_map[core_id];
- unsigned int nr_mbufs =
- rte_eth_rx_burst(dev_desc->port_id, qid, (struct rte_mbuf **)node->objs, RTE_GRAPH_BURST_SIZE);
+ unsigned int nr_mbufs = rte_eth_rx_burst(dev_desc->port_id, qid, (struct rte_mbuf **)node->objs, RTE_GRAPH_BURST_SIZE);
if (nr_mbufs == 0)
{
+ stat_per_core->rx_zero_iterations++;
return 0;
}
+ else
+ {
+ stat_per_core->rx_non_zero_iterations++;
+ stat_per_core->total_rx_pkts += nr_mbufs;
+ }
/* hash calculate */
node->idx = nr_mbufs;
@@ -469,6 +486,9 @@ static __rte_always_inline uint16_t dpdk_dev_tx_node_process(struct rte_graph *
rte_node_enqueue(graph, node, 0, &objs[tx_nr_mbufs], cnt - tx_nr_mbufs);
}
+ /* stat */
+ struct phydev_stat_per_core * stat_per_core = &phydev_stat[graph->id];
+ stat_per_core->total_tx_pkts += tx_nr_mbufs;
return cnt;
}
@@ -759,6 +779,60 @@ int node_manager_dev_init(struct sc_main * sc, struct node_manager_main * node_m
return 0;
}
+static cJSON * create_uint64_array(const uint64_t * value, unsigned int nr_value)
+{
+ cJSON * uint64_array = cJSON_CreateArray();
+ for (unsigned int i = 0; i < nr_value; i++)
+ {
+ cJSON_AddItemToArray(uint64_array, cJSON_CreateNumber(value[i]));
+ }
+
+ return uint64_array;
+}
+
+cJSON * phydev_rx_node_monit_loop(struct sc_main * sc)
+{
+ unsigned int nr_graphs = sc->nr_io_thread;
+ cJSON * json_root = cJSON_CreateObject();
+
+ /* per-core snapshots: total_rx_pkts, total_tx_pkts, rx_spin_time */
+ uint64_t total_rx_pkts[RTE_MAX_LCORE];
+ uint64_t total_tx_pkts[RTE_MAX_LCORE];
+ double rx_spin_time[RTE_MAX_LCORE];
+
+ for (unsigned int i = 0; i < nr_graphs; i++)
+ {
+ total_rx_pkts[i] = phydev_stat[i].total_rx_pkts;
+ total_tx_pkts[i] = phydev_stat[i].total_tx_pkts;
+
+ /* calculate the spin time */
+ uint32_t rx_zero_iterations = phydev_stat[i].rx_zero_iterations;
+ uint32_t rx_non_zero_iterations = phydev_stat[i].rx_non_zero_iterations;
+ phydev_stat[i].rx_non_zero_iterations = 0;
+ phydev_stat[i].rx_zero_iterations = 0;
+
+ uint32_t rx_total_iterations = rx_zero_iterations + rx_non_zero_iterations;
+ if (rx_total_iterations != 0)
+ {
+ rx_spin_time[i] = (double)rx_zero_iterations / (double)rx_total_iterations;
+ }
+ else
+ {
+ rx_spin_time[i] = 0.0;
+ }
+ }
+
+ cJSON * json_total_rx_pkts = create_uint64_array(total_rx_pkts, nr_graphs);
+ cJSON_AddItemToObject(json_root, "total_rx_pkts", json_total_rx_pkts);
+
+ cJSON * json_total_tx_pkts = create_uint64_array(total_tx_pkts, nr_graphs);
+ cJSON_AddItemToObject(json_root, "total_tx_pkts", json_total_tx_pkts);
+
+ cJSON * json_rx_spin_time = cJSON_CreateDoubleArray(rx_spin_time, (int)nr_graphs);
+ cJSON_AddItemToObject(json_root, "rx_spin_time", json_rx_spin_time);
+ return json_root;
+}
+
int node_manager_phydev_deinit(struct node_manager_main * node_mgr_main)
{
return 0;