summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorsongyanchao <[email protected]>2024-04-22 12:05:25 +0000
committersongyanchao <[email protected]>2024-04-22 12:05:25 +0000
commit3d294e63dc5a2cad32795145e5b1086b4a126789 (patch)
tree2ef0abb92a6aa919440dde5f605956fe8c72388b
parent5867cc2ee781146151d129b48f3bb53ce7cf02e2 (diff)
🐞 fix: Fix mbufs_to_send array out of bounds.
Fix mbufs_to_send array out of bounds.
-rw-r--r--service/src/node_phydev.c135
1 file changed, 73 insertions, 62 deletions
diff --git a/service/src/node_phydev.c b/service/src/node_phydev.c
index ba092f5..2886431 100644
--- a/service/src/node_phydev.c
+++ b/service/src/node_phydev.c
@@ -5,11 +5,11 @@
#include <rte_graph_worker.h>
#include <rte_mbuf.h>
+#include <cJSON.h>
#include <metadata_define.h>
#include <rte_memcpy.h>
#include <sc_node.h>
#include <sc_node_common.h>
-#include <cJSON.h>
struct dev_node_elem
{
@@ -345,9 +345,8 @@ static unsigned int mbufs_msgpack_decode(struct rte_mbuf ** mbufs, unsigned int
return out_nr_mbufs;
}
-
-void df_calculate_packet_input(struct df_stat_counters * df_counters, unsigned int MR,
- struct rte_mbuf * mbufs[], unsigned int nr_mbufs)
+void df_calculate_packet_input(struct df_stat_counters * df_counters, unsigned int MR, struct rte_mbuf * mbufs[],
+ unsigned int nr_mbufs)
{
/* now tsc */
uint64_t now_tsc = rte_get_timer_cycles();
@@ -359,7 +358,7 @@ void df_calculate_packet_input(struct df_stat_counters * df_counters, unsigned i
Sj += mbufs[i]->pkt_len;
}
- if(Sj == 0)
+ if (Sj == 0)
{
return;
}
@@ -440,7 +439,8 @@ static __rte_always_inline uint16_t dpdk_dev_rx_node_process(struct rte_graph *
}
unsigned int qid = qid_map->qid_map[core_id];
- unsigned int nr_mbufs = rte_eth_rx_burst(dev_desc->port_id, qid, (struct rte_mbuf **)node->objs, RTE_GRAPH_BURST_SIZE);
+ unsigned int nr_mbufs =
+ rte_eth_rx_burst(dev_desc->port_id, qid, (struct rte_mbuf **)node->objs, RTE_GRAPH_BURST_SIZE);
if (nr_mbufs == 0)
{
@@ -450,7 +450,7 @@ static __rte_always_inline uint16_t dpdk_dev_rx_node_process(struct rte_graph *
else
{
stat_per_core->rx_non_zero_iterations++;
- stat_per_core->total_rx_pkts+= nr_mbufs;
+ stat_per_core->total_rx_pkts += nr_mbufs;
}
#if MR_PHYDEV_ENABLE_DF_CALCULATE
@@ -554,10 +554,11 @@ static __rte_always_inline uint16_t dpdk_msgpack_dev_tx_node_process(struct rte_
return tx_nr_mbufs;
}
-#define MR_PHYDEV_RETRY_WHEN_TX_FAILED 1
+#define MR_PHYDEV_RETRY_WHEN_TX_FAILED 1
-static __rte_always_inline void do_tx_burst(struct rte_graph * graph, struct rte_node * node, struct mr_dev_desc * dev_desc,
- struct rte_mbuf ** mbufs_to_send, unsigned int nr_mbufs_to_send)
+static __rte_always_inline void do_tx_burst(struct rte_graph * graph, struct rte_node * node,
+ struct mr_dev_desc * dev_desc, struct rte_mbuf ** mbufs_to_send,
+ unsigned int nr_mbufs_to_send)
{
unsigned int tx_nr_mbufs = 0;
while (tx_nr_mbufs < nr_mbufs_to_send)
@@ -592,70 +593,80 @@ static __rte_always_inline uint16_t dpdk_dev_tx_node_process(struct rte_graph *
df_calculate_packet_input(&stat_per_core->tx_df, DF_MR_DEFAULT, (struct rte_mbuf **)objs, cnt);
#endif
- struct rte_mbuf * mbufs_to_send[RTE_GRAPH_BURST_SIZE];
- unsigned int nr_mbufs_to_send = 0;
+ uint16_t n_left_from = cnt;
- struct rte_mbuf * mbufs_to_drop[RTE_GRAPH_BURST_SIZE];
- unsigned int nr_mbufs_to_drop = 0;
-
- unsigned int counter_tx_meter_green = 0;
- unsigned int counter_tx_meter_yellow = 0;
- unsigned int counter_tx_meter_red = 0;
-
- if (dpdk_dev_desc->en_tx_meter)
+ while (n_left_from > 0)
{
- struct rte_meter_srtcm * meter = dpdk_dev_desc->tx_meter[graph->id];
- struct rte_meter_srtcm_profile * profile = dpdk_dev_desc->tx_meter_profile[graph->id];
- for (unsigned int i = 0; i < cnt; i++)
- {
- struct rte_mbuf * mbuf = (struct rte_mbuf *)objs[i];
- uint64_t tsc = rte_get_timer_cycles();
- enum rte_color color = rte_meter_srtcm_color_blind_check(meter, profile, tsc, mbuf->pkt_len);
+ struct rte_mbuf * mbufs_to_send[RTE_GRAPH_BURST_SIZE];
+ unsigned int nr_mbufs_to_send = 0;
- switch (color)
- {
- case RTE_COLORS:
- case RTE_COLOR_GREEN:
- mbufs_to_send[nr_mbufs_to_send++] = mbuf;
- counter_tx_meter_green++;
- break;
+ struct rte_mbuf * mbufs_to_drop[RTE_GRAPH_BURST_SIZE];
+ unsigned int nr_mbufs_to_drop = 0;
- case RTE_COLOR_YELLOW:
- mbufs_to_send[nr_mbufs_to_send++] = mbuf;
- do_tx_burst(graph, node, dev_desc, mbufs_to_send, nr_mbufs_to_send);
- nr_mbufs_to_send = 0;
+ unsigned int counter_tx_meter_green = 0;
+ unsigned int counter_tx_meter_yellow = 0;
+ unsigned int counter_tx_meter_red = 0;
- delay_ns_block(dpdk_dev_desc->tx_meter_yellow_pkt_delay_ns);
- counter_tx_meter_yellow++;
- break;
+ uint16_t nr_curr_send = n_left_from > RTE_DIM(mbufs_to_send) ? RTE_DIM(mbufs_to_send) : n_left_from;
- case RTE_COLOR_RED:
- stat_per_core->tx_meter_red++;
- mbufs_to_drop[nr_mbufs_to_drop++] = mbuf;
- break;
+ n_left_from -= nr_curr_send;
+
+ if (dpdk_dev_desc->en_tx_meter)
+ {
+ struct rte_meter_srtcm * meter = dpdk_dev_desc->tx_meter[graph->id];
+ struct rte_meter_srtcm_profile * profile = dpdk_dev_desc->tx_meter_profile[graph->id];
+ for (unsigned int i = 0; i < nr_curr_send; i++)
+ {
+ struct rte_mbuf * mbuf = (struct rte_mbuf *)objs[i];
+ uint64_t tsc = rte_get_timer_cycles();
+ enum rte_color color = rte_meter_srtcm_color_blind_check(meter, profile, tsc, mbuf->pkt_len);
+
+ switch (color)
+ {
+ case RTE_COLORS:
+ case RTE_COLOR_GREEN:
+ mbufs_to_send[nr_mbufs_to_send++] = mbuf;
+ counter_tx_meter_green++;
+ break;
+
+ case RTE_COLOR_YELLOW:
+ mbufs_to_send[nr_mbufs_to_send++] = mbuf;
+ do_tx_burst(graph, node, dev_desc, mbufs_to_send, nr_mbufs_to_send);
+ nr_mbufs_to_send = 0;
+
+ delay_ns_block(dpdk_dev_desc->tx_meter_yellow_pkt_delay_ns);
+ counter_tx_meter_yellow++;
+ break;
+
+ case RTE_COLOR_RED:
+ stat_per_core->tx_meter_red++;
+ mbufs_to_drop[nr_mbufs_to_drop++] = mbuf;
+ break;
+ }
}
}
- }
- else
- {
- rte_memcpy(mbufs_to_send, objs, cnt * sizeof(void *));
- nr_mbufs_to_send = cnt;
- }
+ else
+ {
+ rte_memcpy(mbufs_to_send, objs, nr_curr_send * sizeof(void *));
+ nr_mbufs_to_send = nr_curr_send;
+ }
- do_tx_burst(graph, node, dev_desc, mbufs_to_send, nr_mbufs_to_send);
+ do_tx_burst(graph, node, dev_desc, mbufs_to_send, nr_mbufs_to_send);
- /* enqueue drop packet to next node */
- if (nr_mbufs_to_drop > 0)
- {
- rte_node_enqueue(graph, node, 0, (void **)mbufs_to_drop, nr_mbufs_to_drop);
+ /* enqueue drop packet to next node */
+ if (nr_mbufs_to_drop > 0)
+ {
+ rte_node_enqueue(graph, node, 0, (void **)mbufs_to_drop, nr_mbufs_to_drop);
+ }
+
+ /* stat */
+ stat_per_core->tx_meter_red += counter_tx_meter_red;
+ stat_per_core->tx_meter_yellow += counter_tx_meter_yellow;
+ stat_per_core->tx_meter_green += counter_tx_meter_green;
+ stat_per_core->total_tx_pkts += nr_mbufs_to_send;
+ stat_per_core->total_tx_drop_pkts += nr_mbufs_to_drop;
}
- /* stat */
- stat_per_core->tx_meter_red += counter_tx_meter_red;
- stat_per_core->tx_meter_yellow += counter_tx_meter_yellow;
- stat_per_core->tx_meter_green += counter_tx_meter_green;
- stat_per_core->total_tx_pkts += nr_mbufs_to_send;
- stat_per_core->total_tx_drop_pkts += nr_mbufs_to_drop;
return cnt;
}