| author | Lu Qiuwen <[email protected]> | 2022-12-30 17:34:07 +0800 |
|---|---|---|
| committer | 陆秋文 <[email protected]> | 2022-12-30 09:35:21 +0000 |
| commit | eb084047b431a1502a7cc46ae93e83838917fd0a (patch) | |
| tree | bf664f5a23eadef34fde6bb0659a4fa311250929 | |
| parent | bb21654d5b0bcc85c7c66e3a9941af1ad11a5a7c (diff) | |
Fix several issues found in VXLAN protocol parsing. v4.6.11-20230103
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | infra/include/ldbc.h | 22 |
| -rw-r--r-- | service/src/node_bfd.c | 6 |
| -rw-r--r-- | service/src/node_eth_ingress.c | 3 |
| -rw-r--r-- | service/src/node_etherfabric.c | 2 |
4 files changed, 28 insertions, 5 deletions
```diff
diff --git a/infra/include/ldbc.h b/infra/include/ldbc.h
index 48b3d9b..e6738e1 100644
--- a/infra/include/ldbc.h
+++ b/infra/include/ldbc.h
@@ -167,6 +167,28 @@ static inline void * complex_layer_jump_to_outermost(struct pkt_parser_result *
     return NULL;
 }
 
+static inline void complex_layer_adjust(struct pkt_parser_result * pkt_parser_result, unsigned int len)
+{
+    uint8_t adjust_start_layers = pkt_parser_result->start_layers;
+    for (uint8_t i = pkt_parser_result->start_layers; i < pkt_parser_result->nr_layers; i++)
+    {
+        /* this layer need to adjust */
+        struct pkt_layer_result * layer_result = &pkt_parser_result->layers[i];
+        if (layer_result->offset < len)
+        {
+            layer_result->type_id = 0;
+            layer_result->offset = 0;
+            adjust_start_layers++;
+        }
+        else
+        {
+            layer_result->offset -= len;
+        }
+    }
+
+    pkt_parser_result->start_layers = adjust_start_layers;
+}
+
 static inline void * complex_layer_jump_to_innermost(struct pkt_parser_result * pkt_parser_result, const void * start, uint16_t type_id)
 {
diff --git a/service/src/node_bfd.c b/service/src/node_bfd.c
index 0418d80..13c81a2 100644
--- a/service/src/node_bfd.c
+++ b/service/src/node_bfd.c
@@ -196,8 +196,6 @@ static __rte_always_inline uint16_t bfd_node_process(struct rte_graph * graph, s
     /* Single Packet Processing */
     while (n_left_from > 0)
     {
-        struct pkt_head_info pkt_head_info_item = {};
-
         mbuf0 = pkts[0];
         pkts += 1;
         n_left_from -= 1;
@@ -224,6 +222,8 @@ static __rte_always_inline uint16_t bfd_node_process(struct rte_graph * graph, s
             goto node_enqueue;
         }
 
+        struct rte_ether_hdr * ether_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *);
+
         struct rte_ipv4_hdr * ipv4_hdr = complex_layer_jump_to_outermost(pkt_parser_result, rte_pktmbuf_mtod(mbuf0, void *), LAYER_TYPE_ID_IPV4);
@@ -244,7 +244,7 @@ static __rte_always_inline uint16_t bfd_node_process(struct rte_graph * graph, s
 
         /* Reply Bfd Request */
         struct bfd_header_t * bfd_hdr = (struct bfd_header_t *)(udp_hdr + 1);
-        bfd_reply(pkt_head_info_item._ether_hdr, pkt_head_info_item._ipv4_hdr, bfd_hdr);
+        bfd_reply(ether_hdr, ipv4_hdr, bfd_hdr);
         MR_BFD_STAT_ADD_FOR_SESSION_ID(bfd_main, gid, bfd_session_id, total_pkts, 1);
 
         /* Update prev_tsc */
diff --git a/service/src/node_eth_ingress.c b/service/src/node_eth_ingress.c
index ed8e127..5a560c1 100644
--- a/service/src/node_eth_ingress.c
+++ b/service/src/node_eth_ingress.c
@@ -75,7 +75,7 @@ static int endpoint_pkt_filter(struct mr_dev_desc * dev_desc, unsigned int graph
     }
 
     if (parser_result->layers[0].type_id != LAYER_TYPE_ID_ETHER ||
-        parser_result->layers[1].type_id != LAYER_TYPE_IPV4 || parser_result->layers[2].type_id != LAYER_TYPE_UDP)
+        parser_result->layers[1].type_id != LAYER_TYPE_ID_IPV4 || parser_result->layers[2].type_id != LAYER_TYPE_ID_UDP)
     {
         ETH_INGRESS_STAT_ADD(g_eth_ingress_main, graph_id, invalid_pkts, 1);
         return ETH_INGRESS_NEXT_PKT_DROP;
@@ -111,6 +111,7 @@ static int endpoint_pkt_filter(struct mr_dev_desc * dev_desc, unsigned int graph
     return ETH_INGRESS_NEXT_PKT_DROP;
 }
 
+
 /* Eth Ingress Node Process Function */
 static __rte_always_inline uint16_t eth_ingress_node_process(struct rte_graph * graph, struct rte_node * node, void ** objs, uint16_t cnt)
diff --git a/service/src/node_etherfabric.c b/service/src/node_etherfabric.c
index caf660d..c4b4451 100644
--- a/service/src/node_etherfabric.c
+++ b/service/src/node_etherfabric.c
@@ -563,7 +563,7 @@ static __rte_always_inline uint16_t etherfabric_ingress_node_process(struct rte_
 
         static const int ef_pkt_encap_nr_layers = 4;
 
-        pkt_parser_result->start_layers += ef_pkt_encap_nr_layers;
+        complex_layer_adjust(pkt_parser_result, ef_pkt_encap_len);
 
         rte_pktmbuf_adj(mbuf, ef_pkt_encap_len);
 
         /* Send the pkt to classifier */
```
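For context, the new `complex_layer_adjust()` helper keeps the cached parse result consistent after the outer encapsulation is trimmed with `rte_pktmbuf_adj()`: layers whose start offset falls inside the trimmed region are invalidated and skipped via `start_layers`, while the surviving layers have the trimmed length subtracted from their offsets. The self-contained sketch below illustrates that behaviour; the struct layouts, field widths, numeric type IDs, and the example VXLAN layer stack are assumptions for illustration only, not the project's real `ldbc.h` definitions.

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the parser types, reconstructed only from the
 * fields referenced in the diff (assumed layout, not the real headers). */
struct pkt_layer_result {
    uint16_t type_id;  /* layer type id; 0 means "no layer"           */
    uint16_t offset;   /* byte offset of this layer from packet start */
};

struct pkt_parser_result {
    uint8_t start_layers;  /* index of the first valid layer */
    uint8_t nr_layers;     /* number of parsed layers        */
    struct pkt_layer_result layers[8];
};

/* Same logic as the complex_layer_adjust() added in infra/include/ldbc.h:
 * drop the layers that lived inside the first `len` stripped bytes and
 * rebase the offsets of the remaining layers. */
static inline void complex_layer_adjust(struct pkt_parser_result * r, unsigned int len)
{
    uint8_t adjust_start_layers = r->start_layers;

    for (uint8_t i = r->start_layers; i < r->nr_layers; i++)
    {
        struct pkt_layer_result * layer = &r->layers[i];
        if (layer->offset < len)
        {
            /* Layer started inside the stripped headers: invalidate it. */
            layer->type_id = 0;
            layer->offset = 0;
            adjust_start_layers++;
        }
        else
        {
            /* Layer survives: rebase its offset to the new packet start. */
            layer->offset -= len;
        }
    }

    r->start_layers = adjust_start_layers;
}

int main(void)
{
    /* Hypothetical VXLAN-encapsulated packet, with made-up type ids:
     * outer Ether(0) / IPv4(14) / UDP(34) / VXLAN(42) / inner Ether(50) / IPv4(64). */
    struct pkt_parser_result r = {
        .start_layers = 0,
        .nr_layers = 6,
        .layers = { {1, 0}, {2, 14}, {3, 34}, {4, 42}, {1, 50}, {2, 64} },
    };
    unsigned int encap_len = 50; /* outer Ether + IPv4 + UDP + VXLAN */

    /* Adjust the parse result first, then trim the mbuf in the real code:
     * complex_layer_adjust(pkt_parser_result, ef_pkt_encap_len);
     * rte_pktmbuf_adj(mbuf, ef_pkt_encap_len); */
    complex_layer_adjust(&r, encap_len);

    printf("start_layers = %u\n", (unsigned)r.start_layers);             /* 4  */
    printf("inner ether offset = %u\n", (unsigned)r.layers[4].offset);   /* 0  */
    printf("inner ipv4 offset = %u\n", (unsigned)r.layers[5].offset);    /* 14 */
    return 0;
}
```

In `node_etherfabric.c` this helper replaces the old `pkt_parser_result->start_layers += ef_pkt_encap_nr_layers` bookkeeping, which advanced `start_layers` but did not rebase the surviving layers' offsets after `rte_pktmbuf_adj()` removed the outer headers.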
