/*
 * The original include list was elided during extraction; the headers below
 * cover the DPDK and libc APIs used in this file. Project-local headers
 * (mr_*, devmgr, sc_main, dp_trace, cJSON helpers) are assumed to be included
 * as in the original.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>

#include <rte_arp.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_graph_worker.h>
#include <rte_icmp.h>
#include <rte_ip.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_udp.h>

#define MR_ETH_INGRESS_BFD_PORT 3784

/* Eth ingress next nodes */
enum {
    ETH_INGRESS_NEXT_EF_INGRESS = 0,
    ETH_INGRESS_NEXT_BRIDGE,
    ETH_INGRESS_NEXT_BFD,
    ETH_INGRESS_NEXT_VWIRE_INGRESS,
    ETH_INGRESS_NEXT_TERA_INGRESS,
    ETH_INGRESS_NEXT_FORWARDER,
    ETH_INGRESS_NEXT_HEALTH_CHECK,
    ETH_INGRESS_NEXT_ETH_EGRESS,
    ETH_INGRESS_NEXT_PKT_DROP,
    ETH_INGRESS_NEXT_MAX,
};

/* Eth ingress drop reasons */
enum eth_ingress_drop_reason {
    ETH_INGR_DROP_RSN_INVALID_ROLE_TYPE = 0,
    ETH_INGR_DROP_RSN_INVALID_LAYERS,
    ETH_INGR_DROP_RSN_INVALID_ICMP_TYPE,
    ETH_INGR_DROP_RSN_INVALID_ADAPTER_TYPE,
    ETH_INGR_DROP_RSN_NONCOMPLIANT_EF,
    ETH_INGR_DROP_RSN_NONCOMPLIANT_TERA,
    ETH_INGR_DROP_RSN_MAX,
};

/* Eth ingress drop reason strings */
static const char * eth_ingress_drop_reason_str[ETH_INGR_DROP_RSN_MAX] = {
    "drop_rsn_invalid_role_type",
    "drop_rsn_invalid_pkt_layers",
    "drop_rsn_invalid_icmp_type",
    "drop_rsn_invalid_adapter_type",
    "drop_rsn_pkt_noncompliant_with_ef",
    "drop_rsn_pkt_noncompliant_with_tera",
};

/* Eth ingress statistics struct */
struct eth_ingress_stats {
    volatile uint64_t total_pkts;
    volatile uint64_t pkts_per_batch;
    volatile uint64_t to_bridge;
    volatile uint64_t to_vwire;
    volatile uint64_t to_health_check;
    volatile uint64_t to_forwarder;
    volatile uint64_t to_bfd;
    volatile uint64_t to_ef_ingress;
    volatile uint64_t to_eth_egress;
    volatile uint64_t to_tera_ingress;
    volatile uint64_t drop_reason[ETH_INGR_DROP_RSN_MAX];
} __rte_cache_aligned;

static struct eth_ingress_stats stats_per_graph[RTE_MAX_LCORE] = {};

/* Eth ingress node context */
struct eth_ingress_node_ctx {
    /* Cached next index */
    uint16_t next_index;
};

#define ETH_INGRESS_NODE_LAST_NEXT(ctx) (((struct eth_ingress_node_ctx *)ctx)->next_index)

/* Answer ARP requests for this device's address (the reply is built in place) */
static int arp_reply_filter(struct mr_dev_desc * dev_desc, struct rte_mbuf * mbuf)
{
    struct rte_ether_hdr * eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
    struct rte_arp_hdr * arp_hdr = (struct rte_arp_hdr *)((char *)eth_hdr + sizeof(struct rte_ether_hdr));

    if ((eth_hdr->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) &&
        (arp_hdr->arp_opcode == rte_cpu_to_be_16(RTE_ARP_OP_REQUEST)) &&
        (arp_hdr->arp_data.arp_tip == dev_desc->in_addr.s_addr)) {
        arp_hdr->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REPLY);

        /* Swap src and dst data and set the bonding MAC */
        rte_ether_addr_copy(&eth_hdr->src_addr, &eth_hdr->dst_addr);
        rte_ether_addr_copy(&dev_desc->eth_addr, &eth_hdr->src_addr);
        rte_ether_addr_copy(&arp_hdr->arp_data.arp_sha, &arp_hdr->arp_data.arp_tha);
        arp_hdr->arp_data.arp_tip = arp_hdr->arp_data.arp_sip;
        rte_ether_addr_copy(&dev_desc->eth_addr, &arp_hdr->arp_data.arp_sha);
        arp_hdr->arp_data.arp_sip = dev_desc->in_addr.s_addr;

        return RT_SUCCESS;
    }

    return RT_ERR;
}
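/*
 * Field mapping performed by arp_reply_filter() on a matching ARP request:
 *
 *   ether.dst  <- ether.src            arp.tha <- arp.sha
 *   ether.src  <- dev_desc->eth_addr   arp.sha <- dev_desc->eth_addr
 *   arp.opcode <- ARP reply            arp.tip <- arp.sip
 *                                      arp.sip <- dev_desc->in_addr
 */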
/* Answer ICMP echo requests for this device's address (IPv4 only) */
static int icmp_reply_filter(struct mr_dev_desc * dev_desc, struct rte_mbuf * mbuf)
{
    struct rte_ether_hdr * eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
    struct rte_ipv4_hdr * ipv4_hdr = (struct rte_ipv4_hdr *)((char *)eth_hdr + sizeof(struct rte_ether_hdr));
    struct rte_icmp_hdr * icmp_hdr = (struct rte_icmp_hdr *)((char *)ipv4_hdr + sizeof(struct rte_ipv4_hdr));

    if ((icmp_hdr->icmp_type == RTE_IP_ICMP_ECHO_REQUEST) && (ipv4_hdr->dst_addr == dev_desc->in_addr.s_addr)) {
        /* Set the ICMP type and incrementally update the checksum */
        icmp_hdr->icmp_type = RTE_IP_ICMP_ECHO_REPLY;
        uint32_t cksum = ~icmp_hdr->icmp_cksum & 0xffff;
        cksum += ~RTE_BE16(RTE_IP_ICMP_ECHO_REQUEST << 8) & 0xffff;
        cksum += RTE_BE16(RTE_IP_ICMP_ECHO_REPLY << 8);
        cksum = (cksum & 0xffff) + (cksum >> 16);
        cksum = (cksum & 0xffff) + (cksum >> 16);
        icmp_hdr->icmp_cksum = ~cksum;

        /* Swap src and dst addresses and set the bonding IP */
        uint32_t ip_addr = ipv4_hdr->src_addr;
        ipv4_hdr->src_addr = ipv4_hdr->dst_addr;
        ipv4_hdr->dst_addr = ip_addr;

        /* Recalculate the IPv4 header checksum */
        ipv4_hdr->hdr_checksum = 0;
        ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);

        /* Swap src and dst MAC addresses */
        struct rte_ether_addr ether_addr_swap;
        rte_ether_addr_copy(&eth_hdr->src_addr, &ether_addr_swap);
        rte_ether_addr_copy(&eth_hdr->dst_addr, &eth_hdr->src_addr);
        rte_ether_addr_copy(&ether_addr_swap, &eth_hdr->dst_addr);

        return RT_SUCCESS;
    }

    return RT_ERR;
}
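/*
 * The incremental update above follows RFC 1624: only the ICMP type byte
 * changes (echo request -> echo reply), so the checksum is patched as
 * HC' = ~(~HC + ~m + m') without walking the payload. An equivalent but
 * slower approach is to recompute the checksum over the whole ICMP message.
 * A minimal sketch, assuming the ICMP message is contiguous in the first
 * mbuf segment; the helper name is illustrative and not part of the
 * original code:
 */
static __rte_unused uint16_t icmp_full_cksum(const struct rte_ipv4_hdr * ipv4_hdr, struct rte_icmp_hdr * icmp_hdr)
{
    /* ICMP length = IPv4 total length minus the IPv4 header length */
    uint16_t icmp_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - rte_ipv4_hdr_len(ipv4_hdr);

    icmp_hdr->icmp_cksum = 0;
    return (uint16_t)~rte_raw_cksum(icmp_hdr, icmp_len);
}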
/* Decide whether a packet should be redirected to the kernel representor device */
static int kernel_resp_dev_filter(struct mr_dev_desc * dev_desc, struct rte_mbuf * mbuf, struct pkt_parser_result * parser_result)
{
    /* Not addressed to the local MAC and not broadcast/multicast: ignore it */
    const struct rte_ether_hdr * ether_hdr = rte_pktmbuf_mtod(mbuf, const struct rte_ether_hdr *);
    if (rte_is_broadcast_ether_addr(&ether_hdr->dst_addr) == 0 &&
        rte_is_multicast_ether_addr(&ether_hdr->dst_addr) == 0 &&
        rte_is_same_ether_addr(&ether_hdr->dst_addr, &dev_desc->eth_addr) == 0) {
        return 0;
    }

    const struct rte_vlan_hdr * vlan_hdr = NULL;
    rte_be16_t eth_proto = ether_hdr->ether_type;
    if (eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN)) {
        /* VLAN requires the device to be operating in trunk mode */
        if (dev_desc->dev_mode != MR_DEV_MODE_TRUNK) {
            return 0;
        }
        vlan_hdr = rte_pktmbuf_mtod_offset(mbuf, const struct rte_vlan_hdr *, parser_result->layers[1].offset);
        eth_proto = vlan_hdr->eth_proto;
    }

    /* For ARP, RARP and LLDP, only the destination MAC address is checked */
    struct representor_config * resp_cfg = dev_desc->representor_config;
    if (eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP) && resp_cfg->redirect_local_arp > 0) {
        return 1;
    } else if (eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_RARP) && resp_cfg->redirect_local_rarp > 0) {
        return 1;
    } else if (eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP) && resp_cfg->redirect_local_lldp > 0) {
        return 1;
    }

    /* Check the VLAN and the destination IP address */
    int ret;
    uint16_t vlan_id = 0;
    struct mr_generic_ip_hdr generic_ip_hdr;
    if (eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
        if (vlan_hdr != NULL) {
            const struct rte_ipv4_hdr * ipv4_hdr =
                rte_pktmbuf_mtod_offset(mbuf, const struct rte_ipv4_hdr *, parser_result->layers[2].offset);
            vlan_id = vlan_hdr->vlan_tci;
            generic_ip_hdr.sa_family = AF_INET;
            generic_ip_hdr.ipv4_hdr = ipv4_hdr;
            ret = mr_is_local_addr_for_trunk(dev_desc, vlan_id, &generic_ip_hdr);
        } else {
            const struct rte_ipv4_hdr * ipv4_hdr =
                rte_pktmbuf_mtod_offset(mbuf, const struct rte_ipv4_hdr *, parser_result->layers[1].offset);
            generic_ip_hdr.sa_family = AF_INET;
            generic_ip_hdr.ipv4_hdr = ipv4_hdr;
            ret = mr_is_local_addr(dev_desc, &generic_ip_hdr);
        }
        if (ret == RT_ERR) {
            return 0;
        }
    } else if (eth_proto == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
        if (vlan_hdr != NULL) {
            const struct rte_ipv6_hdr * ipv6_hdr =
                rte_pktmbuf_mtod_offset(mbuf, const struct rte_ipv6_hdr *, parser_result->layers[2].offset);
            vlan_id = vlan_hdr->vlan_tci;
            generic_ip_hdr.sa_family = AF_INET6;
            generic_ip_hdr.ipv6_hdr = ipv6_hdr;
            ret = mr_is_local_addr_for_trunk(dev_desc, vlan_id, &generic_ip_hdr);
        } else {
            const struct rte_ipv6_hdr * ipv6_hdr =
                rte_pktmbuf_mtod_offset(mbuf, const struct rte_ipv6_hdr *, parser_result->layers[1].offset);
            generic_ip_hdr.sa_family = AF_INET6;
            generic_ip_hdr.ipv6_hdr = ipv6_hdr;
            ret = mr_is_local_addr(dev_desc, &generic_ip_hdr);
        }
        if ((ret == RT_ERR) && (!MR_IS_IPV6_SOLICITED_NODE_MCAST(generic_ip_hdr.ipv6_hdr->dst_addr))) {
            return 0;
        }
    } else {
        return 0;
    }

    /* Get the UDP dst port; currently only eth+vlan+ip+udp and eth+ip+udp are supported */
    struct rte_udp_hdr * udp_hdr = NULL;
    if ((vlan_hdr != NULL) && (parser_result->layers[3].type_id == LAYER_TYPE_ID_UDP)) {
        udp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_udp_hdr *, parser_result->layers[3].offset);
    } else if ((vlan_hdr == NULL) && (parser_result->layers[2].type_id == LAYER_TYPE_ID_UDP)) {
        udp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_udp_hdr *, parser_result->layers[2].offset);
    } else {
        return 1;
    }

    /* BFD and G-VXLAN packets are not sent to the kernel representor device */
    if (udp_hdr->dst_port == rte_cpu_to_be_16(MR_ETH_INGRESS_BFD_PORT)) {
        return 0;
    } else if (udp_hdr->dst_port == rte_cpu_to_be_16(G_VXLAN_DPORT)) {
        return 0;
    } else {
        return 1;
    }

    assert(false); /* unreachable: every branch above returns */
}
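/*
 * Summary of kernel_resp_dev_filter():
 *
 *   return 0 (keep in the datapath) when:
 *     - the dst MAC is neither local, broadcast nor multicast
 *     - the packet is VLAN-tagged but the device is not in trunk mode
 *     - the EtherType is neither IPv4 nor IPv6 (and not a redirected ARP/RARP/LLDP)
 *     - the IPv4/IPv6 destination is not a local address (IPv6 solicited-node
 *       multicast is still accepted)
 *     - the UDP dst port is BFD (3784) or G-VXLAN
 *
 *   return 1 (redirect to the kernel representor) when:
 *     - ARP/RARP/LLDP and the corresponding redirect_local_* knob is enabled
 *     - locally addressed IP traffic that is not BFD/G-VXLAN over UDP
 */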
/* Endpoint device packet handler */
static int endpoint_dev_packet_handler(struct mr_dev_desc * dev_desc, struct eth_ingress_stats * stats,
                                       struct rte_mbuf * mbuf, struct mrb_metadata * mrb_meta,
                                       enum eth_ingress_drop_reason * out_drop_reason)
{
    struct pkt_parser_result * parser_result = &mrb_meta->pkt_parser_result;

    /*
     * This device has a kernel representor device: redirect locally addressed
     * ARP/ICMP/etc. traffic to it (BFD and G-VXLAN stay in the datapath).
     */
    if (dev_desc->device_representor != NULL) {
        /* Should it go to the kernel representor device? */
        if (kernel_resp_dev_filter(dev_desc->device_representor, mbuf, parser_result) > 0) {
            /* Go to the kernel representor device directly */
            mrb_meta->port_egress = dev_desc->device_representor->port_id;
            stats->to_eth_egress++;
            return ETH_INGRESS_NEXT_ETH_EGRESS;
        }
    }
    /* As a representor device, every packet goes to the represented device directly */
    else if (dev_desc->represented_device != NULL) {
        mrb_meta->port_egress = dev_desc->represented_device->port_id;
        stats->to_eth_egress++;
        return ETH_INGRESS_NEXT_ETH_EGRESS;
    }

    /* ARP reply */
    if (unlikely(parser_result->nr_layers < 3)) {
        if (arp_reply_filter(dev_desc, mbuf) == RT_SUCCESS) {
            mrb_meta->port_egress = mrb_meta->port_ingress;
            stats->to_eth_egress++;
            return ETH_INGRESS_NEXT_ETH_EGRESS;
        } else {
            *out_drop_reason = ETH_INGR_DROP_RSN_INVALID_LAYERS;
            stats->drop_reason[*out_drop_reason]++;
            return ETH_INGRESS_NEXT_PKT_DROP;
        }
    }

    /* ICMP reply (IPv4 only) */
    const struct rte_ipv4_hdr * ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_ipv4_hdr *, parser_result->layers[1].offset);
    assert(ipv4_hdr != NULL);
    if (unlikely((parser_result->layers[2].type_id == LAYER_TYPE_ID_ICMP) &&
                 (ipv4_hdr->dst_addr == dev_desc->in_addr.s_addr))) {
        if (icmp_reply_filter(dev_desc, mbuf) == RT_SUCCESS) {
            mrb_meta->port_egress = mrb_meta->port_ingress;
            stats->to_eth_egress++;
            return ETH_INGRESS_NEXT_ETH_EGRESS;
        } else {
            *out_drop_reason = ETH_INGR_DROP_RSN_INVALID_ICMP_TYPE;
            stats->drop_reason[*out_drop_reason]++;
            return ETH_INGRESS_NEXT_PKT_DROP;
        }
    }

    /* BFD packet check */
    const struct rte_udp_hdr * udp_hdr = rte_pktmbuf_mtod_offset(mbuf, struct rte_udp_hdr *, parser_result->layers[2].offset);
    assert(udp_hdr != NULL);
    const uint16_t dst_port = udp_hdr->dst_port;
    if (unlikely(dst_port == rte_cpu_to_be_16(MR_ETH_INGRESS_BFD_PORT))) {
        if (unlikely((ipv4_hdr->dst_addr == dev_desc->in_addr.s_addr) &&
                     (parser_result->layers[2].type_id == LAYER_TYPE_ID_UDP))) {
            stats->to_bfd++;
            return ETH_INGRESS_NEXT_BFD;
        }
    }

    /* Match the adapter type from the ingress port id */
    enum adapter_type adapter_type = port_adapter_mapping_match(dev_desc->port_id);
    if (adapter_type == ADAPTER_TYPE_EF) {
        if (unlikely(parser_result->layers[0].type_id != LAYER_TYPE_ID_ETHER ||
                     parser_result->layers[1].type_id != LAYER_TYPE_ID_IPV4 ||
                     parser_result->layers[2].type_id != LAYER_TYPE_ID_UDP)) {
            *out_drop_reason = ETH_INGR_DROP_RSN_NONCOMPLIANT_EF;
            stats->drop_reason[*out_drop_reason]++;
            return ETH_INGRESS_NEXT_PKT_DROP;
        }
        if (unlikely(ipv4_hdr->dst_addr != dev_desc->in_addr.s_addr)) {
            *out_drop_reason = ETH_INGR_DROP_RSN_NONCOMPLIANT_EF;
            stats->drop_reason[*out_drop_reason]++;
            return ETH_INGRESS_NEXT_PKT_DROP;
        }
        if (unlikely(dst_port != rte_cpu_to_be_16(G_VXLAN_DPORT))) {
            *out_drop_reason = ETH_INGR_DROP_RSN_NONCOMPLIANT_EF;
            stats->drop_reason[*out_drop_reason]++;
            return ETH_INGRESS_NEXT_PKT_DROP;
        }
        stats->to_ef_ingress++;
        return ETH_INGRESS_NEXT_EF_INGRESS;
    } else if (adapter_type == ADAPTER_TYPE_TERA) {
        /* The Tera adapter requires the NIC to be in non-promiscuous mode */
        if (unlikely(parser_result->layers[0].type_id != LAYER_TYPE_ID_ETHER ||
                     parser_result->layers[1].type_id != LAYER_TYPE_ID_VLAN)) {
            *out_drop_reason = ETH_INGR_DROP_RSN_NONCOMPLIANT_TERA;
            stats->drop_reason[*out_drop_reason]++;
            return ETH_INGRESS_NEXT_PKT_DROP;
        }
        stats->to_tera_ingress++;
        return ETH_INGRESS_NEXT_TERA_INGRESS;
    }

    *out_drop_reason = ETH_INGR_DROP_RSN_INVALID_ADAPTER_TYPE;
    stats->drop_reason[*out_drop_reason]++;
    return ETH_INGRESS_NEXT_PKT_DROP;
}

/* Generate and store the trace information */
static __rte_always_inline void gen_store_trace_info_ingress(struct rte_node * node, struct rte_mbuf * mbuf,
                                                             uint16_t next_node_index, struct eth_ingress_stats * stats,
                                                             struct mr_dev_desc * dev_desc,
                                                             enum eth_ingress_drop_reason drop_reason)
{
    /* Populate the next node information */
    char str_record[MR_STRING_MAX];
    int len = snprintf(str_record, sizeof(str_record), "next node:%s", node->nodes[next_node_index]->name);

    /* Populate the reason for the next node */
    switch (next_node_index) {
    case ETH_INGRESS_NEXT_PKT_DROP:
        assert(drop_reason < ETH_INGR_DROP_RSN_MAX);
        len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:%s", eth_ingress_drop_reason_str[drop_reason]);
        break;
    case ETH_INGRESS_NEXT_BRIDGE:
        len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:bridge,id %u", dev_desc->bridge_index);
        break;
    case ETH_INGRESS_NEXT_BFD:
        len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:local bfd pkt");
        break;
    case ETH_INGRESS_NEXT_VWIRE_INGRESS:
        len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:role is vwire");
        break;
    case ETH_INGRESS_NEXT_TERA_INGRESS:
        len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:tera adapter");
        break;
    case ETH_INGRESS_NEXT_FORWARDER:
        len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:role is nf");
        break;
    case ETH_INGRESS_NEXT_HEALTH_CHECK:
        len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:health check pkt");
        break;
    case ETH_INGRESS_NEXT_ETH_EGRESS:
        len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:resp or arp/icmp");
        break;
    case ETH_INGRESS_NEXT_EF_INGRESS:
        len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:ef adapter");
        break;
    default:
        len += snprintf(str_record + len, sizeof(str_record) - len, ", rsn:unknown");
        break;
    }

    /* Emit the trace record */
    struct dp_trace_record_meta meta = {
        .measurement_type = DP_TRACE_MEASUREMENT_TYPE_TRACE,
        .appsym = MR_TRACE_APPSYM,
        .module = node->name
    };
    dp_trace_record_emit_str(sc_main_get()->trace, mbuf, rte_lcore_id(), &meta, str_record);
}
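/*
 * Example trace record produced by gen_store_trace_info_ingress() for a packet
 * redirected to the kernel representor (illustrative):
 *
 *   "next node:eth_egress, rsn:resp or arp/icmp"
 */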
/* Eth ingress node process function */
static __rte_always_inline uint16_t eth_ingress_node_process(struct rte_graph * graph, struct rte_node * node, void ** objs, uint16_t cnt)
{
    /* Packet count and packet buffer */
    uint16_t last_spec = 0;
    uint16_t n_left_from = cnt;
    uint16_t batch_next_node_index = ETH_INGRESS_NODE_LAST_NEXT(node->ctx);
    void ** batch_pkts = objs;
    struct rte_mbuf ** pkts = (struct rte_mbuf **)objs;

    /* Single packet processing */
    uint16_t last_port_ingress = UINT16_MAX;
    struct eth_ingress_stats stats = {};
    enum eth_ingress_drop_reason drop_reason = ETH_INGR_DROP_RSN_MAX;
    struct mr_dev_desc * dev_desc = NULL;
    struct devmgr_main * devmgr_main = sc_main_get()->devmgr_main;

    while (n_left_from > 0) {
        struct rte_mbuf * mbuf = pkts[0];
        pkts += 1;
        n_left_from -= 1;

        /* Get the device descriptor (cached per ingress port) */
        struct mrb_metadata * mrb_meta = (struct mrb_metadata *)mrbuf_cz_data(mbuf, MR_NODE_CTRLZONE_ID);
        if (unlikely(mrb_meta->port_ingress != last_port_ingress)) {
            last_port_ingress = mrb_meta->port_ingress;
            dev_desc = mr_dev_desc_lookup_by_port_id(devmgr_main, mrb_meta->port_ingress);
            assert(dev_desc != NULL);
        }

        /* Bridge interfaces go to the bridge node */
        uint16_t next_node_index;
        if (dev_desc->bridge_index != UINT8_MAX) {
            stats.to_bridge++;
            next_node_index = ETH_INGRESS_NEXT_BRIDGE;
            goto node_enqueue;
        }

        switch (dev_desc->role_type) {
        case MR_DEV_ROLE_VWIRE_INTERFACE:
            stats.to_vwire++;
            next_node_index = ETH_INGRESS_NEXT_VWIRE_INGRESS;
            break;
        case MR_DEV_ROLE_ENDPOINT_INTERFACE:
        case MR_DEV_ROLE_KERNEL_RESP_INTERFACE:
            next_node_index = endpoint_dev_packet_handler(dev_desc, &stats, mbuf, mrb_meta, &drop_reason);
            break;
        case MR_DEV_ROLE_NF_INTERFACE: {
            /* Check for health check packets */
            if (likely(mrb_meta->health_check != 1)) {
                stats.to_forwarder++;
                next_node_index = ETH_INGRESS_NEXT_FORWARDER;
            } else {
                stats.to_health_check++;
                next_node_index = ETH_INGRESS_NEXT_HEALTH_CHECK;
            }
        } break;
        default:
            drop_reason = ETH_INGR_DROP_RSN_INVALID_ROLE_TYPE;
            stats.drop_reason[drop_reason]++;
            next_node_index = ETH_INGRESS_NEXT_PKT_DROP;
            break;
        }

node_enqueue:
#if 0
        /* Check if tracing is enabled for the current mbuf */
        if (unlikely(dp_trace_record_can_emit(mbuf, DP_TRACE_MEASUREMENT_TYPE_TRACE))) {
            gen_store_trace_info_ingress(node, mbuf, next_node_index, &stats, dev_desc, drop_reason);
        }
#endif
        /* Check if the next index needs to change */
        if (unlikely(batch_next_node_index != next_node_index)) {
            /* The next index changed: flush the packets accumulated so far */
            rte_node_enqueue(graph, node, batch_next_node_index, batch_pkts, last_spec);
            batch_pkts += last_spec;
            last_spec = 1;
            batch_next_node_index = next_node_index;
        } else {
            /* Same next index: keep accumulating */
            last_spec++;
        }
    }

    /* Process any remaining packets */
    if (likely(last_spec > 0))
        rte_node_enqueue(graph, node, batch_next_node_index, batch_pkts, last_spec);

    /* Update the cached next index */
    ETH_INGRESS_NODE_LAST_NEXT(node->ctx) = batch_next_node_index;

    /* Update graph stats */
    struct eth_ingress_stats * graph_stats = &stats_per_graph[graph->id];
    graph_stats->total_pkts += cnt;
    graph_stats->pkts_per_batch = cnt;
    graph_stats->to_bridge += stats.to_bridge;
    graph_stats->to_vwire += stats.to_vwire;
    graph_stats->to_health_check += stats.to_health_check;
    graph_stats->to_forwarder += stats.to_forwarder;
    graph_stats->to_bfd += stats.to_bfd;
    graph_stats->to_ef_ingress += stats.to_ef_ingress;
    graph_stats->to_eth_egress += stats.to_eth_egress;
    graph_stats->to_tera_ingress += stats.to_tera_ingress;
    for (int i = 0; i < ETH_INGR_DROP_RSN_MAX; i++) {
        graph_stats->drop_reason[i] += stats.drop_reason[i];
    }

    return cnt;
}
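/*
 * Batching note: packets that resolve to the same next node are coalesced and
 * handed over with a single rte_node_enqueue() call. For example, a burst
 * whose per-packet next nodes are [bridge, bridge, forwarder, forwarder,
 * forwarder] results in one enqueue of 2 packets to "bridge" followed by one
 * enqueue of 3 packets to "forwarder". The next index of the last packet is
 * cached in the node context and used as the speculative starting index for
 * the following burst.
 */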
/* Eth ingress node init function */
static int eth_ingress_node_init(const struct rte_graph * graph, struct rte_node * node)
{
    ETH_INGRESS_NODE_LAST_NEXT(node->ctx) = ETH_INGRESS_NEXT_EF_INGRESS;
    return 0;
}

/* Eth ingress node base */
static struct rte_node_register eth_ingress_node_base = {
    .process = eth_ingress_node_process,
    .name = "eth_ingress",
    .init = eth_ingress_node_init,
    .nb_edges = ETH_INGRESS_NEXT_MAX,
    .next_nodes = {
        [ETH_INGRESS_NEXT_EF_INGRESS] = "ef_ingress",
        [ETH_INGRESS_NEXT_BRIDGE] = "bridge",
        [ETH_INGRESS_NEXT_BFD] = "bfd",
        [ETH_INGRESS_NEXT_VWIRE_INGRESS] = "vwire_ingress",
        [ETH_INGRESS_NEXT_TERA_INGRESS] = "tera_ingress",
        [ETH_INGRESS_NEXT_ETH_EGRESS] = "eth_egress",
        [ETH_INGRESS_NEXT_FORWARDER] = "forwarder",
        [ETH_INGRESS_NEXT_HEALTH_CHECK] = "health_check_deal_answer",
        [ETH_INGRESS_NEXT_PKT_DROP] = "pkt_drop_trap",
    },
};

RTE_NODE_REGISTER(eth_ingress_node_base);
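/*
 * RTE_NODE_REGISTER() only makes "eth_ingress" known to the rte_graph library;
 * the node is instantiated when a worker graph whose pattern list matches it
 * is created. A minimal sketch of such a creation call is shown below; the
 * graph name, pattern list and socket id are hypothetical, and the project
 * builds its per-io-thread graphs elsewhere.
 */
#if 0
static rte_graph_t eth_ingress_example_graph_create(int socket_id)
{
    /* A real graph also needs the project's Rx source node in the pattern list */
    static const char * node_patterns[] = {
        "eth_ingress",
        "ef_ingress", "bridge", "bfd", "vwire_ingress", "tera_ingress",
        "eth_egress", "forwarder", "health_check_deal_answer", "pkt_drop_trap",
    };
    struct rte_graph_param prm = {
        .socket_id = socket_id,
        .nb_node_patterns = RTE_DIM(node_patterns),
        .node_patterns = node_patterns,
    };

    /* Returns RTE_GRAPH_ID_INVALID on failure */
    return rte_graph_create("eth_ingress_example_graph", &prm);
}
#endif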
/************************************** Eth Ingress Statistics **************************************/

cJSON * eth_ingress_node_monit_loop(struct sc_main * sc)
{
    cJSON * json_root = cJSON_CreateObject();
    unsigned int nr_graphs = sc->nr_io_thread;

    uint64_t total_pkts[nr_graphs];
    uint64_t pkts_per_batch[nr_graphs];
    uint64_t to_bridge[nr_graphs];
    uint64_t to_vwire[nr_graphs];
    uint64_t to_health_check[nr_graphs];
    uint64_t to_forwarder[nr_graphs];
    uint64_t to_bfd[nr_graphs];
    uint64_t to_ef_ingress[nr_graphs];
    uint64_t to_eth_egress[nr_graphs];
    uint64_t to_tera_ingress[nr_graphs];
    uint64_t drop_reason[ETH_INGR_DROP_RSN_MAX][nr_graphs];

    for (uint32_t graph_id = 0; graph_id < nr_graphs; graph_id++) {
        struct eth_ingress_stats * stats = &stats_per_graph[graph_id];
        if (stats->total_pkts == 0) {
            total_pkts[graph_id] = 0;
            pkts_per_batch[graph_id] = 0;
            to_bridge[graph_id] = 0;
            to_vwire[graph_id] = 0;
            to_health_check[graph_id] = 0;
            to_forwarder[graph_id] = 0;
            to_bfd[graph_id] = 0;
            to_ef_ingress[graph_id] = 0;
            to_eth_egress[graph_id] = 0;
            to_tera_ingress[graph_id] = 0;
            for (int i = 0; i < ETH_INGR_DROP_RSN_MAX; i++) {
                drop_reason[i][graph_id] = 0;
            }
            continue;
        }

        total_pkts[graph_id] = stats->total_pkts;
        pkts_per_batch[graph_id] = stats->pkts_per_batch;
        to_bridge[graph_id] = stats->to_bridge;
        to_vwire[graph_id] = stats->to_vwire;
        to_health_check[graph_id] = stats->to_health_check;
        to_forwarder[graph_id] = stats->to_forwarder;
        to_bfd[graph_id] = stats->to_bfd;
        to_ef_ingress[graph_id] = stats->to_ef_ingress;
        to_eth_egress[graph_id] = stats->to_eth_egress;
        to_tera_ingress[graph_id] = stats->to_tera_ingress;
        for (int i = 0; i < ETH_INGR_DROP_RSN_MAX; i++) {
            drop_reason[i][graph_id] = stats->drop_reason[i];
        }
    }

    cJSON * json_total_pkts = create_uint64_array(total_pkts, nr_graphs);
    cJSON_AddItemToObject(json_root, "eth_ingress, total_pkts", json_total_pkts);

    cJSON * json_pkts_per_batch = create_uint64_array(pkts_per_batch, nr_graphs);
    cJSON_AddItemToObject(json_root, "eth_ingress, pkts_per_batch", json_pkts_per_batch);

    cJSON * json_to_bridge = create_uint64_array(to_bridge, nr_graphs);
    cJSON_AddItemToObject(json_root, "eth_ingress, to_bridge", json_to_bridge);

    cJSON * json_to_vwire = create_uint64_array(to_vwire, nr_graphs);
    cJSON_AddItemToObject(json_root, "eth_ingress, to_vwire", json_to_vwire);

    cJSON * json_to_health_check = create_uint64_array(to_health_check, nr_graphs);
    cJSON_AddItemToObject(json_root, "eth_ingress, to_health_check", json_to_health_check);

    cJSON * json_to_forwarder = create_uint64_array(to_forwarder, nr_graphs);
    cJSON_AddItemToObject(json_root, "eth_ingress, to_forwarder", json_to_forwarder);

    cJSON * json_to_bfd = create_uint64_array(to_bfd, nr_graphs);
    cJSON_AddItemToObject(json_root, "eth_ingress, to_bfd", json_to_bfd);

    cJSON * json_to_ef_ingress = create_uint64_array(to_ef_ingress, nr_graphs);
    cJSON_AddItemToObject(json_root, "eth_ingress, to_ef_ingress", json_to_ef_ingress);

    cJSON * json_to_eth_egress = create_uint64_array(to_eth_egress, nr_graphs);
    cJSON_AddItemToObject(json_root, "eth_ingress, to_eth_egress", json_to_eth_egress);

    cJSON * json_to_tera_ingress = create_uint64_array(to_tera_ingress, nr_graphs);
    cJSON_AddItemToObject(json_root, "eth_ingress, to_tera_ingress", json_to_tera_ingress);

    for (int i = 0; i < ETH_INGR_DROP_RSN_MAX; i++) {
        char str_title[MR_STRING_MAX];
        snprintf(str_title, sizeof(str_title), "eth_ingress, %s", eth_ingress_drop_reason_str[i]);
        cJSON * json_drop_reason = create_uint64_array(drop_reason[i], nr_graphs);
        cJSON_AddItemToObject(json_root, str_title, json_drop_reason);
    }

    return json_root;
}
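/*
 * Example of the JSON object produced by eth_ingress_node_monit_loop() for two
 * io threads (values are illustrative):
 *
 *   {
 *       "eth_ingress, total_pkts":                 [1024, 2048],
 *       "eth_ingress, pkts_per_batch":             [32, 32],
 *       "eth_ingress, to_bridge":                  [0, 0],
 *       ...
 *       "eth_ingress, drop_rsn_invalid_role_type": [0, 0]
 *   }
 */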