author    songyanchao <[email protected]>  2022-08-25 08:10:30 +0000
committer songyanchao <[email protected]>  2022-08-25 08:10:30 +0000
commit    0170accae0b5b32ea22d3595488d94a4ba17dd3e
tree      906486ece24430aed7df5f4edeafa5793de5a7e7
parent    1c48cd707fb752c7a418604a6515822e81aeeb53
🎈 perf(TSG-11429): change the node counting order (tag: v4.6.0-20220826)

Change the node counting order.
-rw-r--r--  service/src/monit.c            |   6
-rw-r--r--  service/src/node_classifier.c  |  98
-rw-r--r--  service/src/node_eth_ingress.c | 103
-rw-r--r--  service/src/node_etherfabric.c | 203
-rw-r--r--  service/src/node_lb.c          |  73
5 files changed, 219 insertions(+), 264 deletions(-)
diff --git a/service/src/monit.c b/service/src/monit.c
index 6f4b8f7..c09a938 100644
--- a/service/src/monit.c
+++ b/service/src/monit.c
@@ -281,12 +281,12 @@ static cJSON * monit_root(struct sc_main * sc)
cJSON_AddItemToObject(j_root, "app", app_monit_loop(sc));
cJSON_AddItemToObject(j_root, "service", service_monit_loop(sc));
cJSON_AddItemToObject(j_root, "offload", smartoffload_monit_loop(sc));
+ cJSON_AddItemToObject(j_root, "eth-ingress", eth_ingress_node_monit_loop(sc));
+ cJSON_AddItemToObject(j_root, "bfd", bfd_node_monit_loop(sc));
+ cJSON_AddItemToObject(j_root, "etherfabric-ingress", etherfabric_ingress_node_monit_loop(sc));
cJSON_AddItemToObject(j_root, "classifier", classifier_node_monit_loop(sc));
cJSON_AddItemToObject(j_root, "lb", lb_node_monit_loop(sc));
- cJSON_AddItemToObject(j_root, "etherfabric-ingress", etherfabric_ingress_node_monit_loop(sc));
cJSON_AddItemToObject(j_root, "etherfabric-egress", etherfabric_egress_node_monit_loop(sc));
- cJSON_AddItemToObject(j_root, "eth-ingress", eth_ingress_node_monit_loop(sc));
- cJSON_AddItemToObject(j_root, "bfd", bfd_node_monit_loop(sc));
return j_root;
}
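
The monit.c hunk above only reorders the cJSON_AddItemToObject() calls so the report lists nodes in packet-flow order (eth-ingress, bfd, etherfabric-ingress, classifier, lb, etherfabric-egress). A minimal standalone sketch of why reordering the calls is enough, assuming a stock cJSON install (header path may differ per system): cJSON keeps object members in insertion order, so the call order is the output order.

/* Example (not part of the commit): cJSON members print in insertion order. */
#include <stdio.h>
#include <cjson/cJSON.h>

int main(void)
{
    cJSON *root = cJSON_CreateObject();

    /* Insert in packet-flow order: ingress side first, egress last. */
    cJSON_AddItemToObject(root, "eth-ingress",        cJSON_CreateObject());
    cJSON_AddItemToObject(root, "classifier",         cJSON_CreateObject());
    cJSON_AddItemToObject(root, "lb",                 cJSON_CreateObject());
    cJSON_AddItemToObject(root, "etherfabric-egress", cJSON_CreateObject());

    char *txt = cJSON_Print(root);    /* keys appear in insertion order */
    puts(txt);

    cJSON_free(txt);
    cJSON_Delete(root);
    return 0;
}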
diff --git a/service/src/node_classifier.c b/service/src/node_classifier.c
index 3190e0b..9fa03a7 100644
--- a/service/src/node_classifier.c
+++ b/service/src/node_classifier.c
@@ -163,10 +163,10 @@
#define MR_CLASSIFIER_CJSON_KEY_NEXT_GROUP "NextGroup"
#endif
-#define CLASSIFIER_STAT_ADD(st, gid, si,counter, value) \
+#define CLASSIFIER_STAT_ADD(st, graph_id, si,counter, value) \
do \
{ \
- st->stat_per_graph[gid].counter[si] += value; \
+ st->stat_per_graph[graph_id].counter[si] += value; \
} while(0) \
/* Table Name Flag */
@@ -375,11 +375,11 @@ static __rte_always_inline uint16_t classifier_node_process(struct rte_graph * g
void ** objs, uint16_t cnt)
{
uint16_t n_left_from = 0, last_spec = 0, max_si_id = 0;
- uint16_t next_index,next0;
+ uint16_t batch_next_node_index,next_node_index;
uint32_t lcore_id =0;
- rte_graph_t gid;
- void ** from;
- struct rte_mbuf * mbuf0, ** pkts;
+ rte_graph_t graph_id;
+ void ** batch_pkts;
+ struct rte_mbuf * mbuf, ** pkts;
struct classifier_management * _classifier_management = NULL;
struct rte_rcu_qsbr * qsv;
struct rte_acl_ctx * acx = NULL;
@@ -395,9 +395,9 @@ static __rte_always_inline uint16_t classifier_node_process(struct rte_graph * g
acx = _classifier_management->classifier_acx;
n_left_from = cnt;
pkts = (struct rte_mbuf **)objs;
- from = objs;
- next_index = CLASSIFIER_NEXT_LB;
- gid = graph->id;
+ batch_pkts = objs;
+ batch_next_node_index = CLASSIFIER_NEXT_LB;
+ graph_id = graph->id;
/* Multiple Packet Processing, Current Not Support */
rte_prefetch0(pkts[0]);
@@ -415,12 +415,12 @@ static __rte_always_inline uint16_t classifier_node_process(struct rte_graph * g
const uint8_t * match_field_array[MR_CLASSIFIER_MAX_PKT_BURST];
struct pkt_head_info pkt_head_info_item = {};
- mbuf0 = pkts[0];
+ mbuf = pkts[0];
pkts += 1;
n_left_from -= 1;
/* Get Ctrlzone */
- private_ctrlzone = mrbuf_cz_data(mbuf0, 0);
+ private_ctrlzone = mrbuf_cz_data(mbuf, 0);
pkt_parser_ptr = &private_ctrlzone->pkt_parser;
/* Pkt Prefetch */
@@ -433,8 +433,8 @@ static __rte_always_inline uint16_t classifier_node_process(struct rte_graph * g
if (unlikely(acx == NULL))
{
/* Update Classifier Total Pkts Stats */
- CLASSIFIER_STAT_ADD(global_classifier_main,gid,si,total_pkts_si,1);
- CLASSIFIER_STAT_ADD(global_classifier_main,gid,si,no_classifier_rule_si,1);
+ CLASSIFIER_STAT_ADD(global_classifier_main,graph_id,si,total_pkts_si,1);
+ CLASSIFIER_STAT_ADD(global_classifier_main,graph_id,si,no_classifier_rule_si,1);
goto no_match_process;
}
@@ -447,7 +447,7 @@ static __rte_always_inline uint16_t classifier_node_process(struct rte_graph * g
ret = get_pkt_head_info(pkt_parser_ptr,&pkt_head_info_item,head_flg);
if (unlikely(ret == RT_ERR))
{
- CLASSIFIER_STAT_ADD(global_classifier_main,gid,si,invalid_pkts,1);
+ CLASSIFIER_STAT_ADD(global_classifier_main,graph_id,si,invalid_pkts,1);
goto no_match_process;
}
@@ -464,7 +464,7 @@ static __rte_always_inline uint16_t classifier_node_process(struct rte_graph * g
}
else
{
- CLASSIFIER_STAT_ADD(global_classifier_main,gid,si,no_support_pkt,1);
+ CLASSIFIER_STAT_ADD(global_classifier_main,graph_id,si,no_support_pkt,1);
goto no_match_process;
}
@@ -479,36 +479,36 @@ get_si_id:
else
{
/* Update Classifier Total Pkts Stats */
- CLASSIFIER_STAT_ADD(global_classifier_main,gid,si,total_pkts_si,1);
+ CLASSIFIER_STAT_ADD(global_classifier_main,graph_id,si,total_pkts_si,1);
goto no_match_process;
}
/* Update Classifier Total Pkts Stats */
- CLASSIFIER_STAT_ADD(global_classifier_main,gid,si,total_pkts_si,1);
+ CLASSIFIER_STAT_ADD(global_classifier_main,graph_id,si,total_pkts_si,1);
rte_acl_classify(acx,match_field_array,res_array,1,MR_CLASSIFIER_DEFAULT_MAX_CATEGORIES);
/* Checks Whether A rule Is Matched */
if (likely(res_array[0] != 0))
{
/* According To The Results Find The Action */
- CLASSIFIER_STAT_ADD(global_classifier_main,gid,si,hits_si,1);
+ CLASSIFIER_STAT_ADD(global_classifier_main,graph_id,si,hits_si,1);
struct mr_action * action = &_classifier_management->classifier_local_action[res_array[0]];
if (likely(action->action_type == CLASSIFIER_ACTION_NF_STEERING))
{
- next0 = CLASSIFIER_NEXT_LB;
+ next_node_index = CLASSIFIER_NEXT_LB;
private_ctrlzone->lb_group_id = action->next_group_id;
goto node_enqueue;
}
else if (action->action_type == CLASSIFIER_ACTION_DROP)
{
- next0 = CLASSIFIER_NEXT_PKT_DROP;
+ next_node_index = CLASSIFIER_NEXT_PKT_DROP;
goto node_enqueue;
}
/* Current Only Support DROP And NF_STEERING */
}
else
{
- CLASSIFIER_STAT_ADD(global_classifier_main,gid,si,missed_si,1);
+ CLASSIFIER_STAT_ADD(global_classifier_main,graph_id,si,missed_si,1);
goto get_si_id;
}
@@ -517,8 +517,8 @@ no_match_process:
switch (private_ctrlzone->service_type)
{
case MR_NODE_COMMON_ETHERFABRIC_SERVICE:
- next0 = CLASSIFIER_NEXT_ETHERFABRIC_EGRESS;
- CLASSIFIER_STAT_ADD(global_classifier_main,gid,si,to_etherfabric,1);
+ next_node_index = CLASSIFIER_NEXT_ETHERFABRIC_EGRESS;
+ CLASSIFIER_STAT_ADD(global_classifier_main,graph_id,si,to_etherfabric,1);
goto node_enqueue;
break;
@@ -527,18 +527,18 @@ no_match_process:
}
/* Send To Phydev On The Basis Of Shared Ctx,But Current No Support,Default Drop */
- next0 = CLASSIFIER_NEXT_PKT_DROP;
+ next_node_index = CLASSIFIER_NEXT_PKT_DROP;
node_enqueue:
/* Judge The Next Index Whether To Change */
- if (unlikely(next_index != next0))
+ if (unlikely(batch_next_node_index != next_node_index))
{
/* If The Next Index Has Been Changed,Enqueue Last Pkts */
for (uint16_t i = 0; i < last_spec; i++)
- rte_node_enqueue_x1(graph, node, next_index, from[i]);
- from += last_spec;
+ rte_node_enqueue_x1(graph, node, batch_next_node_index, batch_pkts[i]);
+ batch_pkts += last_spec;
last_spec = 1;
- next_index = next0;
+ batch_next_node_index = next_node_index;
}
else
{
@@ -551,7 +551,7 @@ node_enqueue:
if (likely(last_spec > 0))
{
for (uint16_t i = 0; i < last_spec; i++)
- rte_node_enqueue_x1(graph, node, next_index, from[i]);
+ rte_node_enqueue_x1(graph, node, batch_next_node_index, batch_pkts[i]);
}
/* Update Quiescent State Counter */
@@ -859,7 +859,6 @@ int setup_classifier(struct classifier_management * _classifier_management)
/* Prepare For Build */
memset(&classifier_build_param, 0, sizeof(classifier_build_param));
-
classifier_build_param.num_categories = MR_CLASSIFIER_DEFAULT_MAX_CATEGORIES;
classifier_build_param.num_fields = dim;
memcpy(&classifier_build_param.defs,defs,def_size);
@@ -877,7 +876,6 @@ int setup_classifier(struct classifier_management * _classifier_management)
/* Save The Context */
_classifier_management->classifier_acx = context;
- MR_INFO("Classifier Table Context Create Success. ");
table_name_flg = (table_name_flg == CLASSIFIER_TABLE_A)?CLASSIFIER_TABLE_B:CLASSIFIER_TABLE_A;
return RT_SUCCESS;
@@ -942,36 +940,27 @@ int parse_classifier_rule_for_add(struct classifier_management * _classifier_man
/* Rule Id */
cj_rule_id = cJSON_GetObjectItem(c_rule,MR_CLASSIFIER_CJSON_KEY_RULE_ID);
-
/* IP */
cj_src_ip_v4 = cJSON_GetObjectItem(c_rule,MR_CLASSIFIER_CJSON_KEY_SRC_IP_V4);
cj_dst_ip_v4 = cJSON_GetObjectItem(c_rule,MR_CLASSIFIER_CJSON_KEY_DST_IP_V4);
-
/* IP Mask */
cj_src_ip_mask_v4 = cJSON_GetObjectItem(c_rule,MR_CLASSIFIER_CJSON_KEY_SRC_IP_MASK_V4);
cj_dst_ip_mask_v4 = cJSON_GetObjectItem(c_rule,MR_CLASSIFIER_CJSON_KEY_DST_IP_MASK_V4);
-
/* Port */
cj_src_port_low = cJSON_GetObjectItem(c_rule,MR_CLASSIFIER_CJSON_KEY_SRC_PORT_LOW);
cj_src_port_high = cJSON_GetObjectItem(c_rule,MR_CLASSIFIER_CJSON_KEY_SRC_PORT_HIGH);
cj_dst_port_low = cJSON_GetObjectItem(c_rule,MR_CLASSIFIER_CJSON_KEY_DST_PORT_LOW);
cj_dst_port_high = cJSON_GetObjectItem(c_rule,MR_CLASSIFIER_CJSON_KEY_DST_PORT_HIGH);
-
/* Proto */
cj_proto = cJSON_GetObjectItem(c_rule,MR_CLASSIFIER_CJSON_KEY_PROTO);
-
/* Priority */
cj_priority = cJSON_GetObjectItem(c_rule,MR_CLASSIFIER_CJSON_KEY_PRIORITY);
-
/* Si Id */
cj_si_id = cJSON_GetObjectItem(c_rule,MR_CLASSIFIER_CJSON_KEY_SI_ID);
-
/* Action */
cj_action = cJSON_GetObjectItem(c_rule,MR_CLASSIFIER_CJSON_KEY_ACTION);
-
/* Next Group */
cj_next_group = cJSON_GetObjectItem(c_rule,MR_CLASSIFIER_CJSON_KEY_NEXT_GROUP);
-
/* Rule check for necessary field */
if (cj_rule_id == NULL)
{
@@ -1323,7 +1312,6 @@ int single_rule_add(cJSON * c_rule)
/* Create A New Classifier Rule Management */
new_rule_management = create_classifier_management();
-
/* Check Current Classifier Rule Num,No Support IPv6 */
if (old_rule_management->classifier_rule_num >= MAX_CLASSIFIER_RULE_NUM)
{
@@ -1333,7 +1321,6 @@ int single_rule_add(cJSON * c_rule)
/* Copy Classifier Management Item */
classifier_management_copy(old_rule_management,new_rule_management);
-
/* Parse Rule */
ret = parse_classifier_rule_for_add(new_rule_management,c_rule);
if (ret != DYNAMIC_CLASSIFIER_RULE_ADD_SUCCESS)
@@ -1360,6 +1347,7 @@ int single_rule_add(cJSON * c_rule)
add_err:
free_classifier_management(new_rule_management);
add_success:
+ MR_INFO("Single Rule Add Success. ");
return ret;
}
@@ -1458,6 +1446,7 @@ int single_rule_delete(cJSON * c_rule)
del_err:
free_classifier_management(new_rule_management);
del_success:
+ MR_INFO("Single Rule Del Success. ");
return ret;
}
@@ -1482,7 +1471,6 @@ static int __classifier_single_rule_del_request_handler(cJSON *req, cJSON **rsp,
cJSON *response = cJSON_CreateObject();
ret = single_rule_delete(req);
-
cJSON_AddNumberToObject(response, MR_CLASSIFIER_RPC_RESULT, ret);
*rsp = response;
return 0;
@@ -1496,14 +1484,12 @@ int parser_classifier_rule(struct sc_main * sc, struct classifier_management * _
/* Parsing Classifier Rule Number */
int ret = MESA_load_profile_uint_def(MR_CLASSIFIER_DEFAULT_CFG, "classifier_rule", "rule_num", &classifier_rule_num, MR_CLASSIFIER_INVALID_RULE_ARG);
-
/* No Config Classifier Rules ,Return Success */
if (ret < 0)
{
_classifier_management->classifier_rule_num = MR_CLASSIFIER_INVALID_RULE_ARG;
return RT_SUCCESS;
}
-
/* Check The Classifier Rule Number */
if (classifier_rule_num > MAX_CLASSIFIER_RULE_NUM)
{
@@ -1512,7 +1498,6 @@ int parser_classifier_rule(struct sc_main * sc, struct classifier_management * _
}
_classifier_management->classifier_rule_num = classifier_rule_num;
-
/* Circulation Parsing All Classifier Rule */
for ( parsed_rule_id = MR_CLASSIFIER_RULE_ID_INVALID_NUM; parsed_rule_id < classifier_rule_num + MR_CLASSIFIER_RULE_ID_INVALID_NUM; parsed_rule_id++)
{
@@ -1530,7 +1515,6 @@ int parser_classifier_rule(struct sc_main * sc, struct classifier_management * _
struct mr_action * action;
snprintf(str_section, sizeof(str_section), "classifier_rule:%lu", parsed_rule_id);
-
/* Get Free Rule Item Id */
rule_item_id = get_free_rule_item_id(_classifier_management);
if (rule_item_id == MR_CLASSIFIER_RULE_INVALID_FLAG)
@@ -1978,32 +1962,32 @@ int classifier_init(struct sc_main * sc)
/************************************** Classifier Statistics **************************************/
cJSON * classifier_node_monit_loop(struct sc_main * sc)
{
- uint32_t gid = 0,si_num = 0,graph_num = 0;
+ uint32_t graph_id = 0,si_num = 0,graph_num = 0;
cJSON * json_root = NULL,* graph_obj = NULL,* si_obj = NULL;
struct node_classifier_main * classifier_main = sc->classifier_node_main;
unsigned int nr_io_thread = sc->nr_io_thread;
json_root = cJSON_CreateObject();
- for (gid = 0; gid < nr_io_thread; gid++)
+ for (graph_id = 0; graph_id < nr_io_thread; graph_id++)
{
char graph_index[MR_STRING_MAX];
graph_obj = cJSON_CreateObject();
for (int i = 0; i < MR_CLASSIFIER_MAX_SI_NUM; i++)
{
char stat_index[MR_STRING_MAX];
- uint64_t total_pkts = classifier_main->stat_per_graph[gid].total_pkts_si[i];
+ uint64_t total_pkts = classifier_main->stat_per_graph[graph_id].total_pkts_si[i];
if (total_pkts == 0)
{
continue;
}
- uint64_t invalid_pkts = classifier_main->stat_per_graph[gid].invalid_pkts[i];
- uint64_t no_classifier_rule = classifier_main->stat_per_graph[gid].no_classifier_rule_si[i];
- uint64_t hits = classifier_main->stat_per_graph[gid].hits_si[i];
- uint64_t missed = classifier_main->stat_per_graph[gid].missed_si[i];
- uint64_t no_support_pkt = classifier_main->stat_per_graph[gid].no_support_pkt[i];
- uint64_t to_etherfabric = classifier_main->stat_per_graph[gid].to_etherfabric[i];
+ uint64_t invalid_pkts = classifier_main->stat_per_graph[graph_id].invalid_pkts[i];
+ uint64_t no_classifier_rule = classifier_main->stat_per_graph[graph_id].no_classifier_rule_si[i];
+ uint64_t hits = classifier_main->stat_per_graph[graph_id].hits_si[i];
+ uint64_t missed = classifier_main->stat_per_graph[graph_id].missed_si[i];
+ uint64_t no_support_pkt = classifier_main->stat_per_graph[graph_id].no_support_pkt[i];
+ uint64_t to_etherfabric = classifier_main->stat_per_graph[graph_id].to_etherfabric[i];
si_obj = cJSON_CreateObject();
@@ -2026,7 +2010,7 @@ cJSON * classifier_node_monit_loop(struct sc_main * sc)
continue;
}
- cJSON_AddNumberToObject(graph_obj, "graph_id", gid);
+ cJSON_AddNumberToObject(graph_obj, "graph_id", graph_id);
cJSON_AddNumberToObject(graph_obj, "si_num", si_num);
sprintf(graph_index,"graph-%u",graph_num);
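
Every node_process function touched by this commit shares the same run-length enqueue pattern that the renames (from/next_index/next0 to batch_pkts/batch_next_node_index/next_node_index) make explicit: packets that resolve to the same next node are accumulated in last_spec and flushed in one loop, either when the next node changes or at the end of the burst. A self-contained sketch of that pattern, with rte_node_enqueue_x1() and the per-packet decision replaced by stand-ins so it compiles without DPDK:

/* Example (not part of the commit): run-length batching of enqueues. */
#include <stdint.h>
#include <stdio.h>

/* Stand-in for rte_node_enqueue_x1(graph, node, next, obj). */
static void enqueue_x1(uint16_t next, void *pkt)
{
    printf("pkt %p -> next node %u\n", pkt, (unsigned)next);
}

/* Stand-in for the per-packet decision each node makes. */
static uint16_t classify(void *pkt)
{
    return (uint16_t)(((uintptr_t)pkt >> 4) & 1);
}

static void process_burst(void **objs, uint16_t cnt, uint16_t default_next)
{
    void   **batch_pkts            = objs;          /* start of current run */
    uint16_t batch_next_node_index = default_next;  /* next node of the run */
    uint16_t last_spec             = 0;             /* packets in the run   */

    for (uint16_t n = 0; n < cnt; n++)
    {
        uint16_t next_node_index = classify(objs[n]);

        if (batch_next_node_index != next_node_index)
        {
            /* Next node changed: flush the finished run, then start a new
             * run containing just the current packet. */
            for (uint16_t i = 0; i < last_spec; i++)
                enqueue_x1(batch_next_node_index, batch_pkts[i]);
            batch_pkts += last_spec;
            last_spec = 1;
            batch_next_node_index = next_node_index;
        }
        else
        {
            last_spec += 1;
        }
    }

    /* Flush whatever run is left at the end of the burst. */
    for (uint16_t i = 0; i < last_spec; i++)
        enqueue_x1(batch_next_node_index, batch_pkts[i]);
}

int main(void)
{
    void *pkts[4] = { (void *)0x10, (void *)0x20, (void *)0x30, (void *)0x50 };
    process_burst(pkts, 4, 0);
    return 0;
}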
diff --git a/service/src/node_eth_ingress.c b/service/src/node_eth_ingress.c
index ef2d0d1..1e2df91 100644
--- a/service/src/node_eth_ingress.c
+++ b/service/src/node_eth_ingress.c
@@ -1,3 +1,4 @@
+#include "rte_branch_prediction.h"
#include <common.h>
#include <netinet/in.h>
#include <sys/socket.h>
@@ -20,11 +21,11 @@
#define MR_ETH_INGRESS_BFD_PORT 3784
#endif
-#define MR_ETH_INGRESS_STAT_ADD(st, graph_id, counter, value) \
-do \
-{ \
- st->graph_stat[graph_id].counter += value; \
-} while(0) \
+#define MR_ETH_INGRESS_STAT_ADD(st, graph_id, counter, value) \
+do \
+{ \
+ st->graph_stat[graph_id].counter += value; \
+} while(0) \
/* Eth Ingress Next Node */
enum {
@@ -142,18 +143,18 @@ static int eth_ingress_ingress_node_init(const struct rte_graph * graph, struct
/* Eth Ingress Node Process Function */
static __rte_always_inline uint16_t eth_ingress_node_process(struct rte_graph * graph, struct rte_node * node, void ** objs, uint16_t cnt)
{
- uint16_t n_left_from = 0, last_spec = 0,next_index = 0,next0 = 0;
+ uint16_t n_left_from = 0, last_spec = 0,batch_next_node_index = 0,next_node_index = 0;
rte_graph_t graph_id;
- struct rte_mbuf * mbuf0, ** pkts;
- void ** from;
+ struct rte_mbuf * mbuf, ** pkts;
+ void ** batch_pkts;
struct eth_ingress_ip_listen_management * ip_listen_manage = NULL;
static struct node_eth_ingress_main * eth_ingress_main = NULL;
/* Get Pkts Num And Pkts Buffer */
n_left_from = cnt;
pkts = (struct rte_mbuf **)objs;
- from = objs;
- next_index = ETH_INGRESS_NEXT_ETHERFABRIC_INGRESS;
+ batch_pkts = objs;
+ batch_next_node_index = ETH_INGRESS_NEXT_ETHERFABRIC_INGRESS;
eth_ingress_main = global_eth_ingress_main;
ip_listen_manage = &global_eth_ingress_main->ip_listen_manage;
graph_id = graph->id;
@@ -167,11 +168,11 @@ static __rte_always_inline uint16_t eth_ingress_node_process(struct rte_graph *
struct pkt_parser * pkt_parser_ptr = NULL;
struct pkt_head_info pkt_head_info_item = {};
- mbuf0 = pkts[0];
+ mbuf = pkts[0];
pkts += 1;
n_left_from -= 1;
- private_ctrlzone = mrbuf_cz_data(mbuf0, 0);
+ private_ctrlzone = mrbuf_cz_data(mbuf, 0);
pkt_parser_ptr = &private_ctrlzone->pkt_parser;
MR_ETH_INGRESS_STAT_ADD(eth_ingress_main,graph_id,total_pkts,1);
@@ -185,65 +186,57 @@ static __rte_always_inline uint16_t eth_ingress_node_process(struct rte_graph *
goto exception_handling;
}
- switch (pkt_head_info_item.ip_version)
+ /* Judge IP Version */
+ if (pkt_head_info_item.ip_version == MR_NODE_COMMON_IP_VERSION_V4)
{
- case MR_NODE_COMMON_IP_VERSION_V4:
+ if(unlikely(listen_ip_check(ip_listen_manage,pkt_head_info_item._ipv4_hdr) != RT_SUCCESS))
{
- if(listen_ip_check(ip_listen_manage,pkt_head_info_item._ipv4_hdr) == RT_SUCCESS)
- {
- MR_ETH_INGRESS_STAT_ADD(eth_ingress_main,graph_id,hit_pkts_v4,1);
-
- /* Should Enter Local Deal Process,But Current Specify Next Node Is Etherfabric */
- if ((pkt_head_info_item.proto_id == IPPROTO_UDP) && (pkt_head_info_item.dst_port == ntohs(MR_ETH_INGRESS_BFD_PORT)))
- {
- /* Bfd Pkt Should Send To Bfd Node */
- MR_ETH_INGRESS_STAT_ADD(eth_ingress_main,graph_id,bfd_pkts,1);
- next0 = ETH_INGRESS_NEXT_BFD;
- goto node_enqueue;
- }
- else
- {
- /* Current Specify Next Node Is Etherfabric */
- MR_ETH_INGRESS_STAT_ADD(eth_ingress_main,graph_id,etherfabric_pkts,1);
- next0 = ETH_INGRESS_NEXT_ETHERFABRIC_INGRESS;
- goto node_enqueue;
- }
- }
- else
- {
- /* No Match Listen Ip Pkt,Need According Ingress Port Match Rout Type, But Current Not Support,Default Drop */
- MR_ETH_INGRESS_STAT_ADD(eth_ingress_main,graph_id,miss_pkts_v4,1);
- goto exception_handling;
- }
+ /* No Match Listen Ip Pkt,Need According Ingress Port Match Rout Type, But Current Not Support,Default Drop */
+ MR_ETH_INGRESS_STAT_ADD(eth_ingress_main,graph_id,miss_pkts_v4,1);
+ goto exception_handling;
}
- break;
- case MR_NODE_COMMON_IP_VERSION_V6:
+
+ MR_ETH_INGRESS_STAT_ADD(eth_ingress_main,graph_id,hit_pkts_v4,1);
+ /* Should Enter Local Deal Process,But Current Specify Next Node Is Etherfabric */
+ if (unlikely((pkt_head_info_item.proto_id == IPPROTO_UDP) && (pkt_head_info_item.dst_port == ntohs(MR_ETH_INGRESS_BFD_PORT))))
{
- /* IPv6 Pkt,Need According Ingress Port Match Rout Type, But Current Not Support,Default Drop */
- MR_ETH_INGRESS_STAT_ADD(eth_ingress_main,graph_id,miss_pkts_v6,1);
- goto exception_handling;
+ /* Bfd Pkt Should Send To Bfd Node */
+ MR_ETH_INGRESS_STAT_ADD(eth_ingress_main,graph_id,bfd_pkts,1);
+ next_node_index = ETH_INGRESS_NEXT_BFD;
+ goto node_enqueue;
}
- break;
- default:
+ else
{
- goto exception_handling;
+ /* Current Specify Next Node Is Etherfabric */
+ MR_ETH_INGRESS_STAT_ADD(eth_ingress_main,graph_id,etherfabric_pkts,1);
+ next_node_index = ETH_INGRESS_NEXT_ETHERFABRIC_INGRESS;
+ goto node_enqueue;
}
- break;
+ }
+ else if (pkt_head_info_item.ip_version == MR_NODE_COMMON_IP_VERSION_V6)
+ {
+ /* IPv6 Pkt,Need According Ingress Port Match Rout Type, But Current Not Support,Default Drop */
+ MR_ETH_INGRESS_STAT_ADD(eth_ingress_main,graph_id,miss_pkts_v6,1);
+ goto exception_handling;
+ }
+ else
+ {
+ goto exception_handling;
}
exception_handling:
- next0 = ETH_INGRESS_NEXT_PKT_DROP;
+ next_node_index = ETH_INGRESS_NEXT_PKT_DROP;
node_enqueue:
/* Judge The Next Index Whether To Change */
- if (unlikely(next_index != next0))
+ if (unlikely(batch_next_node_index != next_node_index))
{
/* If The Next Index Has Been Changed,Enqueue Last Pkts */
for (uint16_t i = 0; i < last_spec; i++)
- rte_node_enqueue_x1(graph, node, next_index, from[i]);
- from += last_spec;
+ rte_node_enqueue_x1(graph, node, batch_next_node_index, batch_pkts[i]);
+ batch_pkts += last_spec;
last_spec = 1;
- next_index = next0;
+ batch_next_node_index = next_node_index;
}
else
{
@@ -256,7 +249,7 @@ node_enqueue:
if (likely(last_spec > 0))
{
for (uint16_t i = 0; i < last_spec; i++)
- rte_node_enqueue_x1(graph, node, next_index, from[i]);
+ rte_node_enqueue_x1(graph, node, batch_next_node_index, batch_pkts[i]);
}
return cnt;
}
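
The eth_ingress rewrite replaces the switch with straight-line if/else and adds likely()/unlikely() hints (hence the new rte_branch_prediction.h include), so the miss and BFD paths read as explicit rare cases. A standalone sketch of the resulting branch layout; the macros are reproduced approximately (DPDK defines them via __builtin_expect), the checks are toy stand-ins, and UDP/3784 comes from MR_ETH_INGRESS_BFD_PORT in the diff:

/* Example (not part of the commit): branch hints on the demux path. */
#include <stdint.h>
#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Toy stand-in for listen_ip_check(). */
static int listen_ip_check_ok(uint32_t dst_ip) { return dst_ip != 0; }

static const char * pick_next_node(uint32_t dst_ip, uint8_t proto, uint16_t dst_port)
{
    if (unlikely(!listen_ip_check_ok(dst_ip)))     /* miss: rare, drop      */
        return "pkt_drop";

    if (unlikely(proto == 17 && dst_port == 3784)) /* UDP/3784: BFD control */
        return "bfd";

    return "etherfabric-ingress";                  /* common case           */
}

int main(void)
{
    printf("%s\n", pick_next_node(0x0a000001, 17, 3784)); /* -> bfd */
    printf("%s\n", pick_next_node(0x0a000001,  6,   80)); /* -> etherfabric-ingress */
    return 0;
}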
diff --git a/service/src/node_etherfabric.c b/service/src/node_etherfabric.c
index 1f9cdcb..2aa2388 100644
--- a/service/src/node_etherfabric.c
+++ b/service/src/node_etherfabric.c
@@ -12,36 +12,36 @@
#include <sc_node_common.h>
#ifndef MR_ETHERFABRIC_MAX_RULE
-#define MR_ETHERFABRIC_MAX_RULE 1024
+#define MR_ETHERFABRIC_MAX_RULE 1024
#endif
#ifndef MR_ETHERFABRIC_DEFAULT_CFG
-#define MR_ETHERFABRIC_DEFAULT_CFG "/opt/tsg/mrzcpd/etc/etherfabric.conf"
+#define MR_ETHERFABRIC_DEFAULT_CFG "/opt/tsg/mrzcpd/etc/etherfabric.conf"
#endif
#ifndef MR_ETHERFABRIC_INVALID_RULE_ARG
-#define MR_ETHERFABRIC_INVALID_RULE_ARG 0
+#define MR_ETHERFABRIC_INVALID_RULE_ARG 0
#endif
#ifndef MR_ETHERFABRIC_INVALID_RULE_STRING_ARG
-#define MR_ETHERFABRIC_INVALID_RULE_STRING_ARG ""
+#define MR_ETHERFABRIC_INVALID_RULE_STRING_ARG ""
#endif
#ifndef MR_ETHERFABRIC_LAYER_OFFSET_V4
-#define MR_ETHERFABRIC_LAYER_OFFSET_V4 4
+#define MR_ETHERFABRIC_LAYER_OFFSET_V4 4
#endif
-#define MR_ETHERFABRIC_INGRESS_STAT_ADD(st, gid, counter, value) \
-do \
-{ \
- st->ingress_stat_per_graph[gid].counter += value; \
-} while(0) \
+#define MR_ETHERFABRIC_INGRESS_STAT_ADD(st, graph_id, counter, value) \
+do \
+{ \
+ st->ingress_stat_per_graph[graph_id].counter += value; \
+} while(0) \
-#define MR_ETHERFABRIC_EGRESS_STAT_ADD(st, gid, counter, value) \
+#define MR_ETHERFABRIC_EGRESS_STAT_ADD(st, graph_id, counter, value) \
do \
{ \
- st->egress_stat_per_graph[gid].counter += value; \
+ st->egress_stat_per_graph[graph_id].counter += value; \
} while(0) \
/* Etherfabric Ingress Next Node */
@@ -125,7 +125,6 @@ struct node_etherfabric_main
/* Global Etherfabric Main */
static struct node_etherfabric_main * global_etherfabric_main = NULL;
-
extern int eth_ingress_update_ip_listen_num(uint16_t num,uint32_t * listen_ip);
/************************************* Etherfabric Config **************************************/
@@ -134,7 +133,7 @@ int etherfabric_string_to_mac(char *s,struct rte_ether_addr * addr )
{
unsigned char a[6];
int rc = sscanf(s, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", a + 0, a + 1, a + 2, a + 3, a + 4, a + 5);
- if(rc !=6 )
+ if(rc != 6 )
{
return RT_ERR;
}
@@ -173,7 +172,7 @@ int parser_etherfabric_service_conf(struct sc_main * sc, struct etherfabric_mana
char tx_node_name[MR_SYMBOL_MAX];
char **next_edges = NULL;
- /* 1. Parsing Service Rule Num */
+ /* Parsing Service Rule Num */
int ret = MESA_load_profile_uint_def(MR_ETHERFABRIC_DEFAULT_CFG, "service_conf", "rule_num", &es_rule_num, MR_ETHERFABRIC_INVALID_RULE_ARG);
if (ret < 0)
@@ -183,17 +182,17 @@ int parser_etherfabric_service_conf(struct sc_main * sc, struct etherfabric_mana
return RT_SUCCESS;
}
- /* 2. Check The Etherfabric Rule Num */
+ /* Check The Etherfabric Rule Num */
if ((es_rule_num == MR_ETHERFABRIC_INVALID_RULE_ARG) || (es_rule_num > MR_ETHERFABRIC_MAX_RULE))
{
MR_ERROR("The Etherfabric Rule Num: %u Is Invalid,The Rule Num Range From 1 to '%d' ",es_rule_num,MR_ETHERFABRIC_MAX_RULE);
return RT_ERR;
}
- /* 3. Save The Etherfabric Rule Num */
+ /* Save The Etherfabric Rule Num */
etherfabric_manage->es_rule_num = (uint16_t)es_rule_num;
- /* 4. Parsing All Rule Config */
+ /* Parsing All Rule Config */
for (int i = 0; i < es_rule_num; i++)
{
snprintf(rule_str_section, sizeof(rule_str_section), "etherfabric_service:%d", i);
@@ -244,7 +243,6 @@ int parser_etherfabric_service_conf(struct sc_main * sc, struct etherfabric_mana
/* Save listen_ip */
etherfabric_manage->es_rule_array[i].listen_ip_v4 = ip_addr.s_addr;
-
ret = MESA_load_profile_uint_def(MR_ETHERFABRIC_DEFAULT_CFG, rule_str_section, "listen_port", &listen_port, MR_ETHERFABRIC_INVALID_RULE_ARG);
if (ret < 0)
{
@@ -379,7 +377,6 @@ void dump_etherfabric_service_config(struct etherfabric_management * etherfabric
for (int i = 0; i < etherfabric_manage->es_rule_num; i++)
{
struct etherfabric_service_rule * es_rule_item = &etherfabric_manage->es_rule_array[i];
-
listen_addr.s_addr = es_rule_item->listen_ip_v4;
MR_INFO(" ");
@@ -415,7 +412,6 @@ void dump_etherfabric_link_config(struct etherfabric_management * etherfabric_ma
for (int i = 0; i < etherfabric_manage->el_rule_num; i++)
{
struct etherfabric_link_rule * el_rule_item = &etherfabric_manage->el_rule_array[i];
-
etherfabric_ip_v4.s_addr = el_rule_item->etherfabric_ip_v4;
etherfabric_mac_to_string(mac_addr, sizeof(mac_addr), &el_rule_item->etherfabric_mac_addr);
@@ -440,10 +436,10 @@ int etherfabric_init(struct sc_main * sc)
etherfabric_manage = &etherfabric_main->etherfabric_manage;
sc->etherfabric_node_main = etherfabric_main;
- /* 1. Parsing The Etherfabric Service Rule Config */
+ /* Parsing The Etherfabric Service Rule Config */
ret = parser_etherfabric_service_conf(sc,etherfabric_manage);
- /* 2. Dump The Config And Update Ip Listen Num */
+ /* Dump The Config And Update Ip Listen Num */
if (ret != RT_ERR)
{
uint32_t listen_ip_v4[MR_ETHERFABRIC_MAX_RULE];
@@ -514,24 +510,21 @@ static int etherfabric_ingress_node_init(const struct rte_graph * graph, struct
static __rte_always_inline uint16_t etherfabric_ingress_node_process(struct rte_graph * graph, struct rte_node * node,
void ** objs, uint16_t cnt)
{
- uint16_t n_left_from = 0, last_spec = 0,next_index = 0,next0 = 0;
- uint16_t pkt_data_offset = MR_NODE_COMMON_ETHERFABRIC_PKT_OFFSET;
- rte_graph_t gid;
- struct rte_mbuf *mbuf0, ** pkts;
- void **from;
+ uint16_t n_left_from = 0, last_spec = 0,batch_next_node_index = 0,next_node_index = 0;
+ rte_graph_t graph_id;
+ struct rte_mbuf *mbuf, ** pkts;
+ void **batch_pkts;
struct etherfabric_management * etherfabric_manage = NULL;
static struct node_etherfabric_main * etherfabric_main = NULL;
/* Get Pkts Num And Pkts Buffer */
n_left_from = cnt;
pkts = (struct rte_mbuf **)objs;
- from = objs;
- next_index = ETHERFABRIC_INGRESS_NEXT_PKT_DROP;
+ batch_pkts = objs;
+ batch_next_node_index = ETHERFABRIC_INGRESS_NEXT_PKT_DROP;
etherfabric_main = global_etherfabric_main;
etherfabric_manage = &global_etherfabric_main->etherfabric_manage;
- gid = graph->id;
-
- /* Multiple Packet Processing, Current Not Support */
+ graph_id = graph->id;
/* Single Packet Processing */
while (n_left_from > 0)
@@ -539,19 +532,19 @@ static __rte_always_inline uint16_t etherfabric_ingress_node_process(struct rte_
uint32_t head_flg = MR_PARSE_NONE;
int ret = RT_SUCCESS;
struct pkt_parser * pkt_parser_ptr = NULL;
- struct etherfabric_service_tag * tag_item = NULL;
+ struct etherfabric_service_tag * service_tag_item = NULL;
struct etherfabric_service_rule * es_rule_item = NULL;
struct private_data * private_ctrlzone = NULL;
struct pkt_head_info pkt_head_info_item = {};
- mbuf0 = pkts[0];
+ mbuf = pkts[0];
pkts += 1;
n_left_from -= 1;
- private_ctrlzone = mrbuf_cz_data(mbuf0, 0);
+ private_ctrlzone = mrbuf_cz_data(mbuf, 0);
pkt_parser_ptr = &private_ctrlzone->pkt_parser;
- tag_item = &private_ctrlzone->tag.etherfabric;
- MR_ETHERFABRIC_INGRESS_STAT_ADD(etherfabric_main,gid,total_pkts,1);
+ service_tag_item = &private_ctrlzone->tag.etherfabric;
+ MR_ETHERFABRIC_INGRESS_STAT_ADD(etherfabric_main,graph_id,total_pkts,1);
/* Get Pkt Head Info */
set_parse_flg(head_flg,MR_PARSE_ETHER);
@@ -561,34 +554,34 @@ static __rte_always_inline uint16_t etherfabric_ingress_node_process(struct rte_
ret = get_pkt_head_info(pkt_parser_ptr,&pkt_head_info_item,head_flg);
if (unlikely(ret == RT_ERR))
{
- MR_ETHERFABRIC_INGRESS_STAT_ADD(etherfabric_main,gid,invalid_pkts,1);
+ MR_ETHERFABRIC_INGRESS_STAT_ADD(etherfabric_main,graph_id,invalid_pkts,1);
goto exception_handling;
}
if (unlikely(pkt_head_info_item.ip_version != MR_NODE_COMMON_IP_VERSION_V4))
{
- MR_ETHERFABRIC_INGRESS_STAT_ADD(etherfabric_main,gid,no_ipv4_pkts,1);
+ MR_ETHERFABRIC_INGRESS_STAT_ADD(etherfabric_main,graph_id,no_ipv4_pkts,1);
goto exception_handling;
}
if (unlikely(pkt_head_info_item.proto_id != IPPROTO_UDP))
{
- MR_ETHERFABRIC_INGRESS_STAT_ADD(etherfabric_main,gid,no_udp_pkts,1);
+ MR_ETHERFABRIC_INGRESS_STAT_ADD(etherfabric_main,graph_id,no_udp_pkts,1);
goto exception_handling;
}
/* Match Etherfabric Service Rule */
- es_rule_item = match_etherfabric_service_rule(etherfabric_manage,pkt_head_info_item._ipv4_hdr,pkt_head_info_item._udp_hdr,&tag_item->service_index);
+ es_rule_item = match_etherfabric_service_rule(etherfabric_manage,pkt_head_info_item._ipv4_hdr,pkt_head_info_item._udp_hdr,&service_tag_item->service_index);
if (unlikely(es_rule_item == NULL))
{
- MR_ETHERFABRIC_INGRESS_STAT_ADD(etherfabric_main,gid,no_match_service_pkts,1);
+ MR_ETHERFABRIC_INGRESS_STAT_ADD(etherfabric_main,graph_id,no_match_service_pkts,1);
goto exception_handling;
}
/* Match Etherfabric Link Rule */
- if (unlikely(match_etherfabric_link_rule(etherfabric_manage,pkt_head_info_item._ether_hdr,pkt_head_info_item._ipv4_hdr,&tag_item->link_index) == RT_ERR))
+ if (unlikely(match_etherfabric_link_rule(etherfabric_manage,pkt_head_info_item._ether_hdr,pkt_head_info_item._ipv4_hdr,&service_tag_item->link_index) == RT_ERR))
{
- MR_ETHERFABRIC_INGRESS_STAT_ADD(etherfabric_main,gid,no_match_link_pkts,1);
+ MR_ETHERFABRIC_INGRESS_STAT_ADD(etherfabric_main,graph_id,no_match_link_pkts,1);
goto exception_handling;
}
@@ -597,25 +590,25 @@ static __rte_always_inline uint16_t etherfabric_ingress_node_process(struct rte_
pkt_parser_ptr->layer_offset += MR_ETHERFABRIC_LAYER_OFFSET_V4;
/* Set The Pkt Date Off */
- rte_pktmbuf_adj(mbuf0,pkt_data_offset);
+ rte_pktmbuf_adj(mbuf,MR_NODE_COMMON_ETHERFABRIC_PKT_OFFSET);
/* Send The Pkt To Classifier */
- next0 = ETHERFABRIC_INGRESS_NEXT_CLASSIFIER;
- MR_ETHERFABRIC_INGRESS_STAT_ADD(etherfabric_main,gid,match_pkts,1);
+ next_node_index = ETHERFABRIC_INGRESS_NEXT_CLASSIFIER;
+ MR_ETHERFABRIC_INGRESS_STAT_ADD(etherfabric_main,graph_id,match_pkts,1);
goto node_enqueue;
exception_handling:
- next0 = ETHERFABRIC_INGRESS_NEXT_PKT_DROP;
+ next_node_index = ETHERFABRIC_INGRESS_NEXT_PKT_DROP;
node_enqueue:
/* Judge The Next Index Whether To Change */
- if (unlikely(next_index != next0))
+ if (unlikely(batch_next_node_index != next_node_index))
{
/* If The Next Index Has Been Changed,Enqueue Last Pkts */
for (uint16_t i = 0; i < last_spec; i++)
- rte_node_enqueue_x1(graph, node, next_index, from[i]);
- from += last_spec;
+ rte_node_enqueue_x1(graph, node, batch_next_node_index, batch_pkts[i]);
+ batch_pkts += last_spec;
last_spec = 1;
- next_index = next0;
+ batch_next_node_index = next_node_index;
}
else
{
@@ -628,7 +621,7 @@ node_enqueue:
if (likely(last_spec > 0))
{
for (uint16_t i = 0; i < last_spec; i++)
- rte_node_enqueue_x1(graph, node, next_index, from[i]);
+ rte_node_enqueue_x1(graph, node, batch_next_node_index, batch_pkts[i]);
}
return cnt;
@@ -677,7 +670,7 @@ void fill_ipv4_pkt_for_original_packet(struct rte_mbuf *mbuf)
}
/* Fill Ether IPv4 Udp Vxlan Hdr For The Constructed Packet */
-void fill_ipv4_pkt_for_constructed_packet(struct sc_main * sc,struct etherfabric_service_tag * tag_item,struct rte_mbuf *mbuf,uint16_t port_ingress)
+void fill_ipv4_pkt_for_constructed_packet(struct sc_main * sc,struct etherfabric_service_tag * service_tag_item,struct rte_mbuf *mbuf,uint16_t port_ingress)
{
struct rte_ether_hdr * eth_hdr = NULL;
struct rte_ipv4_hdr * ip_hdr = NULL;
@@ -690,8 +683,8 @@ void fill_ipv4_pkt_for_constructed_packet(struct sc_main * sc,struct etherfabric
struct phydev * dev = phydev_lookup_id(sc->phydev_main,port_ingress);
etherfabric_node_main = sc->etherfabric_node_main;
- es_rule_item = &etherfabric_node_main->etherfabric_manage.es_rule_array[tag_item->service_index];
- el_rule_item = &etherfabric_node_main->etherfabric_manage.el_rule_array[tag_item->link_index];
+ es_rule_item = &etherfabric_node_main->etherfabric_manage.es_rule_array[service_tag_item->service_index];
+ el_rule_item = &etherfabric_node_main->etherfabric_manage.el_rule_array[service_tag_item->link_index];
eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
ip_hdr = rte_pktmbuf_mtod_offset(mbuf,struct rte_ipv4_hdr *, sizeof(struct rte_ether_hdr));
@@ -730,7 +723,7 @@ void fill_ipv4_pkt_for_constructed_packet(struct sc_main * sc,struct etherfabric
g_vxlan_hdr->vlan_id_half_high = 0x00;
g_vxlan_hdr->link_layer_type = 0x00;
g_vxlan_hdr->vlan_id_half_low = 0x00;
- g_vxlan_hdr->dir = tag_item->dir;
+ g_vxlan_hdr->dir = service_tag_item->dir;
g_vxlan_hdr->link_id = el_rule_item->etherfabric_link_id;
g_vxlan_hdr->online_test = 0x00;
g_vxlan_hdr->r7 = 0x00;
@@ -747,114 +740,111 @@ void fill_ipv4_pkt_for_constructed_packet(struct sc_main * sc,struct etherfabric
static __rte_always_inline uint16_t etherfabric_egress_node_process(struct rte_graph * graph, struct rte_node * node,
void ** objs, uint16_t cnt)
{
- uint16_t n_left_from = 0, last_spec = 0,next_index = 0,next0 = 0;
- rte_graph_t gid;
- struct rte_mbuf *mbuf0, ** pkts;
- void **from;
+ uint16_t n_left_from = 0, last_spec = 0,batch_next_node_index = 0,next_node_index = 0;
+ rte_graph_t graph_id;
+ struct rte_mbuf *mbuf, ** pkts;
+ void **batch_pkts;
struct etherfabric_management * etherfabric_manage = NULL;
static struct node_etherfabric_main * etherfabric_main = NULL;
struct sc_main * sc = NULL;
- /* 1. Get Pkts Num And Pkts Buffer */
+ /* Get Pkts Num And Pkts Buffer */
n_left_from = cnt;
pkts = (struct rte_mbuf **)objs;
- from = objs;
- next_index = ETHERFABRIC_INGRESS_NEXT_PKT_DROP;
+ batch_pkts = objs;
+ batch_next_node_index = ETHERFABRIC_EGRESS_NEXT_PKT_DROP;
sc = sc_main_get();
etherfabric_main = global_etherfabric_main;
etherfabric_manage = &global_etherfabric_main->etherfabric_manage;
- gid = graph->id;
-
- /* 2. Multiple Packet Processing, Current Not Support */
+ graph_id = graph->id;
- /* 3. Single Packet Processing */
+ /* Single Packet Processing */
while (n_left_from > 0)
{
uint16_t port_ingress = 0;
- uint16_t pkt_data_offset = MR_NODE_COMMON_ETHERFABRIC_PKT_OFFSET;
uint32_t head_flg = MR_PARSE_NONE;
int ret = RT_SUCCESS;
struct pkt_parser * pkt_parser_ptr = NULL;
struct private_data * private_ctrlzone = NULL;
- struct etherfabric_service_tag * tag_item = NULL;
+ struct etherfabric_service_tag * service_tag_item = NULL;
struct pkt_head_info pkt_head_info_item = {};
- mbuf0 = pkts[0];
+ mbuf = pkts[0];
pkts += 1;
n_left_from -= 1;
- private_ctrlzone = mrbuf_cz_data(mbuf0, 0);
+ private_ctrlzone = mrbuf_cz_data(mbuf, 0);
pkt_parser_ptr = &private_ctrlzone->pkt_parser;
- tag_item = &private_ctrlzone->tag.etherfabric;
+ service_tag_item = &private_ctrlzone->tag.etherfabric;
port_ingress = private_ctrlzone->port_ingress;
- MR_ETHERFABRIC_EGRESS_STAT_ADD(etherfabric_main,gid,total_pkts,1);
+ MR_ETHERFABRIC_EGRESS_STAT_ADD(etherfabric_main,graph_id,total_pkts,1);
/* Get Pkt Head Info */
set_parse_flg(head_flg,MR_PARSE_IP);
ret = get_pkt_head_info(pkt_parser_ptr,&pkt_head_info_item,head_flg);
if (unlikely(ret == RT_ERR))
{
- MR_ETHERFABRIC_EGRESS_STAT_ADD(etherfabric_main,gid,invalid_pkts,1);
+ MR_ETHERFABRIC_EGRESS_STAT_ADD(etherfabric_main,graph_id,invalid_pkts,1);
goto exception_handling;
}
/* Set The Pkt Offset */
- rte_pktmbuf_prepend(mbuf0,pkt_data_offset);
-
+ rte_pktmbuf_prepend(mbuf,MR_NODE_COMMON_ETHERFABRIC_PKT_OFFSET);
+ /* Judge IP Version */
if (pkt_head_info_item.ip_version == MR_NODE_COMMON_IP_VERSION_V4)
{
/* Fill Ether IPv4 Udp Vxlan Hdr For The Pkt */
if (likely(private_ctrlzone->original_packet_flag == MR_NODE_COMMON_ORIGINAL_PKT))
{
- fill_ipv4_pkt_for_original_packet(mbuf0);
- MR_ETHERFABRIC_EGRESS_STAT_ADD(etherfabric_main,gid,ipv4_no_set_tag,1);
+ fill_ipv4_pkt_for_original_packet(mbuf);
+ MR_ETHERFABRIC_EGRESS_STAT_ADD(etherfabric_main,graph_id,ipv4_no_set_tag,1);
/* From Port Id Get Next Node Index */
- next0 = etherfabric_manage->port_tx_map[port_ingress];
+ next_node_index = etherfabric_manage->port_tx_map[port_ingress];
goto node_enqueue;
}
else
{
- if (unlikely((tag_item->link_index >= MR_ETHERFABRIC_MAX_RULE)))
+ if (unlikely((service_tag_item->link_index >= MR_ETHERFABRIC_MAX_RULE)))
{
- MR_ETHERFABRIC_EGRESS_STAT_ADD(etherfabric_main,gid,link_index_invalid,1);
+ MR_ETHERFABRIC_EGRESS_STAT_ADD(etherfabric_main,graph_id,link_index_invalid,1);
goto exception_handling;
}
- if (unlikely((tag_item->service_index >= MR_ETHERFABRIC_MAX_RULE)))
+ if (unlikely((service_tag_item->service_index >= MR_ETHERFABRIC_MAX_RULE)))
{
- MR_ETHERFABRIC_EGRESS_STAT_ADD(etherfabric_main,gid,service_index_invalid,1);
+ MR_ETHERFABRIC_EGRESS_STAT_ADD(etherfabric_main,graph_id,service_index_invalid,1);
goto exception_handling;
}
- fill_ipv4_pkt_for_constructed_packet(sc,tag_item,mbuf0,port_ingress);
- MR_ETHERFABRIC_EGRESS_STAT_ADD(etherfabric_main,gid,ipv4_set_tag,1);
+ fill_ipv4_pkt_for_constructed_packet(sc,service_tag_item,mbuf,port_ingress);
+ MR_ETHERFABRIC_EGRESS_STAT_ADD(etherfabric_main,graph_id,ipv4_set_tag,1);
/* From Port Id Get Next Node Index */
- next0 = etherfabric_manage->port_tx_map[port_ingress];
+ next_node_index = etherfabric_manage->port_tx_map[port_ingress];
goto node_enqueue;
}
}
else
{
/* Current Not Support IPv6 */
- MR_ETHERFABRIC_EGRESS_STAT_ADD(etherfabric_main,gid,no_suport_pkt,1);
+ MR_ETHERFABRIC_EGRESS_STAT_ADD(etherfabric_main,graph_id,no_suport_pkt,1);
goto exception_handling;
}
exception_handling:
- next0 = ETHERFABRIC_EGRESS_NEXT_PKT_DROP;
+ next_node_index = ETHERFABRIC_EGRESS_NEXT_PKT_DROP;
node_enqueue:
/* Judge The Next Index Whether To Change */
- if (unlikely(next_index != next0))
+ if (unlikely(batch_next_node_index != next_node_index))
{
/* If The Next Index Has Been Changed,Enqueue Last Pkts */
for (uint16_t i = 0; i < last_spec; i++)
- rte_node_enqueue_x1(graph, node, next_index, from[i]);
- from += last_spec;
+ rte_node_enqueue_x1(graph, node, batch_next_node_index, batch_pkts[i]);
+ batch_pkts += last_spec;
last_spec = 1;
- next_index = next0;
+ batch_next_node_index = next_node_index;
}
else
{
@@ -867,7 +857,7 @@ node_enqueue:
if (likely(last_spec > 0))
{
for (uint16_t i = 0; i < last_spec; i++)
- rte_node_enqueue_x1(graph, node, next_index, from[i]);
+ rte_node_enqueue_x1(graph, node, batch_next_node_index, batch_pkts[i]);
}
return cnt;
@@ -889,21 +879,20 @@ RTE_NODE_REGISTER(etherfabric_egress_node_base);
/************************************** Etherfabric Statistics **************************************/
cJSON * etherfabric_ingress_node_monit_loop(struct sc_main * sc)
{
- uint32_t gid = 0,graph_num = 0;
+ uint32_t graph_id = 0,graph_num = 0;
cJSON * json_root = NULL,* graph_obj = NULL;
struct node_etherfabric_main * etherfabric_main = sc->etherfabric_node_main;
unsigned int nr_io_thread = sc->nr_io_thread;
json_root = cJSON_CreateObject();
- for (gid = 0; gid < nr_io_thread; gid++)
+ for (graph_id = 0; graph_id < nr_io_thread; graph_id++)
{
char graph_index[MR_STRING_MAX];
uint64_t stats = 0;
graph_obj = cJSON_CreateObject();
- struct etherfabric_ingress_stat_per_lcore * stat_item = &etherfabric_main->ingress_stat_per_graph[gid];
-
+ struct etherfabric_ingress_stat_per_lcore * stat_item = &etherfabric_main->ingress_stat_per_graph[graph_id];
stats = stat_item->total_pkts;
if (stats > 0)
@@ -928,11 +917,9 @@ cJSON * etherfabric_ingress_node_monit_loop(struct sc_main * sc)
continue;
}
- cJSON_AddNumberToObject(graph_obj, "graph_id", gid);
-
+ cJSON_AddNumberToObject(graph_obj, "graph_id", graph_id);
sprintf(graph_index,"graph-%u",graph_num);
cJSON_AddItemToObject(json_root,graph_index,graph_obj);
-
graph_num ++;
}
@@ -943,23 +930,21 @@ cJSON * etherfabric_ingress_node_monit_loop(struct sc_main * sc)
cJSON * etherfabric_egress_node_monit_loop(struct sc_main * sc)
{
- uint32_t gid = 0,graph_num = 0;
+ uint32_t graph_id = 0,graph_num = 0;
cJSON * json_root = NULL,* graph_obj = NULL;
struct node_etherfabric_main * etherfabric_main = sc->etherfabric_node_main;
unsigned int nr_io_thread = sc->nr_io_thread;
json_root = cJSON_CreateObject();
- for (gid = 0; gid < nr_io_thread; gid++)
+ for (graph_id = 0; graph_id < nr_io_thread; graph_id++)
{
char graph_index[MR_STRING_MAX];
uint64_t stats = 0;
graph_obj = cJSON_CreateObject();
- struct etherfabric_egress_stat_per_lcore * stat_item = &etherfabric_main->egress_stat_per_graph[gid];
-
+ struct etherfabric_egress_stat_per_lcore * stat_item = &etherfabric_main->egress_stat_per_graph[graph_id];
stats = stat_item->total_pkts;
-
if (stats > 0)
{
cJSON_AddNumberToObject(graph_obj, "total_pkts", stats);
@@ -978,11 +963,9 @@ cJSON * etherfabric_egress_node_monit_loop(struct sc_main * sc)
continue;
}
- cJSON_AddNumberToObject(graph_obj, "graph_id", gid);
-
+ cJSON_AddNumberToObject(graph_obj, "graph_id", graph_id);
sprintf(graph_index,"graph-%u",graph_num);
cJSON_AddItemToObject(json_root,graph_index,graph_obj);
-
graph_num ++;
}
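
The etherfabric hunks drop the local pkt_data_offset temporary and pass MR_NODE_COMMON_ETHERFABRIC_PKT_OFFSET straight to rte_pktmbuf_adj()/rte_pktmbuf_prepend(): adj() strips the outer Ether/IPv4/UDP/VXLAN headers on ingress, and prepend() reclaims that headroom on egress so the headers can be rewritten in place. A toy model of that symmetry on a plain byte buffer (toy_adj/toy_prepend are simplified stand-ins, not the real mbuf API):

/* Example (not part of the commit): adj()/prepend() decap-encap symmetry. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy_mbuf {
    char   room[256];
    size_t data_off;   /* like mbuf->data_off */
    size_t data_len;
};

static char * toy_adj(struct toy_mbuf *m, size_t len)      /* ~ rte_pktmbuf_adj */
{
    if (len > m->data_len) return NULL;
    m->data_off += len;
    m->data_len -= len;
    return m->room + m->data_off;
}

static char * toy_prepend(struct toy_mbuf *m, size_t len)  /* ~ rte_pktmbuf_prepend */
{
    if (len > m->data_off) return NULL;
    m->data_off -= len;
    m->data_len += len;
    return m->room + m->data_off;
}

int main(void)
{
    struct toy_mbuf m = { .data_off = 64, .data_len = 23 };
    memcpy(m.room + 64, "OUTER-HDR|inner payload", 23);

    char *inner = toy_adj(&m, 10);       /* ingress: strip the outer header  */
    printf("after adj:     %.*s\n", (int)m.data_len, inner);

    char *outer = toy_prepend(&m, 10);   /* egress: reclaim room, rewrite it */
    memcpy(outer, "NEW-OUTER|", 10);
    printf("after prepend: %.*s\n", (int)m.data_len, outer);
    return 0;
}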
diff --git a/service/src/node_lb.c b/service/src/node_lb.c
index 36be9b6..38b4c04 100644
--- a/service/src/node_lb.c
+++ b/service/src/node_lb.c
@@ -15,7 +15,7 @@
/* Global Config */
#ifndef MR_LB_MAX_GROUP
-#define MR_LB_MAX_GROUP 1024
+#define MR_LB_MAX_GROUP 1024
#endif
#ifndef MR_LB_GROUP_ITEM_ID_INVALID
@@ -23,31 +23,31 @@
#endif
#ifndef MR_LB_MAX_DEV_FOR_SINGLE_GROUP
-#define MR_LB_MAX_DEV_FOR_SINGLE_GROUP 1024
+#define MR_LB_MAX_DEV_FOR_SINGLE_GROUP 1024
#endif
#ifndef MR_LB_DEFAULT_CFG
-#define MR_LB_DEFAULT_CFG "/opt/tsg/mrzcpd/etc/lb.conf"
+#define MR_LB_DEFAULT_CFG "/opt/tsg/mrzcpd/etc/lb.conf"
#endif
#ifndef MR_LB_INVALID_RULE_STRING_ARG
-#define MR_LB_INVALID_RULE_STRING_ARG ""
+#define MR_LB_INVALID_RULE_STRING_ARG ""
#endif
#ifndef MR_LB_INVALID_RULE_ARG
-#define MR_LB_INVALID_RULE_ARG 0
+#define MR_LB_INVALID_RULE_ARG 0
#endif
#ifndef MR_LB_DEV_INVALID
-#define MR_LB_DEV_INVALID 0
+#define MR_LB_DEV_INVALID 0
#endif
#ifndef MR_LB_DEV_NEXT_NODE_VALID
-#define MR_LB_DEV_NEXT_NODE_VALID 1
+#define MR_LB_DEV_NEXT_NODE_VALID 1
#endif
#ifndef MR_LB_DEV_VALID
-#define MR_LB_DEV_VALID 2
+#define MR_LB_DEV_VALID 2
#endif
/* Dynamic LB Rule Field */
@@ -92,10 +92,10 @@
#define MR_LB_RPC_RESULT_ERR_DEV_ERR_CODE "ErrCode"
#endif
-#define MR_LB_STAT_ADD(st, gid, counter, value) \
+#define MR_LB_STAT_ADD(st, graph_id, counter, value) \
do \
{ \
- st->stat_per_graph[gid].counter += value; \
+ st->stat_per_graph[graph_id].counter += value; \
} while(0) \
/* LB Next Node */
@@ -985,11 +985,11 @@ static __rte_always_inline uint16_t lb_node_process(struct rte_graph * graph, st
void ** objs, uint16_t cnt)
{
uint16_t n_left_from = 0, last_spec = 0;
- uint16_t next_index,next0;
+ uint16_t batch_next_node_index,next_node_index;
uint32_t lcore_id = 0;
rte_graph_t graph_id;
- struct rte_mbuf *mbuf0, ** pkts;
- void **from;
+ struct rte_mbuf * mbuf, ** pkts;
+ void ** batch_pkts;
struct node_lb_main * lb_main = NULL;
struct rte_rcu_qsbr * qsv;
@@ -1001,41 +1001,38 @@ static __rte_always_inline uint16_t lb_node_process(struct rte_graph * graph, st
/* Get Pkts Num And Pkts Buffer */
n_left_from = cnt;
pkts = (struct rte_mbuf **)objs;
- from = objs;
- next_index = LB_NEXT_CLASSIFIER;
+ batch_pkts = objs;
+ batch_next_node_index = LB_NEXT_CLASSIFIER;
lb_main = global_lb_main;
graph_id = graph->id;
/* Single Packet Processing */
while (n_left_from > 0)
{
- uint16_t gid = 0,group_item_id = MR_LB_GROUP_ITEM_ID_INVALID;
+ uint16_t group_id = 0,group_item_id = MR_LB_GROUP_ITEM_ID_INVALID;
struct private_data * private_ctrlzone = NULL;
struct lb_group * lb_group_item = NULL;
- mbuf0 = pkts[0];
+ mbuf = pkts[0];
pkts += 1;
n_left_from -= 1;
- private_ctrlzone = mrbuf_cz_data(mbuf0, 0);
- gid = private_ctrlzone->lb_group_id;
+ private_ctrlzone = mrbuf_cz_data(mbuf, 0);
+ group_id = private_ctrlzone->lb_group_id;
MR_LB_STAT_ADD(lb_main,graph_id,total_pkts,1);
- group_item_id = get_lb_item_index_from_group_id(lb_main->lb_manage,gid);
- if (likely(group_item_id != MR_LB_GROUP_ITEM_ID_INVALID))
- {
- lb_group_item = &lb_main->lb_manage->lb_groups[group_item_id];
- }
- else
+ group_item_id = get_lb_item_index_from_group_id(lb_main->lb_manage,group_id);
+ if (unlikely(group_item_id == MR_LB_GROUP_ITEM_ID_INVALID))
{
/* Group Id Is Invalid,Sent To Classifier */
MR_LB_STAT_ADD(lb_main,graph_id,group_id_invalid_pkts,1);
goto exception_handling;
}
- next0 = lb_group_item->dispatch_func(lb_main,lb_group_item,mbuf0->hash.usr);
+ lb_group_item = &lb_main->lb_manage->lb_groups[group_item_id];
+ next_node_index = lb_group_item->dispatch_func(lb_main,lb_group_item,mbuf->hash.usr);
- if (likely(next0 != LB_NEXT_CLASSIFIER))
+ if (likely(next_node_index != LB_NEXT_CLASSIFIER))
{
MR_LB_STAT_ADD(lb_main,graph_id,success_pkt,1);
goto node_enqueue;
@@ -1047,17 +1044,17 @@ static __rte_always_inline uint16_t lb_node_process(struct rte_graph * graph, st
}
exception_handling:
- next0 = LB_NEXT_CLASSIFIER;
+ next_node_index = LB_NEXT_CLASSIFIER;
node_enqueue:
- if (unlikely(next_index != next0))
+ if (unlikely(batch_next_node_index != next_node_index))
{
/* If The Next Index Has Been Changed,Enqueue Last Pkts */
for (uint16_t i = 0; i < last_spec; i++)
- rte_node_enqueue_x1(graph, node, next_index, from[i]);
- from += last_spec;
+ rte_node_enqueue_x1(graph, node, batch_next_node_index, batch_pkts[i]);
+ batch_pkts += last_spec;
last_spec = 1;
- next_index = next0;
+ batch_next_node_index = next_node_index;
}
else
{
@@ -1070,7 +1067,7 @@ node_enqueue:
if (likely(last_spec > 0))
{
for (uint16_t i = 0; i < last_spec; i++)
- rte_node_enqueue_x1(graph, node, next_index, from[i]);
+ rte_node_enqueue_x1(graph, node, batch_next_node_index, batch_pkts[i]);
}
/* Update Quiescent State Counter */
@@ -1096,21 +1093,20 @@ RTE_NODE_REGISTER(lb_node_base);
/************************************** LB Statistics **************************************/
cJSON * lb_node_monit_loop(struct sc_main * sc)
{
- uint32_t gid = 0,graph_num = 0;
+ uint32_t graph_id = 0,graph_num = 0;
cJSON * json_root = NULL,* graph_obj = NULL;
struct node_lb_main * lb_main = sc->lb_node_main;
unsigned int nr_io_thread = sc->nr_io_thread;
json_root = cJSON_CreateObject();
- for (gid = 0; gid < nr_io_thread; gid++)
+ for (graph_id = 0; graph_id < nr_io_thread; graph_id++)
{
char graph_index[MR_STRING_MAX];
uint64_t stats = 0;
graph_obj = cJSON_CreateObject();
- struct lb_stat_per_lcore * stat_item = &lb_main->stat_per_graph[gid];
-
+ struct lb_stat_per_lcore * stat_item = &lb_main->stat_per_graph[graph_id];
stats = stat_item->total_pkts;
if (stats > 0)
@@ -1129,8 +1125,7 @@ cJSON * lb_node_monit_loop(struct sc_main * sc)
continue;
}
- cJSON_AddNumberToObject(graph_obj, "graph_id", gid);
-
+ cJSON_AddNumberToObject(graph_obj, "graph_id", graph_id);
sprintf(graph_index,"graph-%u",graph_num);
cJSON_AddItemToObject(json_root,graph_index,graph_obj);
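
The *_STAT_ADD macros renamed throughout this commit (gid to graph_id) all follow one per-graph counter layout: each graph, one per IO thread, increments its own slot, so the fast path needs no atomics, and the monit loops read the slots afterwards, skipping graphs with zero traffic. A self-contained sketch of that layout; the struct names and sizes here are illustrative, not the real definitions:

/* Example (not part of the commit): per-graph counters plus monit readout. */
#include <stdint.h>
#include <stdio.h>

#define NR_GRAPH_MAX 8   /* illustrative; the real bound is nr_io_thread */

struct lb_stat_per_lcore {
    uint64_t total_pkts;
    uint64_t success_pkt;
};

struct node_lb_main {
    struct lb_stat_per_lcore stat_per_graph[NR_GRAPH_MAX];
};

/* Same shape as MR_LB_STAT_ADD(st, graph_id, counter, value) in the diff. */
#define LB_STAT_ADD(st, graph_id, counter, value)            \
do                                                           \
{                                                            \
    (st)->stat_per_graph[(graph_id)].counter += (value);     \
} while (0)

int main(void)
{
    struct node_lb_main lb = { 0 };

    LB_STAT_ADD(&lb, 0, total_pkts, 1);    /* fast path on graph 0 */
    LB_STAT_ADD(&lb, 0, success_pkt, 1);
    LB_STAT_ADD(&lb, 1, total_pkts, 1);    /* fast path on graph 1 */

    /* Monit-loop side: read every slot, skip graphs with no traffic. */
    for (uint32_t graph_id = 0; graph_id < NR_GRAPH_MAX; graph_id++)
    {
        uint64_t total = lb.stat_per_graph[graph_id].total_pkts;
        if (total == 0)
            continue;
        printf("graph-%u: total=%llu ok=%llu\n", (unsigned)graph_id,
               (unsigned long long)total,
               (unsigned long long)lb.stat_per_graph[graph_id].success_pkt);
    }
    return 0;
}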