summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorsongyanchao <[email protected]>2022-08-15 08:11:33 +0000
committersongyanchao <[email protected]>2022-08-15 08:11:33 +0000
commita2cf2b42b4f703fae84eba0cf289311151eb4329 (patch)
tree5b063c82736413061b3f6831f5594f7e2a7d8594
parent7b81d3d4410f0c30dcb3e367ece9422dae2a4240 (diff)
✨ feat(TSG-11662): 添加Etherfabric Link配置项,并优化数据包解析流程
添加Etherfabric Link配置项,并优化数据包解析流程
-rw-r--r--conf/etherfabric.conf15
-rw-r--r--infra/include/ldbc.h2
-rw-r--r--service/include/sc_etherfabric.h30
-rw-r--r--service/include/sc_node_common.h34
-rw-r--r--service/src/node_classifier.c98
-rw-r--r--service/src/node_eth_ingress.c21
-rw-r--r--service/src/node_etherfabric.c495
-rw-r--r--service/src/node_lb.c12
8 files changed, 440 insertions, 267 deletions
diff --git a/conf/etherfabric.conf b/conf/etherfabric.conf
index 23ed20b..fbd3f8e 100644
--- a/conf/etherfabric.conf
+++ b/conf/etherfabric.conf
@@ -1,16 +1,25 @@
#[service_conf]
#rule_num=1
-#[rule:0]
+#[etherfabric_service:0]
#name=etherfabric_ingress_eth0_0
#type=etherfabric
#mode=virtual-wire
#listen_ip=10.0.0.1
#listen_port=4789
-#[rule:1]
+#[etherfabric_service:1]
#name=etherfabric_ingress_eth1_1
#type=etherfabric
#mode=tap
#listen_ip=10.0.0.2
-#listen_port=4789 \ No newline at end of file
+#listen_port=4789
+
+#[link_conf]
+#rule_num=1
+
+#[etherfabric_link:0]
+#etherfabric_link_id=1
+#virtual_wire_id=100
+#etherfabric_ip=10.10.1.40
+#etherfabric_mac=10:70:fd:03:c0:bd \ No newline at end of file
diff --git a/infra/include/ldbc.h b/infra/include/ldbc.h
index da2ce9c..e995585 100644
--- a/infra/include/ldbc.h
+++ b/infra/include/ldbc.h
@@ -93,6 +93,7 @@ struct pkt_parser
struct pkt_parser_result results[MR_PKT_PARSE_RESULT_MAX];
unsigned int nr_results;
+ unsigned int layer_offset;
};
struct distributer
@@ -127,6 +128,7 @@ static inline void pkt_parser_init(struct pkt_parser * pkt_parser, enum complex_
pkt_parser->expect_layer_type = expect_layer_type;
pkt_parser->nr_expect_results = nr_expect_results;
pkt_parser->nr_results = 0;
+ pkt_parser->layer_offset = 0;
}
static inline int pkt_parser_push(struct pkt_parser * pkt_parser, enum complex_layer this_layer_type, const char * data)
diff --git a/service/include/sc_etherfabric.h b/service/include/sc_etherfabric.h
deleted file mode 100644
index 7aa2e7e..0000000
--- a/service/include/sc_etherfabric.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#pragma once
-
-/* Etherfabric Service Type */
-enum {
- SERVICE_INVALID = 0,
- SERVICE_ETHERFABRIC,
-};
-
-/* Service Tag */
-struct service_tag
-{
- uint8_t vlan_id_half_high;
- uint8_t link_layer_type : 4;
- uint8_t vlan_id_half_low : 4;
- uint8_t dir : 1;
- uint8_t link_id : 6;
- uint8_t online_test : 1;
- uint32_t src_addr_ipv4;
- uint32_t dst_addr_ipv4;
- struct rte_ether_addr src_mac_addr;
-};
-
-struct etherfabric_tag
-{
- uint8_t service_type;
- uint8_t tag_set_flag : 4;
- uint8_t package_type : 4;
- struct service_tag ingress_tag;
- struct service_tag egress_tag;
-}; \ No newline at end of file
diff --git a/service/include/sc_node_common.h b/service/include/sc_node_common.h
index 37f4d25..746cff4 100644
--- a/service/include/sc_node_common.h
+++ b/service/include/sc_node_common.h
@@ -1,14 +1,36 @@
#pragma once
-#include <sc_etherfabric.h>
-
#ifndef MR_NODE_COMMON_ORIGINAL_PKT
#define MR_NODE_COMMON_ORIGINAL_PKT 1
#endif
+/* Package Layer Parsed Flag */
+enum {
+ CURRENT_LAYER_NOT_PARSED = 0,
+ CURRENT_LAYER_ALREADY_PARSED
+};
+
+/* Service Type */
+enum {
+ MR_NODE_COMMON_ETHERFABRIC_SERVICE,
+};
+
+/* Etherfabric Service Tag */
+struct etherfabric_service_tag
+{
+ uint8_t dir;
+ uint16_t link_index;
+ uint16_t service_index;
+};
+
/* Mbuf Private Date */
struct private_data
{
+ union
+ {
+ struct etherfabric_service_tag etherfabric;
+ }tag;
+
struct
{
uint64_t port_ingress_is_shmdev : 1;
@@ -16,16 +38,10 @@ struct private_data
uint64_t original_packet_flag : 1;
};
+ uint8_t service_type;
uint16_t port_ingress;
uint16_t port_egress;
uint16_t lb_group_id;
uint32_t si;
struct pkt_parser pkt_parser;
-
- // Temporary Adaptation
- uint8_t service_type;
- uint8_t tag_set_flag : 4;
- uint8_t package_type : 4;
- struct service_tag ingress_tag;
- struct service_tag egress_tag;
}; \ No newline at end of file
diff --git a/service/src/node_classifier.c b/service/src/node_classifier.c
index c13cc38..077fa9c 100644
--- a/service/src/node_classifier.c
+++ b/service/src/node_classifier.c
@@ -200,14 +200,14 @@ enum {
/* Classifier Next Node */
enum {
CLASSIFIER_NEXT_LB = 0,
- CLASSIFIER_NEXT_ETHERFABRIC,
+ CLASSIFIER_NEXT_ETHERFABRIC_EGRESS,
CLASSIFIER_NEXT_PKT_DROP,
CLASSIFIER_NEXT_MAX
};
/* Dynamic Classifier Rule Add Deal Result */
enum {
- DYNAMIC_CLASSIFIER_RULE_ADD_SUCESS = 0,
+ DYNAMIC_CLASSIFIER_RULE_ADD_SUCCESS = 0,
DYNAMIC_CLASSIFIER_RULE_ADD_SRC_IP_INVALID,
DYNAMIC_CLASSIFIER_RULE_ADD_SRC_IP_NO_CONFIG,
DYNAMIC_CLASSIFIER_RULE_ADD_SRC_IP_MASK_INVALID,
@@ -225,7 +225,7 @@ enum {
DYNAMIC_CLASSIFIER_RULE_ADD_ACTION_INVALID,
DYNAMIC_CLASSIFIER_RULE_ADD_NEXT_GROUP_INVALID,
DYNAMIC_CLASSIFIER_RULE_ADD_RULE_ID_INVALID,
- DYNAMIC_CLASSIFIER_RULE_ADD_NO_FREE_RUEL_ITEM,
+ DYNAMIC_CLASSIFIER_RULE_ADD_NO_FREE_RULE_ITEM,
DYNAMIC_CLASSIFIER_RULE_ADD_NUM_OUT_OF_MAX,
DYNAMIC_CLASSIFIER_RULE_ADD_NO_CONFIG_RULE_TYPE,
DYNAMIC_CLASSIFIER_RULE_ADD_REPEATED_RULE,
@@ -233,7 +233,7 @@ enum {
/* Dynamic Classifier Rule Delete Deal Result */
enum {
- DYNAMIC_CLASSIFIER_RULE_DEL_SUCESS = 0,
+ DYNAMIC_CLASSIFIER_RULE_DEL_SUCCESS = 0,
DYNAMIC_CLASSIFIER_RULE_DEL_RULE_NUM_IS_ZERO,
DYNAMIC_CLASSIFIER_RULE_DEL_RULE_ID_NO_CONFIG,
DYNAMIC_CLASSIFIER_RULE_DEL_RULE_ID_INVALID,
@@ -388,11 +388,11 @@ static __rte_always_inline uint16_t classifier_node_process(struct rte_graph * g
/* 3. Single Packet Processing */
while (n_left_from > 0)
{
+ uint8_t parse_ipv4_flag = CURRENT_LAYER_NOT_PARSED,parse_port_flag = CURRENT_LAYER_NOT_PARSED;
uint32_t res_buf[MR_CLASSIFIER_CLASSIFIER_MAX_PKT_BURST] = {};
uint32_t si = 0;
struct match_field mf;
- struct pkt_parser pkt_parser;
- struct pkt_parser * pkt_parser_ptr = &pkt_parser;
+ struct pkt_parser * pkt_parser_ptr = NULL;
struct rte_acl_ctx * acx = NULL;
struct private_data * private_ctrlzone = NULL;
const uint8_t * mf_buf[MR_CLASSIFIER_CLASSIFIER_MAX_PKT_BURST];
@@ -408,13 +408,14 @@ static __rte_always_inline uint16_t classifier_node_process(struct rte_graph * g
rte_prefetch0(pkts[0]);
}
- /* 1. Clear Match Field */
+ /* Clear Match Field */
memset(&mf,0,sizeof(mf));
- /* 2. Get And Update SI Id,Check The Si Id */
+ /* Get And Update SI Id,Check The Si Id */
private_ctrlzone = mrbuf_cz_data(mbuf0, 0);
si = private_ctrlzone->si;
+ pkt_parser_ptr = &private_ctrlzone->pkt_parser;
if (likely(si <= max_si_id))
{
@@ -429,50 +430,49 @@ static __rte_always_inline uint16_t classifier_node_process(struct rte_graph * g
goto no_match_process;
}
- /* 3. Parser PKT */
- pkt_parser_init(pkt_parser_ptr, LAYER_TYPE_ALL, MR_PKT_PARSE_RESULT_MAX);
- complex_parser_ether(pkt_parser_ptr, rte_pktmbuf_mtod(mbuf0,const char *));
-
- /* 4. Prepare Match Field Date */
- for (unsigned int i = 0; i < pkt_parser_ptr->nr_results; i++)
+ /* Prepare Match Field Date */
+ for (unsigned int i = pkt_parser_ptr->layer_offset; i < pkt_parser_ptr->nr_results; i++)
{
struct pkt_parser_result * result = &pkt_parser_ptr->results[i];
- /* 3.1 Current No Match Eth Field */
+ /* Current No Match Eth Field */
if (result->this_layer_type == LAYER_TYPE_ETHER)
{
continue;
}
- else if (result->this_layer_type == LAYER_TYPE_IPV4)
+ else if ((result->this_layer_type == LAYER_TYPE_IPV4) && (parse_ipv4_flag == CURRENT_LAYER_NOT_PARSED))
{
-      /* 2.2 Fill The Src Addr、Dst Addr And */
+      /* Fill The Src Addr, Dst Addr And Next Proto Id */
struct rte_ipv4_hdr * ipv4_hdr = (struct rte_ipv4_hdr *)(result->data);
mf.src_addr_ipv4 = ipv4_hdr->src_addr;
mf.dst_addr_ipv4 = ipv4_hdr->dst_addr;
mf.next_proto_id = ipv4_hdr->next_proto_id;
acx = c_manage->acx_ipv4;
+ parse_ipv4_flag = CURRENT_LAYER_ALREADY_PARSED;
continue;
}
else if (result->this_layer_type == LAYER_TYPE_IPV6)
{
- /* 2.3 Current Not Support IPv6 */
+ /* Current Not Support IPv6 */
continue;
}
- else if (result->this_layer_type == LAYER_TYPE_TCP)
+ else if ((result->this_layer_type == LAYER_TYPE_TCP) && (parse_port_flag == CURRENT_LAYER_NOT_PARSED))
{
- /* 2.4 Fill The Port */
+ /* Fill The Port */
struct rte_tcp_hdr * tcp_hdr = (struct rte_tcp_hdr *)(result->data);
mf.src_port = tcp_hdr->src_port;
mf.dst_port = tcp_hdr->dst_port;
+ parse_port_flag = CURRENT_LAYER_ALREADY_PARSED;
continue;
}
- else if (result->this_layer_type == LAYER_TYPE_UDP)
+ else if ((result->this_layer_type == LAYER_TYPE_UDP) && (parse_port_flag == CURRENT_LAYER_NOT_PARSED))
{
- /* 2.5 Fill The Port */
+ /* Fill The Port */
struct rte_udp_hdr * udp_hdr = (struct rte_udp_hdr *)(result->data);
mf.src_port = udp_hdr->src_port;
mf.dst_port = udp_hdr->dst_port;
+ parse_port_flag = CURRENT_LAYER_ALREADY_PARSED;
continue;
}
else
@@ -482,7 +482,7 @@ static __rte_always_inline uint16_t classifier_node_process(struct rte_graph * g
}
}
- /* 5. Check ACX And Match Classifier Table */
+ /* Check ACX And Match Classifier Table */
if (likely(acx != NULL))
{
while (likely(si <= max_si_id))
@@ -491,10 +491,10 @@ static __rte_always_inline uint16_t classifier_node_process(struct rte_graph * g
CLASSIFIER_STAT_ADD(global_classifier_main,gid,si,total_pkts_si,1);
rte_acl_classify(acx,mf_buf,res_buf,1,MR_CLASSIFIER_CLASSIFIER_DEFAULT_MAX_CATEGORIES);
- /* 5.1 Checks Whether A rule Is Matched */
+ /* Checks Whether A rule Is Matched */
if (likely(res_buf[0] != 0))
{
- /* 5.2. According To The Results Find The Action */
+ /* According To The Results Find The Action */
CLASSIFIER_STAT_ADD(global_classifier_main,gid,si,hits_si,1);
struct mr_action * action = &c_manage->classifier_v4_local_action[res_buf[0]];
@@ -531,11 +531,11 @@ static __rte_always_inline uint16_t classifier_node_process(struct rte_graph * g
no_match_process:
- /* 6. If None Rule Matched,According To The Service Type Chouse Next Node */
+ /* If None Rule Matched,According To The Service Type Choose Next Node */
switch (private_ctrlzone->service_type)
{
- case SERVICE_ETHERFABRIC:
- next0 = CLASSIFIER_NEXT_ETHERFABRIC;
+ case MR_NODE_COMMON_ETHERFABRIC_SERVICE:
+ next0 = CLASSIFIER_NEXT_ETHERFABRIC_EGRESS;
CLASSIFIER_STAT_ADD(global_classifier_main,gid,si,to_etherfabric,1);
goto node_enqueue;
break;
@@ -544,16 +544,16 @@ no_match_process:
break;
}
- /* 7. Send To Phydev On The Basis Of Shared Ctx,But Current No Support,Default Drop */
+ /* Send To Phydev On The Basis Of Shared Ctx,But Current No Support,Default Drop */
next0 = CLASSIFIER_NEXT_PKT_DROP;
CLASSIFIER_STAT_ADD(global_classifier_main,gid,si,no_support_pkt,1);
node_enqueue:
- /* 7. Judge The Next Index Whether To Change */
+ /* Judge The Next Index Whether To Change */
if (unlikely(next_index != next0))
{
- /* 7.1 If The Next Index Has Been Changed,Enqueue Last Pkts */
+ /* If The Next Index Has Been Changed,Enqueue Last Pkts */
for (uint16_t i = 0; i < last_spec; i++)
rte_node_enqueue_x1(graph, node, next_index, from[i]);
from += last_spec;
@@ -562,12 +562,12 @@ node_enqueue:
}
else
{
- /* 7.2 If The Next Index Not Change, Update The Lasts */
+ /* If The Next Index Not Change, Update The Lasts */
last_spec ++;
}
}
- /* 8. Process The Remaining Packets */
+ /* Process The Remaining Packets */
if (likely(last_spec > 0))
{
for (uint16_t i = 0; i < last_spec; i++)
@@ -588,7 +588,7 @@ static struct rte_node_register classifier_node_base = {
.nb_edges = CLASSIFIER_NEXT_MAX,
.next_nodes = {
[CLASSIFIER_NEXT_LB] = "lb",
- [CLASSIFIER_NEXT_ETHERFABRIC] = "etherfabric_egress",
+ [CLASSIFIER_NEXT_ETHERFABRIC_EGRESS] = "etherfabric_egress",
[CLASSIFIER_NEXT_PKT_DROP] = "pkt_drop",
},
};
@@ -702,7 +702,7 @@ void dump_classifier_v4_rule(struct classifier_v4_rule * rule,struct mr_action *
break;
case CLASSIFIER_ACTION_NF_STEERING:
- MR_INFO(" Classifier V4 Rule Action : Network Function Streeing");
+ MR_INFO(" Classifier V4 Rule Action : Network Function Steering");
MR_INFO(" Classifier V4 Rule Next Group : %u",action->next_group_id);
break;
@@ -721,8 +721,8 @@ void dump_classifier_rules(struct classifier_management * c_manage)
if (c_manage->classifier_v4_rule_num != 0)
{
MR_INFO(" ");
- MR_INFO("Classifier Ruls Dump For IPv4:");
- MR_INFO("Classifier Ruls Total Number:%lu",c_manage->classifier_v4_rule_num);
+ MR_INFO("Classifier Rules Dump For IPv4:");
+ MR_INFO("Classifier Rules Total Number:%lu",c_manage->classifier_v4_rule_num);
/* Because The Valid Rule Id From 1 Start,The Id 0 Is Invalid */
for (uint32_t i = 1; i < MAX_CLASSIFIER_RULE_ITEM_NUM; i++)
{
@@ -736,8 +736,8 @@ void dump_classifier_rules(struct classifier_management * c_manage)
else
{
MR_INFO(" ");
- MR_INFO("Classifier Ruls Dump For IPv4:");
- MR_INFO("Classifier Ruls Total Number:%lu",c_manage->classifier_v4_rule_num);
+ MR_INFO("Classifier Rules Dump For IPv4:");
+ MR_INFO("Classifier Rules Total Number:%lu",c_manage->classifier_v4_rule_num);
}
/* 2. Second Dump The IPv6 Classifier Rules,Temporary Unrealized */
@@ -839,7 +839,7 @@ int setup_classifier(struct classifier_management * c_manage,int classifier_type
classifier_param.rule_size = RTE_ACL_RULE_SZ(dim);
classifier_param.max_rule_num = MAX_CLASSIFIER_RULE_NUM;
- /* 3. Cteate Classifier Context */
+ /* 3. Create Classifier Context */
if ((context = rte_acl_create(&classifier_param)) == NULL)
{
MR_ERROR("IPv%d Classifier Table Context Create Fail !!!! ", classifier_type);
@@ -848,7 +848,7 @@ int setup_classifier(struct classifier_management * c_manage,int classifier_type
/* 4. Cuttent Use The Default Alg Config,So No Need Call "rte_acl_set_ctx_classify" */
- /* 5. Add Classifier Rull */
+ /* 5. Add Classifier Rule */
for (uint32_t i = 0; i < rule_num; i++)
{
while (rule_item_id < MAX_CLASSIFIER_RULE_NUM)
@@ -1002,7 +1002,7 @@ int parse_classifier_v4_rule_for_add(struct classifier_management * c_manage,cJS
rule_item_id = get_free_rule_item_id(c_manage);
if (rule_item_id == MR_CLASSIFIER_CLASSIFIER_RULE_INVALID_FLAG)
{
- return DYNAMIC_CLASSIFIER_RULE_ADD_NO_FREE_RUEL_ITEM;
+ return DYNAMIC_CLASSIFIER_RULE_ADD_NO_FREE_RULE_ITEM;
}
/* Fill Src Ip Addr */
@@ -1289,7 +1289,7 @@ int parse_classifier_v4_rule_for_add(struct classifier_management * c_manage,cJS
memcpy(&c_manage->classifier_v4_rule_buf[rule_item_id],rule,sizeof(struct classifier_v4_rule));
memcpy(&c_manage->classifier_v4_local_action[rule_item_id],action,sizeof(struct mr_action));
- return DYNAMIC_CLASSIFIER_RULE_ADD_SUCESS;
+ return DYNAMIC_CLASSIFIER_RULE_ADD_SUCCESS;
}
/* Add A Single Dynamic Classifier Rule Callback */
@@ -1326,7 +1326,7 @@ int ipv4_single_rule_add(cJSON * c_rule)
case CLASSIFIER_TABLE_IPV4:
{
ret = parse_classifier_v4_rule_for_add(new_am,c_rule);
- if (ret != DYNAMIC_CLASSIFIER_RULE_ADD_SUCESS)
+ if (ret != DYNAMIC_CLASSIFIER_RULE_ADD_SUCCESS)
{
goto add_err;
}
@@ -1397,7 +1397,7 @@ int parse_classifier_v4_rule_for_delete(struct classifier_management * c_manage,
c_manage->classifier_v4_rule_num --;
c_manage->classifier_v4_max_si_id = get_max_si_id(c_manage);
- return DYNAMIC_CLASSIFIER_RULE_DEL_SUCESS;
+ return DYNAMIC_CLASSIFIER_RULE_DEL_SUCCESS;
}
/* Delete A Single Dynamic Classifier Rule Callback */
@@ -1434,7 +1434,7 @@ int ipv4_single_rule_delete(cJSON * c_rule)
case CLASSIFIER_TABLE_IPV4:
{
ret = parse_classifier_v4_rule_for_delete(new_am,c_rule);
- if (ret != DYNAMIC_CLASSIFIER_RULE_DEL_SUCESS)
+ if (ret != DYNAMIC_CLASSIFIER_RULE_DEL_SUCCESS)
{
goto del_err;
}
@@ -1514,7 +1514,7 @@ int parser_classifier_rule_ipv4(struct sc_main * sc, struct classifier_managemen
/* Parsing Classifier Rule Number For IPv4 */
int ret = MESA_load_profile_uint_def(MR_CLASSIFIER_CLASSIFIER_DEFAULT_CFG, "classifier_rule:ipv4", "rule_num", &classifier_v4_rule_num, MR_CLASSIFIER_CLASSIFIER_INVALID_RULE_ARG);
- /* No Config Classifier Rules For IPv4,Returen Success */
+ /* No Config Classifier Rules For IPv4,Return Success */
if (ret < 0)
{
c_manage->classifier_v4_rule_num = MR_CLASSIFIER_CLASSIFIER_INVALID_RULE_ARG;
@@ -1870,7 +1870,7 @@ int parser_classifier_rule_ipv4(struct sc_main * sc, struct classifier_managemen
action->next_group_id = next_group_id;
}
- /* 11. Set Rule Action,The Rule Acion = Rule Id */
+ /* 11. Set Rule Action,The Rule Action = Rule Id */
rule->data.userdata = rule_item_id;
action->rule_id = parsed_rule_id;
@@ -1990,7 +1990,7 @@ cJSON * classifier_node_monit_loop(struct sc_main * sc)
cJSON_AddNumberToObject(si_obj, "hits", hits);
cJSON_AddNumberToObject(si_obj, "missed", missed);
cJSON_AddNumberToObject(si_obj, "no_support_pkt", no_support_pkt);
- cJSON_AddNumberToObject(si_obj, "to_etherfbric_service", to_etherfabric);
+ cJSON_AddNumberToObject(si_obj, "to_etherfabric_service", to_etherfabric);
cJSON_AddItemToObject(graph_obj,stat_index,si_obj);
si_num ++;
}
diff --git a/service/src/node_eth_ingress.c b/service/src/node_eth_ingress.c
index bcf4a22..711b594 100644
--- a/service/src/node_eth_ingress.c
+++ b/service/src/node_eth_ingress.c
@@ -34,12 +34,6 @@ enum {
ETH_INGRESS_NEXT_MAX
};
-/* Eth Ingress Package Parsed Flag */
-enum {
- ETH_INGRESS_NOT_PARSED = 0,
- ETH_INGRESS_ALREADY_PARSED
-};
-
/* Eth Ingress Management Struct */
struct eth_ingress_ip_listen_management
{
@@ -163,7 +157,7 @@ static __rte_always_inline uint16_t eth_ingress_node_process(struct rte_graph *
/* Single Packet Processing */
while (n_left_from > 0)
{
- uint8_t parse_ipv4_flag = ETH_INGRESS_NOT_PARSED,parse_udp_flag = ETH_INGRESS_NOT_PARSED;
+ uint8_t parse_ipv4_flag = CURRENT_LAYER_NOT_PARSED,parse_udp_flag = CURRENT_LAYER_NOT_PARSED;
struct pkt_parser * pkt_parser_ptr = NULL;
struct private_data * private_ctrlzone = NULL;
struct rte_ipv4_hdr * ipv4_hdr = NULL;
@@ -179,18 +173,15 @@ static __rte_always_inline uint16_t eth_ingress_node_process(struct rte_graph *
MR_ETH_INGRESS_STAT_ADD(eth_ingress_main,gid,total_pkts,1);
- /* Parser PKT */
- pkt_parser_init(pkt_parser_ptr, LAYER_TYPE_ALL, MR_PKT_PARSE_RESULT_MAX);
- complex_parser_ether(pkt_parser_ptr, rte_pktmbuf_mtod(mbuf0,const char *));
-
+ /* Parsing PKT */
for (unsigned int i = 0; i < pkt_parser_ptr->nr_results; i++)
{
struct pkt_parser_result * result = &pkt_parser_ptr->results[i];
- if ((result->this_layer_type == LAYER_TYPE_IPV4) && (parse_ipv4_flag == ETH_INGRESS_NOT_PARSED))
+ if ((result->this_layer_type == LAYER_TYPE_IPV4) && (parse_ipv4_flag == CURRENT_LAYER_NOT_PARSED))
{
ipv4_hdr = (struct rte_ipv4_hdr *)(result->data);
- parse_ipv4_flag = ETH_INGRESS_ALREADY_PARSED;
+ parse_ipv4_flag = CURRENT_LAYER_ALREADY_PARSED;
continue;
}
else if (result->this_layer_type == LAYER_TYPE_IPV6)
@@ -198,10 +189,10 @@ static __rte_always_inline uint16_t eth_ingress_node_process(struct rte_graph *
/* Current Not Support IPv6 */
continue;
}
- else if ((result->this_layer_type == LAYER_TYPE_UDP) && (parse_udp_flag == ETH_INGRESS_NOT_PARSED))
+ else if ((result->this_layer_type == LAYER_TYPE_UDP) && (parse_udp_flag == CURRENT_LAYER_NOT_PARSED))
{
udp_hdr = (struct rte_udp_hdr *)(result->data);
- parse_udp_flag = ETH_INGRESS_ALREADY_PARSED;
+ parse_udp_flag = CURRENT_LAYER_ALREADY_PARSED;
continue;
}
}
diff --git a/service/src/node_etherfabric.c b/service/src/node_etherfabric.c
index 4b5dc0b..d61578d 100644
--- a/service/src/node_etherfabric.c
+++ b/service/src/node_etherfabric.c
@@ -1,3 +1,4 @@
+#include "rte_branch_prediction.h"
#include <common.h>
#include <netinet/in.h>
#include <sys/socket.h>
@@ -27,6 +28,10 @@
#define MR_ETHERFABRIC_INVALID_RULE_STRING_ARG ""
#endif
+#ifndef MR_ETHERFABRIC_LAYER_OFFSET_V4
+#define MR_ETHERFABRIC_LAYER_OFFSET_V4 4
+#endif
+
#define MR_ETHERFABRIC_INGRESS_STAT_ADD(st, gid, counter, value) \
do \
{ \
@@ -40,18 +45,6 @@ do
st->egress_stat_per_graph[gid].counter += value; \
} while(0) \
-/* Etherfabric Ip Save Flag */
-enum {
- ETHERFABRIC_IP_NO_SAVE = 0,
- ETHERFABRIC_IP_ALREADY_SAVE
-};
-
-/* Etherfabric Udp Save Flag */
-enum {
- ETHERFABRIC_UDP_NO_SAVE = 0,
- ETHERFABRIC_UDP_ALREADY_SAVE
-};
-
/* Etherfabric Ingress Next Node */
enum {
ETHERFABRIC_INGRESS_NEXT_CLASSIFIER = 0,
@@ -72,13 +65,6 @@ enum {
ETHERFABRIC_EGRESS_NEXT_MAX
};
-/* Etherfabric TAG SET */
-enum {
- NONE_TAG_SET = 0,
- INGRESS_TAG_SET,
- EGRESS_TAG_SET
-};
-
/* Etherfabric Service Mode */
enum {
MODE_INVALID = 0,
@@ -86,21 +72,31 @@ enum {
};
/* Service Rule Struct */
-struct service_rule
+struct etherfabric_service_rule
{
- char name[MR_SYMBOL_MAX];
- uint8_t type;
uint8_t mode;
uint16_t listen_port;
uint32_t listen_ip_v4;
+ char name[MR_SYMBOL_MAX];
+};
+
+/* Link Rule Struct */
+struct etherfabric_link_rule
+{
+ uint16_t etherfabric_link_id;
+ uint16_t virtual_wire_id;
+ uint32_t etherfabric_ip_v4;
+ struct rte_ether_addr etherfabric_mac_addr;
};
/* Etherfabric Service Management Struct */
struct etherfabric_management
{
- uint16_t rule_num;
- uint16_t id_to_node[MR_PHYDEV_MAX];
- struct service_rule sr[MR_ETHERFABRIC_MAX_RULE];
+ uint16_t es_rule_num;
+ uint16_t el_rule_num;
+ uint16_t port_tx_map[MR_PHYDEV_MAX];
+ struct etherfabric_service_rule es_rule_buf[MR_ETHERFABRIC_MAX_RULE];
+ struct etherfabric_link_rule el_rule_buf[MR_ETHERFABRIC_MAX_RULE];
};
/* Etherfabric Ingress Stat Struct */
@@ -108,7 +104,8 @@ struct etherfabric_ingress_stat_per_lcore
{
volatile uint64_t total_pkts;
volatile uint64_t invalid_pkts;
- volatile uint64_t no_match_pkts;
+ volatile uint64_t no_match_service_pkts;
+ volatile uint64_t no_match_link_pkts;
volatile uint64_t match_pkts;
};
@@ -119,6 +116,8 @@ struct etherfabric_egress_stat_per_lcore
volatile uint64_t ipv4_no_set_tag;
volatile uint64_t ipv4_set_tag;
volatile uint64_t no_suport_pkt;
+ volatile uint64_t link_index_invalid;
+ volatile uint64_t service_index_invalid;
};
/* Etherfabric Main Struct */
@@ -135,10 +134,42 @@ static struct node_etherfabric_main * global_etherfabric_main = NULL;
extern int eth_ingress_update_ip_listen_num(uint16_t num,uint32_t * listen_ip);
/************************************* Etherfabric Config **************************************/
-/* Parser The Etherfabric Service Rule Config */
-int parser_etherfabric_conf(struct sc_main * sc, struct etherfabric_management * e_manage)
+/* String to mac */
+int etherfabric_string_to_mac(char *s,struct rte_ether_addr * addr )
{
- uint32_t listen_port = 0,rule_num = 0,count = 0;
+ unsigned char a[6];
+ int rc = sscanf(s, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", a + 0, a + 1, a + 2, a + 3, a + 4, a + 5);
+ if(rc !=6 )
+ {
+ return RT_ERR;
+ }
+
+ addr->addr_bytes[0] = (uint8_t)a[0];
+ addr->addr_bytes[1] = (uint8_t)a[1];
+ addr->addr_bytes[2] = (uint8_t)a[2];
+ addr->addr_bytes[3] = (uint8_t)a[3];
+ addr->addr_bytes[4] = (uint8_t)a[4];
+ addr->addr_bytes[5] = (uint8_t)a[5];
+
+ return RT_SUCCESS;
+}
+
+/* rte_ether_addr to str */
+void etherfabric_mac_to_string(char *buf, uint16_t size, const struct rte_ether_addr *eth_addr)
+{
+ snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X",
+ eth_addr->addr_bytes[0],
+ eth_addr->addr_bytes[1],
+ eth_addr->addr_bytes[2],
+ eth_addr->addr_bytes[3],
+ eth_addr->addr_bytes[4],
+ eth_addr->addr_bytes[5]);
+}
+
+/* Parsing The Etherfabric Service Rule Config */
+int parser_etherfabric_service_conf(struct sc_main * sc, struct etherfabric_management * e_manage)
+{
+ uint32_t listen_port = 0,es_rule_num = 0,count = 0;
struct in_addr ip_addr;
struct phydev * phydev_iter = NULL;
char str_buf[MR_STRING_MAX];
@@ -148,30 +179,30 @@ int parser_etherfabric_conf(struct sc_main * sc, struct etherfabric_management *
char **next_edges = NULL;
/* 1. Parsing Service Rule Num */
- int ret = MESA_load_profile_uint_def(MR_ETHERFABRIC_DEFAULT_CFG, "service_conf", "rule_num", &rule_num, MR_ETHERFABRIC_INVALID_RULE_ARG);
+ int ret = MESA_load_profile_uint_def(MR_ETHERFABRIC_DEFAULT_CFG, "service_conf", "rule_num", &es_rule_num, MR_ETHERFABRIC_INVALID_RULE_ARG);
if (ret < 0)
{
- /* No Config The Etherfabric Rule Num,Return Error */
- MR_ERROR("No Config The Etherfabric Rule Num !!!!");
+        /* No Config The Etherfabric Service Rule Num */
+        MR_INFO("No Config The Etherfabric Service Rule Num !!!!");
return RT_SUCCESS;
}
/* 2. Check The Etherfabric Rule Num */
- if ((rule_num == MR_ETHERFABRIC_INVALID_RULE_ARG) || (rule_num > MR_ETHERFABRIC_MAX_RULE))
+ if ((es_rule_num == MR_ETHERFABRIC_INVALID_RULE_ARG) || (es_rule_num > MR_ETHERFABRIC_MAX_RULE))
{
- MR_ERROR("The Etherfabric Rule Num: %u Is Invalid,The Rule Num Range From 1 to '%d' ",rule_num,MR_ETHERFABRIC_MAX_RULE);
+ MR_ERROR("The Etherfabric Rule Num: %u Is Invalid,The Rule Num Range From 1 to '%d' ",es_rule_num,MR_ETHERFABRIC_MAX_RULE);
return RT_ERR;
}
/* 3. Save The Etherfabric Rule Num */
- e_manage->rule_num = (uint16_t)rule_num;
+ e_manage->es_rule_num = (uint16_t)es_rule_num;
/* 4. Parsing All Rule Config */
- for (int i = 0; i < rule_num; i++)
+ for (int i = 0; i < es_rule_num; i++)
{
- snprintf(rule_str_section, sizeof(rule_str_section), "rule:%d", i);
- ret = MESA_load_profile_string_nodef(MR_ETHERFABRIC_DEFAULT_CFG, rule_str_section, "name", &e_manage->sr[i].name, MR_ETHERFABRIC_INVALID_RULE_STRING_ARG);
+ snprintf(rule_str_section, sizeof(rule_str_section), "etherfabric_service:%d", i);
+ ret = MESA_load_profile_string_nodef(MR_ETHERFABRIC_DEFAULT_CFG, rule_str_section, "name", &e_manage->es_rule_buf[i].name, MR_ETHERFABRIC_INVALID_RULE_STRING_ARG);
if (ret < 0)
{
MR_ERROR("The : %s ,No Config The 'name' !!!",rule_str_section);
@@ -184,16 +215,6 @@ int parser_etherfabric_conf(struct sc_main * sc, struct etherfabric_management *
MR_ERROR("The : %s ,No Config The 'type' !!!",rule_str_section);
return RT_ERR;
}
- /* Save type */
- if (strcmp(str_buf, "etherfabric") == 0)
- {
- e_manage->sr[i].type = SERVICE_ETHERFABRIC;
- }
- else
- {
- MR_ERROR("The Rule '%s' 'type' Invalid,Current Only Support 'etherfabric' !!! ",rule_str_section);
- return RT_ERR;
- }
ret = MESA_load_profile_string_nodef(MR_ETHERFABRIC_DEFAULT_CFG, rule_str_section, "mode", &str_buf, MR_ETHERFABRIC_INVALID_RULE_STRING_ARG);
if (ret < 0)
@@ -204,7 +225,7 @@ int parser_etherfabric_conf(struct sc_main * sc, struct etherfabric_management *
/* Save mode */
if (strcmp(str_buf, "virtual-wire") == 0)
{
- e_manage->sr[i].mode = MODE_VIRTUAL_WIRE;
+ e_manage->es_rule_buf[i].mode = MODE_VIRTUAL_WIRE;
}
else
{
@@ -227,7 +248,7 @@ int parser_etherfabric_conf(struct sc_main * sc, struct etherfabric_management *
}
/* Save listen_ip */
- e_manage->sr[i].listen_ip_v4 = ip_addr.s_addr;
+ e_manage->es_rule_buf[i].listen_ip_v4 = ip_addr.s_addr;
ret = MESA_load_profile_uint_def(MR_ETHERFABRIC_DEFAULT_CFG, rule_str_section, "listen_port", &listen_port, MR_ETHERFABRIC_INVALID_RULE_ARG);
if (ret < 0)
@@ -243,7 +264,7 @@ int parser_etherfabric_conf(struct sc_main * sc, struct etherfabric_management *
}
/* Save listen_port */
- e_manage->sr[i].listen_port = htons((uint16_t)listen_port);
+ e_manage->es_rule_buf[i].listen_port = htons((uint16_t)listen_port);
}
/* Create 'Port Id To Tx Node Index' Table */
@@ -260,7 +281,7 @@ int parser_etherfabric_conf(struct sc_main * sc, struct etherfabric_management *
{
if (strncmp(next_edges[i],tx_node_name,sizeof(tx_node_name)) == 0)
{
- e_manage->id_to_node[phydev_iter->port_id] = i;
+ e_manage->port_tx_map[phydev_iter->port_id] = i;
break;
}
}
@@ -269,37 +290,108 @@ int parser_etherfabric_conf(struct sc_main * sc, struct etherfabric_management *
return RT_SUCCESS;
}
-/* Dump Etherfabric Config */
-void dump_etherfabric_config(struct etherfabric_management * e_manage)
+/* Parsing The Etherfabric Link Rule Config */
+int parser_etherfabric_link_conf(struct sc_main * sc, struct etherfabric_management * e_manage)
{
- struct in_addr listen_addr;
+    uint32_t rule_num = 0,etherfabric_link_id = 0,virtual_wire_id = 0;
+ struct in_addr ip_addr;
+ char rule_str_section[MR_STRING_MAX];
+ char str_ip_addr[MR_STRING_MAX];
+ char str_mac_addr[MR_STRING_MAX];
- MR_INFO(" ");
- MR_INFO("Etherfabric Config:");
- MR_INFO(" Total Rule Num : %u",e_manage->rule_num);
+ /* Parsing Etherfabric Link Rule Num */
+ int ret = MESA_load_profile_uint_def(MR_ETHERFABRIC_DEFAULT_CFG, "link_conf", "rule_num", &rule_num, MR_ETHERFABRIC_INVALID_RULE_ARG);
- for (int i = 0; i < e_manage->rule_num; i++)
+ if (ret < 0)
{
- struct service_rule * sr = &e_manage->sr[i];
+        /* No Config The Etherfabric Link Rule Num */
+        MR_INFO("No Config The Etherfabric Link Rule Num !!!!");
+ return RT_SUCCESS;
+ }
- listen_addr.s_addr = sr->listen_ip_v4;
+ /* Check The Etherfabric Link Rule Num */
+ if ((rule_num == MR_ETHERFABRIC_INVALID_RULE_ARG) || (rule_num > MR_ETHERFABRIC_MAX_RULE))
+ {
+ MR_ERROR("The Etherfabric Link Rule Num: %u Is Invalid,The Rule Num Range From 1 to '%d' ",rule_num,MR_ETHERFABRIC_MAX_RULE);
+ return RT_ERR;
+ }
- MR_INFO(" ");
- MR_INFO(" Rule Id : %d",i);
- MR_INFO(" Rule Name : %s",sr->name);
+ /* Save The Etherfabric Link Rule Num */
+ e_manage->el_rule_num = (uint16_t)rule_num;
- switch (sr->type)
+ /* Parsing All Link Rule Config */
+ for (int i = 0; i < rule_num; i++)
+ {
+ struct etherfabric_link_rule * el_rule_item = &e_manage->el_rule_buf[i];
+ snprintf(rule_str_section, sizeof(rule_str_section), "etherfabric_link:%d", i);
+ ret = MESA_load_profile_uint_def(MR_ETHERFABRIC_DEFAULT_CFG, rule_str_section, "etherfabric_link_id", &etherfabric_link_id, MR_ETHERFABRIC_INVALID_RULE_ARG);
+ if (ret < 0)
{
- case SERVICE_ETHERFABRIC:
- MR_INFO(" Rule Type : etherfabric");
- break;
+ MR_ERROR("The : %s ,No Config The 'etherfabric_link_id' !!!",rule_str_section);
+ return RT_ERR;
+ }
- default:
+ ret = MESA_load_profile_uint_def(MR_ETHERFABRIC_DEFAULT_CFG, rule_str_section, "virtual_wire_id", &virtual_wire_id, MR_ETHERFABRIC_INVALID_RULE_ARG);
+ if (ret < 0)
+ {
+ MR_ERROR("The : %s ,No Config The 'virtual_wire_id' !!!",rule_str_section);
+ return RT_ERR;
+ }
- break;
+ ret = MESA_load_profile_string_nodef(MR_ETHERFABRIC_DEFAULT_CFG, rule_str_section, "etherfabric_ip", &str_ip_addr, MR_ETHERFABRIC_INVALID_RULE_STRING_ARG);
+ if (ret < 0)
+ {
+ MR_ERROR("The : %s ,No Config The 'etherfabric_ip' !!!",rule_str_section);
+ return RT_ERR;
+ }
+
+    ret = inet_pton(AF_INET, str_ip_addr, &ip_addr);
+    /* inet_pton returns 0 for a malformed address and 1 on success, so `< 0` misses invalid input */
+    if (ret <= 0)
+ {
+ MR_ERROR("The : %s 'etherfabric_ip' Is Invalid: %s",rule_str_section,str_ip_addr);
+ return RT_ERR;
}
- switch (sr->mode)
+ ret = MESA_load_profile_string_nodef(MR_ETHERFABRIC_DEFAULT_CFG, rule_str_section, "etherfabric_mac", &str_mac_addr, MR_ETHERFABRIC_INVALID_RULE_STRING_ARG);
+ if (ret < 0)
+ {
+        MR_ERROR("The : %s ,No Config The 'etherfabric_mac' !!!",rule_str_section);
+ return RT_ERR;
+ }
+
+ if(etherfabric_string_to_mac(str_mac_addr,&el_rule_item->etherfabric_mac_addr) == RT_ERR)
+ {
+ MR_ERROR("The : %s 'etherfabric_mac' Is Invalid: %s",rule_str_section,str_mac_addr);
+ return RT_ERR;
+ }
+
+ el_rule_item->etherfabric_link_id = (uint16_t)etherfabric_link_id;
+ el_rule_item->virtual_wire_id = (uint16_t)virtual_wire_id;
+ el_rule_item->etherfabric_ip_v4 = ip_addr.s_addr;
+ }
+ return RT_SUCCESS;
+}
+
+/* Dump Etherfabric Service Config */
+void dump_etherfabric_service_config(struct etherfabric_management * e_manage)
+{
+ struct in_addr listen_addr;
+
+ MR_INFO(" ");
+ MR_INFO("Etherfabric Service Config:");
+ MR_INFO(" Total Rule Num : %u",e_manage->es_rule_num);
+
+ for (int i = 0; i < e_manage->es_rule_num; i++)
+ {
+ struct etherfabric_service_rule * es_rule_item = &e_manage->es_rule_buf[i];
+
+ listen_addr.s_addr = es_rule_item->listen_ip_v4;
+
+ MR_INFO(" ");
+ MR_INFO(" Rule Id : %d",i);
+ MR_INFO(" Rule Name : %s",es_rule_item->name);
+
+ switch (es_rule_item->mode)
{
case MODE_VIRTUAL_WIRE:
MR_INFO(" Rule Mode : virtual-wire");
@@ -311,7 +403,33 @@ void dump_etherfabric_config(struct etherfabric_management * e_manage)
}
MR_INFO(" Listen Ip : %s", inet_ntoa(listen_addr));
- MR_INFO(" Listen Port : %u",htons(sr->listen_port));
+ MR_INFO(" Listen Port : %u",htons(es_rule_item->listen_port));
+ }
+}
+
+/* Dump Etherfabric Link Config
+ * Logs every parsed etherfabric link rule (link id, virtual wire id, peer IP
+ * and MAC) via MR_INFO. Read-only diagnostic, called after a successful
+ * parser_etherfabric_link_conf(). */
+void dump_etherfabric_link_config(struct etherfabric_management * e_manage)
+{
+ struct in_addr etherfabric_ip_v4;
+ char mac_addr[MR_STRING_MAX];
+
+ MR_INFO(" ");
+ MR_INFO("Etherfabric Link Config:");
+ MR_INFO(" Total Rule Num : %u",e_manage->el_rule_num);
+
+ for (int i = 0; i < e_manage->el_rule_num; i++)
+ {
+ struct etherfabric_link_rule * el_rule_item = &e_manage->el_rule_buf[i];
+
+ /* Convert the stored IPv4 (network byte order, from inet_pton at parse
+  * time) and the binary MAC into printable form for the log lines below. */
+ etherfabric_ip_v4.s_addr = el_rule_item->etherfabric_ip_v4;
+ etherfabric_mac_to_string(mac_addr, sizeof(mac_addr), &el_rule_item->etherfabric_mac_addr);
+
+ MR_INFO(" ");
+ MR_INFO(" Rule Id : %d",i);
+ MR_INFO(" Link Id : %u",el_rule_item->etherfabric_link_id);
+ MR_INFO(" Virtual Wire Id : %u",el_rule_item->virtual_wire_id);
+ /* inet_ntoa() returns a pointer to a static buffer; safe here because the
+  * result is consumed immediately by MR_INFO and this is single-threaded
+  * dump-time code. */
+ MR_INFO(" Etherfabric IP : %s",inet_ntoa(etherfabric_ip_v4));
+ MR_INFO(" Etherfabric Mac : %s",mac_addr);
 }
}
@@ -327,21 +445,27 @@ int etherfabric_init(struct sc_main * sc)
e_manage = &em->e_manage;
sc->etherfabric_node_main = em;
- /* 1. Parser The Etherfabric Service Rule Config */
- ret = parser_etherfabric_conf(sc,e_manage);
+ /* 1. Parsing The Etherfabric Service Rule Config */
+ ret = parser_etherfabric_service_conf(sc,e_manage);
/* 2. Dump The Config And Update Ip Listen Num */
if (ret != RT_ERR)
{
uint32_t listen_ip_v4[MR_ETHERFABRIC_MAX_RULE];
- for (int i = 0; i < e_manage->rule_num; i++)
+ for (int i = 0; i < e_manage->es_rule_num; i++)
{
- listen_ip_v4[i] = e_manage->sr[i].listen_ip_v4;
+ listen_ip_v4[i] = e_manage->es_rule_buf[i].listen_ip_v4;
}
- dump_etherfabric_config(e_manage);
- eth_ingress_update_ip_listen_num(e_manage->rule_num,listen_ip_v4);
+ dump_etherfabric_service_config(e_manage);
+ eth_ingress_update_ip_listen_num(e_manage->es_rule_num,listen_ip_v4);
+ }
+
+ ret = parser_etherfabric_link_conf(sc,e_manage);
+ if (ret != RT_ERR)
+ {
+ dump_etherfabric_link_config(e_manage);
}
return ret;
@@ -349,22 +473,42 @@ int etherfabric_init(struct sc_main * sc)
/************************************** Etherfabric Node **************************************/
/* Match Etherfabric Service Rule */
-struct service_rule * match_etherfabric_rule(struct etherfabric_management * e_manage,struct rte_ipv4_hdr * ipv4_hdr,struct rte_udp_hdr * udp_hdr)
+struct etherfabric_service_rule * match_etherfabric_service_rule(struct etherfabric_management * e_manage,struct rte_ipv4_hdr * ipv4_hdr,struct rte_udp_hdr * udp_hdr,uint16_t * service_index)
{
- struct service_rule * sr = NULL;
+ struct etherfabric_service_rule * es_rule_item = NULL;
- for (int i = 0; i < e_manage->rule_num; i++)
+ for (int i = 0; i < e_manage->es_rule_num; i++)
{
- sr = &e_manage->sr[i];
- if ((sr->listen_port == udp_hdr->dst_port) && (sr->listen_ip_v4 == ipv4_hdr->dst_addr))
+ es_rule_item = &e_manage->es_rule_buf[i];
+ if ((es_rule_item->listen_port == udp_hdr->dst_port) && (es_rule_item->listen_ip_v4 == ipv4_hdr->dst_addr))
{
- return sr;
+ *service_index = i;
+ return es_rule_item;
}
}
return NULL;
}
+/* Match Etherfabric Link Rule
+ * Scans e_manage->el_rule_buf for an entry whose etherfabric_ip_v4 equals the
+ * packet's IPv4 source address AND whose etherfabric_mac_addr equals the
+ * Ethernet source address. On a match, stores the rule index in *link_index
+ * and returns RT_SUCCESS; returns RT_ERR when no rule matches. */
+int match_etherfabric_link_rule(struct etherfabric_management * e_manage,struct rte_ether_hdr * ether_hdr,struct rte_ipv4_hdr * ipv4_hdr,uint16_t * link_index)
+{
+    struct etherfabric_link_rule * el_rule_item = NULL;
+    /* BUG FIX: condition was "e_manage->el_rule_num" (missing "i <"), which is
+     * non-zero whenever any link rule is configured, so a non-matching packet
+     * walked i past the end of el_rule_buf (out-of-bounds read / UB) instead
+     * of falling through to RT_ERR. Bound the index by the rule count. */
+    for (int i = 0; i < e_manage->el_rule_num; i++)
+    {
+        el_rule_item = &e_manage->el_rule_buf[i];
+        /* Both source IP and source MAC must match the provisioned peer. */
+        if (el_rule_item->etherfabric_ip_v4 == ipv4_hdr->src_addr)
+        {
+            if (memcmp(&el_rule_item->etherfabric_mac_addr, &ether_hdr->src_addr, sizeof(struct rte_ether_addr)) == 0)
+            {
+                *link_index = i;
+                return RT_SUCCESS;
+            }
+        }
+    }
+    return RT_ERR;
+}
+
/* Etherfabric Node Init Function */
static int etherfabric_ingress_node_init(const struct rte_graph * graph, struct rte_node * node)
{
@@ -396,15 +540,14 @@ static __rte_always_inline uint16_t etherfabric_ingress_node_process(struct rte_
/* 3. Single Packet Processing */
while (n_left_from > 0)
{
- uint8_t ip_save_flg = ETHERFABRIC_IP_NO_SAVE,udp_save_flg = ETHERFABRIC_UDP_NO_SAVE;
- struct pkt_parser pkt_parser;
- struct pkt_parser * pkt_parser_ptr = &pkt_parser;
- struct service_tag * ingress_tag = NULL,* egress_tag = NULL;
+ uint8_t parse_ether_flag = CURRENT_LAYER_NOT_PARSED,parse_ipv4_flag = CURRENT_LAYER_NOT_PARSED,parse_udp_flag = CURRENT_LAYER_NOT_PARSED;
+ struct pkt_parser * pkt_parser_ptr = NULL;
+ struct etherfabric_service_tag * tag_item = NULL;
struct rte_ether_hdr * ether_hdr = NULL;
struct rte_ipv4_hdr * ipv4_hdr = NULL;
struct rte_udp_hdr * udp_hdr = NULL;
struct g_vxlan_hdr * g_vxlan_hdr = NULL;
- struct service_rule * sr = NULL;
+ struct etherfabric_service_rule * es_rule_item = NULL;
struct private_data * private_ctrlzone = NULL;
mbuf0 = pkts[0];
@@ -413,32 +556,27 @@ static __rte_always_inline uint16_t etherfabric_ingress_node_process(struct rte_
n_left_from -= 1;
private_ctrlzone = mrbuf_cz_data(mbuf0, 0);
- ingress_tag = &private_ctrlzone->ingress_tag;
- egress_tag = &private_ctrlzone->egress_tag;
+ pkt_parser_ptr = &private_ctrlzone->pkt_parser;
+ tag_item = &private_ctrlzone->tag.etherfabric;
MR_ETHERFABRIC_INGRESS_STAT_ADD(em,gid,total_pkts,1);
- /* 1. Parser PKT */
- pkt_parser_init(pkt_parser_ptr, LAYER_TYPE_ALL, MR_PKT_PARSE_RESULT_MAX);
- complex_parser_ether(pkt_parser_ptr, rte_pktmbuf_mtod(mbuf0,const char *));
-
- for (unsigned int i = 0; i < pkt_parser_ptr->nr_results; i++)
+ /* Parsing PKT */
+ for (unsigned int i = pkt_parser_ptr->layer_offset; i < pkt_parser_ptr->nr_results; i++)
{
struct pkt_parser_result * result = &pkt_parser_ptr->results[i];
- /* 1.1 Get Src Mac Addr */
- if (result->this_layer_type == LAYER_TYPE_ETHER)
+ if ((result->this_layer_type == LAYER_TYPE_ETHER) && (parse_ether_flag == CURRENT_LAYER_NOT_PARSED))
{
ether_hdr = (struct rte_ether_hdr *)(result->data);
- ingress_tag->src_mac_addr = egress_tag->src_mac_addr = ether_hdr->src_addr;
+ parse_ether_flag = CURRENT_LAYER_ALREADY_PARSED;
continue;
}
- else if ((result->this_layer_type == LAYER_TYPE_IPV4) && (ip_save_flg == ETHERFABRIC_IP_NO_SAVE))
+ else if ((result->this_layer_type == LAYER_TYPE_IPV4) && (parse_ipv4_flag == CURRENT_LAYER_NOT_PARSED))
{
ipv4_hdr = (struct rte_ipv4_hdr *)(result->data);
- ingress_tag->src_addr_ipv4 = egress_tag->src_addr_ipv4 = ipv4_hdr->src_addr;
- private_ctrlzone->package_type = IPV4_PACKAGE;
offset = sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) + sizeof(struct rte_udp_hdr) + sizeof(struct g_vxlan_hdr);
- ip_save_flg = ETHERFABRIC_IP_ALREADY_SAVE;
+ pkt_parser_ptr->layer_offset += MR_ETHERFABRIC_LAYER_OFFSET_V4;
+ parse_ipv4_flag = CURRENT_LAYER_ALREADY_PARSED;
continue;
}
else if (result->this_layer_type == LAYER_TYPE_IPV6)
@@ -446,58 +584,61 @@ static __rte_always_inline uint16_t etherfabric_ingress_node_process(struct rte_
/* Current Not Support IPv6 */
continue;
}
- else if ((result->this_layer_type == LAYER_TYPE_UDP) && (udp_save_flg == ETHERFABRIC_UDP_NO_SAVE))
+ else if ((result->this_layer_type == LAYER_TYPE_UDP) && (parse_udp_flag == CURRENT_LAYER_NOT_PARSED))
{
udp_hdr = (struct rte_udp_hdr *)(result->data);
- udp_save_flg = ETHERFABRIC_UDP_ALREADY_SAVE;
+ parse_udp_flag = CURRENT_LAYER_ALREADY_PARSED;
continue;
}
else if (result->this_layer_type == LAYER_TYPE_G_VXLAN)
{
g_vxlan_hdr = (struct g_vxlan_hdr *)(result->data);
- ingress_tag->vlan_id_half_high = egress_tag->vlan_id_half_high = g_vxlan_hdr->vlan_id_half_high;
- ingress_tag->link_layer_type = egress_tag->link_layer_type = g_vxlan_hdr->link_layer_type;
- ingress_tag->vlan_id_half_low = egress_tag->vlan_id_half_low = g_vxlan_hdr->vlan_id_half_low;
- ingress_tag->dir = egress_tag->dir = g_vxlan_hdr->dir;
- ingress_tag->link_id = egress_tag->link_id = g_vxlan_hdr->link_id;
- ingress_tag->online_test = egress_tag->online_test = g_vxlan_hdr->online_test;
+ tag_item->dir = g_vxlan_hdr->dir;
continue;
}
}
- /* 2. Check Pkt */
- if (((ether_hdr == NULL) || (ipv4_hdr == NULL) || (udp_hdr == NULL) || (g_vxlan_hdr == NULL)))
+ /* Check Pkt */
+ if (unlikely((ether_hdr == NULL) || (ipv4_hdr == NULL) || (udp_hdr == NULL) || (g_vxlan_hdr == NULL)))
{
next0 = ETHERFABRIC_INGRESS_NEXT_PKT_DROP;
MR_ETHERFABRIC_INGRESS_STAT_ADD(em,gid,invalid_pkts,1);
goto node_enqueue;
}
- /* 3. Match Etherfabric Service Rule */
- sr = match_etherfabric_rule(e_manage,ipv4_hdr,udp_hdr);
- if (sr == NULL)
+ /* Match Etherfabric Service Rule */
+ es_rule_item = match_etherfabric_service_rule(e_manage,ipv4_hdr,udp_hdr,&tag_item->service_index);
+ if (unlikely(es_rule_item == NULL))
{
next0 = ETHERFABRIC_INGRESS_NEXT_PKT_DROP;
- MR_ETHERFABRIC_INGRESS_STAT_ADD(em,gid,no_match_pkts,1);
+ MR_ETHERFABRIC_INGRESS_STAT_ADD(em,gid,no_match_service_pkts,1);
goto node_enqueue;
}
- /* 4. Fill Etherfabric Tag,And Save The Etherfabric Tag Ctrlzone,Current Not Support IPv6 */
- private_ctrlzone->service_type = sr->type;
+ /* Match Etherfabric Link Rule */
+ if (unlikely(match_etherfabric_link_rule(e_manage,ether_hdr,ipv4_hdr,&tag_item->link_index) == RT_ERR))
+ {
+ next0 = ETHERFABRIC_INGRESS_NEXT_PKT_DROP;
+ MR_ETHERFABRIC_INGRESS_STAT_ADD(em,gid,no_match_link_pkts,1);
+ goto node_enqueue;
+ }
+
+ /* Fill Etherfabric Tag,And Save The Etherfabric Tag Ctrlzone,Current Not Support IPv6 */
+ private_ctrlzone->service_type = MR_NODE_COMMON_ETHERFABRIC_SERVICE;
- /* 5. Set The Pkt Date Off */
+ /* Set The Pkt Date Off */
rte_pktmbuf_adj(mbuf0,offset);
- /* 6. Send The Pkt To Classifier */
+ /* Send The Pkt To Classifier */
next0 = ETHERFABRIC_INGRESS_NEXT_CLASSIFIER;
MR_ETHERFABRIC_INGRESS_STAT_ADD(em,gid,match_pkts,1);
node_enqueue:
- /* 7. Judge The Next Index Whether To Change */
+ /* Judge The Next Index Whether To Change */
if (unlikely(next_index != next0))
{
- /* 7.1 If The Next Index Has Been Changed,Enqueue Last Pkts */
+ /* If The Next Index Has Been Changed,Enqueue Last Pkts */
for (uint16_t i = 0; i < last_spec; i++)
rte_node_enqueue_x1(graph, node, next_index, from[i]);
from += last_spec;
@@ -506,12 +647,12 @@ node_enqueue:
}
else
{
- /* 7.2 If The Next Index Not Change, Update The Lasts */
+ /* If The Next Index Not Change, Update The Lasts */
last_spec ++;
}
}
- /* 8. Process The Remaining Packets */
+ /* Process The Remaining Packets */
if (likely(last_spec > 0))
{
for (uint16_t i = 0; i < last_spec; i++)
@@ -541,8 +682,8 @@ static int etherfabric_egress_node_init(const struct rte_graph * graph, struct r
return 0;
}
-/* Fill Ether IPv4 Udp Vxlan Hdr For The No Set Tag Pkt */
-void fill_ipv4_pkt_for_no_set_tag(struct rte_mbuf *mbuf)
+/* Fill Ether IPv4 Udp Vxlan Hdr For The Original Packet */
+void fill_ipv4_pkt_for_original_packet(struct rte_mbuf *mbuf)
{
struct rte_ether_hdr * eth_hdr = NULL,swap_eth_hdr;
@@ -550,10 +691,11 @@ void fill_ipv4_pkt_for_no_set_tag(struct rte_mbuf *mbuf)
/* swap eth_hdr */
eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
- swap_eth_hdr.dst_addr = eth_hdr->dst_addr;
- swap_eth_hdr.src_addr = eth_hdr->src_addr;
- eth_hdr->dst_addr = swap_eth_hdr.src_addr ;
- eth_hdr->src_addr = swap_eth_hdr.dst_addr ;
+ rte_ether_addr_copy(&eth_hdr->dst_addr, &swap_eth_hdr.dst_addr);
+ rte_ether_addr_copy(&eth_hdr->src_addr, &swap_eth_hdr.src_addr);
+ rte_ether_addr_copy(&swap_eth_hdr.src_addr, &eth_hdr->dst_addr);
+ rte_ether_addr_copy(&swap_eth_hdr.dst_addr, &eth_hdr->src_addr);
+
/* swap ip_hdr */
ip_hdr = rte_pktmbuf_mtod_offset(mbuf,struct rte_ipv4_hdr *, sizeof(struct rte_ether_hdr));
swap_ip_hdr.src_addr = ip_hdr->src_addr;
@@ -562,16 +704,23 @@ void fill_ipv4_pkt_for_no_set_tag(struct rte_mbuf *mbuf)
ip_hdr->dst_addr = swap_ip_hdr.src_addr;
}
-/* Fill Ether IPv4 Udp Vxlan Hdr For The Set Tag Pkt */
-void fill_ipv4_pkt_for_set_tag(struct sc_main * sc,struct service_tag * egress_tag,struct rte_mbuf *mbuf,uint16_t port_ingress)
+/* Fill Ether IPv4 Udp Vxlan Hdr For The Constructed Packet */
+void fill_ipv4_pkt_for_constructed_packet(struct sc_main * sc,struct etherfabric_service_tag * tag_item,struct rte_mbuf *mbuf,uint16_t port_ingress)
{
struct rte_ether_hdr * eth_hdr = NULL;
struct rte_ipv4_hdr * ip_hdr = NULL;
struct rte_udp_hdr * udp_hdr = NULL;
struct g_vxlan_hdr * g_vxlan_hdr = NULL;
struct rte_ipv4_hdr * ip_hdr_inner = NULL;
+ struct node_etherfabric_main * etherfabric_node_main = NULL;
+ struct etherfabric_service_rule * es_rule_item = NULL;
+ struct etherfabric_link_rule * el_rule_item = NULL;
struct phydev * dev = phydev_lookup_id(sc->phydev_main,port_ingress);
+ etherfabric_node_main = sc->etherfabric_node_main;
+ es_rule_item = &etherfabric_node_main->e_manage.es_rule_buf[tag_item->service_index];
+ el_rule_item = &etherfabric_node_main->e_manage.el_rule_buf[tag_item->link_index];
+
eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
ip_hdr = rte_pktmbuf_mtod_offset(mbuf,struct rte_ipv4_hdr *, sizeof(struct rte_ether_hdr));
udp_hdr = rte_pktmbuf_mtod_offset(mbuf,struct rte_udp_hdr *, sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr));
@@ -579,8 +728,8 @@ void fill_ipv4_pkt_for_set_tag(struct sc_main * sc,struct service_tag * egress_t
ip_hdr_inner = rte_pktmbuf_mtod_offset(mbuf,struct rte_ipv4_hdr *, 2*sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv4_hdr) + sizeof(struct rte_udp_hdr) + sizeof(struct g_vxlan_hdr));
/* Fill eth_hdr */
- eth_hdr->dst_addr = egress_tag->src_mac_addr;
- eth_hdr->src_addr = dev->ether_addr;
+ rte_ether_addr_copy(&el_rule_item->etherfabric_mac_addr, &eth_hdr->dst_addr);
+ rte_ether_addr_copy(&dev->ether_addr, &eth_hdr->src_addr);
eth_hdr->ether_type = 0x08;
/* Fill ip_hdr */
@@ -590,8 +739,8 @@ void fill_ipv4_pkt_for_set_tag(struct sc_main * sc,struct service_tag * egress_t
ip_hdr->fragment_offset = 0x0000;
ip_hdr->time_to_live = 0x40;
ip_hdr->next_proto_id = IPPROTO_UDP;
- ip_hdr->src_addr = egress_tag->dst_addr_ipv4;
- ip_hdr->dst_addr = egress_tag->src_addr_ipv4;
+ ip_hdr->src_addr = es_rule_item->listen_ip_v4;
+ ip_hdr->dst_addr = el_rule_item->etherfabric_ip_v4;
ip_hdr->total_length = ip_hdr_inner->total_length + sizeof(struct rte_ipv4_hdr) + sizeof(struct rte_udp_hdr) + sizeof(struct g_vxlan_hdr) + sizeof(struct rte_ether_hdr);
ip_hdr->hdr_checksum = rte_ipv4_cksum(ip_hdr);
@@ -606,12 +755,12 @@ void fill_ipv4_pkt_for_set_tag(struct sc_main * sc,struct service_tag * egress_t
g_vxlan_hdr->reserved[0] = 0x00;
g_vxlan_hdr->reserved[1] = 0x00;
g_vxlan_hdr->reserved[2] = 0x00;
- g_vxlan_hdr->vlan_id_half_high = egress_tag->vlan_id_half_high;
- g_vxlan_hdr->link_layer_type = egress_tag->link_layer_type;
- g_vxlan_hdr->vlan_id_half_low = egress_tag->vlan_id_half_low;
- g_vxlan_hdr->dir = egress_tag->dir;
- g_vxlan_hdr->link_id = egress_tag->link_id;
- g_vxlan_hdr->online_test = egress_tag->online_test;
+ g_vxlan_hdr->vlan_id_half_high = 0x00;
+ g_vxlan_hdr->link_layer_type = 0x00;
+ g_vxlan_hdr->vlan_id_half_low = 0x00;
+ g_vxlan_hdr->dir = tag_item->dir;
+ g_vxlan_hdr->link_id = el_rule_item->etherfabric_link_id;
+ g_vxlan_hdr->online_test = 0x00;
g_vxlan_hdr->r7 = 0x00;
g_vxlan_hdr->r6 = 0x00;
g_vxlan_hdr->r5= 0x00;
@@ -650,9 +799,11 @@ static __rte_always_inline uint16_t etherfabric_egress_node_process(struct rte_g
/* 3. Single Packet Processing */
while (n_left_from > 0)
{
+ uint8_t parse_ip_version_flag = CURRENT_LAYER_NOT_PARSED,ip_version = 0;
uint16_t port_ingress = 0;
- struct service_tag * egress_tag = NULL;
+ struct pkt_parser * pkt_parser_ptr = NULL;
struct private_data * private_ctrlzone = NULL;
+ struct etherfabric_service_tag * tag_item = NULL;
mbuf0 = pkts[0];
@@ -660,12 +811,27 @@ static __rte_always_inline uint16_t etherfabric_egress_node_process(struct rte_g
n_left_from -= 1;
private_ctrlzone = mrbuf_cz_data(mbuf0, 0);
+ pkt_parser_ptr = &private_ctrlzone->pkt_parser;
+ tag_item = &private_ctrlzone->tag.etherfabric;
port_ingress = private_ctrlzone->port_ingress;
- egress_tag = &private_ctrlzone->egress_tag;
+
MR_ETHERFABRIC_EGRESS_STAT_ADD(em,gid,total_pkts,1);
- /* 1. Fill The Package */
- switch (private_ctrlzone->package_type)
+ /* Parsing The Package */
+ for (unsigned int i = pkt_parser_ptr->layer_offset; i < pkt_parser_ptr->nr_results; i++)
+ {
+ struct pkt_parser_result * result = &pkt_parser_ptr->results[i];
+
+ if ((result->this_layer_type == LAYER_TYPE_IPV4) && (parse_ip_version_flag == CURRENT_LAYER_NOT_PARSED))
+ {
+ ip_version = IPV4_PACKAGE;
+ parse_ip_version_flag = CURRENT_LAYER_ALREADY_PARSED;
+ continue;
+ }
+ }
+
+ /* Fill The Package */
+ switch (ip_version)
{
case IPV4_PACKAGE:
{
@@ -674,15 +840,35 @@ static __rte_always_inline uint16_t etherfabric_egress_node_process(struct rte_g
rte_pktmbuf_prepend(mbuf0,offset);
/* Fill Ether IPv4 Udp Vxlan Hdr For The Pkt */
- if (likely(private_ctrlzone->tag_set_flag == NONE_TAG_SET))
+ if (likely(private_ctrlzone->original_packet_flag == MR_NODE_COMMON_ORIGINAL_PKT))
{
- fill_ipv4_pkt_for_no_set_tag(mbuf0);
+ fill_ipv4_pkt_for_original_packet(mbuf0);
MR_ETHERFABRIC_EGRESS_STAT_ADD(em,gid,ipv4_no_set_tag,1);
+ /* From Port Id Get Next Node Index */
+ next0 = e_manage->port_tx_map[port_ingress];
+ goto node_enqueue;
}
else
{
- fill_ipv4_pkt_for_set_tag(sc,egress_tag,mbuf0,port_ingress);
+ if (unlikely((tag_item->link_index >= MR_ETHERFABRIC_MAX_RULE )))
+ {
+ next0 = ETHERFABRIC_EGRESS_NEXT_PKT_DROP;
+ MR_ETHERFABRIC_EGRESS_STAT_ADD(em,gid,link_index_invalid,1);
+ goto node_enqueue;
+ }
+
+ if (unlikely((tag_item->service_index >= MR_ETHERFABRIC_MAX_RULE )))
+ {
+ next0 = ETHERFABRIC_EGRESS_NEXT_PKT_DROP;
+ MR_ETHERFABRIC_EGRESS_STAT_ADD(em,gid,service_index_invalid,1);
+ goto node_enqueue;
+ }
+
+ fill_ipv4_pkt_for_constructed_packet(sc,tag_item,mbuf0,port_ingress);
MR_ETHERFABRIC_EGRESS_STAT_ADD(em,gid,ipv4_set_tag,1);
+ /* From Port Id Get Next Node Index */
+ next0 = e_manage->port_tx_map[port_ingress];
+ goto node_enqueue;
}
}
break;
@@ -694,9 +880,6 @@ static __rte_always_inline uint16_t etherfabric_egress_node_process(struct rte_g
break;
}
- /* 2. From Port Id Get Next Node Index */
- next0 = e_manage->id_to_node[port_ingress];
-
node_enqueue:
/* 3. Judge The Next Index Whether To Change */
if (unlikely(next_index != next0))
@@ -763,8 +946,10 @@ cJSON * etherfabric_ingress_node_monit_loop(struct sc_main * sc)
cJSON_AddNumberToObject(graph_obj, "total_pkts", stats);
stats = stat_item->invalid_pkts;
cJSON_AddNumberToObject(graph_obj, "invalid_pkts", stats);
- stats = stat_item->no_match_pkts;
- cJSON_AddNumberToObject(graph_obj, "no_match_pkts", stats);
+ stats = stat_item->no_match_service_pkts;
+ cJSON_AddNumberToObject(graph_obj, "no_match_service_pkts", stats);
+ stats = stat_item->no_match_link_pkts;
+ cJSON_AddNumberToObject(graph_obj, "no_match_link_pkts", stats);
stats = stat_item->match_pkts;
cJSON_AddNumberToObject(graph_obj, "match_pkts", stats);
}
diff --git a/service/src/node_lb.c b/service/src/node_lb.c
index 36a2708..8efa674 100644
--- a/service/src/node_lb.c
+++ b/service/src/node_lb.c
@@ -138,7 +138,7 @@ enum
/* Dynamic Load Balance Rule Add Deal Result */
enum {
- DYNAMIC_LB_RULE_ADD_SUCESS = 0,
+ DYNAMIC_LB_RULE_ADD_SUCCESS = 0,
DYNAMIC_LB_RULE_ADD_NUM_OUT_OF_MAX,
DYNAMIC_LB_RULE_ADD_NO_FREE_GROUP_ITEM,
DYNAMIC_LB_RULE_ADD_GROUP_ID_NO_CONFIG,
@@ -157,7 +157,7 @@ enum {
/* Dynamic Load Balance Rule Delete Deal Result */
enum {
- DYNAMIC_LB_RULE_DEL_SUCESS = 0,
+ DYNAMIC_LB_RULE_DEL_SUCCESS = 0,
DYNAMIC_LB_RULE_DEL_RULE_NUM_IS_ZERO,
DYNAMIC_LB_RULE_DEL_GROUP_ID_NO_CONFIG,
DYNAMIC_LB_RULE_DEL_GROUP_ID_INVALID
@@ -779,7 +779,7 @@ int parse_lb_rule_for_add(struct sc_main * sc,struct lb_management * lb_manage,c
lb_manage->group_num ++;
memcpy(&lb_manage->lb_groups[item_id],group_item,sizeof(struct lb_group));
- return DYNAMIC_LB_RULE_ADD_SUCESS;
+ return DYNAMIC_LB_RULE_ADD_SUCCESS;
}
/* Add A Single Dynamic Load Balance Rule Callback */
@@ -803,7 +803,7 @@ int lb_single_rule_add(struct sc_main * sc,cJSON * j_rule,cJSON *response)
/* LB Management Copy */
lb_management_copy(old_lb_management,new_lb_management);
ret = parse_lb_rule_for_add(sc,new_lb_management,j_rule,response);
- if (ret == DYNAMIC_LB_RULE_ADD_SUCESS)
+ if (ret == DYNAMIC_LB_RULE_ADD_SUCCESS)
{
/* Update LB Management */
global_lb_main->lb_manage = new_lb_management;
@@ -866,7 +866,7 @@ int parse_lb_rule_for_del(struct lb_management * lb_manage,cJSON * j_rule,cJSON
memset(del_group_item,0,sizeof(struct lb_group));
lb_manage->group_num --;
- return DYNAMIC_LB_RULE_DEL_SUCESS;
+ return DYNAMIC_LB_RULE_DEL_SUCCESS;
}
/* Delete A Single Dynamic Load Balance Rule Callback */
@@ -890,7 +890,7 @@ int lb_single_rule_del(cJSON * j_rule,cJSON *response)
/* LB Management Copy */
lb_management_copy(old_lb_management,new_lb_management);
ret = parse_lb_rule_for_del(new_lb_management,j_rule,response);
- if (ret == DYNAMIC_LB_RULE_DEL_SUCESS)
+ if (ret == DYNAMIC_LB_RULE_DEL_SUCCESS)
{
/* Update LB Management */
global_lb_main->lb_manage = new_lb_management;