summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorzhangchengwei <[email protected]>2019-02-21 17:29:40 +0800
committerzhangchengwei <[email protected]>2019-02-21 17:29:40 +0800
commit774164ac95f053e7c9b4d56e56110a4444678cd0 (patch)
treed908575d42aec5b35740d84e13ac657e9cdf807e
parent6b0b29a28e3453e813191baf2a8eeb2a924e6fca (diff)
解封带开关的IP域表时使用独立的线程执行;要求开关表初始化以及加载顺序必须在域表之前,开关不存在时域表被丢弃;开关表不要和域表在同一个maat实例(开关收取遗失);
-rw-r--r--bin/PANGU_VALVE_CONF_rawfile/MAAT_REDIS_INFO.conf18
-rw-r--r--bin/PANGU_VALVE_CONF_rawfile/MAAT_REDIS_INFO.conf.last_wired8
-rw-r--r--bin/PANGU_VALVE_CONF_rawfile/SERVICE_ID_MAP.conf11
-rw-r--r--bin/PANGU_VALVE_CONF_rawfile/SERVICE_ID_MAP.conf.last_wired64
-rw-r--r--bin/conf/maat_table_info_valid.conf1
-rw-r--r--bin/conf/pangu_valve.conf14
-rw-r--r--bin/conf/table_info/service_id_map.conf39
-rw-r--r--bin/conf/table_info/table_info_one.conf19
-rw-r--r--bin/conf/table_info/table_info_tree.conf2
-rw-r--r--src/pg_valve_c3.cpp34
-rw-r--r--src/pg_valve_c3.h1
-rw-r--r--src/pg_valve_deal.cpp201
-rw-r--r--src/pg_valve_deal.h12
-rw-r--r--src/pg_valve_maat.cpp54
-rw-r--r--src/pg_valve_maat.h3
-rw-r--r--src/pg_valve_main.cpp8
-rw-r--r--src/pg_valve_main.h5
-rw-r--r--src/pg_valve_stat.cpp8
18 files changed, 217 insertions, 285 deletions
diff --git a/bin/PANGU_VALVE_CONF_rawfile/MAAT_REDIS_INFO.conf b/bin/PANGU_VALVE_CONF_rawfile/MAAT_REDIS_INFO.conf
index f0900e4..602356d 100644
--- a/bin/PANGU_VALVE_CONF_rawfile/MAAT_REDIS_INFO.conf
+++ b/bin/PANGU_VALVE_CONF_rawfile/MAAT_REDIS_INFO.conf
@@ -1,10 +1,8 @@
-#INS_NAME MAAT_ROOT VERSION_FILE REDIS_IP RPORT RINDEX TABLE_NAMES_LIST
-#STATIC ./conf/maat_cfg1/ ./conf/version_static.txt 10.4.34.1 6379 5 APP_POLICY;APP_DOMAIN;PXY_INTERCEPT_DOMAIN;APP_STATIC_SEV_IP;PXY_INTERCEPT_IP;INLINE_IP_CB;WHITE_LIST_IP;PXY_OBJ_SPOOFING_IP_POOL
-#DYN_IPD ./conf/maat_cfg2/ ./conf/version_dynipd.txt 10.4.20.151 6379 0 APP_DYN_SEV_IP_CB;PXY_DYN_SEV_IP_CB
-#DYN_IPD ./conf/maat_cfg2/ ./conf/version_dynipd.txt 10.4.20.151 6379 0 PXY_DYN_SEV_IP_CB
-#DYN_YSP ./conf/maat_cfg2/ ./conf/version_dynysp.txt 10.4.20.152 6379 1 MM_DYN_VOIP_DROP;ANTI_DDOS_ATTACK_CB
-#DYN_DK ./conf/maat_cfg2/ ./conf/version_dyndk.txt 10.4.20.153 6379 10 DK_CLI_IP_CB
-DYNTEST1 ./conf/maat_cfg2/ ./conf/version_dyn1.txt 192.168.10.27 7001 2 APP_POLICY;IR_INTERCEPT_IP
-DYNTEST2 ./conf/maat_cfg2/ ./conf/version_dyn2.txt 192.168.10.27 7002 2 IR_INTERCEPT_IP
-DYNTEST3 ./conf/maat_cfg2/ ./conf/version_dyn3.txt 192.168.10.27 7003 2 IR_INTERCEPT_IP
-DYNTEST4 ./conf/maat_cfg2/ ./conf/version_dyn4.txt 192.168.10.27 7004 2 IR_INTERCEPT_IP
+#No. INS_NAME MAAT_ROOT REDIS_IP RPORT RINDEX TABLE_NAMES_LIST
+1 STATIC ./conf/maat_cfg1/ 10.4.34.1 6379 5 APP_POLICY;APP_DOMAIN;PXY_INTERCEPT_DOMAIN;APP_STATIC_SEV_IP;PXY_INTERCEPT_IP;INLINE_IP_CB;WHITE_LIST_IP;PXY_OBJ_SPOOFING_IP_POOL
+#2 DYN_IPD ./conf/maat_cfg2/ 10.4.20.151 6379 0 APP_DYN_SEV_IP_CB;IR_INTERCEPT_IP
+3 DYN_IPD ./conf/maat_cfg2/ 10.4.20.151 6379 0 PXY_DYN_SEV_IP_CB
+4 DYN_YSP ./conf/maat_cfg2/ 10.4.20.152 6379 1 MM_DYN_VOIP_DROP
+#5 DYN_DK ./conf/maat_cfg2/ 10.4.20.153 6379 10 DK_CLI_IP_CB
+6 DYN_DOS ./conf/maat_cfg2/ 10.4.20.154 6379 0 ANTI_DDOS_ATTACK_CB
+
diff --git a/bin/PANGU_VALVE_CONF_rawfile/MAAT_REDIS_INFO.conf.last_wired b/bin/PANGU_VALVE_CONF_rawfile/MAAT_REDIS_INFO.conf.last_wired
deleted file mode 100644
index 751382a..0000000
--- a/bin/PANGU_VALVE_CONF_rawfile/MAAT_REDIS_INFO.conf.last_wired
+++ /dev/null
@@ -1,8 +0,0 @@
-#INS_NAME MAAT_ROOT VERSION_FILE REDIS_IP RPORT RINDEX TABLE_NAMES_LIST
-STATIC ./conf/maat_cfg1/ ./conf/version_static.txt 10.4.34.1 6379 5 APP_POLICY;APP_DOMAIN;PXY_INTERCEPT_DOMAIN;APP_STATIC_SEV_IP;PXY_INTERCEPT_IP;INLINE_IP_CB;WHITE_LIST_IP
-STATIC_TMP ./conf/maat_cfg1/ ./conf/version_static.txt 10.4.34.1 6379 4 PXY_OBJ_SPOOFING_IP_POOL
-#DYN_IPD ./conf/maat_cfg2/ ./conf/version_dynipd.txt 10.4.20.151 6379 0 APP_DYN_SEV_IP_CB;PXY_DYN_SEV_IP_CB
-DYN_IPD ./conf/maat_cfg2/ ./conf/version_dynipd.txt 10.4.20.151 6379 0 PXY_DYN_SEV_IP_CB
-DYN_YSP ./conf/maat_cfg2/ ./conf/version_dynysp.txt 10.4.20.152 6379 1 MM_DYN_VOIP_DROP
-#DYN_DK ./conf/maat_cfg2/ ./conf/version_dyndk.txt 10.4.20.153 6379 10 DK_CLI_IP_CB
-DYN_DOS ./conf/maat_cfg2/ ./conf/version_dyndos.txt 10.4.20.154 6379 0 ANTI_DDOS_ATTACK_CB;IR_INTERCEPT_IP
diff --git a/bin/PANGU_VALVE_CONF_rawfile/SERVICE_ID_MAP.conf b/bin/PANGU_VALVE_CONF_rawfile/SERVICE_ID_MAP.conf
index 8e2e6bf..301edb4 100644
--- a/bin/PANGU_VALVE_CONF_rawfile/SERVICE_ID_MAP.conf
+++ b/bin/PANGU_VALVE_CONF_rawfile/SERVICE_ID_MAP.conf
@@ -1,4 +1,4 @@
-#service_id limit_rate grule_serv_type RULE_SCOPE limit_num
+#service_id limit_rate grule_serv_type durable limit_num
#WHITE_LIST_IP white list
1 0 4 1 1000
#INLINE_IP_CB DROP
@@ -48,17 +48,18 @@
#APP_POLICY REJECT 0x21
33 0 8 0 1000
-1028 0 8 1 1000
+1028 0 8 0 1000
#APP_POLICY MONITOR 0x91
-145 0 18 1 1000
+145 0 18 0 1000
#IPD_DYN_SUBSCIBE_IP
#DK_CLI_IP_CB
-1045 0 18 1 1000
+1045 0 18 0 1000
#APP_POLICY DROP 0x410
-1040 0 8 1 1000
+1040 0 8 0 1000
##APP_POLICY LIMIT 0x420
1056 0 246 1 1000
1056 10 247 1 1000
1056 20 248 1 1000
1056 30 249 1 1000
1056 40 250 1 1000
+
diff --git a/bin/PANGU_VALVE_CONF_rawfile/SERVICE_ID_MAP.conf.last_wired b/bin/PANGU_VALVE_CONF_rawfile/SERVICE_ID_MAP.conf.last_wired
deleted file mode 100644
index f6e8c2b..0000000
--- a/bin/PANGU_VALVE_CONF_rawfile/SERVICE_ID_MAP.conf.last_wired
+++ /dev/null
@@ -1,64 +0,0 @@
-#service_id limit_rate grule_serv_type RULE_SCOPE limit_num
-#WHITE_LIST_IP white list
-1 0 4 1 1000
-#INLINE_IP_CB DROP
-3 0 10 1 1000
-#ANTIDDOS DROP
-5 0 17 1 1000
-#INLINE_IP_CB DROP IPSEC/GRE 0x19/0x1C
-25 0 10 1 1000
-28 0 10 1 1000
-
-#MM_DYN_VOIP_DROP DROP 0x112
-274 0 9 1 1000
-#IR_INTERCEPT_IP LOOP 0x340
-832 0 19 1 1000
-
-#PXY_INTERCEPT_IP MONITOR 0x200/0x202/0x205
-512 0 21 1 1000
-514 0 21 1 1000
-514 10 21 1 1000
-514 20 21 1 1000
-514 30 21 1 1000
-514 40 21 1 1000
-514 50 21 1 1000
-514 60 21 1 1000
-514 70 21 1 1000
-514 80 21 1 1000
-514 90 21 1 1000
-514 100 21 1 1000
-517 0 21 1 1000
-#PXY_INTERCEPT_IP/PXY_OBJ_SPOOFING_IP_POOL loop 0x206/0x282
-518 0 16 1 1000
-642 0 16 1 1000
-
-#PXY_INTERCEPT_DOMAIN MONITOR 0x201/0x203
-513 0 22 1 1000
-515 0 22 1 1000
-515 10 22 1 1000
-515 20 22 1 1000
-515 30 22 1 1000
-515 40 22 1 1000
-515 50 22 1 1000
-515 60 22 1 1000
-515 70 22 1 1000
-515 80 22 1 1000
-515 90 22 1 1000
-515 100 22 1 1000
-
-#APP_POLICY REJECT 0x21
-33 0 8 1 1000
-1028 0 8 1 1000
-#APP_POLICY MONITOR 0x91
-145 0 18 1 1000
-#IPD_DYN_SUBSCIBE_IP
-#DK_CLI_IP_CB
-1045 0 18 1 1000
-#APP_POLICY DROP 0x410
-1040 0 8 1 1000
-##APP_POLICY LIMIT 0x420
-1056 0 246 1 1000
-1056 10 247 1 1000
-1056 20 248 1 1000
-1056 30 249 1 1000
-1056 40 250 1 1000
diff --git a/bin/conf/maat_table_info_valid.conf b/bin/conf/maat_table_info_valid.conf
index 2b70698..ef6242e 100644
--- a/bin/conf/maat_table_info_valid.conf
+++ b/bin/conf/maat_table_info_valid.conf
@@ -19,4 +19,3 @@
10 IPD_DYN_SUBSCIBE_IP plugin 9
11 ANTI_DDOS_ATTACK_CB plugin 14
12 MM_DYN_VOIP_DROP plugin 14
-13 WHITE_LIST_IP plugin 14
diff --git a/bin/conf/pangu_valve.conf b/bin/conf/pangu_valve.conf
index 4afedfb..5539a97 100644
--- a/bin/conf/pangu_valve.conf
+++ b/bin/conf/pangu_valve.conf
@@ -1,10 +1,10 @@
[WIRED_INFO]
APP_NAME=PANGU_VALVE_CONF
SELF_IP=10.4.20.161
-REMOTE_DIR=ASTANA
+REMOTE_DIR=ASTANA/MASTER
KEY_CNT=24
RAWFILE_CNT=2
-WIRED_TIME=2018-12-29T14:59:43
+WIRED_TIME=2019-02-19T11:27:15
[WIRED_RAW_FILE]
MAAT_REDIS_INFO.conf=./PANGU_VALVE_CONF_rawfile/MAAT_REDIS_INFO.conf
@@ -19,20 +19,20 @@ FS_STAT_DST_PORT=8125
FS_STAT_MODE=1
FS_STAT_TRIG=1
RUN_LOG_DIR=./log
-RUN_LOG_LV=30
+RUN_LOG_LV=20
STATISTIC_INTERVAL=60
[SYSTEM]
C3_AUTH_DATA=AABBCCDDEEFF1122
-C3_CCC_LISTS=10.0.6.203:10005;
+C3_CCC_LISTS=10.4.34.1:10005;10.4.34.2:10005;10.4.34.3:10005;
CONSUL_LEADER_KEY=PanguValve/ValveLeader01
-CONSUL_LOCK_DELAY=5
+CONSUL_LOCK_DELAY=2
CONSUL_REQ_TIMEOUT=4
CONSUL_SESSION_TTL=10
-CONSUL_SWITCH=0
+CONSUL_SWITCH=1
#C3_CCC_LISTS=10.4.34.1:10006;
HASH_TABLE_SIZE=16777216
-LOCAL_NET_NAME=em2
+LOCAL_NET_NAME=eth2
#1-file; 2-redis
MAAT_CONFIG_RECV_WAY=2
MAAT_EFFECTIVE_RANGE={"tags":[{"tag":"location","value":"Astana"}]}
diff --git a/bin/conf/table_info/service_id_map.conf b/bin/conf/table_info/service_id_map.conf
index 86416b7..5b6246f 100644
--- a/bin/conf/table_info/service_id_map.conf
+++ b/bin/conf/table_info/service_id_map.conf
@@ -1,8 +1,10 @@
#service_id limit_rate grule_serv_type RULE_SCOPE limit_num
+#WHITE_LIST_IP white list
+1 0 4 1 1000
#INLINE_IP_CB DROP
3 0 10 1 1000
#ANTIDDOS DROP
-5 0 7 1 1000
+5 0 17 1 1000
#INLINE_IP_CB DROP IPSEC/GRE 0x19/0x1C
25 0 10 1 1000
28 0 10 1 1000
@@ -15,27 +17,48 @@
#PXY_INTERCEPT_IP MONITOR 0x200/0x202/0x205
512 0 21 1 1000
514 0 21 1 1000
+514 10 21 1 1000
+514 20 21 1 1000
+514 30 21 1 1000
+514 40 21 1 1000
+514 50 21 1 1000
+514 60 21 1 1000
+514 70 21 1 1000
+514 80 21 1 1000
+514 90 21 1 1000
+514 100 21 1 1000
517 0 21 1 1000
-#PXY_INTERCEPT_DOMAIN MONITOR 0x201
+#PXY_INTERCEPT_IP/PXY_OBJ_SPOOFING_IP_POOL loop 0x206/0x282
+518 0 16 1 1000
+642 0 16 1 1000
+
+#PXY_INTERCEPT_DOMAIN MONITOR 0x201/0x203
513 0 22 1 1000
515 0 22 1 1000
+515 10 22 1 1000
+515 20 22 1 1000
+515 30 22 1 1000
+515 40 22 1 1000
+515 50 22 1 1000
+515 60 22 1 1000
+515 70 22 1 1000
+515 80 22 1 1000
+515 90 22 1 1000
+515 100 22 1 1000
#APP_POLICY REJECT 0x21
-33 0 8 1 1000
+33 0 8 0 1000
+1028 0 8 0 1000
#APP_POLICY MONITOR 0x91
145 0 18 1 1000
#IPD_DYN_SUBSCIBE_IP
#DK_CLI_IP_CB
1045 0 18 1 1000
#APP_POLICY DROP 0x410
-1040 0 8 1 1000
+1040 0 8 0 1000
##APP_POLICY LIMIT 0x420
1056 0 246 1 1000
1056 10 247 1 1000
1056 20 248 1 1000
1056 30 249 1 1000
1056 40 250 1 1000
-
-#TEST
-11 0 8 1 10
-
diff --git a/bin/conf/table_info/table_info_one.conf b/bin/conf/table_info/table_info_one.conf
index d5eea1a..949920c 100644
--- a/bin/conf/table_info/table_info_one.conf
+++ b/bin/conf/table_info/table_info_one.conf
@@ -1,11 +1,10 @@
-#table_one:tabid dynamic type
-PXY_INTERCEPT_IP:2 REGION_TYPE_IP
-INLINE_IP_CB:3 REGION_TYPE_IP
-IR_INTERCEPT_IP:4 REGION_TYPE_IP
-#DK_CLI_IP_CB:5 REGION_TYPE_IP
-IPD_DYN_SUBSCIBE_IP:6 REGION_TYPE_POOL
-ANTI_DDOS_ATTACK_CB:7 REGION_TYPE_IP
-MM_DYN_VOIP_DROP:8 REGION_TYPE_IP
-WHITE_LIST_IP:9 REGION_TYPE_IP
-PXY_OBJ_SPOOFING_IP_POOL:10 REGION_TYPE_POOL
+#table_one:tabid type
+PXY_INTERCEPT_IP:1 REGION_TYPE_IP
+INLINE_IP_CB:2 REGION_TYPE_IP
+IR_INTERCEPT_IP:3 REGION_TYPE_IP
+IPD_DYN_SUBSCIBE_IP:4 REGION_TYPE_POOL
+ANTI_DDOS_ATTACK_CB:5 REGION_TYPE_IP
+MM_DYN_VOIP_DROP:6 REGION_TYPE_IP
+WHITE_LIST_IP:7 REGION_TYPE_IP
+PXY_OBJ_SPOOFING_IP_POOL:8 REGION_TYPE_POOL
diff --git a/bin/conf/table_info/table_info_tree.conf b/bin/conf/table_info/table_info_tree.conf
index 14f0ebe..da8ecf2 100644
--- a/bin/conf/table_info/table_info_tree.conf
+++ b/bin/conf/table_info/table_info_tree.conf
@@ -1,4 +1,4 @@
-#three two onesw dynamic type
+#three two onesw type
APP_POLICY:11 APP_DOMAIN:12 APP_DYN_SEV_IP_CB:13 REGION_TYPE_FIND
APP_POLICY:11 NULL APP_STATIC_SEV_IP:14 REGION_TYPE_IP
NULL PXY_INTERCEPT_DOMAIN:15 PXY_DYN_SEV_IP_CB:16 REGION_TYPE_FIND
diff --git a/src/pg_valve_c3.cpp b/src/pg_valve_c3.cpp
index afa0b72..9ffdda8 100644
--- a/src/pg_valve_c3.cpp
+++ b/src/pg_valve_c3.cpp
@@ -113,7 +113,7 @@ void construct_grule_reference_key(grule_reference_hkey_t *hkey_refer, grule_t &
}
}
-void write_grule_log(const char *table_name, grule_t &grule, int disp_status, int errcode, int refernce_cnt)
+void write_grule_log(const char *table_name, int redis_id, grule_t &grule, int disp_status, int errcode, int refernce_cnt)
{
char sip[128], dip[128], smask[128], dmask[128];
u_int16_t sport, dport;
@@ -126,8 +126,8 @@ void write_grule_log(const char *table_name, grule_t &grule, int disp_status, in
sport = grule.s4.sport;
dport = grule.s4.dport;
MESA_HANDLE_RUNTIME_LOGV2(g_pgvalve_info.log_runtime,RLOG_LV_INFO, MODULE_NAME,
- "pz_dispatch_%s: %s, refernce: %u, simple4, rule_id: 0x%lx-%lu, double_dir: %s, action: %s, rule_type: 0x%hx, serv_type: %d, dev_id: %d, tuple4: %s_%d>%s_%d(%s)",
- get_disp_status_str(disp_status), table_name, refernce_cnt, grule.rule_id, grule.rule_id, grule.rule_type.ddir_flag?"yes":"no", grule.action==GRULE_ACTION_ADD?"ADD":"DEL",
+ "pz_dispatch_%s: %s, refernce: %u, simple4, rule_id: %u-%lu, double_dir: %s, action: %s, rule_type: 0x%hx, serv_type: %d, dev_id: %d, tuple4: %s_%d>%s_%d(%s)",
+ get_disp_status_str(disp_status), table_name, refernce_cnt, redis_id, grule.rule_id, grule.rule_type.ddir_flag?"yes":"no", grule.action==GRULE_ACTION_ADD?"ADD":"DEL",
grule.rule_type, grule.srv_type, grule.dev_id, sip, ntohs(sport), dip, ntohs(dport), errcode?grule_error_str(errcode):"");
break;
@@ -137,8 +137,8 @@ void write_grule_log(const char *table_name, grule_t &grule, int disp_status, in
sport = grule.s6.sport;
dport = grule.s6.dport;
MESA_HANDLE_RUNTIME_LOGV2(g_pgvalve_info.log_runtime,RLOG_LV_INFO, MODULE_NAME,
- "pz_dispatch_%s: %s, refernce: %u, simple6, rule_id: 0x%lx-%lu, double_dir: %s, action: %s, rule_type: 0x%hx, serv_type: %d, dev_id: %d, tuple4: %s_%d>%s_%d(%s)",
- get_disp_status_str(disp_status), table_name, refernce_cnt, grule.rule_id, grule.rule_id, grule.rule_type.ddir_flag?"yes":"no", grule.action==GRULE_ACTION_ADD?"ADD":"DEL",
+ "pz_dispatch_%s: %s, refernce: %u, simple6, rule_id: %u-%lu, double_dir: %s, action: %s, rule_type: 0x%hx, serv_type: %d, dev_id: %d, tuple4: %s_%d>%s_%d(%s)",
+ get_disp_status_str(disp_status), table_name, refernce_cnt, redis_id, grule.rule_id, grule.rule_type.ddir_flag?"yes":"no", grule.action==GRULE_ACTION_ADD?"ADD":"DEL",
grule.rule_type, grule.srv_type, grule.dev_id, sip, ntohs(sport), dip, ntohs(dport), errcode?grule_error_str(errcode):"");
break;
@@ -150,8 +150,8 @@ void write_grule_log(const char *table_name, grule_t &grule, int disp_status, in
sport = grule.m4.sport;
dport = grule.m4.dport;
MESA_HANDLE_RUNTIME_LOGV2(g_pgvalve_info.log_runtime,RLOG_LV_INFO, MODULE_NAME,
- "pz_dispatch_%s: %s, refernce: %u, mask4, rule_id: 0x%lx-%lu, double_dir: %s, action: %s, rule_type: 0x%hx, serv_type: %d, dev_id: %d, tuple4: %s_%d>%s_%d, mask4: %s_%d>%s_%d(%s)",
- get_disp_status_str(disp_status), table_name, refernce_cnt, grule.rule_id, grule.rule_id, grule.rule_type.ddir_flag?"yes":"no", grule.action==GRULE_ACTION_ADD?"ADD":"DEL",
+ "pz_dispatch_%s: %s, refernce: %u, mask4, rule_id: %u-%lu, double_dir: %s, action: %s, rule_type: 0x%hx, serv_type: %d, dev_id: %d, tuple4: %s_%d>%s_%d, mask4: %s_%d>%s_%d(%s)",
+ get_disp_status_str(disp_status), table_name, refernce_cnt, redis_id, grule.rule_id, grule.rule_type.ddir_flag?"yes":"no", grule.action==GRULE_ACTION_ADD?"ADD":"DEL",
grule.rule_type, grule.srv_type, grule.dev_id, sip, ntohs(sport), dip, ntohs(dport), smask, ntohs(grule.m4.sport_mask), dmask, ntohs(grule.m4.dport_mask), errcode?grule_error_str(errcode):"");
break;
@@ -163,8 +163,8 @@ void write_grule_log(const char *table_name, grule_t &grule, int disp_status, in
sport = grule.m6.sport;
dport = grule.m6.dport;
MESA_HANDLE_RUNTIME_LOGV2(g_pgvalve_info.log_runtime,RLOG_LV_INFO, MODULE_NAME,
- "pz_dispatch_%s: %s, refernce: %u, mask6, rule_id: 0x%lx-%lu, double_dir: %s, action: %s, rule_type: 0x%hx, serv_type: %d, dev_id: %d, tuple4: %s_%d>%s_%d, mask4: %s_%d>%s_%d(%s)",
- get_disp_status_str(disp_status), table_name, refernce_cnt, grule.rule_id, grule.rule_id, grule.rule_type.ddir_flag?"yes":"no", grule.action==GRULE_ACTION_ADD?"ADD":"DEL",
+ "pz_dispatch_%s: %s, refernce: %u, mask6, rule_id: %u-%lu, double_dir: %s, action: %s, rule_type: 0x%hx, serv_type: %d, dev_id: %d, tuple4: %s_%d>%s_%d, mask4: %s_%d>%s_%d(%s)",
+ get_disp_status_str(disp_status), table_name, refernce_cnt, redis_id, grule.rule_id, grule.rule_type.ddir_flag?"yes":"no", grule.action==GRULE_ACTION_ADD?"ADD":"DEL",
grule.rule_type, grule.srv_type, grule.dev_id, sip, ntohs(sport), dip, ntohs(dport), smask, ntohs(grule.m6.sport_mask), dmask, ntohs(grule.m6.dport_mask), errcode?grule_error_str(errcode):"");
break;
}
@@ -347,7 +347,7 @@ int do_dispatch_config_to_c3(grule_t &grule, int *errcode)
return DISP_RETRY;
}
- while((send_num = grule_send(g_grule_handle.grule_handle, &grule, 1, 0))==0 && retry_num<3)
+ while((send_num = grule_send(g_grule_handle.grule_handle, &grule, 1, 0))==0 && retry_num<10)
{
retry_num++;
usleep(10000);
@@ -366,7 +366,7 @@ int do_dispatch_config_to_c3(grule_t &grule, int *errcode)
return ret;
}
-int dispatch_dynamic_config_to_c3(const char *table_name, int table_id, grule_t &grule)
+int dispatch_dynamic_config_to_c3(const char *table_name, int table_id, int redis_id, grule_t &grule)
{
int ret, errcode=0;
@@ -382,11 +382,11 @@ int dispatch_dynamic_config_to_c3(const char *table_name, int table_id, grule_t
pz_trans_statistic_count(table_id, 1, STAT_FIELD_DISP_SUCC);
}
- write_grule_log(table_name, grule, ret, errcode, 0);
+ write_grule_log(table_name, redis_id, grule, ret, errcode, 0);
return ret;
}
-int dispatch_durable_config_to_c3(const char *table_name, int table_id, grule_t &grule)
+int dispatch_durable_config_to_c3(const char *table_name, int table_id, int redis_id, grule_t &grule)
{
int ret, disp_ret, errcode=0, reference_count=0;
grule_reference_hkey_t hkey_refer;
@@ -442,7 +442,7 @@ int dispatch_durable_config_to_c3(const char *table_name, int table_id, grule_t
}
pthread_mutex_unlock(&g_grule_handle.mutex_lock);
- write_grule_log(table_name, grule, disp_ret, errcode, reference_count);
+ write_grule_log(table_name, redis_id, grule, disp_ret, errcode, reference_count);
return ret;
}
@@ -480,18 +480,18 @@ int dispatch_config_to_c3(const char *table_name, int table_id, one_config_hnode
//���⶯̬��������DEL�����
if(g_pgvalve_info.service_limit_sw && config_node.disp_status==0 && (config_node.grule.action==GRULE_ACTION_DEL || service_grule_reach_limit(config_node.grule.srv_type)))
{
- write_grule_log(table_name, config_node.grule, DISP_LIMIT, 0, 0);
+ write_grule_log(table_name, config_node.redisID, config_node.grule, DISP_LIMIT, 0, 0);
pz_trans_statistic_count(table_id, 1, STAT_FIELD_DISP_LIMIT);
return DISP_LIMIT;
}
if(!config_node.grule.durable)
{
- ret = dispatch_dynamic_config_to_c3(table_name, table_id, config_node.grule);
+ ret = dispatch_dynamic_config_to_c3(table_name, table_id, config_node.redisID, config_node.grule);
}
else
{
- ret = dispatch_durable_config_to_c3(table_name, table_id, config_node.grule);
+ ret = dispatch_durable_config_to_c3(table_name, table_id, config_node.redisID, config_node.grule);
type = update_grule_status(config_node.disp_status, ret, config_node.grule.action);
if(type != STAT_SERVICE_NONE)
{
diff --git a/src/pg_valve_c3.h b/src/pg_valve_c3.h
index 71894c9..3797089 100644
--- a/src/pg_valve_c3.h
+++ b/src/pg_valve_c3.h
@@ -23,6 +23,7 @@ typedef struct __one_config_hnode
{
grule_t grule;
int32_t disp_status; //���������·���״̬���·��ɹ�����1�����ɹ���δ�·�ʱ��0��STATUS_BIT_ORIGINAL/STATUS_BIT_REVERSE
+ int32_t redisID;
}one_config_hnode_t;
int dispatch_config_to_c3(const char *table_name, int table_id, one_config_hnode_t &config_node);
diff --git a/src/pg_valve_deal.cpp b/src/pg_valve_deal.cpp
index 17e0394..42d0e0c 100644
--- a/src/pg_valve_deal.cpp
+++ b/src/pg_valve_deal.cpp
@@ -19,6 +19,13 @@
#include "pg_valve_maat.h"
#include "pg_valve_stat.h"
+struct thread_sw_drive_pdata
+{
+ onesw_config_hnode_t *hnode;
+ configure_table_t *table;
+ SW_STAT_t did_switch; //���ر�������
+};
+
extern pgvavle_global_info_t g_pgvalve_info;
SSCANF_ERROR_NO_t _wrap_grule_check(grule_t *grule);
@@ -76,7 +83,7 @@ SSCANF_ERROR_NO_t fill_in_dsetid_did_limitid(const char *user_region, int64_t *d
return SSCANF_OK;
}
-SSCANF_ERROR_NO_t fill_in_inline_device_id(const char *user_region, u_int8_t *device_id)
+SSCANF_ERROR_NO_t fill_in_inline_device_id(const char *user_region, u_int32_t *rule_scope, u_int8_t *device_id)
{
const char *pos;
int dev_id;
@@ -92,6 +99,13 @@ SSCANF_ERROR_NO_t fill_in_inline_device_id(const char *user_region, u_int8_t *de
*device_id = dev_id;
}
}
+ if(rule_scope!=NULL && NULL!=(pos = strcasestr(user_region, "rule_scope=")))
+ {
+ if(sscanf(pos+strlen("rule_scope="), "%u", rule_scope) != 1)
+ {
+ return SSCANF_ERROR_DID;
+ }
+ }
return SSCANF_OK;
}
@@ -180,6 +194,7 @@ void one_maat_start_callback(int32_t update_type, void* u_para)
SSCANF_ERROR_NO_t one_fill_in_region_grule_ip(const char* table_line, one_config_hnode_t *iprule, int64_t *did, int64_t *dsetid)
{
int32_t ret, addr_type, protocol, direction, is_valid, action, service, limit_rate=0;
+ u_int32_t rule_scope=1;
int64_t region_id, group_id;
char src_ip[128], src_ip_mask[128], src_port[20], src_port_mask[20];
char dst_ip[128], dst_ip_mask[128], dst_port[20], dst_port_mask[20];
@@ -199,13 +214,13 @@ SSCANF_ERROR_NO_t one_fill_in_region_grule_ip(const char* table_line, one_config
}
iprule->disp_status = 0;
iprule->grule.rule_id = region_id;
- if(fill_in_dsetid_did_limitid(user_region, did, dsetid, &limit_rate) || fill_in_inline_device_id(user_region, &iprule->grule.dev_id))
+ if(fill_in_dsetid_did_limitid(user_region, did, dsetid, &limit_rate) || fill_in_inline_device_id(user_region, &rule_scope, &iprule->grule.dev_id))
return SSCANF_ERROR_LIMIT;
if(service_to_c3_servtype(service, (limit_rate/10)*10, &gmap_info))
return SSCANF_ERROR_SERVICE;
iprule->grule.srv_type = gmap_info.serv_type;
iprule->grule.durable = gmap_info.is_durable;
- iprule->grule.rule_scope = 1;
+ iprule->grule.rule_scope = rule_scope;
iprule->grule.action = is_valid?GRULE_ACTION_ADD:GRULE_ACTION_DEL;
iprule->grule.rule_type.grule_type = 0;
iprule->grule.rule_type.ddir_flag = (direction==G_DIR_DOUBLE)?1:0;
@@ -295,6 +310,7 @@ SSCANF_ERROR_NO_t one_fill_in_region_grule_ip(const char* table_line, one_config
SSCANF_ERROR_NO_t one_fill_in_region_grule_pool(const char* table_line, one_config_hnode_t *iprule)
{
int32_t ret, addr_type, direction, location, is_valid, service, protocol, limit_rate=0;
+ u_int32_t rule_scope=1;
int64_t region_id;
char ipaddr[128], port[8], user_region[4096];
grule_map_info_t gmap_info;
@@ -307,13 +323,13 @@ SSCANF_ERROR_NO_t one_fill_in_region_grule_pool(const char* table_line, one_conf
}
iprule->disp_status = 0;
iprule->grule.rule_id = region_id;
- if(fill_in_dsetid_did_limitid(user_region, NULL, NULL, &limit_rate))
+ if(fill_in_dsetid_did_limitid(user_region, NULL, NULL, &limit_rate) || fill_in_inline_device_id(user_region, &rule_scope, &iprule->grule.dev_id))
return SSCANF_ERROR_LIMIT;
if(service_to_c3_servtype(service, (limit_rate/10)*10, &gmap_info))
return SSCANF_ERROR_SERVICE;
iprule->grule.srv_type = gmap_info.serv_type;
iprule->grule.durable = gmap_info.is_durable;
- iprule->grule.rule_scope = 1;
+ iprule->grule.rule_scope = rule_scope;
iprule->grule.action = is_valid?GRULE_ACTION_ADD:GRULE_ACTION_DEL;
iprule->grule.rule_type.grule_type = 0;
iprule->grule.rule_type.ddir_flag = (direction==G_DIR_DOUBLE)?1:0;
@@ -410,9 +426,10 @@ void one_maat_update_callback(int32_t table_id, const char* table_line, void* u_
priv.table = maat_data->table;
priv.dsetid = 0;
priv.did = 0;
+ priv.iprule.redisID= maat_data->maat_service->get_instance_id();
hash_key.table_id = maat_data->table->table_id_key;
- hash_key.dsetid = 0;
+ hash_key.dsetid = priv.iprule.redisID;
hash_key.did = priv.iprule.grule.rule_id;
pthread_rwlock_rdlock(&g_pgvalve_info.rwlock);
@@ -459,59 +476,54 @@ configure_table_t *new_table_instance_one(const char *table_name, int32_t table_
//disp_activeͳһ������Ч��ʧЧ������(�������������)
//�������������ã���֤iprule.disp_statusΪ0
-static int32_t onesw_update_config_set_full(const char *table_name, int table_id, onesw_config_hnode_t *hnode, one_config_hnode_t &iprule, bool disp_active)
+static int32_t onesw_update_config_set_full(const char *table_name, int table_id, onesw_config_hnode_t *hnode, one_config_hnode_t &iprule)
{
int ret = 0;
+ OneswRedisInstanceKey regionKey(iprule.redisID, iprule.grule.rule_id);
if(iprule.grule.action == GRULE_ACTION_ADD)
{
- if(hnode->full.find(iprule.grule.rule_id) != hnode->full.end())
+ if(hnode->region_ip.find(regionKey) != hnode->region_ip.end())
{
- hnode->full[iprule.grule.rule_id].grule = iprule.grule; //�¼ӵĸ���(��ԭ��������DEL�����ܸ����·�״̬)
+ hnode->region_ip[regionKey].grule = iprule.grule; //�¼ӵĸ���(��ԭ��������DEL�����ܸ����·�״̬)
}
else
{
- hnode->full[iprule.grule.rule_id] = iprule;
+ hnode->region_ip[regionKey] = iprule;
}
- if(disp_active)
+ ret = dispatch_config_to_c3(table_name, table_id, hnode->region_ip[regionKey]);
+ if(ret==DISP_LIMIT && hnode->region_ip[regionKey].disp_status==0) //�������ޣ���֮ǰδ�·�
{
- ret = dispatch_config_to_c3(table_name, table_id, hnode->full[iprule.grule.rule_id]);
- if(ret==DISP_LIMIT && hnode->full[iprule.grule.rule_id].disp_status==0) //�������ޣ���֮ǰδ�·�
- {
- hnode->full.erase(iprule.grule.rule_id);
- }
+ hnode->region_ip.erase(regionKey);
}
}
else if(!iprule.grule.durable) //��ʱ�����·�ɾ����ֱ���Ƴ�
{
- hnode->full.erase(iprule.grule.rule_id);
+ hnode->region_ip.erase(regionKey);
}
else
{
- if(disp_active)
+ if(hnode->region_ip.find(regionKey) != hnode->region_ip.end())
{
- if(hnode->full.find(iprule.grule.rule_id) != hnode->full.end())
- {
- iprule.disp_status = hnode->full[iprule.grule.rule_id].disp_status;
- ret = dispatch_config_to_c3(table_name, table_id, iprule);
- hnode->full[iprule.grule.rule_id].disp_status = iprule.disp_status;
- }
- else
- {
- ret = dispatch_config_to_c3(table_name, table_id, iprule);
- }
+ iprule.disp_status = hnode->region_ip[regionKey].disp_status;
+ ret = dispatch_config_to_c3(table_name, table_id, iprule);
+ hnode->region_ip[regionKey].disp_status = iprule.disp_status;
+ }
+ else
+ {
+ ret = dispatch_config_to_c3(table_name, table_id, iprule);
}
//δɾ���ɹ�������������Ȼ�´�ʧЧ������ʱ���޷��������޼���
- if(hnode->full.find(iprule.grule.rule_id) != hnode->full.end())
+ if(hnode->region_ip.find(regionKey) != hnode->region_ip.end())
{
- if(hnode->full[iprule.grule.rule_id].disp_status == 0)
+ if(hnode->region_ip[regionKey].disp_status == 0)
{
- hnode->full.erase(iprule.grule.rule_id);
+ hnode->region_ip.erase(regionKey);
}
else
{ //δɾ���ɹ��ģ����Ϊɾ��״̬
- hnode->full[iprule.grule.rule_id].grule.action = GRULE_ACTION_DEL;
+ hnode->region_ip[regionKey].grule.action = GRULE_ACTION_DEL;
}
}
}
@@ -526,7 +538,6 @@ int64_t onesw_deal_config_incr_cb(void *data, const uchar *key, uint size, void
htable_privdata_t *priv = (htable_privdata_t *)arg;
configure_table_t *table = priv->table;
int32_t ret;
- swtable_state_t sw_state;
if(hnode == NULL)
{
@@ -542,78 +553,82 @@ int64_t onesw_deal_config_incr_cb(void *data, const uchar *key, uint size, void
hnode->did = priv->did;
}
- if(!table->parent->get_hnode_sw_action_recur(table->parent, priv->dsetid, priv->did, &sw_state))
- {
- onesw_update_config_set_full(priv->table->table_name, priv->table->table_id_key, hnode, priv->iprule, false);
- MESA_HANDLE_RUNTIME_LOGV2(g_pgvalve_info.log_runtime,RLOG_LV_DEBUG, MODULE_NAME, "pz_dispatch: onesw table %s dsetid=%lu;did=%lu;cfgid=%lu dont dispatch because parent does not exist.",
- table->table_name, priv->dsetid, priv->did, priv->iprule.grule.rule_id);
- return 0;
- }
-
- if(sw_state.switcher == SW_STAT_DEACTIVATE)
- {
- onesw_update_config_set_full(priv->table->table_name, priv->table->table_id_key, hnode, priv->iprule, false);
- MESA_HANDLE_RUNTIME_LOGV2(g_pgvalve_info.log_runtime,RLOG_LV_DEBUG, MODULE_NAME, "pz_dispatch: onesw table %s dsetid=%lu;did=%lu;cfgid=%lu dont dispatch because parent is closed.",
- table->table_name, priv->dsetid, priv->did, priv->iprule.grule.rule_id);
- return 0;
- }
-
- onesw_update_config_set_full(priv->table->table_name, priv->table->table_id_key, hnode, priv->iprule, true);
+ onesw_update_config_set_full(priv->table->table_name, priv->table->table_id_key, hnode, priv->iprule);
return 1;
}
-//������������DID���Ͽ���
-static int64_t onesw_drive_full_cfg_by_two_cb(void *data, const uchar *key, uint size, void *arg)
+static int64_t onesw_drive_by_two_nothing_cb(void *data, const uchar *key, uint size, void *arg)
{
- onesw_config_hnode_t *hnode = (onesw_config_hnode_t *)data;
- htable_privdata_t *priv = (htable_privdata_t *)arg;
- map<int64_t, one_config_hnode_t>::iterator iter;
+ return 0;
+}
+
+static void *thread_onesw_drive_cfg_by_two(void *arg)
+{
+ struct thread_sw_drive_pdata *thread_priv=(struct thread_sw_drive_pdata *)arg;
+ map<OneswRedisInstanceKey, one_config_hnode_t>::iterator iter;
int ret=0;
- if(hnode != NULL)
+ prctl(PR_SET_NAME, "pgvalve_drivesw");
+
+ for(iter=thread_priv->hnode->region_ip.begin(); iter!=thread_priv->hnode->region_ip.end() && ret!=DISP_RETRY; )
{
- for(iter=hnode->full.begin(); iter!=hnode->full.end() && ret!=DISP_RETRY; )
- {
- one_config_hnode_t iprule = iter->second; //�洢�Ŀ�����ʧЧ��(ɾ��δ�ɹ�)
+ one_config_hnode_t iprule = iter->second; //�洢�Ŀ�����ʧЧ��(ɾ��δ�ɹ�)
- if(priv->did_switch == SW_STAT_DEACTIVATE)
- {
- iprule.grule.action = GRULE_ACTION_DEL;
- }
+ if(thread_priv->did_switch == SW_STAT_DEACTIVATE)
+ {
+ iprule.grule.action = GRULE_ACTION_DEL;
+ }
- ret = dispatch_config_to_c3(priv->table->table_name, priv->table->table_id_key, iprule);
- iter->second.disp_status = iprule.disp_status;
- if(iprule.disp_status == 0 && (iter->second.grule.action == GRULE_ACTION_DEL || ret==DISP_LIMIT))
- {
- hnode->full.erase(iter++); //map����ɾ��������
- }
- else
- {
- iter++;
- }
+ ret = dispatch_config_to_c3(thread_priv->table->table_name, thread_priv->table->table_id_key, iprule);
+ iter->second.disp_status = iprule.disp_status;
+ if(iprule.disp_status == 0 && (iter->second.grule.action == GRULE_ACTION_DEL || ret==DISP_LIMIT))
+ {
+ thread_priv->hnode->region_ip.erase(iter++); //map����ɾ��������
+ }
+ else
+ {
+ iter++;
}
}
-
- return ret;
+
+ if(thread_priv->did_switch == SW_STAT_DEACTIVATE)
+ {
+ delete thread_priv->hnode;
+ }
+ free(thread_priv);
+ return NULL;
}
int32_t onesw_drive_full_cfg_by_two(configure_table_t *table, int64_t dsetid, int64_t did, swtable_state_t *sw_state)
{
table_hash_key_t hash_key;
- htable_privdata_t priv;
+ struct thread_sw_drive_pdata *thread_priv;
+ onesw_config_hnode_t *hnode;
int64_t cb_ret;
+ pthread_t thread_desc;
+ pthread_attr_t attr;
hash_key.table_id = table->table_id_key;
hash_key.dsetid = dsetid;
hash_key.did = did;
-
- priv.table = table;
- priv.dsetid = dsetid;
- priv.did = did;
- priv.did_switch = sw_state->switcher;
- MESA_htable_search_cb(table->hash_handle, (unsigned char *)&hash_key, sizeof(table_hash_key_t), onesw_drive_full_cfg_by_two_cb, (void*)&priv, &cb_ret);
- return cb_ret;
+ hnode = (onesw_config_hnode_t *)MESA_htable_search_cb(table->hash_handle, (unsigned char *)&hash_key, sizeof(table_hash_key_t), onesw_drive_by_two_nothing_cb, NULL, &cb_ret);
+ if(hnode == NULL)
+ {
+ return 0;
+ }
+ if(sw_state->switcher == SW_STAT_DEACTIVATE) //��Ԫ���Ƴ������ͷ�
+ {
+ MESA_htable_del(table->hash_handle, (unsigned char *)&hash_key, sizeof(table_hash_key_t), htable_destroy_node_nothing);
+ }
+ thread_priv = (struct thread_sw_drive_pdata*)malloc(sizeof(struct thread_sw_drive_pdata));
+ thread_priv->hnode = hnode;
+ thread_priv->table = table;
+ thread_priv->did_switch = sw_state->switcher;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
+ pthread_create(&thread_desc, &attr, thread_onesw_drive_cfg_by_two, thread_priv);
+ return 0;
}
void onesw_maat_start_callback(int32_t update_type,void* u_para)
@@ -624,6 +639,7 @@ void onesw_maat_start_callback(int32_t update_type,void* u_para)
SSCANF_ERROR_NO_t onesw_fill_in_region_grule_find(const char* table_line, one_config_hnode_t *iprule, int64_t *did,int64_t *dsetid)
{
int32_t ret, addr_type, protocol, direction, location, is_valid, service, limit_rate=0;
+ u_int32_t rule_scope=1;
int64_t region_id;
char ipaddr[128], port[8], user_region[4096];
grule_map_info_t gmap_info;
@@ -636,13 +652,13 @@ SSCANF_ERROR_NO_t onesw_fill_in_region_grule_find(const char* table_line, one_co
}
iprule->disp_status = 0;
iprule->grule.rule_id = region_id;
- if(fill_in_dsetid_did_limitid(user_region, did, dsetid, &limit_rate) || fill_in_inline_device_id(user_region, &iprule->grule.dev_id))
+ if(fill_in_dsetid_did_limitid(user_region, did, dsetid, &limit_rate) || fill_in_inline_device_id(user_region, &rule_scope, &iprule->grule.dev_id))
return SSCANF_ERROR_LIMIT;
if(service_to_c3_servtype(service, (limit_rate/10)*10, &gmap_info))
return SSCANF_ERROR_SERVICE;
iprule->grule.srv_type = gmap_info.serv_type;
iprule->grule.durable = gmap_info.is_durable;
- iprule->grule.rule_scope = 1;
+ iprule->grule.rule_scope = rule_scope;
iprule->grule.action = is_valid?GRULE_ACTION_ADD:GRULE_ACTION_DEL;
iprule->grule.rule_type.grule_type = 0;
iprule->grule.rule_type.ddir_flag = (direction==G_DIR_DOUBLE)?1:0;
@@ -714,6 +730,7 @@ void onesw_maat_update_callback(int32_t table_id,const char* table_line,void* u_
htable_privdata_t priv;
int64_t cb_ret, dsetid=0, did=0;
SSCANF_ERROR_NO_t code, check;
+ swtable_state_t sw_state;
memset(&priv, 0 ,sizeof(htable_privdata_t));
switch(maat_data->table->region_type)
@@ -735,9 +752,17 @@ void onesw_maat_update_callback(int32_t table_id,const char* table_line,void* u_
}
pz_trans_statistic_count(maat_data->table->table_id_key, 1, (priv.iprule.grule.action==GRULE_ACTION_ADD)?STAT_FIELD_RVALID:STAT_FIELD_RINVALID);
+ if(!maat_data->table->parent->get_hnode_sw_action_recur(maat_data->table->parent, dsetid, did, &sw_state) || (sw_state.switcher==SW_STAT_DEACTIVATE))
+ {
+ MESA_HANDLE_RUNTIME_LOGV2(g_pgvalve_info.log_runtime, RLOG_LV_DEBUG, MODULE_NAME, "pz_dispatch: onesw table %s dsetid=%lu;did=%lu;cfgid=%lu dont dispatch because parent is closed or does not exist.",
+ maat_data->table->table_name, dsetid, did, priv.iprule.grule.rule_id);
+ return ;
+ }
+
priv.table = maat_data->table;
priv.dsetid = dsetid;
priv.did = did;
+ priv.iprule.redisID= maat_data->maat_service->get_instance_id();
hash_key.table_id = maat_data->table->table_id_key;
hash_key.dsetid = dsetid;
@@ -1208,7 +1233,7 @@ static int onesw_drive_full_iterate_cb(const uchar * key, uint size, void * data
table_hash_key_t *table_key = (table_hash_key_t *)key;
onesw_config_hnode_t *hnode;
configure_table_t *table = (configure_table_t *)user;
- map<int64_t, one_config_hnode_t>::iterator iter;
+ map<OneswRedisInstanceKey, one_config_hnode_t>::iterator iter;
swtable_state_t sw_state;
if(table_key->table_id!=table->table_id_key)
@@ -1222,11 +1247,11 @@ static int onesw_drive_full_iterate_cb(const uchar * key, uint size, void * data
return ITERATE_CB_RET_CONTINUE_FLAG;
}
- for(iter=hnode->full.begin(); iter!=hnode->full.end(); )
+ for(iter=hnode->region_ip.begin(); iter!=hnode->region_ip.end(); )
{
if(iter->second.grule.action==GRULE_ACTION_DEL)
{
- hnode->full.erase(iter++);
+ hnode->region_ip.erase(iter++);
}
else
{
diff --git a/src/pg_valve_deal.h b/src/pg_valve_deal.h
index 6ddafbe..a1a0d57 100644
--- a/src/pg_valve_deal.h
+++ b/src/pg_valve_deal.h
@@ -55,6 +55,16 @@ typedef struct __swtable_state
SW_STAT_t switcher;
}swtable_state_t;
+struct OneswRedisInstanceKey
+{
+ int64_t RedisID;
+ int64_t regionID;
+ OneswRedisInstanceKey(int64_t instanceID, int64_t ruleID){RedisID=instanceID; regionID=ruleID;}
+ bool operator < (const struct OneswRedisInstanceKey &other) const
+ {
+ return this->RedisID < other.RedisID || (this->RedisID==other.RedisID && this->regionID<other.regionID);
+ }
+};
//Switch-gated configuration: whether a single entry takes effect is controlled by its parent switch (restored from GBK mojibake — confirm)
//Hash-table KEY: ServiceID+TableID+DSET+DID — used to locate the switch rule ID governing this DID
//Handling when a full or incremental update event arrives:
@@ -63,7 +73,7 @@ typedef struct __onesw_config_hnode
{
int64_t dsetid; //0 when the entry is not governed by a switch (comment restored from GBK mojibake — confirm)
int64_t did;
- map<int64_t, one_config_hnode_t> full; //KEY: cfg_id; all currently-valid full-config entries under this DID (comment restored from GBK mojibake — confirm)
+ map<OneswRedisInstanceKey, one_config_hnode_t> region_ip; //KEY: (redis instance ID, cfg_id); all currently-valid full-config entries under this DID (comment restored from GBK mojibake — confirm)
}onesw_config_hnode_t;
//��������KEY: ServiceID+TableID+DSET+DID���任TableID
diff --git a/src/pg_valve_maat.cpp b/src/pg_valve_maat.cpp
index 95b8223..255c390 100644
--- a/src/pg_valve_maat.cpp
+++ b/src/pg_valve_maat.cpp
@@ -50,60 +50,6 @@ string MaatService::get_instance_addr(void)
return string(redis_addr);
}
-long long MaatService::read_last_version(void)
-{
- if(access(table_relate->version_file, R_OK))
- {
- MESA_HANDLE_RUNTIME_LOGV2(g_pgvalve_info.log_runtime,RLOG_LV_FATAL, MODULE_NAME, "read_last_version %s failed: %s, using default version 0.", table_relate->version_file, strerror(errno));
- return 0;
- }
- FILE *fp = fopen(table_relate->version_file, "r");
- if(fp==NULL)
- {
- MESA_HANDLE_RUNTIME_LOGV2(g_pgvalve_info.log_runtime,RLOG_LV_FATAL, MODULE_NAME, "read_last_version open %s failed: %s.", table_relate->version_file, strerror(errno));
- assert(0);
- return -1;
- }
- if(fscanf(fp, "%llu\n", &version) != 1)
- {
- MESA_HANDLE_RUNTIME_LOGV2(g_pgvalve_info.log_runtime,RLOG_LV_FATAL, MODULE_NAME, "read_last_version fscanf %s failed: %s.", table_relate->version_file, strerror(errno));
- fclose(fp);
- assert(0);
- return -1;
- }
- else
- {
- MESA_HANDLE_RUNTIME_LOGV2(g_pgvalve_info.log_runtime,RLOG_LV_FATAL, MODULE_NAME, "read_last_version %s success, version: %u.", table_relate->version_file, version);
- }
- fclose(fp);
- return version;
-}
-
-int MaatService::store_latest_version(long long ver)
-{
- char verbuf[48];
-
- version = ver;
- FILE *fp = fopen(table_relate->version_file, "w");
- if(fp==NULL)
- {
- MESA_HANDLE_RUNTIME_LOGV2(g_pgvalve_info.log_runtime,RLOG_LV_FATAL, MODULE_NAME, "Open version file %s failed: %s.", table_relate->version_file, strerror(errno));
- return -1;
- }
-
- sprintf(verbuf, "%llu\n", version);
- if(fwrite(verbuf, 1, strlen(verbuf), fp) != strlen(verbuf))
- {
- MESA_HANDLE_RUNTIME_LOGV2(g_pgvalve_info.log_runtime,RLOG_LV_FATAL, MODULE_NAME, "fwrite version file %s failed: %s, version: %u.", table_relate->version_file, strerror(errno), version);
- fclose(fp);
- return -1;
- }
- fclose(fp);
-
- MESA_HANDLE_RUNTIME_LOGV2(g_pgvalve_info.log_runtime,RLOG_LV_FATAL, MODULE_NAME, "store_latest_version %s success, version: %u.", table_relate->version_file, version);
- return 0;
-}
-
int MaatService::maat_feather_start(void)
{
char tmp_buffer[1024];
diff --git a/src/pg_valve_maat.h b/src/pg_valve_maat.h
index 8ca8d3b..34e17e2 100644
--- a/src/pg_valve_maat.h
+++ b/src/pg_valve_maat.h
@@ -33,9 +33,8 @@ public:
Maat_feather_t get_maat_feather(void) {return feather;}
const char *get_instance_name(void) {return table_relate->instance_name;}
+ int get_instance_id(void) {return table_relate->instanceID;}
string get_instance_addr(void);
- int store_latest_version(long long ver);
- long long read_last_version(void);
int maat_feather_start(void);
};
diff --git a/src/pg_valve_main.cpp b/src/pg_valve_main.cpp
index 44142d3..ca49b3a 100644
--- a/src/pg_valve_main.cpp
+++ b/src/pg_valve_main.cpp
@@ -202,6 +202,9 @@ static int init_asmis_log_handle(const char *appname)
return ret;
}
+void htable_destroy_node_nothing(void *data)
+{
+}
void htable_destroy_node(void *data)
{
free(data);
@@ -522,9 +525,8 @@ int32_t fill_in_single_maat_instance(char *line_content, struct maat_table_relat
int ret, j;
char *ptmp, *save_ptr=NULL;
- ret = sscanf(line_content, "%[^ \t]%*[ \t]%s%*[ \t]%s%*[ \t]%s%*[ \t]%u%*[ \t]%u%*[ \t]%s",
- table->instance_name, maat_root, table->version_file,
- table->redisip, &table->redisport, &table->redis_index, table_list);
+ ret = sscanf(line_content, "%u%*[ \t]%s%*[ \t]%s%*[ \t]%s%*[ \t]%u%*[ \t]%u%*[ \t]%s",
+ &table->instanceID, table->instance_name, maat_root, table->redisip, &table->redisport, &table->redis_index, table_list);
if(ret != 7)
{
assert(0);
diff --git a/src/pg_valve_main.h b/src/pg_valve_main.h
index e601f49..df0a9bf 100644
--- a/src/pg_valve_main.h
+++ b/src/pg_valve_main.h
@@ -47,12 +47,12 @@ struct maat_table_relation{
char instance_name[64];
char full_dir[256];
char incr_dir[256];
- char version_file[256];
char redisip[128];
int redisport;
int redis_index;
- char **tables_name;
+ int instanceID;
int table_num;
+ char **tables_name;
redisContext *ctx;
};
@@ -134,6 +134,7 @@ typedef struct __iterator_table_priv
statistic_exist_t *stat_out;
}iterator_table_priv_t;
+void htable_destroy_node_nothing(void *data);
void htable_destroy_node(void *data);
MESA_htable_handle init_and_create_htable(unsigned int slot_size, int expire_time, void (* data_free)(void *data));
diff --git a/src/pg_valve_stat.cpp b/src/pg_valve_stat.cpp
index 8313c74..d4e1821 100644
--- a/src/pg_valve_stat.cpp
+++ b/src/pg_valve_stat.cpp
@@ -185,18 +185,18 @@ static int table_onesw_exist_iterate(const uchar * key, uint size, void * data,
{
onesw_config_hnode_t *hnode = (onesw_config_hnode_t *)data;
- iterator_priv->stat_out->num[STAT_FIELD_EXITS] += hnode->full.size();
+ iterator_priv->stat_out->num[STAT_FIELD_EXITS] += hnode->region_ip.size();
if(!iterator_priv->table->parent->get_hnode_sw_action_recur(iterator_priv->table->parent, table_key->dsetid, table_key->did, &sw_state))
{
- iterator_priv->stat_out->num[STAT_FIELD_PENDING] += hnode->full.size();
+ iterator_priv->stat_out->num[STAT_FIELD_PENDING] += hnode->region_ip.size();
}
else if(sw_state.switcher==SW_STAT_DEACTIVATE)
{
- iterator_priv->stat_out->num[STAT_FIELD_EINACTIVE] += hnode->full.size();
+ iterator_priv->stat_out->num[STAT_FIELD_EINACTIVE] += hnode->region_ip.size();
}
else
{
- iterator_priv->stat_out->num[STAT_FIELD_EACTIVE] += hnode->full.size();
+ iterator_priv->stat_out->num[STAT_FIELD_EACTIVE] += hnode->region_ip.size();
}
}
return ITERATE_CB_RET_CONTINUE_FLAG;