diff options
Diffstat (limited to 'MSH-PIC/flink/topology/completion/config')
18 files changed, 1404 insertions, 0 deletions
diff --git a/MSH-PIC/flink/topology/completion/config/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED new file mode 100644 index 0000000..ad22a08 --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据file.system.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放秘钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema data id名称 +nacos.schema.data.id=active_defence_event.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=ACTIVE-DEFENCE-EVENT + +#补全数据 输出 topic +sink.kafka.topic=ACTIVE-DEFENCE-EVENT-COMPLETED + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; 
+group.id=active-defence-log-20220408-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 + +#数据中心,取值范围(0-31) +data.center.id.num=1 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=1 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/ETL-BGP-RECORD-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-BGP-RECORD-COMPLETED new file mode 100644 index 0000000..330ab1a --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/ETL-BGP-RECORD-COMPLETED @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据file.system.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-BGP-RECORD-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放秘钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema 
data id名称 +nacos.schema.data.id=bgp_record.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=BGP-RECORD + +#补全数据 输出 topic +sink.kafka.topic=BGP-RECORD-COMPLETED + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; +group.id=bgp-record-20220801-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 + +#数据中心,取值范围(0-31) +data.center.id.num=2 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=1 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/ETL-GTPC-RECORD-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-GTPC-RECORD-COMPLETED new file mode 100644 index 0000000..b12330c --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/ETL-GTPC-RECORD-COMPLETED @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据file.system.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-GTPC-RECORD-COMPLETED/ + 
+#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放秘钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema data id名称 +nacos.schema.data.id=gtpc_record.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=GTPC-RECORD + +#补全数据 输出 topic +sink.kafka.topic=GTPC-RECORD-COMPLETED + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; +group.id=gtpc-record-log-20220408-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 + +#数据中心,取值范围(0-31) +data.center.id.num=3 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=1 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/ETL-INTERIM-SESSION-RECORD-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-INTERIM-SESSION-RECORD-COMPLETED new file mode 100644 index 0000000..7d30bd9 --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/ETL-INTERIM-SESSION-RECORD-COMPLETED @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 
+sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据file.system.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-INTERIM-SESSION-RECORD-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放秘钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema data id名称 +nacos.schema.data.id=interim_session_record.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=INTERIM-SESSION-RECORD + +#补全数据 输出 topic +sink.kafka.topic=INTERIM-SESSION-RECORD-COMPLETED + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; +group.id=linterim-session-record-20220408-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 + +#数据中心,取值范围(0-31) +data.center.id.num=4 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=1 + +#生产者压缩模式 none or snappy 
+producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/ETL-PROXY-EVENT-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-PROXY-EVENT-COMPLETED new file mode 100644 index 0000000..74af8f9 --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/ETL-PROXY-EVENT-COMPLETED @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据file.system.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-PROXY-EVENT-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放秘钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema data id名称 +nacos.schema.data.id=proxy_event.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=PROXY-EVENT + +#补全数据 输出 topic +sink.kafka.topic=PROXY-EVENT-COMPLETED + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; 
+group.id=proxy-event-20220408-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=24 + +#转换函数并行度 +transform.parallelism=24 + +#kafka producer 并行度 +sink.parallelism=24 + +#数据中心,取值范围(0-31) +data.center.id.num=5 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=1 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/ETL-RADIUS-RECORD-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-RADIUS-RECORD-COMPLETED new file mode 100644 index 0000000..120a247 --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/ETL-RADIUS-RECORD-COMPLETED @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据file.system.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-RADIUS-RECORD-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放秘钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + 
+#schema data id名称 +nacos.schema.data.id=radius_record.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=RADIUS-RECORD + +#补全数据 输出 topic +sink.kafka.topic=RADIUS-RECORD-COMPLETED + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; +group.id=radius-record-log-20220408-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 + +#数据中心,取值范围(0-31) +data.center.id.num=6 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=1 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/ETL-SECURITY-EVENT-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-SECURITY-EVENT-COMPLETED new file mode 100644 index 0000000..9e22783 --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/ETL-SECURITY-EVENT-COMPLETED @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据file.system.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-SECURITY-EVENT-COMPLETED/ + 
+#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放秘钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema data id名称 +nacos.schema.data.id=security_event.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=SECURITY-EVENT + +#补全数据 输出 topic +sink.kafka.topic=SECURITY-EVENT-COMPLETED + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; +group.id=security-event-log-20220408-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=24 + +#转换函数并行度 +transform.parallelism=24 + +#kafka producer 并行度 +sink.parallelism=24 + +#数据中心,取值范围(0-31) +data.center.id.num=7 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=1 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/ETL-SESSION-RECORD-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-SESSION-RECORD-COMPLETED new file mode 100644 index 0000000..1638f99 --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/ETL-SESSION-RECORD-COMPLETED @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 
+sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据file.system.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-SESSION-RECORD-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放秘钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema data id名称 +nacos.schema.data.id=session_record.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=SESSION-RECORD + +#补全数据 输出 topic +sink.kafka.topic=SESSION-RECORD-COMPLETED + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; +group.id=session-record-log-20220408-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 + +#数据中心,取值范围(0-31) +data.center.id.num=8 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=1 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git 
a/MSH-PIC/flink/topology/completion/config/ETL-TRANSACTION-RECORD-COMPLETED b/MSH-PIC/flink/topology/completion/config/ETL-TRANSACTION-RECORD-COMPLETED new file mode 100644 index 0000000..a862abc --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/ETL-TRANSACTION-RECORD-COMPLETED @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据file.system.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-TRANSACTION-RECORD-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放秘钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema data id名称 +nacos.schema.data.id=transaction_record.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=TRANSACTION-RECORD + +#补全数据 输出 topic +sink.kafka.topic=TRANSACTION-RECORD-COMPLETED + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; 
+group.id=transaction-record-20220408-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 + +#数据中心,取值范围(0-31) +data.center.id.num=10 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=1 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-DOS-SKETCH-RECORD b/MSH-PIC/flink/topology/completion/config/MIRROR-DOS-SKETCH-RECORD new file mode 100644 index 0000000..9996c66 --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/MIRROR-DOS-SKETCH-RECORD @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据file.system.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放秘钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + 
+#schema data id名称 +nacos.schema.data.id=active_defence_event.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=DOS-SKETCH-RECORD + +#补全数据 输出 topic +sink.kafka.topic=DOS-SKETCH-RECORD + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; +group.id=dos-sketch-record-20230629-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 + +#数据中心,取值范围(0-31) +data.center.id.num=1 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=0 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-GTPC-RECORD-COMPLETED b/MSH-PIC/flink/topology/completion/config/MIRROR-GTPC-RECORD-COMPLETED new file mode 100644 index 0000000..7c2ab7f --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/MIRROR-GTPC-RECORD-COMPLETED @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据file.system.type配置填写对应地址路径。 
+knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放秘钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema data id名称 +nacos.schema.data.id=active_defence_event.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=GTPC-RECORD-COMPLETED + +#补全数据 输出 topic +sink.kafka.topic=GTPC-RECORD-COMPLETED + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; +group.id=gtpc-record-completed-20230629-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 + +#数据中心,取值范围(0-31) +data.center.id.num=1 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=0 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-INTERNAL-PACKET-CAPTURE-EVENT b/MSH-PIC/flink/topology/completion/config/MIRROR-INTERNAL-PACKET-CAPTURE-EVENT new file mode 100644 index 0000000..c4363e4 --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/MIRROR-INTERNAL-PACKET-CAPTURE-EVENT @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 
+source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据file.system.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放秘钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema data id名称 +nacos.schema.data.id=active_defence_event.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=INTERNAL-PACKET-CAPTURE-EVENT + +#补全数据 输出 topic +sink.kafka.topic=INTERNAL-PACKET-CAPTURE-EVENT + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; +group.id=internal-packet-capture-event-20230629-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 + +#数据中心,取值范围(0-31) +data.center.id.num=1 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + 
+#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=0 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-NETWORK-TRAFFIC-METRICS b/MSH-PIC/flink/topology/completion/config/MIRROR-NETWORK-TRAFFIC-METRICS new file mode 100644 index 0000000..45ef126 --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/MIRROR-NETWORK-TRAFFIC-METRICS @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据file.system.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放秘钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema data id名称 +nacos.schema.data.id=active_defence_event.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic 
+source.kafka.topic=NETWORK-TRAFFIC-METRICS + +#补全数据 输出 topic +sink.kafka.topic=NETWORK-TRAFFIC-METRICS + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; +group.id=network-traffic-metrics-20230629-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 + +#数据中心,取值范围(0-31) +data.center.id.num=1 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=0 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-POLICY-RULE-METRICS b/MSH-PIC/flink/topology/completion/config/MIRROR-POLICY-RULE-METRICS new file mode 100644 index 0000000..da02fdf --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/MIRROR-POLICY-RULE-METRICS @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据file.system.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放秘钥文件等。 
+tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema data id名称 +nacos.schema.data.id=active_defence_event.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=POLICY-RULE-METRICS + +#补全数据 输出 topic +sink.kafka.topic=POLICY-RULE-METRICS + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; +group.id=policy-rule-metrics-20230629-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 + +#数据中心,取值范围(0-31) +data.center.id.num=1 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=0 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-PXY-EXCH-INTERMEDIA-CERT b/MSH-PIC/flink/topology/completion/config/MIRROR-PXY-EXCH-INTERMEDIA-CERT new file mode 100644 index 0000000..d7fc346 --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/MIRROR-PXY-EXCH-INTERMEDIA-CERT @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + 
+#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据knowledgebase.file.storage.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放密钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema data id名称 +nacos.schema.data.id=active_defence_event.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=PXY-EXCH-INTERMEDIA-CERT + +#补全数据 输出 topic +sink.kafka.topic=PXY-EXCH-INTERMEDIA-CERT + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; +group.id=pxy-exch-intermedia-cert-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 + +#数据中心,取值范围(0-31) +data.center.id.num=1 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=0 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-SYS-PACKET-CAPTURE-EVENT b/MSH-PIC/flink/topology/completion/config/MIRROR-SYS-PACKET-CAPTURE-EVENT new file mode 100644 index 0000000..b361532 --- /dev/null +++ 
b/MSH-PIC/flink/topology/completion/config/MIRROR-SYS-PACKET-CAPTURE-EVENT @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据knowledgebase.file.storage.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放密钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema data id名称 +nacos.schema.data.id=active_defence_event.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=SYS-PACKET-CAPTURE-EVENT + +#补全数据 输出 topic +sink.kafka.topic=SYS-PACKET-CAPTURE-EVENT + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; +group.id=sys-packet-capture-event-20230629-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 
+ +#数据中心,取值范围(0-31) +data.center.id.num=1 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=0 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-TRAFFIC-TOP-METRICS b/MSH-PIC/flink/topology/completion/config/MIRROR-TRAFFIC-TOP-METRICS new file mode 100644 index 0000000..5ee5b31 --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/MIRROR-TRAFFIC-TOP-METRICS @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据knowledgebase.file.storage.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name +knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放密钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema data id名称 +nacos.schema.data.id=active_defence_event.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + 
+#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=TRAFFIC-TOP-METRICS + +#补全数据 输出 topic +sink.kafka.topic=TRAFFIC-TOP-METRICS + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; +group.id=traffic-top-metrics-20230629-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 + +#数据中心,取值范围(0-31) +data.center.id.num=1 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=0 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy diff --git a/MSH-PIC/flink/topology/completion/config/MIRROR-VOIP-RECORD b/MSH-PIC/flink/topology/completion/config/MIRROR-VOIP-RECORD new file mode 100644 index 0000000..21e7267 --- /dev/null +++ b/MSH-PIC/flink/topology/completion/config/MIRROR-VOIP-RECORD @@ -0,0 +1,78 @@ +#--------------------------------地址配置------------------------------# +#管理kafka地址 +source.kafka.servers=192.168.20.193:9094,192.168.20.194:9094,192.168.20.195:9094 + +#管理输出kafka地址 +sink.kafka.servers=192.168.20.223:9094,192.168.20.224:9094,192.168.20.225:9094 + +#用于分配log_id、连接hbase的zookeeper地址 +zookeeper.servers=192.168.20.193:2181,192.168.20.194:2181,192.168.20.195:2181 + +#hdfs地址用于获取定位库 +hdfs.servers=192.168.20.193:9000,192.168.20.194:9000 + +#--------------------------------HTTP/定位库------------------------------# +#定位库存储文件系统类型,hdfs or local +knowledgebase.file.storage.type=hdfs + +#定位库地址,根据knowledgebase.file.storage.type配置填写对应地址路径。 +knowledgebase.file.storage.path=/knowledgebase/ETL-ACTIVE-DEFENCE-EVENT-COMPLETED/ + +#从知识库元数据中需要获取的文件type列表,配置为空则不过滤type;优先级比name高 +knowledgebase.type.list=ip_location,asn + +#从知识库元数据中需要获取文件的name列表;配置为空则不过滤name 
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6 + +#工具库地址,存放密钥文件等。 +tools.library=/home/tsg/olap/topology/data/ + +#--------------------------------nacos配置------------------------------# +#nacos 地址 +nacos.server=192.168.20.252:8848 + +#schema namespace名称 +nacos.schema.namespace=MSH + +#schema data id名称 +nacos.schema.data.id=active_defence_event.json + +#knowledgebase namespace名称 +nacos.knowledgebase.namespace= + +#knowledgebase data id名称 +nacos.knowledgebase.data.id=knowledge_base.json + +#--------------------------------Kafka消费/生产配置------------------------------# +#kafka 接收数据topic +source.kafka.topic=VOIP-RECORD + +#补全数据 输出 topic +sink.kafka.topic=VOIP-RECORD + +#读取topic,存储该spout id的消费offset信息,可通过该拓扑命名;具体存储offset的位置,确定下次读取不重复的数据; +group.id=voip-record-20230629-1 + +#--------------------------------topology配置------------------------------# +#consumer 并行度 +source.parallelism=3 + +#转换函数并行度 +transform.parallelism=3 + +#kafka producer 并行度 +sink.parallelism=3 + +#数据中心,取值范围(0-31) +data.center.id.num=1 + +#hbase 更新时间,如填写0则不更新缓存 +hbase.tick.tuple.freq.secs=180 + +#--------------------------------默认值配置------------------------------# + +#0不需要补全原样输出日志,1需要补全 +log.need.complete=0 + +#生产者压缩模式 none or snappy +producer.kafka.compression.type=snappy |
