Diffstat (limited to 'PCAP-PIC/flink/topology/completion/tmp')
-rw-r--r--  PCAP-PIC/flink/topology/completion/tmp/ETL-PROXY-EVENT-COMPLETED     | 78
-rw-r--r--  PCAP-PIC/flink/topology/completion/tmp/ETL-SECURITY-EVENT-COMPLETED  | 78
2 files changed, 156 insertions, 0 deletions
diff --git a/PCAP-PIC/flink/topology/completion/tmp/ETL-PROXY-EVENT-COMPLETED b/PCAP-PIC/flink/topology/completion/tmp/ETL-PROXY-EVENT-COMPLETED
new file mode 100644
index 0000000..85ad880
--- /dev/null
+++ b/PCAP-PIC/flink/topology/completion/tmp/ETL-PROXY-EVENT-COMPLETED
@@ -0,0 +1,78 @@
+#--------------------------------Address configuration------------------------------#
+#Source Kafka broker addresses
+source.kafka.servers=192.168.10.193:9094,192.168.10.194:9094,192.168.10.195:9094
+
+#Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.10.193:9094,192.168.10.194:9094,192.168.10.195:9094
+
+#ZooKeeper addresses used for allocating log_id and connecting to HBase
+zookeeper.servers=192.168.10.193:2181,192.168.10.194:2181,192.168.10.195:2181
+
+#HDFS addresses used to fetch the knowledge (location) base files
+hdfs.servers=192.168.10.193:9000,192.168.10.194:9000
+
+#--------------------------------HTTP / knowledge base------------------------------#
+#File system type the knowledge base is stored on: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+#Knowledge base path; fill in the path that matches knowledgebase.file.storage.type.
+knowledgebase.file.storage.path=/knowledgebase/ETL-PROXY-EVENT-COMPLETED/
+
+#List of file types to fetch from the knowledge base metadata; leave empty to skip type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+#List of file names to fetch from the knowledge base metadata; leave empty to skip name filtering.
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+#Tools library path, holding key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#--------------------------------Nacos configuration------------------------------#
+#Nacos server address
+nacos.server=192.168.20.252:8848
+
+#Schema namespace name
+nacos.schema.namespace=MSH
+
+#Schema data id
+nacos.schema.data.id=proxy_event.json
+
+#Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+#Knowledge base data id
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#--------------------------------Kafka consumer/producer configuration------------------------------#
+#Kafka topic to consume input data from
+source.kafka.topic=PROXY-EVENT
+
+#Output topic for completed (enriched) data
+sink.kafka.topic=PROXY-EVENT-COMPLETED
+
+#Consumer group id; stores the consumer offset information for this source and can be named after the topology. The committed offsets determine where the next run resumes reading so records are not consumed twice.
+group.id=proxy-event-20220408-1
+
+#--------------------------------Topology configuration------------------------------#
+#Consumer (source) parallelism
+source.parallelism=24
+
+#Transform function parallelism
+transform.parallelism=24
+
+#Kafka producer (sink) parallelism
+sink.parallelism=24
+
+#Data center id, valid range 0-31
+data.center.id.num=5
+
+#HBase cache refresh interval in seconds; set to 0 to disable cache refreshing
+hbase.tick.tuple.freq.secs=180
+
+#--------------------------------Default value configuration------------------------------#
+
+#0: no completion needed, logs are output as-is; 1: completion required
+log.need.complete=1
+
+#Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
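
These properties are read by the topology jar rather than by Flink itself; that code is not part of this change. Below is a minimal sketch of how such a file could be wired into a job, assuming the standard flink-connector-kafka KafkaSource/KafkaSink builders. The class name CompletionJob and the pass-through map standing in for the enrichment step are placeholders, not taken from this repository.

import java.io.FileInputStream;
import java.util.Properties;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class CompletionJob {
    public static void main(String[] args) throws Exception {
        // Load one of the property files from this diff, e.g. ETL-PROXY-EVENT-COMPLETED.
        Properties conf = new Properties();
        conf.load(new FileInputStream(args[0]));

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Consumer side: source.kafka.servers / source.kafka.topic / group.id.
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers(conf.getProperty("source.kafka.servers"))
                .setTopics(conf.getProperty("source.kafka.topic"))
                .setGroupId(conf.getProperty("group.id"))
                .setStartingOffsets(OffsetsInitializer.committedOffsets())
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        // Producer side: sink.kafka.servers / sink.kafka.topic / producer.kafka.compression.type.
        Properties producerConf = new Properties();
        producerConf.setProperty("compression.type", conf.getProperty("producer.kafka.compression.type"));
        KafkaSink<String> sink = KafkaSink.<String>builder()
                .setBootstrapServers(conf.getProperty("sink.kafka.servers"))
                .setKafkaProducerConfig(producerConf)
                .setRecordSerializer(KafkaRecordSerializationSchema.builder()
                        .setTopic(conf.getProperty("sink.kafka.topic"))
                        .setValueSerializationSchema(new SimpleStringSchema())
                        .build())
                .build();

        DataStream<String> events = env
                .fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source")
                .setParallelism(Integer.parseInt(conf.getProperty("source.parallelism")));

        // Pass-through map stands in for the real completion/enrichment logic driven by the knowledge base.
        events.map(value -> value)
              .setParallelism(Integer.parseInt(conf.getProperty("transform.parallelism")))
              .sinkTo(sink)
              .setParallelism(Integer.parseInt(conf.getProperty("sink.parallelism")));

        env.execute("ETL-PROXY-EVENT-COMPLETED");
    }
}
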
diff --git a/PCAP-PIC/flink/topology/completion/tmp/ETL-SECURITY-EVENT-COMPLETED b/PCAP-PIC/flink/topology/completion/tmp/ETL-SECURITY-EVENT-COMPLETED
new file mode 100644
index 0000000..45855a8
--- /dev/null
+++ b/PCAP-PIC/flink/topology/completion/tmp/ETL-SECURITY-EVENT-COMPLETED
@@ -0,0 +1,78 @@
+#--------------------------------Address configuration------------------------------#
+#Source Kafka broker addresses
+source.kafka.servers=192.168.10.193:9094,192.168.10.194:9094,192.168.10.195:9094
+
+#Sink (output) Kafka broker addresses
+sink.kafka.servers=192.168.10.193:9094,192.168.10.194:9094,192.168.10.195:9094
+
+#ZooKeeper addresses used for allocating log_id and connecting to HBase
+zookeeper.servers=192.168.10.193:2181,192.168.10.194:2181,192.168.10.195:2181
+
+#HDFS addresses used to fetch the knowledge (location) base files
+hdfs.servers=192.168.10.193:9000,192.168.10.194:9000
+
+#--------------------------------HTTP / knowledge base------------------------------#
+#File system type the knowledge base is stored on: hdfs or local
+knowledgebase.file.storage.type=hdfs
+
+#Knowledge base path; fill in the path that matches knowledgebase.file.storage.type.
+knowledgebase.file.storage.path=/knowledgebase/ETL-SECURITY-EVENT-COMPLETED/
+
+#List of file types to fetch from the knowledge base metadata; leave empty to skip type filtering. Takes precedence over the name list.
+knowledgebase.type.list=ip_location,asn
+
+#List of file names to fetch from the knowledge base metadata; leave empty to skip name filtering.
+knowledgebase.name.list=ip_v4_built_in,ip_v6_built_in,ip_v4_user_defined,ip_v6_user_defined,asn_v4,asn_v6
+
+#Tools library path, holding key files and similar resources.
+tools.library=/home/tsg/olap/topology/data/
+
+#--------------------------------Nacos configuration------------------------------#
+#Nacos server address
+nacos.server=192.168.20.252:8848
+
+#Schema namespace name
+nacos.schema.namespace=MSH
+
+#Schema data id
+nacos.schema.data.id=security_event.json
+
+#Knowledge base namespace name
+nacos.knowledgebase.namespace=
+
+#Knowledge base data id
+nacos.knowledgebase.data.id=knowledge_base.json
+
+#--------------------------------Kafka consumer/producer configuration------------------------------#
+#Kafka topic to consume input data from
+source.kafka.topic=SECURITY-EVENT
+
+#Output topic for completed (enriched) data
+sink.kafka.topic=SECURITY-EVENT-COMPLETED
+
+#Consumer group id; stores the consumer offset information for this source and can be named after the topology. The committed offsets determine where the next run resumes reading so records are not consumed twice.
+group.id=security-event-log-20220408-1
+
+#--------------------------------Topology configuration------------------------------#
+#Consumer (source) parallelism
+source.parallelism=24
+
+#Transform function parallelism
+transform.parallelism=24
+
+#Kafka producer (sink) parallelism
+sink.parallelism=24
+
+#Data center id, valid range 0-31
+data.center.id.num=7
+
+#HBase cache refresh interval in seconds; set to 0 to disable cache refreshing
+hbase.tick.tuple.freq.secs=180
+
+#--------------------------------Default value configuration------------------------------#
+
+#0: no completion needed, logs are output as-is; 1: completion required
+log.need.complete=1
+
+#Producer compression mode: none or snappy
+producer.kafka.compression.type=snappy
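
Both files set data.center.id.num (5 for the proxy topology, 7 for the security topology) with a documented 0-31 range, and the comments note that ZooKeeper is involved in allocating log_id. A common way to combine these is a snowflake-style id. The sketch below assumes the usual 41/5/5/12 bit layout, which fits a 5-bit data center id; the actual layout and the ZooKeeper-based worker id allocation are not shown in this diff.

// Minimal snowflake-style generator; the bit layout and epoch are assumptions, not taken from this repository.
public class LogIdGenerator {
    private static final long EPOCH = 1640995200000L;   // custom epoch (2022-01-01 UTC), arbitrary choice
    private static final long DATA_CENTER_BITS = 5L;    // matches the 0-31 range of data.center.id.num
    private static final long WORKER_BITS = 5L;         // worker id, e.g. allocated via ZooKeeper
    private static final long SEQUENCE_BITS = 12L;      // per-millisecond sequence

    private final long dataCenterId;  // value of data.center.id.num
    private final long workerId;
    private long sequence = 0L;
    private long lastTimestamp = -1L;

    public LogIdGenerator(long dataCenterId, long workerId) {
        this.dataCenterId = dataCenterId;
        this.workerId = workerId;
    }

    public synchronized long nextId() {
        long now = System.currentTimeMillis();
        if (now == lastTimestamp) {
            sequence = (sequence + 1) & ((1L << SEQUENCE_BITS) - 1);
            if (sequence == 0) {
                // Sequence exhausted for this millisecond; spin until the clock advances.
                while ((now = System.currentTimeMillis()) <= lastTimestamp) { }
            }
        } else {
            sequence = 0L;
        }
        lastTimestamp = now;
        return ((now - EPOCH) << (DATA_CENTER_BITS + WORKER_BITS + SEQUENCE_BITS))
                | (dataCenterId << (WORKER_BITS + SEQUENCE_BITS))
                | (workerId << SEQUENCE_BITS)
                | sequence;
    }
}

Keeping data.center.id.num distinct between the two topologies (5 vs 7) ensures their generated ids cannot collide even when worker ids overlap.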