author    zhanghongqing <[email protected]>    2022-07-07 14:07:27 +0800
committer zhanghongqing <[email protected]>    2022-07-07 14:07:27 +0800
commit    f552793230d0428cbc63714ee296c1ce4971a31b (patch)
tree      1bf3a26d957710b261f61a65559d393f55bf9382 /properties
Initial commit
Diffstat (limited to 'properties')
-rw-r--r--  properties/default_config.properties       65
-rw-r--r--  properties/service_flow_config.properties  84
2 files changed, 149 insertions, 0 deletions
diff --git a/properties/default_config.properties b/properties/default_config.properties
new file mode 100644
index 0000000..aaeccfc
--- /dev/null
+++ b/properties/default_config.properties
@@ -0,0 +1,65 @@
+#====================Kafka KafkaConsumer====================#
+#kafka source consumer session timeout
+session.timeout.ms=60000
+
+#kafka source: max records returned per poll
+max.poll.records=5000
+
+#kafka source: max bytes fetched per partition
+max.partition.fetch.bytes=31457280
+#====================Kafka KafkaProducer====================#
+#Number of retries for the producer
+retries=0
+
+#Maximum time a batch may wait after creation before it is sent, whether or not it is full
+linger.ms=10
+
+#If no response is received before the timeout, the client will resend the request if necessary
+request.timeout.ms=30000
+
+#The producer sends records in batches; batch size (default: 16384)
+batch.size=262144
+
+#Size of the buffer the producer uses to hold unsent messages
+#128M
+buffer.memory=134217728
+
+#Maximum size of a single request sent to the Kafka broker (default: 1048576)
+#10M
+max.request.size=10485760
+#====================kafka default====================#
+#Kafka SASL username (encrypted)
+kafka.user=nsyGpHKGFA4KW0zro9MDdw==
+
+#Kafka SASL and SSL password (encrypted)
+kafka.pin=6MleDyA3Z73HSaXiKsDJ2k7Ys8YWLhEJ
+
+#Producer acks
+producer.ack=1
+#====================nacos default====================#
+#nacos username
+nacos.username=nacos
+
+#nacos password
+nacos.pin=nacos
+
+#nacos group
+nacos.group=Galaxy
+#====================Topology Default====================#
+#hbase table name
+hbase.table.name=tsg_galaxy:relation_framedip_account
+
+#Default mail charset
+mail.default.charset=UTF-8
+
+#0 = no validation, 1 = weak type validation
+log.transform.type=1
+
+#Maximum time between two outputs (in milliseconds)
+buffer.timeout=5000
+#====================Temporary config - to be removed====================#
+#Gateway APP_ID lookup endpoint
+app.id.http=http://192.168.44.20:9999/open-api/appDicList
+
+#app_id refresh interval; 0 means the cache is never refreshed
+app.tick.tuple.freq.secs=0
\ No newline at end of file
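For context, a minimal sketch of how the producer tuning keys above could be mapped onto a KafkaProducer. The DefaultConfigLoader class name and the classpath location of the file are assumptions for illustration, not part of this commit.

import java.io.InputStream;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

// Hypothetical loader: reads default_config.properties (assumed to be on the
// classpath) and applies its producer keys as standard ProducerConfig settings.
public class DefaultConfigLoader {
    public static KafkaProducer<String, String> buildProducer(String bootstrapServers) throws Exception {
        Properties file = new Properties();
        try (InputStream in = DefaultConfigLoader.class
                .getResourceAsStream("/properties/default_config.properties")) {
            file.load(in);
        }
        Properties p = new Properties();
        p.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        p.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        p.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Keys below are taken verbatim from default_config.properties
        p.put(ProducerConfig.RETRIES_CONFIG, file.getProperty("retries"));
        p.put(ProducerConfig.LINGER_MS_CONFIG, file.getProperty("linger.ms"));
        p.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, file.getProperty("request.timeout.ms"));
        p.put(ProducerConfig.BATCH_SIZE_CONFIG, file.getProperty("batch.size"));
        p.put(ProducerConfig.BUFFER_MEMORY_CONFIG, file.getProperty("buffer.memory"));
        p.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, file.getProperty("max.request.size"));
        p.put(ProducerConfig.ACKS_CONFIG, file.getProperty("producer.ack"));
        return new KafkaProducer<>(p);
    }
}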
diff --git a/properties/service_flow_config.properties b/properties/service_flow_config.properties
new file mode 100644
index 0000000..cf73cf0
--- /dev/null
+++ b/properties/service_flow_config.properties
@@ -0,0 +1,84 @@
+#--------------------------------Address configuration------------------------------#
+#Source Kafka broker address
+source.kafka.servers=192.168.45.102:9092
+
+#Sink (output) Kafka broker address
+sink.kafka.servers=192.168.45.102:9092
+
+#ZooKeeper address, used for log_id configuration
+zookeeper.servers=192.168.45.102:2181
+
+#HBase ZooKeeper address, used to connect to HBase
+hbase.zookeeper.servers=192.168.45.102:2181
+
+#--------------------------------HTTP / geolocation library------------------------------#
+#Geolocation library path
+tools.library=D:\\workerspace\\dat\\
+
+#--------------------------------Nacos configuration------------------------------#
+#Nacos server address
+nacos.server=192.168.45.102:8848
+
+#nacos namespace
+nacos.schema.namespace=prod
+
+#nacos data id
+nacos.data.id=session_record.json
+
+#--------------------------------Kafka consumer/producer configuration------------------------------#
+
+#Kafka topic for incoming data
+source.kafka.topic=atest
+
+#Output topic for enriched data
+sink.kafka.topic=atest2
+
+#Consumer group for reading the topic; it stores the consumer offsets for this spout id (usually named after the topology) so the next read does not re-consume data
+group.id=flinktest-102
+
+#--------------------------------Topology configuration------------------------------#
+
+#Consumer parallelism
+source.parallelism=1
+
+#Transform function parallelism
+transform.parallelism=1
+
+#Kafka producer parallelism
+sink.parallelism=1
+
+#Data center id, valid range 0-31
+data.center.id.num=0
+
+#HBase refresh interval; 0 means the cache is never refreshed
+hbase.tick.tuple.freq.secs=180
+
+#--------------------------------Default value configuration------------------------------#
+#0 = output the log as-is without enrichment, 1 = enrichment required
+log.need.complete=1
+
+#Producer compression mode: none or snappy
+producer.kafka.compression.type=none
+
+
+source.kafka.topic.connection=connection_record_log
+source.kafka.topic.sketch=connection_sketch_record_log
+source.kafka.topic.dns=dns_record_log
+
+sink.ck.table.connection=connection_record_log
+sink.ck.table.sketch=connection_sketch_record_log
+sink.ck.table.dns=dns_record_log
+sink.ck.table.relation.connection=connection_relation_log
+sink.ck.table.relation.dns=dns_relation_log
+
+#ClickHouse ingestion (sink)
+ck.hosts=192.168.45.102:8123
+ck.database=default
+ck.username=default
+ck.pin=galaxy2019
+
+#connection_record_log
+
+flink.watermark.max.orderness=10
+#seconds
+log.aggregate.duration=30
\ No newline at end of file
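Similarly, a minimal sketch of how the source, group.id, parallelism, and watermark keys in service_flow_config.properties could be wired into a Flink job. Assumptions: the ServiceFlowJob class name, the KafkaSource/WatermarkStrategy APIs from the current flink-connector-kafka, and that flink.watermark.max.orderness is expressed in seconds; the repository's actual topology may differ.

import java.io.InputStream;
import java.time.Duration;
import java.util.Properties;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

// Hypothetical job skeleton, not the repository's actual topology.
public class ServiceFlowJob {
    public static void main(String[] args) throws Exception {
        Properties cfg = new Properties();
        try (InputStream in = ServiceFlowJob.class
                .getResourceAsStream("/properties/service_flow_config.properties")) {
            cfg.load(in);
        }

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers(cfg.getProperty("source.kafka.servers"))
                .setTopics(cfg.getProperty("source.kafka.topic"))
                .setGroupId(cfg.getProperty("group.id"))
                .setStartingOffsets(OffsetsInitializer.latest())
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();

        // flink.watermark.max.orderness is assumed to be in seconds
        Duration maxOutOfOrder =
                Duration.ofSeconds(Long.parseLong(cfg.getProperty("flink.watermark.max.orderness")));

        env.fromSource(source,
                        WatermarkStrategy.<String>forBoundedOutOfOrderness(maxOutOfOrder),
                        "service-flow-source")
                .setParallelism(Integer.parseInt(cfg.getProperty("source.parallelism")))
                .print(); // placeholder for the transform and ClickHouse/Kafka sink stages
        env.execute("service-flow");
    }
}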