#Active system version (version A: 4, version B: 2, version C: 1)
active.system=2
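#Assumption (the loading code is not part of this file): the application reads
#this flag at startup to pick the active build; e.g. to switch to version A:
#active.system=4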
#Number of records per HTTP batch insert into the database
batch.insert.num=1000
#Kafka consumer group id
#group.id=df-ip-port-log-1812291718
#Shared prefix for group.id
group.id.prefix=1904091353
#Shared suffix for group.id
#group.id.suffix=1905151418
#group.id.suffix=-sh-1908231129
group.id.suffix=-sh-1911181500
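#Assumption: the consumer code (not shown here) assembles the effective group id
#from these parts, e.g. <group.id.prefix><job-specific-part><group.id.suffix>,
#giving something like 1904091353-...-sh-1911181500. Bumping either timestamp
#string yields a brand-new group with no committed offsets, so consumption
#restarts according to auto.offset.reset below.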
#Management Kafka addresses
#Source-data Kafka addresses
#bootstrap.servers=10.172.208.1:9092,10.172.208.2:9092,10.172.208.3:9092,10.172.208.4:9092,10.172.208.5:9092
## XXG
#bootstrap.servers=192.168.40.203:9092,192.168.40.206:9092
## HY
#bootstrap.servers=192.168.10.10:9092,192.168.10.11:9092,192.168.10.12:9092
## ZX
bootstrap.servers=172.29.123.121:9092,172.29.123.122:9092,172.29.123.123:9092,172.29.123.124:9092
#Output Kafka addresses
# XXG 192.168.40.202:9092,
#bootstrap.output.servers=192.168.40.203:9092,192.168.40.206:9092
## HY
#bootstrap.output.servers=192.168.10.10:9092,192.168.10.11:9092,192.168.10.12:9092
## ZX
bootstrap.output.servers=172.29.123.121:9092,172.29.123.122:9092,172.29.123.123:9092,172.29.123.124:9092
#Record-count threshold for batch writes to Kafka
#batch.kafka.insert.num=10000
## ZX
batch.kafka.insert.num=500000
#Topic names on the Kafka brokers
#Source topic
#kafka.sip.origin.topic=test_sip
## HY
#kafka.sip.origin.topic=VOIP_REALTIMECOUNT_TEST
## ZX
kafka.sip.origin.topic=SIP_ORIGIN_ALL
#Output topic - SIP original logs with patched (completed) fields
#kafka.sip.complement.topic=test_sip_comple
## ZX
kafka.sip.complement.topic=SIP_ORIGIN_PATCH
#Output topic - cleaned program-fragment logs
#kafka.route.relation.topic=test_route_relat
## ZX
kafka.route.relation.topic=ROUTE_SIP_ORIGIN_PATCH
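#Topic flow implied by the comments above: the job consumes raw SIP logs from
#SIP_ORIGIN_ALL, writes patched (field-completed) logs to SIP_ORIGIN_PATCH, and
#writes cleaned program-fragment logs to ROUTE_SIP_ORIGIN_PATCH.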
#Offset to start reading from when no committed offset exists: earliest/latest
auto.offset.reset=latest
#Maximum bytes returned by one Kafka fetch request (500 MB)
fetch.max.bytes=524288000
#Maximum bytes fetched per partition per request (100 MB)
max.partition.fetch.bytes=104857600
#Maximum interval between poll() calls before the consumer is considered failed (5 min)
max.poll.interval.ms=300000
#Maximum number of records returned per poll
max.poll.records=50000
#Kafka consumer session timeout (60 s)
session.timeout.ms=60000
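#Notes on the tuning above: with a fresh group id and no committed offsets,
#auto.offset.reset=latest starts from the end of the topic; Kafka rejects a
#session.timeout.ms outside the broker's group.min.session.timeout.ms /
#group.max.session.timeout.ms range; and a poll loop exceeding
#max.poll.interval.ms (5 min) is evicted from the group and rebalanced.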
#Data center HTTP load addresses, comma-separated
datacenter.addrs=http://10.208.133.172:10080
#Data center username
datacenter.username=xa_z2_mesa
#Data center password
datacenter.password=123!@#qwe
#ClickHouse
#Number of records per ClickHouse batch insert
batch.chinsert.num=100000
#Batch size for inserting logs of killed connections into ClickHouse
batch.chinsert.killed.num=1000
#ClickHouse destination table
table.name=df_test.dc_ntc_conn_record_log
#ClickHouse table for killed-connection logs
table.killed.name=df_test.dc_ntc_killed_log
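#Sketch of the flush the batch sizes above imply (assumption: the actual column
#list depends on the table schema, which is not part of this file):
#INSERT INTO df_test.dc_ntc_conn_record_log (...) VALUES (...), (...), ...
#with up to batch.chinsert.num rows per statement.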
#Path to the IP geolocation (ipip) database
## ZX
ipip.library=/home/bigdata/topology/dat/ipipdat/ipip.dat
#IP geolocation database paths for local testing
#ipip.library=E:/ipip.dat
#ipip.library=D:/workdata/ip_all/ipip.dat
#ipip.library=/root/ipip.dat
#ipip.library=/home/mesasoft/data/ipip.dat
#HDFS import settings
#hdfs.url=hdfs://192.168.40.202:9000
hdfs.url=hdfs://ns1
hdfs.path=/voipInput/
hdfs.user=root
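#Note: hdfs://ns1 is an HA nameservice id rather than a host:port; the HDFS
#client resolves it via dfs.nameservices and dfs.ha.namenodes.ns1 in its
#hdfs-site.xml (assumed to ship with the deployment; not part of this file).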
#Hive-related settings (currently deprecated)
#hive.url=jdbc:hive2://192.168.40.202:2181,192.168.40.203:2181,192.168.40.206:2181/test;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2
#hive.url=jdbc:hive2://192.168.40.202:10000/test
#hive.username=root
#hive.password=123456
#Hive destination table names
#Cleaned logs
hive.sip.clean.table=sip_origin_patch_avro
#Cleaned program-fragment logs
#hive.sip.route.table=no_table
#Master switch for printing all catch (exception) logs
all.log.output.controller=yes
#Switch for printing partial (per-module) logs
part.log.output.controller=yes
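#Assumption: these switches accept yes/no; e.g. to silence the global catch log:
#all.log.output.controller=no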