<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
    <!-- Local path where the NameNode stores the namespace image and edit log. -->
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/home/tsg/olap/hadoop/dfs/name</value>
    </property>
    <!-- Local path where DataNodes store block data. -->
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/home/tsg/olap/hadoop/dfs/data</value>
    </property>
    <!-- Number of replicas kept per block. -->
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <!-- Enable the WebHDFS REST API on NameNodes and DataNodes. -->
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>
    <!-- Disable HDFS permission checking. dfs.permissions is the deprecated
         alias of dfs.permissions.enabled; both are set so the choice is
         honored regardless of Hadoop version. -->
    <property>
        <name>dfs.permissions</name>
        <value>false</value>
    </property>
    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>
    <!-- Logical name of the HA nameservice. -->
    <property>
        <name>dfs.nameservices</name>
        <value>ns1</value>
    </property>
    <!-- HDFS block size in bytes (128 MB). -->
    <property>
        <name>dfs.blocksize</name>
        <value>134217728</value>
    </property>
    <!-- The two NameNodes that make up the ns1 nameservice. -->
    <property>
        <name>dfs.ha.namenodes.ns1</name>
        <value>nn1,nn2</value>
    </property>
    <!-- RPC address of nn1. -->
    <property>
        <name>dfs.namenode.rpc-address.ns1.nn1</name>
        <value>192.168.20.193:9000</value>
    </property>
    <!-- HTTP (web UI) address of nn1, for external access. -->
    <property>
        <name>dfs.namenode.http-address.ns1.nn1</name>
        <value>192.168.20.193:50070</value>
    </property>
    <!-- RPC address of nn2. -->
    <property>
        <name>dfs.namenode.rpc-address.ns1.nn2</name>
        <value>192.168.20.194:9000</value>
    </property>
    <!-- HTTP (web UI) address of nn2, for external access. -->
    <property>
        <name>dfs.namenode.http-address.ns1.nn2</name>
        <value>192.168.20.194:50070</value>
    </property>
    <!-- JournalNode quorum holding the shared NameNode edit log
         (typically co-located with the ZooKeeper ensemble). -->
    <property>
        <name>dfs.namenode.shared.edits.dir</name>
        <value>qjournal://192.168.20.193:8485;192.168.20.194:8485;192.168.20.195:8485/ns1</value>
    </property>
    <!-- Local directory where each JournalNode stores its edits. -->
    <property>
        <name>dfs.journalnode.edits.dir</name>
        <value>/home/tsg/olap/hadoop/journal</value>
    </property>
    <!-- Java class HDFS clients use to locate the active NameNode. -->
    <property>
        <name>dfs.client.failover.proxy.provider.ns1</name>
        <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>
    <!-- Fencing methods tried in order during failover. A property takes a
         single <value> element, so the list is newline-separated inside it
         (the original file had two <value> elements, of which Hadoop honors
         only one): try sshfence first, then fall back to shell(true) so
         failover can still proceed when the old active host is unreachable
         over SSH. -->
    <property>
        <name>dfs.ha.fencing.methods</name>
        <value>sshfence
shell(true)</value>
    </property>
    <!-- Private key used by the sshfence method (requires passwordless SSH). -->
    <property>
        <name>dfs.ha.fencing.ssh.private-key-files</name>
        <value>/root/.ssh/id_rsa</value>
    </property>
    <!-- sshfence connection timeout, in milliseconds. -->
    <property>
        <name>dfs.ha.fencing.ssh.connect-timeout</name>
        <value>30000</value>
    </property>
    <!-- Enable automatic failover (requires ZKFC processes). -->
    <property>
        <name>dfs.ha.automatic-failover.enabled</name>
        <value>true</value>
    </property>
    <!-- Max threads a DataNode uses for transferring block data. -->
    <property>
        <name>dfs.datanode.max.transfer.threads</name>
        <value>8192</value>
    </property>
    <!-- NameNode RPC handler threads; raising this costs little. -->
    <property>
        <name>dfs.namenode.handler.count</name>
        <value>30</value>
    </property>
    <!-- DataNode RPC handler threads; higher values use more memory. -->
    <property>
        <name>dfs.datanode.handler.count</name>
        <value>40</value>
    </property>
    <!-- Bandwidth the balancer may consume, bytes/sec (100 MB/s).
         dfs.datanode.balance.bandwidthPerSec is the current name of the
         deprecated dfs.balance.bandwidthPerSec used previously. -->
    <property>
        <name>dfs.datanode.balance.bandwidthPerSec</name>
        <value>104857600</value>
    </property>
    <!-- Disk space in bytes reserved per volume for non-HDFS use (50 GB). -->
    <property>
        <name>dfs.datanode.du.reserved</name>
        <value>53687091200</value>
    </property>
    <!-- Interval in ms at which the NameNode re-checks for dead DataNodes.
         DataNode timeout = 2 * recheck-interval + 10 * heartbeat interval
         (= 2 * 100000 + 30000 ms with the default 3 s heartbeat).
         The bare name heartbeat.recheck.interval used before is not a valid
         Hadoop 2+ key and was silently ignored; the correct property is
         dfs.namenode.heartbeat.recheck-interval. -->
    <property>
        <name>dfs.namenode.heartbeat.recheck-interval</name>
        <value>100000</value>
    </property>
</configuration>