Diffstat (limited to 'MPE/flink/conf')
-rw-r--r--  MPE/flink/conf/core-site.xml                  58
-rw-r--r--  MPE/flink/conf/flink-conf.yaml               207
-rw-r--r--  MPE/flink/conf/hdfs-site.xml                 142
-rw-r--r--  MPE/flink/conf/log4j-cli.properties           67
-rw-r--r--  MPE/flink/conf/log4j-console.properties       66
-rw-r--r--  MPE/flink/conf/log4j-session.properties       40
-rw-r--r--  MPE/flink/conf/log4j.properties               59
-rw-r--r--  MPE/flink/conf/log4j2.component.properties     2
-rw-r--r--  MPE/flink/conf/logback-console.xml            64
-rw-r--r--  MPE/flink/conf/logback-session.xml            39
-rw-r--r--  MPE/flink/conf/logback.xml                    58
-rw-r--r--  MPE/flink/conf/masters                         2
-rw-r--r--  MPE/flink/conf/workers                         1
-rw-r--r--  MPE/flink/conf/yarn-site.xml                 224
-rw-r--r--  MPE/flink/conf/zoo.cfg                        36
15 files changed, 1065 insertions, 0 deletions
diff --git a/MPE/flink/conf/core-site.xml b/MPE/flink/conf/core-site.xml
new file mode 100644
index 0000000..9d34fa9
--- /dev/null
+++ b/MPE/flink/conf/core-site.xml
@@ -0,0 +1,58 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>fs.defaultFS</name>
+ <value>hdfs://ns1</value>
+ </property>
+ <property>
+ <name>hadoop.tmp.dir</name>
+ <value>file:/data/tsg/olap/hadoop/tmp</value>
+ </property>
+ <property>
+ <name>io.file.buffer.size</name>
+ <value>131702</value>
+ </property>
+ <property>
+ <name>hadoop.proxyuser.root.hosts</name>
+ <value>*</value>
+ </property>
+ <property>
+ <name>hadoop.proxyuser.root.groups</name>
+ <value>*</value>
+ </property>
+ <property>
+ <name>hadoop.logfile.size</name>
+ <value>10000000</value>
+ <description>The max size of each log file</description>
+ </property>
+ <property>
+ <name>hadoop.logfile.count</name>
+ <value>1</value>
+ <description>The max number of log files</description>
+ </property>
+ <property>
+ <name>ha.zookeeper.quorum</name>
+ <value>192.168.20.221:2181,192.168.20.222:2181,192.168.20.223:2181</value>
+ </property>
+ <property>
+ <name>ipc.client.connect.timeout</name>
+ <value>90000</value>
+ </property>
+</configuration>
diff --git a/MPE/flink/conf/flink-conf.yaml b/MPE/flink/conf/flink-conf.yaml
new file mode 100644
index 0000000..59d1943
--- /dev/null
+++ b/MPE/flink/conf/flink-conf.yaml
@@ -0,0 +1,207 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+
+#==============================================================================
+# Common
+#==============================================================================
+
+# The external address of the host on which the JobManager runs and can be
+# reached by the TaskManagers and any clients which want to connect. This setting
+# is only used in Standalone mode and may be overwritten on the JobManager side
+# by specifying the --host <hostname> parameter of the bin/jobmanager.sh executable.
+# In high availability mode, if you use the bin/start-cluster.sh script and setup
+# the conf/masters file, this will be taken care of automatically. Yarn/Mesos
+# automatically configure the host name based on the hostname of the node where the
+# JobManager runs.
+
+jobmanager.rpc.address: 192.168.20.223
+
+# JVM-related options
+#env.java.opts: "-XX:+UseG1GC -XX:NewRatio=2 -XX:MaxGCPauseMillis=300 -XX:InitiatingHeapOccupancyPercent=35 -Xloggc:/data/tsg/olap/flink-1.13.1/log/gc.log -XX:+PrintGCDetails -XX:-OmitStackTraceInFastThrow -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=20 -XX:GCLogFileSize=20M"
+
+# JobManager RPC port
+jobmanager.rpc.port: 6123
+
+# Spread tasks evenly across all TaskManagers
+cluster.evenly-spread-out-slots: true
+
+# Do not fail the Flink JVM process when a metaspace OutOfMemoryError is raised
+classloader.fail-on-metaspace-oom-error: false
+
+# Work around classloader leaks from third-party libraries (disable the leaked-classloader check)
+classloader.check-leaked-classloader: false
+
+# Prevent the TaskManager from going down when a task cannot be cancelled in time (0 disables the cancellation timeout)
+task.cancellation.timeout: 0
+
+# Total memory of the JobManager process (all Flink-related memory)
+jobmanager.memory.process.size: 1024M
+
+# Total memory of the TaskManager process (all Flink-related memory)
+taskmanager.memory.process.size: 1024M
+
+# Size of the TaskManager's managed (off-heap) memory
+taskmanager.memory.managed.size: 10M
+
+# taskmanager.memory.off-heap defaults to false; it controls whether Flink managed memory is kept on-heap or off-heap.
+# The default is on-heap; enabling off-heap allocation takes a further share of resources away from the heap.
+taskmanager.memory.off-heap: false
+
+# Framework off-heap memory, allocated as direct memory
+taskmanager.memory.framework.off-heap.size: 128M
+
+# TaskManager JVM metaspace size (default 256M)
+taskmanager.memory.jvm-metaspace.size: 384M
+
+# Minimum number of network buffers required per sort-merge blocking result partition (default 64). For production use, raising this to 2048 is recommended to improve the data compression ratio and reduce small network packets; raising it also requires more total network memory.
+taskmanager.network.sort-shuffle.min-buffers: 64
+
+# Memory used for reading shuffle data (currently only for sort-merge shuffle). It is taken from framework.off-heap.size (default 32M); when increasing this value, increase framework.off-heap.size accordingly.
+taskmanager.memory.framework.off-heap.batch-shuffle.size: 8M
+
+# Maximum number of buffers each channel may use (default 10). It can speed up checkpoint alignment by preventing excessive growth of buffered in-flight data under data skew or a high floating-buffer count.
+taskmanager.network.memory.max-buffers-per-channel: 10
+
+# The number of task slots that each TaskManager offers. Each slot runs one parallel pipeline.
+taskmanager.numberOfTaskSlots: 1
+
+# The parallelism used for programs that did not specify and other parallelism.
+parallelism.default: 1
+
+# The default file system scheme and authority.
+#
+# By default file paths without scheme are interpreted relative to the local
+# root file system 'file:///'. Use this to override the default and interpret
+# relative paths relative to a different file system,
+# for example 'hdfs://mynamenode:12345'
+#
+# fs.default-scheme
+
+#==============================================================================
+# NetWork
+#==============================================================================
+
+# Number of floating network buffers per gate (default 8). Helps relieve back pressure caused by uneven data distribution across subpartitions.
+taskmanager.network.memory.floating-buffers-per-gate: 8
+
+# Number of exclusive network buffers per input/output channel. Configure at least 2.
+taskmanager.network.memory.buffers-per-channel: 2
+
+# Network memory used for data transfer between TaskManagers (shuffle, broadcast, etc.) and with external components
+#Min
+taskmanager.memory.network.min: 64M
+#Max
+taskmanager.memory.network.max: 128M
+
+#==============================================================================
+# High Availability
+#==============================================================================
+
+# The high-availability mode. Possible options are 'NONE' or 'zookeeper'.
+#
+# high-availability: zookeeper
+
+# The path where metadata for master recovery is persisted. While ZooKeeper stores
+# the small ground truth for checkpoint and leader election, this location stores
+# the larger objects, like persisted dataflow graphs.
+#
+# Must be a durable file system that is accessible from all nodes
+# (like HDFS, S3, Ceph, nfs, ...)
+#
+# high-availability.storageDir: hdfs:///flink/ha/
+
+# The list of ZooKeeper quorum peers that coordinate the high-availability
+# setup. This must be a list of the form:
+# "host1:clientPort,host2:clientPort,..." (default clientPort: 2181)
+
+#high-availability: zookeeper
+#high-availability.zookeeper.quorum: 192.168.20.221:2181,192.168.20.222:2181,192.168.20.223:2181
+#high-availability.zookeeper.path.root: /flink
+#high-availability.zookeeper.client.connection-timeout: 150000
+#high-availability.zookeeper.client.max-retry-attempts: 10
+#high-availability.zookeeper.client.retry-wait: 10000
+#high-availability.zookeeper.client.session-timeout: 240000
+
+# Read the local Hadoop configuration files
+#fs.hdfs.hadoopconf: /data/tsg/olap/flink-1.13.1/conf/
+#high-availability.cluster-id: /flink_cluster
+#important: customize per cluster
+#high-availability.storageDir: hdfs:///flink/recover
+
+heartbeat.timeout: 180000
+heartbeat.interval: 20000
+akka.ask.timeout: 300 s
+
+# ACL options are based on https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html#sc_BuiltinACLSchemes
+# It can be either "creator" (ZOO_CREATE_ALL_ACL) or "open" (ZOO_OPEN_ACL_UNSAFE)
+# The default value is "open" and it can be changed to "creator" if ZK security is enabled
+#
+# high-availability.zookeeper.client.acl: open
+
+# The failover strategy, i.e., how the job computation recovers from task failures.
+# Only restart tasks that may have been affected by the task failure, which typically includes
+# downstream tasks and potentially upstream tasks if their produced data is no longer available for consumption.
+jobmanager.execution.failover-strategy: region
+
+#rest.port: 8080
+
+restart-strategy: fixed-delay
+
+# Restart strategy
+# In the 21.12 release the value was 9999
+# In the 22.01 release it was changed to INT_MAX
+restart-strategy.fixed-delay.attempts: 2147483647
+
+yarn.application-attempts: 10000
+
+restart-strategy.fixed-delay.delay: 5 s
+
+jobmanager.web.upload.dir: /data/tsg/olap/flink-1.13.1/flink-web
+
+#==============================================================================
+# Advanced
+#==============================================================================
+
+# Override the directories for temporary files. If not specified, the
+# system-specific Java temporary directory (java.io.tmpdir property) is taken.
+#
+# For framework setups on Yarn or Mesos, Flink will automatically pick up the
+# containers' temp directories without any need for configuration.
+#
+# Add a delimited list for multiple directories, using the system directory
+# delimiter (colon ':' on unix) or a comma, e.g.:
+# /data1/tmp:/data2/tmp:/data3/tmp
+#
+# Note: Each directory entry is read from and written to by a different I/O
+# thread. You can include the same directory multiple times in order to create
+# multiple I/O threads against that directory. This is for example relevant for
+# high-throughput RAIDs.
+#
+# io.tmp.dirs: /tmp
+
+# The classloading resolve order. Possible values are 'child-first' (Flink's default)
+# and 'parent-first' (Java's default).
+#
+# Child first classloading allows users to use different dependency/library
+# versions in their application than those in the classpath. Switching back
+# to 'parent-first' may help with debugging dependency issues.
+#
+# classloader.resolve-order: child-first
+classloader.resolve-order: parent-first
+
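For reference, the fixed-delay policy configured above (2147483647 attempts, 5 s delay) can also be set per job in application code. The following is a minimal sketch assuming the Flink 1.13 DataStream API; the class name and the omitted job body are illustrative only and not taken from this commit:

    import org.apache.flink.api.common.restartstrategy.RestartStrategies;
    import org.apache.flink.api.common.time.Time;
    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

    public class FixedDelayRestartSketch {
        public static void main(String[] args) throws Exception {
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
            // Mirror the cluster defaults above: retry effectively forever
            // (Integer.MAX_VALUE attempts) with 5 seconds between attempts.
            env.setRestartStrategy(
                    RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, Time.seconds(5)));
            // ... sources, transformations and env.execute() would follow here.
        }
    }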
diff --git a/MPE/flink/conf/hdfs-site.xml b/MPE/flink/conf/hdfs-site.xml
new file mode 100644
index 0000000..99be7da
--- /dev/null
+++ b/MPE/flink/conf/hdfs-site.xml
@@ -0,0 +1,142 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+ <property>
+ <name>dfs.namenode.name.dir</name>
+ <value>file:/data/tsg/olap/hadoop/dfs/name</value>
+ </property>
+ <property>
+ <name>dfs.datanode.data.dir</name>
+ <value>file:/data/tsg/olap/hadoop/dfs/data</value>
+ </property>
+ <property>
+ <name>dfs.replication</name>
+ <value>2</value>
+ </property>
+ <property>
+ <name>dfs.webhdfs.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.permissions</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.permissions.enabled</name>
+ <value>false</value>
+ </property>
+ <property>
+ <name>dfs.nameservices</name>
+ <value>ns1</value>
+ </property>
+ <property>
+ <name>dfs.blocksize</name>
+ <value>134217728</value>
+ </property>
+ <property>
+ <name>dfs.ha.namenodes.ns1</name>
+ <value>nn1,nn2</value>
+ </property>
+ <!-- RPC address of nn1 (the node where nn1 runs) -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn1</name>
+ <value>192.168.20.223:9000</value>
+ </property>
+ <!-- HTTP address of nn1 (for external access) -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn1</name>
+ <value>192.168.20.223:50070</value>
+ </property>
+ <!-- RPC address of nn2 (the node where nn2 runs) -->
+ <property>
+ <name>dfs.namenode.rpc-address.ns1.nn2</name>
+ <value>192.168.20.224:9000</value>
+ </property>
+ <!-- HTTP address of nn2 (for external access) -->
+ <property>
+ <name>dfs.namenode.http-address.ns1.nn2</name>
+ <value>192.168.20.224:50070</value>
+ </property>
+ <!-- Where the NameNode's edit log is stored on the JournalNodes (usually co-located with ZooKeeper) -->
+ <property>
+ <name>dfs.namenode.shared.edits.dir</name>
+ <value>qjournal://192.168.20.223:8485;192.168.20.224:8485;192.168.20.225:8485/ns1</value>
+ </property>
+ <!-- Local directory where the JournalNode stores its data -->
+ <property>
+ <name>dfs.journalnode.edits.dir</name>
+ <value>/data/tsg/olap/hadoop/journal</value>
+ </property>
+ <!-- Java class HDFS clients use to reach the NameNodes through a failover proxy and determine which one is active -->
+ <property>
+ <name>dfs.client.failover.proxy.provider.ns1</name>
+ <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
+ </property>
+ <!-- Fencing methods for automatic failover; several methods exist (see the official documentation). sshfence logs in remotely and kills the stale process; shell(true) is a fallback that always reports success. -->
+ <property>
+ <name>dfs.ha.fencing.methods</name>
+ <value>sshfence
+ shell(true)</value>
+ </property>
+ <!-- Passwordless SSH private key, needed only when the sshfence fencing method is used -->
+ <property>
+ <name>dfs.ha.fencing.ssh.private-key-files</name>
+ <value>/root/.ssh/id_rsa</value>
+ </property>
+ <!-- Timeout for the sshfence fencing method; like the property above, it can be omitted when a shell script handles the switchover -->
+ <property>
+ <name>dfs.ha.fencing.ssh.connect-timeout</name>
+ <value>30000</value>
+ </property>
+ <!-- Enable automatic failover; may be left unset if automatic failover is not used -->
+ <property>
+ <name>dfs.ha.automatic-failover.enabled</name>
+ <value>true</value>
+ </property>
+ <property>
+ <name>dfs.datanode.max.transfer.threads</name>
+ <value>8192</value>
+ </property>
+ <!-- Number of NameNode handler threads for RPC requests; increasing it costs little extra resource -->
+ <property>
+ <name>dfs.namenode.handler.count</name>
+ <value>30</value>
+ </property>
+ <!-- Number of DataNode handler threads for RPC requests; increasing it uses more memory -->
+ <property>
+ <name>dfs.datanode.handler.count</name>
+ <value>40</value>
+ </property>
+ <!-- Bandwidth the balancer may use -->
+ <property>
+ <name>dfs.balance.bandwidthPerSec</name>
+ <value>104857600</value>
+ </property>
+ <!-- Reserved disk space that HDFS will not use, in bytes -->
+ <property>
+ <name>dfs.datanode.du.reserved</name>
+ <value>53687091200</value>
+ </property>
+ <!-- DataNode-to-NameNode connection timeout in milliseconds: 2 * heartbeat.recheck.interval + 30000 -->
+ <property>
+ <name>heartbeat.recheck.interval</name>
+ <value>100000</value>
+ </property>
+</configuration>
+
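Because fs.defaultFS (core-site.xml) and dfs.nameservices above expose HDFS through the logical nameservice ns1, clients should address ns1 rather than a single NameNode so paths keep resolving after a failover. A minimal sketch assuming Flink 1.13's CheckpointConfig API; the checkpoint directory below is a hypothetical path, not taken from this commit:

    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

    public class Ns1CheckpointSketch {
        public static void main(String[] args) throws Exception {
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
            // Checkpoint every 60 s through the HA nameservice "ns1" defined in
            // core-site.xml / hdfs-site.xml; the directory is illustrative only.
            env.enableCheckpointing(60_000);
            env.getCheckpointConfig().setCheckpointStorage("hdfs://ns1/flink/checkpoints");
            // ... job definition and env.execute() would follow here.
        }
    }

Resolving ns1 requires the Hadoop configuration above to be visible to Flink, for example via HADOOP_CONF_DIR or the commented-out fs.hdfs.hadoopconf entry in flink-conf.yaml.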
diff --git a/MPE/flink/conf/log4j-cli.properties b/MPE/flink/conf/log4j-cli.properties
new file mode 100644
index 0000000..e7add42
--- /dev/null
+++ b/MPE/flink/conf/log4j-cli.properties
@@ -0,0 +1,67 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Allows this configuration to be modified at runtime. The file will be checked every 30 seconds.
+monitorInterval=30
+
+rootLogger.level = INFO
+rootLogger.appenderRef.file.ref = FileAppender
+
+# Log all infos in the given file
+appender.file.name = FileAppender
+appender.file.type = FILE
+appender.file.append = false
+appender.file.fileName = ${sys:log.file}
+appender.file.layout.type = PatternLayout
+appender.file.layout.pattern = [%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %-5p %-60c %x - %m%n
+
+# Log output from org.apache.flink.yarn to the console. This is used by the
+# CliFrontend class when using a per-job YARN cluster.
+logger.yarn.name = org.apache.flink.yarn
+logger.yarn.level = INFO
+logger.yarn.appenderRef.console.ref = ConsoleAppender
+logger.yarncli.name = org.apache.flink.yarn.cli.FlinkYarnSessionCli
+logger.yarncli.level = INFO
+logger.yarncli.appenderRef.console.ref = ConsoleAppender
+logger.hadoop.name = org.apache.hadoop
+logger.hadoop.level = INFO
+logger.hadoop.appenderRef.console.ref = ConsoleAppender
+
+# Make sure hive logs go to the file.
+logger.hive.name = org.apache.hadoop.hive
+logger.hive.level = INFO
+logger.hive.additivity = false
+logger.hive.appenderRef.file.ref = FileAppender
+
+# Log output from org.apache.flink.kubernetes to the console.
+logger.kubernetes.name = org.apache.flink.kubernetes
+logger.kubernetes.level = INFO
+logger.kubernetes.appenderRef.console.ref = ConsoleAppender
+
+appender.console.name = ConsoleAppender
+appender.console.type = CONSOLE
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %-5p %-60c %x - %m%n
+
+# suppress the warning that hadoop native libraries are not loaded (irrelevant for the client)
+logger.hadoopnative.name = org.apache.hadoop.util.NativeCodeLoader
+logger.hadoopnative.level = OFF
+
+# Suppress the irrelevant (wrong) warnings from the Netty channel handler
+logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+logger.netty.level = OFF
diff --git a/MPE/flink/conf/log4j-console.properties b/MPE/flink/conf/log4j-console.properties
new file mode 100644
index 0000000..499839e
--- /dev/null
+++ b/MPE/flink/conf/log4j-console.properties
@@ -0,0 +1,66 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Allows this configuration to be modified at runtime. The file will be checked every 30 seconds.
+monitorInterval=30
+
+# This affects logging for both user code and Flink
+rootLogger.level = INFO
+rootLogger.appenderRef.console.ref = ConsoleAppender
+rootLogger.appenderRef.rolling.ref = RollingFileAppender
+
+# Uncomment this if you want to _only_ change Flink's logging
+#logger.flink.name = org.apache.flink
+#logger.flink.level = INFO
+
+# The following lines keep the log level of common libraries/connectors on
+# log level INFO. The root logger does not override this. You have to manually
+# change the log levels here.
+logger.akka.name = akka
+logger.akka.level = INFO
+logger.kafka.name= org.apache.kafka
+logger.kafka.level = INFO
+logger.hadoop.name = org.apache.hadoop
+logger.hadoop.level = INFO
+logger.zookeeper.name = org.apache.zookeeper
+logger.zookeeper.level = INFO
+
+# Log all infos to the console
+appender.console.name = ConsoleAppender
+appender.console.type = CONSOLE
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %-5p %-60c %x - %m%n
+
+# Log all infos in the given rolling file
+appender.rolling.name = RollingFileAppender
+appender.rolling.type = RollingFile
+appender.rolling.append = true
+appender.rolling.fileName = ${sys:log.file}
+appender.rolling.filePattern = ${sys:log.file}.%i
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = [%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %-5p %-60c %x - %m%n
+appender.rolling.policies.type = Policies
+appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.rolling.policies.size.size=100MB
+appender.rolling.policies.startup.type = OnStartupTriggeringPolicy
+appender.rolling.strategy.type = DefaultRolloverStrategy
+appender.rolling.strategy.max = ${env:MAX_LOG_FILE_NUMBER:-10}
+
+# Suppress the irrelevant (wrong) warnings from the Netty channel handler
+logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+logger.netty.level = OFF
diff --git a/MPE/flink/conf/log4j-session.properties b/MPE/flink/conf/log4j-session.properties
new file mode 100644
index 0000000..9044140
--- /dev/null
+++ b/MPE/flink/conf/log4j-session.properties
@@ -0,0 +1,40 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Allows this configuration to be modified at runtime. The file will be checked every 30 seconds.
+monitorInterval=30
+
+rootLogger.level = INFO
+rootLogger.appenderRef.console.ref = ConsoleAppender
+
+appender.console.name = ConsoleAppender
+appender.console.type = CONSOLE
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %-5p %-60c %x - %m%n
+
+# Suppress the irrelevant (wrong) warnings from the Netty channel handler
+logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+logger.netty.level = OFF
+logger.zookeeper.name = org.apache.zookeeper
+logger.zookeeper.level = WARN
+logger.curator.name = org.apache.flink.shaded.org.apache.curator.framework
+logger.curator.level = WARN
+logger.runtimeutils.name= org.apache.flink.runtime.util.ZooKeeperUtils
+logger.runtimeutils.level = WARN
+logger.runtimeleader.name = org.apache.flink.runtime.leaderretrieval.ZooKeeperLeaderRetrievalDriver
+logger.runtimeleader.level = WARN
diff --git a/MPE/flink/conf/log4j.properties b/MPE/flink/conf/log4j.properties
new file mode 100644
index 0000000..64293a9
--- /dev/null
+++ b/MPE/flink/conf/log4j.properties
@@ -0,0 +1,59 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# Allows this configuration to be modified at runtime. The file will be checked every 30 seconds.
+monitorInterval=30
+
+# This affects logging for both user code and Flink
+rootLogger.level = ERROR
+rootLogger.appenderRef.file.ref = MainAppender
+
+# Uncomment this if you want to _only_ change Flink's logging
+#logger.flink.name = org.apache.flink
+#logger.flink.level = INFO
+
+# The following lines keep the log level of common libraries/connectors on
+# log level INFO. The root logger does not override this. You have to manually
+# change the log levels here.
+logger.akka.name = akka
+logger.akka.level = INFO
+logger.kafka.name= org.apache.kafka
+logger.kafka.level = INFO
+logger.hadoop.name = org.apache.hadoop
+logger.hadoop.level = INFO
+logger.zookeeper.name = org.apache.zookeeper
+logger.zookeeper.level = INFO
+
+# Log all infos in the given file
+appender.main.name = MainAppender
+appender.main.type = RollingFile
+appender.main.append = true
+appender.main.fileName = ${sys:log.file}
+appender.main.filePattern = ${sys:log.file}.%i
+appender.main.layout.type = PatternLayout
+appender.main.layout.pattern = [%d{yyyy-MM-dd HH:mm:ssZ}{UTC}] %-5p %-60c %x - %m%n
+appender.main.policies.type = Policies
+appender.main.policies.size.type = SizeBasedTriggeringPolicy
+appender.main.policies.size.size = 100MB
+appender.main.policies.startup.type = OnStartupTriggeringPolicy
+appender.main.strategy.type = DefaultRolloverStrategy
+appender.main.strategy.max = ${env:MAX_LOG_FILE_NUMBER:-10}
+
+# Suppress the irrelevant (wrong) warnings from the Netty channel handler
+logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+logger.netty.level = OFF
diff --git a/MPE/flink/conf/log4j2.component.properties b/MPE/flink/conf/log4j2.component.properties
new file mode 100644
index 0000000..2d5d906
--- /dev/null
+++ b/MPE/flink/conf/log4j2.component.properties
@@ -0,0 +1,2 @@
+# Place this file in the conf/ directory of the Flink installation
+log4j2.formatMsgNoLookups=true
diff --git a/MPE/flink/conf/logback-console.xml b/MPE/flink/conf/logback-console.xml
new file mode 100644
index 0000000..62963f3
--- /dev/null
+++ b/MPE/flink/conf/logback-console.xml
@@ -0,0 +1,64 @@
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing, software
+ ~ distributed under the License is distributed on an "AS IS" BASIS,
+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ~ See the License for the specific language governing permissions and
+ ~ limitations under the License.
+ -->
+
+<configuration>
+ <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>[%d{yyyy-MM-dd HH:mm:ssZ,UTC}] [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="rolling" class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <file>${log.file}</file>
+ <append>false</append>
+
+ <rollingPolicy class="ch.qos.logback.core.rolling.FixedWindowRollingPolicy">
+ <fileNamePattern>${log.file}.%i</fileNamePattern>
+ <minIndex>1</minIndex>
+ <maxIndex>10</maxIndex>
+ </rollingPolicy>
+
+ <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
+ <maxFileSize>100MB</maxFileSize>
+ </triggeringPolicy>
+
+ <encoder>
+ <pattern>[%d{yyyy-MM-dd HH:mm:ssZ,UTC}] [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <!-- This affects logging for both user code and Flink -->
+ <root level="INFO">
+ <appender-ref ref="console"/>
+ <appender-ref ref="rolling"/>
+ </root>
+
+ <!-- Uncomment this if you want to only change Flink's logging -->
+ <!--<logger name="org.apache.flink" level="INFO"/>-->
+
+ <!-- The following lines keep the log level of common libraries/connectors on
+ log level INFO. The root logger does not override this. You have to manually
+ change the log levels here. -->
+ <logger name="akka" level="INFO"/>
+ <logger name="org.apache.kafka" level="INFO"/>
+ <logger name="org.apache.hadoop" level="INFO"/>
+ <logger name="org.apache.zookeeper" level="INFO"/>
+
+ <!-- Suppress the irrelevant (wrong) warnings from the Netty channel handler -->
+ <logger name="org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline" level="ERROR"/>
+</configuration>
diff --git a/MPE/flink/conf/logback-session.xml b/MPE/flink/conf/logback-session.xml
new file mode 100644
index 0000000..7c07147
--- /dev/null
+++ b/MPE/flink/conf/logback-session.xml
@@ -0,0 +1,39 @@
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing, software
+ ~ distributed under the License is distributed on an "AS IS" BASIS,
+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ~ See the License for the specific language governing permissions and
+ ~ limitations under the License.
+ -->
+
+<configuration>
+ <appender name="file" class="ch.qos.logback.core.FileAppender">
+ <file>${log.file}</file>
+ <append>false</append>
+ <encoder>
+ <pattern>[%d{yyyy-MM-dd HH:mm:ssZ,UTC}] [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+ <encoder>
+ <pattern>[%d{yyyy-MM-dd HH:mm:ssZ,UTC}] %-5level %logger{60} %X{sourceThread} - %msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <logger name="ch.qos.logback" level="WARN" />
+ <root level="INFO">
+ <appender-ref ref="file"/>
+ <appender-ref ref="console"/>
+ </root>
+</configuration>
diff --git a/MPE/flink/conf/logback.xml b/MPE/flink/conf/logback.xml
new file mode 100644
index 0000000..e1c0d7c
--- /dev/null
+++ b/MPE/flink/conf/logback.xml
@@ -0,0 +1,58 @@
+<!--
+ ~ Licensed to the Apache Software Foundation (ASF) under one
+ ~ or more contributor license agreements. See the NOTICE file
+ ~ distributed with this work for additional information
+ ~ regarding copyright ownership. The ASF licenses this file
+ ~ to you under the Apache License, Version 2.0 (the
+ ~ "License"); you may not use this file except in compliance
+ ~ with the License. You may obtain a copy of the License at
+ ~
+ ~ http://www.apache.org/licenses/LICENSE-2.0
+ ~
+ ~ Unless required by applicable law or agreed to in writing, software
+ ~ distributed under the License is distributed on an "AS IS" BASIS,
+ ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ ~ See the License for the specific language governing permissions and
+ ~ limitations under the License.
+ -->
+
+<configuration>
+ <appender name="file" class="ch.qos.logback.core.FileAppender">
+ <file>${log.file}</file>
+ <append>false</append>
+ <encoder>
+ <pattern>[%d{yyyy-MM-dd HH:mm:ssZ,UTC}] [%thread] %-5level %logger{60} %X{sourceThread} - %msg%n</pattern>
+ </encoder>
+ </appender>
+
+ <!-- This affects logging for both user code and Flink -->
+ <root level="INFO">
+ <appender-ref ref="file"/>
+ </root>
+
+ <!-- Uncomment this if you want to only change Flink's logging -->
+ <!--<logger name="org.apache.flink" level="INFO">-->
+ <!--<appender-ref ref="file"/>-->
+ <!--</logger>-->
+
+ <!-- The following lines keep the log level of common libraries/connectors on
+ log level INFO. The root logger does not override this. You have to manually
+ change the log levels here. -->
+ <logger name="akka" level="INFO">
+ <appender-ref ref="file"/>
+ </logger>
+ <logger name="org.apache.kafka" level="INFO">
+ <appender-ref ref="file"/>
+ </logger>
+ <logger name="org.apache.hadoop" level="INFO">
+ <appender-ref ref="file"/>
+ </logger>
+ <logger name="org.apache.zookeeper" level="INFO">
+ <appender-ref ref="file"/>
+ </logger>
+
+ <!-- Suppress the irrelevant (wrong) warnings from the Netty channel handler -->
+ <logger name="org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline" level="ERROR">
+ <appender-ref ref="file"/>
+ </logger>
+</configuration>
diff --git a/MPE/flink/conf/masters b/MPE/flink/conf/masters
new file mode 100644
index 0000000..3b50106
--- /dev/null
+++ b/MPE/flink/conf/masters
@@ -0,0 +1,2 @@
+192.168.20.223:8080
+192.168.20.224:8080
diff --git a/MPE/flink/conf/workers b/MPE/flink/conf/workers
new file mode 100644
index 0000000..c136f0a
--- /dev/null
+++ b/MPE/flink/conf/workers
@@ -0,0 +1 @@
+192.168.20.225
diff --git a/MPE/flink/conf/yarn-site.xml b/MPE/flink/conf/yarn-site.xml
new file mode 100644
index 0000000..784f511
--- /dev/null
+++ b/MPE/flink/conf/yarn-site.xml
@@ -0,0 +1,224 @@
+<?xml version="1.0"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<configuration>
+
+ <property>
+ <name>yarn.nodemanager.aux-services</name>
+ <value>mapreduce_shuffle</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.enabled</name>
+ <value>true</value>
+ </property>
+
+ <!-- Declare the ResourceManager HA cluster and its two members -->
+ <property>
+ <name>yarn.resourcemanager.cluster-id</name>
+ <value>rsmcluster</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.rm-ids</name>
+ <value>rsm1,rsm2</value>
+ </property>
+
+ <!-- rm1 configuration -->
+ <!-- rm1 hostname -->
+ <property>
+ <name>yarn.resourcemanager.hostname.rsm1</name>
+ <value>192.168.20.223</value>
+ </property>
+
+ <!-- rm1 web application address -->
+ <property>
+ <name>yarn.resourcemanager.webapp.address.rsm1</name>
+ <value>192.168.20.223:8080</value>
+ </property>
+
+ <!-- rm1 scheduler address (default port 8030) -->
+ <property>
+ <name>yarn.resourcemanager.scheduler.address.rsm1</name>
+ <value>192.168.20.223:8030</value>
+ </property>
+
+ <!-- rm1 resource-tracker address (default port 8031) -->
+ <property>
+ <name>yarn.resourcemanager.resource-tracker.address.rsm1</name>
+ <value>192.168.20.223:8031</value>
+ </property>
+
+ <!-- rm1 applications manager interface address (default port 8032) -->
+ <property>
+ <name>yarn.resourcemanager.address.rsm1</name>
+ <value>192.168.20.223:8032</value>
+ </property>
+
+ <!-- rm1 admin address (default port 8033) -->
+ <property>
+ <name>yarn.resourcemanager.admin.address.rsm1</name>
+ <value>192.168.20.223:8033</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.admin.address.rsm1</name>
+ <value>192.168.20.223:23142</value>
+ </property>
+
+ <!-- rm2 configuration -->
+ <property>
+ <name>yarn.resourcemanager.hostname.rsm2</name>
+ <value>192.168.20.224</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.webapp.address.rsm2</name>
+ <value>192.168.20.224:8080</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.scheduler.address.rsm2</name>
+ <value>192.168.20.224:8030</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.resource-tracker.address.rsm2</name>
+ <value>192.168.20.224:8031</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.address.rsm2</name>
+ <value>192.168.20.224:8032</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.admin.address.rsm2</name>
+ <value>192.168.20.224:8033</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.ha.admin.address.rsm2</name>
+ <value>192.168.20.224:23142</value>
+ </property>
+
+ <!-- ZooKeeper quorum address -->
+ <property>
+ <name>yarn.resourcemanager.zk-address</name>
+ <value>192.168.20.221:2181,192.168.20.222:2181,192.168.20.223:2181</value>
+ </property>
+
+ <!-- Enable ResourceManager recovery so that running applications survive an RM failure (default false) -->
+ <property>
+ <name>yarn.resourcemanager.recovery.enabled</name>
+ <value>true</value>
+ </property>
+
+ <!-- Enable NodeManager recovery (default false) -->
+ <property>
+ <name>yarn.nodemanager.recovery.enabled</name>
+ <value>true</value>
+ </property>
+
+ <!-- Local filesystem directory where the NodeManager saves its running state -->
+ <property>
+ <name>yarn.nodemanager.recovery.dir</name>
+ <value>/data/tsg/olap/hadoop-2.7.1/yarn</value>
+ </property>
+
+ <property>
+ <name>yarn.resourcemanager.store.class</name>
+ <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
+ </property>
+
+ <!-- NodeManager RPC address. The default ${yarn.nodemanager.hostname}:0 uses an ephemeral port that changes after a restart; pinning the port here keeps NodeManager restart/recovery working -->
+ <property>
+ <name>yarn.nodemanager.address</name>
+ <value>${yarn.nodemanager.hostname}:9923</value>
+ </property>
+
+ <property>
+ <name>yarn.log-aggregation-enable</name>
+ <value>true</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
+ <value>3600</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.remote-app-log-dir</name>
+ <value>/data/tsg/olap/hadoop-2.7.1/logs/app-logs/</value>
+ </property>
+
+ <!-- Physical memory, in MB, that the NodeManager may allocate to containers (default 8192) -->
+ <property>
+ <name>yarn.nodemanager.resource.memory-mb</name>
+ <value>51200</value>
+ </property>
+
+ <!-- Minimum allocation per container request at the RM, in MB (default 1024) -->
+ <property>
+ <name>yarn.scheduler.minimum-allocation-mb</name>
+ <value>1024</value>
+ </property>
+
+ <!-- Maximum allocation per container request at the RM, in MB; usually set equal to yarn.nodemanager.resource.memory-mb (default 8192) -->
+ <property>
+ <name>yarn.scheduler.maximum-allocation-mb</name>
+ <value>51200</value>
+ </property>
+
+ <!-- Number of vcores that may be allocated to containers. Used by the RM scheduler when allocating resources; it does not limit the physical cores YARN containers can use. Default 8; usually set to the server's total CPU core count -->
+ <property>
+ <name>yarn.nodemanager.resource.cpu-vcores</name>
+ <value>48</value>
+ </property>
+
+ <!-- Minimum allocation per container request at the RM, in vcores (default 1) -->
+ <property>
+ <name>yarn.scheduler.minimum-allocation-vcores</name>
+ <value>1</value>
+ </property>
+
+ <!-- Maximum allocation per container request at the RM, in vcores (default 32); usually set slightly below yarn.nodemanager.resource.cpu-vcores, and a job's slot count should not exceed it -->
+ <property>
+ <name>yarn.scheduler.maximum-allocation-vcores</name>
+ <value>48</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.vmem-check-enabled</name>
+ <value>false</value>
+ </property>
+
+ <property>
+ <name>yarn.nodemanager.pmem-check-enabled</name>
+ <value>false</value>
+ </property>
+
+ <!-- Maximum number of ApplicationMaster attempts; defaults to 2 when HA is configured and can be increased for production -->
+ <property>
+ <name>yarn.resourcemanager.am.max-attempts</name>
+ <value>10000</value>
+ </property>
+
+ <property>
+ <name>yarn.log.server.url</name>
+ <value>http://192.168.20.223:19888/jobhistory/logs</value>
+ </property>
+
+</configuration>
+
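Rough sizing check for the values above: each NodeManager advertises 51200 MB and 48 vcores, i.e. about 51200 / 48 ≈ 1066 MB per vcore, and any container Flink requests must fall between yarn.scheduler.minimum-allocation-mb (1024 MB) and yarn.scheduler.maximum-allocation-mb (51200 MB); the 1024M jobmanager/taskmanager process sizes in flink-conf.yaml therefore land exactly on the minimum allocation.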
diff --git a/MPE/flink/conf/zoo.cfg b/MPE/flink/conf/zoo.cfg
new file mode 100644
index 0000000..f598997
--- /dev/null
+++ b/MPE/flink/conf/zoo.cfg
@@ -0,0 +1,36 @@
+################################################################################
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+################################################################################
+
+# The number of milliseconds of each tick
+tickTime=2000
+
+# The number of ticks that the initial synchronization phase can take
+initLimit=10
+
+# The number of ticks that can pass between sending a request and getting an acknowledgement
+syncLimit=5
+
+# The directory where the snapshot is stored.
+# dataDir=/tmp/zookeeper
+
+# The port at which the clients will connect
+clientPort=2181
+
+# ZooKeeper quorum peers
+server.1=localhost:2888:3888
+# server.2=host:peer-port:leader-port