author	wangkuan <[email protected]>	2020-09-29 18:09:36 +0800
committer	wangkuan <[email protected]>	2020-09-29 18:09:36 +0800
commit	466f97d47151b9b57e6fa6345f3b905b0be8ecca (patch)
tree	4e093528010015dc96a6217264921008a76a0369
parent	ed8be84005d5fe13c4170fe003f126d4cd602320 (diff)
Remove redundant log output
-rw-r--r--	config/application.properties	8
-rw-r--r--	config/log4j2-dev.xml	155
-rw-r--r--	pom.xml	4
-rw-r--r--	src/main/java/com/mesa/reportservice/configuration/HbaseFactory.java	9
-rw-r--r--	src/main/java/com/mesa/reportservice/controller/ScheduledResultController.java	5
-rw-r--r--	src/main/java/com/mesa/reportservice/service/impl/ExcuteProcessServiceImpl.java	22
-rw-r--r--	src/main/java/com/mesa/reportservice/service/impl/ExcuteserviceImpl.java	4
-rw-r--r--	src/main/resources/config/log4j2-dev.xml	155
8 files changed, 120 insertions, 242 deletions
diff --git a/config/application.properties b/config/application.properties
index 5ac7f6c..961f4a4 100644
--- a/config/application.properties
+++ b/config/application.properties
@@ -7,8 +7,8 @@ globle.job_thread=2
#HBase HTTP port
#HBase table name; normally does not need changing
hbase.table=tsg:report_result
-hbase.zookeeper_quorum=192.168.40.224
-hbase.zookeeper_property_clientPort=2181
+hbase.zookeeper_quorum=192.168.40.222,192.168.40.223
+hbase.zookeeper_property_clientPort=2182
hbase.zookeeper_znode_parent=/hbase
hbase.client_retries_number=3
hbase.rpc_timeout=100000
@@ -24,9 +24,9 @@ spring.datasource.username=root
spring.datasource.password=111111
#ZooKeeper cluster IPs
-zookeeper.connectString=192.168.40.203:2181
+zookeeper.connectString=192.168.44.34,192.168.44.35:2181
#ZooKeeper deployment mode: 0 (cluster) 1 (standalone)
-zookeeper.open=1
+zookeeper.open=0
zookeeper.retryCount=5
zookeeper.elapsedTimeMs=10000
zookeeper.sessionTimeoutMs=50000
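Side note on the new connect string: only the second host carries an explicit port. The ZooKeeper client falls back to its default port (2181) for bare hosts, so this still resolves if both nodes listen on 2181, but spelling out each host:port pair is less surprising. A minimal sketch of how these properties would typically feed a Curator client; the mapping below is an assumption for illustration, not the project's actual code:

    import org.apache.curator.framework.CuratorFramework;
    import org.apache.curator.framework.CuratorFrameworkFactory;
    import org.apache.curator.retry.ExponentialBackoffRetry;

    public class ZkClientSketch {
        public static CuratorFramework build() {
            // Each host should normally carry its own port; a bare host falls
            // back to the ZooKeeper client default (2181).
            String connectString = "192.168.44.34:2181,192.168.44.35:2181";
            // zookeeper.elapsedTimeMs / zookeeper.retryCount from application.properties
            ExponentialBackoffRetry retry = new ExponentialBackoffRetry(10000, 5);
            CuratorFramework client = CuratorFrameworkFactory.builder()
                    .connectString(connectString)
                    .sessionTimeoutMs(50000)   // zookeeper.sessionTimeoutMs
                    .retryPolicy(retry)
                    .build();
            client.start();
            return client;
        }
    }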
diff --git a/config/log4j2-dev.xml b/config/log4j2-dev.xml
index 1d4b967..94f8476 100644
--- a/config/log4j2-dev.xml
+++ b/config/log4j2-dev.xml
@@ -1,117 +1,56 @@
<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
-<!--
- status : controls log4j2's own internal status output; optional. Set to TRACE to see detailed log4j2 internals
- monitorInterval : log4j2 can detect config changes and reconfigure itself; this sets the check interval in seconds, i.e. the file is re-read every few seconds
- Log levels: TRACE < DEBUG < INFO < WARN < ERROR < FATAL
- If set to WARN, messages below WARN are not output
--->
-<Configuration status="INFO" monitorInterval="30">
-
- <!-- Parameter configuration -->
+ <!-- Logging parameter configuration -->
<Properties>
- <!-- Log file output directory -->
- <Property name="LOG_HOME">./logs</Property>
- <!-- Log output file name -->
- <property name="FILE_NAME">galaxy-report-service</property>
- <!-- Log message format -->
- <property name="console_pattern_layout">
- [%d{yyyy-MM-dd HH:mm:ss}] [%p] [Thread:%t] %l %x - %m%n
- </property>
- <property name="pattern_layout">
- [%d{yyyy-MM-dd HH:mm:ss}] [%p] [Thread:%t] %l %x - %m%n
- </property>
+ <!-- Roll and compress the log file every 5M -->
+ <property name="LOG_SIZE">5M</property>
+ <!-- Keep at most 10 compressed files -->
+ <property name="LOG_NUMS">10</property>
+ <!-- Log level -->
+ <property name="LOG_LEVEL">info</property>
+ <!-- Log file path -->
+ <property name="LOG_PATH">logs</property>
+ <!-- Log file name -->
+ <property name="LOG_FILE_NAME">galaxy-report-service</property>
+ <!-- Log output pattern -->
+ <property name="LOG_PATTERN">[%d{yyyy-MM-dd HH:mm:ss}] [%p] [Thread:%t] %l %x - %m%n</property>
</Properties>
- <!-- Appender configuration -->
- <Appenders>
- <!-- Console output -->
- <Console name="Console" target="SYSTEM_OUT">
- <!-- ThresholdFilter: log filtering.
- An event at or above the configured level is a match; otherwise a mismatch.
- ACCEPT: emit the event; DENY: drop it and stop filtering; NEUTRAL: do not emit, run the next filter -->
- <!--<ThresholdFilter level="INFO" onMatch="ACCEPT" onMismatch="DENY"/>-->
- <!-- Output pattern:
- %d{yyyy-MM-dd HH:mm:ss, SSS} : event timestamp, down to the millisecond
- %-5p (level) : log level, left-aligned, padded to a fixed width of 5 characters
- %c (logger) : logger name (%logger)
- %t (thread) : current thread name
- %m : log message, i.e. logger.info("message")
- %n : line separator
- %C : Java class name (%F)
- %L : line number
- %M : method name
- %l : location of the logging statement: class, method, file and line
- hostName : local host name
- hostAddress : local IP address
- -->
- <PatternLayout pattern="${console_pattern_layout}"/>
+ <appenders>
+ <Console name="consoleSystemOutAppender" target="SYSTEM_OUT">
+ <ThresholdFilter level="DEBUG" onMatch="ACCEPT" onMismatch="DENY"/>
+ <PatternLayout pattern="${LOG_PATTERN}"/>
</Console>
- <!-- File output: logs everything; the file is cleared on each run (controlled by the append attribute), suited to temporary testing -->
- <!--<File name="log" fileName="log/test.log" append="false">
- <PatternLayout pattern="%d{HH:mm:ss.SSS} %-5level %class{36} %L %M - %msg%xEx%n"/>
- </File>-->
-
- <!--
- Rolling log file configuration: once the current log file exceeds the threshold, a new file is started.
- Logs everything; each chunk of `size` bytes is archived under a year-month folder and compressed.
-
- fileName : location and name of the current log file
- filePattern : how the file is moved and renamed on rollover
- SizeBasedTriggeringPolicy : triggers rollover when the file exceeds the given size
- DefaultRolloverStrategy : maximum number of files to keep
- TimeBasedTriggeringPolicy : used together with filePattern.
- Note the rename rule in filePattern is ${FILE_NAME}_%d{yyyy-MM-dd}_%i; the smallest time unit is dd (day),
- so with a TimeBasedTriggeringPolicy interval of 1 a new file is created each day
- -->
- <RollingRandomAccessFile name="ALL"
- fileName="${LOG_HOME}/${FILE_NAME}.log"
- filePattern="${LOG_HOME}/${FILE_NAME}.log.%d{yyyy-MM-dd}_%i">
- <!--<Filters>
- <ThresholdFilter level="INFO" onMatch="ACCEPT" onMismatch="DENY"/>
- </Filters>-->
- <PatternLayout pattern="${pattern_layout}"/>
+ <RollingFile name="rollingFileAllAppender"
+ fileName="${LOG_PATH}/${LOG_FILE_NAME}.log"
+ filePattern="${LOG_PATH}/history/$${date:yyyy-MM-dd}/${LOG_FILE_NAME}-%d{yyyy-MM-dd}-%i.log.gz">
+ <PatternLayout pattern="${LOG_PATTERN}"/>
<Policies>
- <TimeBasedTriggeringPolicy interval="1"/>
- <!--<SizeBasedTriggeringPolicy size="100MB"/>-->
+ <SizeBasedTriggeringPolicy size="${LOG_SIZE}"/>
+ <TimeBasedTriggeringPolicy interval="1" modulate="true"/>
</Policies>
- <DefaultRolloverStrategy max="20"/>
- </RollingRandomAccessFile>
-
- <!-- Async logging configuration -->
- <Async name="AsyncAll">
- <AppenderRef ref="Console"/>
- <AppenderRef ref="ALL"/>
- </Async>
-
- </Appenders>
-
- <!-- Logger configuration -->
- <Loggers>
-
- <Logger name="druid.sql" level="INFO" additivity="false">
- <AppenderRef ref="AsyncAll"/>
- </Logger>
- <Logger name="druid.sql.Connection" level="INFO" additivity="false">
- <AppenderRef ref="AsyncAll"/>
- </Logger>
- <Logger name="net.sf.ehcache" level="INFO" additivity="false">
- <AppenderRef ref="AsyncAll"/>
- </Logger>
-
- <!--
- The Logger node configures logging for a specific scope, e.g. a different level for classes under a given package.
- level : output level; eight levels from low to high: All < Trace < Debug < Info < Warn < Error < Fatal < OFF.
- name : the class or package (full path) this Logger applies to; inherits from the Root node.
- AppenderRef : child node naming the Appender this Logger writes to; if omitted, it inherits from Root.
- If one is named, output goes to both that Appender and Root's Appenders;
- set additivity="false" on the Logger to write only to the custom Appender.
- -->
- <Root level="INFO">
- <AppenderRef ref="Console"/>
- <AppenderRef ref="ALL"/>
- </Root>
- </Loggers>
-
-</Configuration>
\ No newline at end of file
+ <Filters>
+ <ThresholdFilter level="all" onMatch="ACCEPT" onMismatch="DENY"/>
+ </Filters>
+ <DefaultRolloverStrategy max="${LOG_NUMS}">
+ <Delete basePath="${LOG_PATH}/history" maxDepth="1">
+ <IfFileName glob="*.log.gz">
+ <IfLastModified age="90d">
+ <IfAny>
+ <IfAccumulatedFileSize exceeds="200 GB" />
+ </IfAny>
+ </IfLastModified>
+ </IfFileName>
+ </Delete>
+ </DefaultRolloverStrategy>
+ </RollingFile>
+ </appenders>
+ <loggers>
+ <root level="${LOG_LEVEL}">
+ <appender-ref ref="consoleSystemOutAppender"/>
+ <appender-ref ref="rollingFileAllAppender"/>
+ </root>
+ </loggers>
+</configuration>
\ No newline at end of file
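For reference, under the new configuration the root logger level (${LOG_LEVEL}=info) is applied before any appender filter runs, so the console appender's ThresholdFilter level="DEBUG" never actually sees debug events. Files roll when they pass 5M or at the day boundary, are gzipped into logs/history/<yyyy-MM-dd>/ with at most 10 archives per rollover period, and, as I read the nested Delete conditions, an archive is removed only when it matches *.log.gz, is older than 90 days, and the accumulated archive size exceeds 200 GB. A minimal usage sketch (hypothetical class name) of what each level does under this config:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class LoggingSketch {
        private static final Logger logger = LogManager.getLogger(LoggingSketch.class);

        public static void main(String[] args) {
            logger.debug("dropped: the root level is 'info', so this never reaches an appender");
            logger.info("written to the console and to logs/galaxy-report-service.log");
            logger.error("same destinations; gzipped into logs/history/<date>/ on rollover");
        }
    }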
diff --git a/pom.xml b/pom.xml
index 3a65ce4..6bdd092 100644
--- a/pom.xml
+++ b/pom.xml
@@ -10,7 +10,7 @@
</parent>
<groupId>com.mesa</groupId>
<artifactId>galaxy-report-service</artifactId>
- <version>20.09.21-report</version>
+ <version>20.09.29</version>
<name>galaxy-report-service</name>
<description>Demo project for Spring Boot</description>
@@ -219,7 +219,7 @@
<forceTags>true</forceTags>
<imageTags>
- <imageTag>20.09.21</imageTag>
+ <imageTag>20.09.29</imageTag>
</imageTags>
<!-- Remote docker build, used by the dockerfile -->
<dockerHost>http://192.168.40.153:2375</dockerHost>
diff --git a/src/main/java/com/mesa/reportservice/configuration/HbaseFactory.java b/src/main/java/com/mesa/reportservice/configuration/HbaseFactory.java
index 0861eff..ddbf5d3 100644
--- a/src/main/java/com/mesa/reportservice/configuration/HbaseFactory.java
+++ b/src/main/java/com/mesa/reportservice/configuration/HbaseFactory.java
@@ -20,6 +20,8 @@ import java.util.concurrent.Executors;
public class HbaseFactory {
private final HbaseProperties hbproperties;
+ private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass());
+
@Autowired
public HbaseFactory(HbaseProperties hbproperties) {
this.hbproperties = hbproperties;
@@ -33,10 +35,13 @@ public class HbaseFactory {
conf = HBaseConfiguration.create();
conf.set("hbase.zookeeper.quorum", hbproperties.getZookeeper_quorum());
conf.set("hbase.zookeeper.property.clientPort", hbproperties.getZookeeper_property_clientPort());
- conf.set("hbase.zookeeper.znode.parent", hbproperties.getZookeeper_znode_parent());
+ conf.set("zookeeper.znode.parent", hbproperties.getZookeeper_znode_parent());
conf.set("hbase.client.retries.number", hbproperties.getClient_retries_number());
conf.set("hbase.rpc.timeout", hbproperties.getRpc_timeout());
conf.set("hbase.client.keyvalue.maxsize", "1024000000");
+ conf.set("zookeeper.recovery.retry", "3");
+
+
}
return conf;
@@ -51,7 +56,7 @@ public class HbaseFactory {
con = ConnectionFactory.createConnection(Conf,executor);
} catch (IOException e) {
- e.printStackTrace();
+ logger.error(e.toString());
}
return con;
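The key rename in this hunk matters: the HBase client reads the root znode from `zookeeper.znode.parent`, so the old `hbase.zookeeper.znode.parent` setting was silently ignored. A condensed sketch of the resulting setup, with values copied from application.properties for illustration (not the class verbatim):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class HbaseConnSketch {
        public static Connection connect() throws java.io.IOException {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "192.168.40.222,192.168.40.223");
            conf.set("hbase.zookeeper.property.clientPort", "2182");
            conf.set("zookeeper.znode.parent", "/hbase"); // corrected key
            conf.set("zookeeper.recovery.retry", "3");    // added by this commit
            return ConnectionFactory.createConnection(conf);
        }
    }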
diff --git a/src/main/java/com/mesa/reportservice/controller/ScheduledResultController.java b/src/main/java/com/mesa/reportservice/controller/ScheduledResultController.java
index 245ece3..048005e 100644
--- a/src/main/java/com/mesa/reportservice/controller/ScheduledResultController.java
+++ b/src/main/java/com/mesa/reportservice/controller/ScheduledResultController.java
@@ -48,8 +48,8 @@ public class ScheduledResultController {
for (JobEntity jobEntity : joblist) {
String sql = jobEntity.getQuerySql().trim();
sql = sql.replace("$exe_time", "toDateTime('" + jobEntity.getIssuedTime().trim() + "')");
- //sql = sql.replace("$start_time", "toDateTime('" + jobEntity.getStartTime().trim() + "')");
- //sql = sql.replace("$end_time", "toDateTime('" + jobEntity.getEndTime().trim() + "')");
+ //sql = sql.replace("$start_time", "toDateTime('" + jobEntity.getStartTime().trim() + "')");
+ //sql = sql.replace("$end_time", "toDateTime('" + jobEntity.getEndTime().trim() + "')");
String queryid = DigestUtils.md5Hex(jobEntity.getResultId() + sql);
jobEntity.setQuery_id(queryid);
@@ -85,7 +85,6 @@ public class ScheduledResultController {
int rows = GlobelConfig.job_thread - GlobelConfig.mapresult.size();
for (Map.Entry<String, JobEntity> entry : GlobelConfig.mapresult.entrySet()) {
logger.info("----key = " + entry.getKey() + ", value = " + entry.getValue().getStatus());
- System.out.print("----key = " + entry.getKey() + ", value = " + entry.getValue().getStatus());
}
if (rows > 0) {
List<JobEntity> jobs = ms.getJobTask(rows);
diff --git a/src/main/java/com/mesa/reportservice/service/impl/ExcuteProcessServiceImpl.java b/src/main/java/com/mesa/reportservice/service/impl/ExcuteProcessServiceImpl.java
index 9c0fc67..a6bd409 100644
--- a/src/main/java/com/mesa/reportservice/service/impl/ExcuteProcessServiceImpl.java
+++ b/src/main/java/com/mesa/reportservice/service/impl/ExcuteProcessServiceImpl.java
@@ -34,8 +34,6 @@ public class ExcuteProcessServiceImpl implements ExcuteProcessService {
public void updateResultMessage(JobEntity je) {
- logger.info("startget++++++" + je.getQuery_id());
-
try {
if (je.getIsValid() == 0) {
je.setStatus(9);
@@ -49,44 +47,44 @@ public class ExcuteProcessServiceImpl implements ExcuteProcessService {
je.setExcuteDetail("SUCCESS");
je.setExcuteProcess(100);
je.setStatus(2);
- logger.info("success save to hbase resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
+ logger.info("success save to hbase query_id="+je.getQuery_id()+" resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
} else {
je.setStatus(5);
je.setExcuteDetail("Write Data Error");
mons.addFail();
- logger.error("save hbase error resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
+ logger.error("save hbase error query_id="+je.getQuery_id()+" resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
}
break;
case 400001:
je.setStatus(3);
je.setExcuteDetail("Param Syntax Error");
- logger.error("Param Syntax Error resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
+ logger.error("Param Syntax Error query_id="+je.getQuery_id()+" resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
break;
case 400010:
je.setStatus(3);
je.setExcuteDetail("SQL Syntax Error");
- logger.error("SQL Syntax Error resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
+ logger.error("SQL Syntax Error query_id="+je.getQuery_id()+" resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
break;
case 500001:
je.setStatus(4);
je.setExcuteDetail("SQL Execution Error");
- logger.error("SQL Execution Error resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
+ logger.error("SQL Execution Error query_id="+je.getQuery_id()+" resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
break;
case 500010:
je.setStatus(6);
je.setExcuteDetail("Engine Statistics Error");
- logger.error("Engine Statistics Error resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
+ logger.error("Engine Statistics Error query_id="+je.getQuery_id()+" resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
break;
case 555999:
je.setStatus(7);
je.setExcuteDetail("Unknow Error");
- logger.error("Unknow Error resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
+ logger.error("Unknow Error query_id="+je.getQuery_id()+" resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
break;
default:
je.setStatus(8);
je.setExcuteDetail("System Error");
- logger.error("System Error resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
+ logger.error("System Error query_id="+je.getQuery_id()+" resultid =" + je.getResultId() + " excutesql=" + je.getQuerySql());
}
}
@@ -101,7 +99,7 @@ public class ExcuteProcessServiceImpl implements ExcuteProcessService {
je.setStatus(10);
je.setExcuteDetail("Database Error");
ms.updateProcesses(je);
- logger.error("save database error queryid=" + je.getResultId() + e.toString());
+ logger.error("save database error resultid =" + je.getResultId()+" queryid=" + je.getResultId() + e.toString());
} finally {
saveToMonitor(je);
@@ -115,9 +113,7 @@ public class ExcuteProcessServiceImpl implements ExcuteProcessService {
public void killQuery(JobEntity jobEntity) {
String killurl = ClickhouseConfig.getKillUrl(jobEntity.getQuery_id());
try {
- logger.info("startkill++++++" + jobEntity.getQuery_id());
cs.QuerySystemForDelete(killurl);
- logger.info("endkill=======" + jobEntity.getQuery_id());
} catch (Exception e) {
logger.error(e.toString());
diff --git a/src/main/java/com/mesa/reportservice/service/impl/ExcuteserviceImpl.java b/src/main/java/com/mesa/reportservice/service/impl/ExcuteserviceImpl.java
index e732e9d..7b6d6a4 100644
--- a/src/main/java/com/mesa/reportservice/service/impl/ExcuteserviceImpl.java
+++ b/src/main/java/com/mesa/reportservice/service/impl/ExcuteserviceImpl.java
@@ -86,7 +86,7 @@ public class ExcuteserviceImpl implements ExcuteService {
job.setResult(hr.getBody());
job.setExcuteTime((int) Float.parseFloat(rows.get("elapsed").toString()));
job.setExcute_status(Integer.parseInt(mapresult.get("code").toString()));
- logger.info("success queryid=" + queryid + " sql=" + sql);
+ logger.info("success resultid = "+job.getResultId()+" queryid=" + queryid + " sql=" + sql);
}
} catch (SocketTimeoutException e) {
@@ -95,7 +95,7 @@ public class ExcuteserviceImpl implements ExcuteService {
if (k == 0) {
job.setExcute_status(500001);
job.setExcuteDetail("SQL Execution Error excute query time out");
- logger.info("timeout queryid=" + queryid + " sql=" + sql);
+ logger.info("timeout resultid = "+job.getResultId()+" queryid=" + queryid + " sql=" + sql);
} else {
logger.info("Socket warn " + e.toString() + "retry time " + (3 - k));
}
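The pattern throughout these hunks, appending query_id/resultid to every message via string concatenation, pays the formatting cost even when the level is disabled. If the services move to the Log4j2 API (HbaseFactory currently uses the log4j 1.x Logger, where this is unavailable), parameterized messages defer that work; a sketch, not part of this commit:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class LogParamSketch {
        private static final Logger logger = LogManager.getLogger(LogParamSketch.class);

        static void logSuccess(String resultId, String queryId, String sql) {
            // Equivalent to "success resultid = " + resultId + " queryid=" + queryId + " sql=" + sql,
            // but the string is only built when INFO is enabled.
            logger.info("success resultid={} queryid={} sql={}", resultId, queryId, sql);
        }
    }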
diff --git a/src/main/resources/config/log4j2-dev.xml b/src/main/resources/config/log4j2-dev.xml
index 1d4b967..94f8476 100644
--- a/src/main/resources/config/log4j2-dev.xml
+++ b/src/main/resources/config/log4j2-dev.xml
@@ -1,117 +1,56 @@
<?xml version="1.0" encoding="UTF-8"?>
+<configuration>
-<!--
- status : controls log4j2's own internal status output; optional. Set to TRACE to see detailed log4j2 internals
- monitorInterval : log4j2 can detect config changes and reconfigure itself; this sets the check interval in seconds, i.e. the file is re-read every few seconds
- Log levels: TRACE < DEBUG < INFO < WARN < ERROR < FATAL
- If set to WARN, messages below WARN are not output
--->
-<Configuration status="INFO" monitorInterval="30">
-
- <!-- Parameter configuration -->
+ <!-- Logging parameter configuration -->
<Properties>
- <!-- Log file output directory -->
- <Property name="LOG_HOME">./logs</Property>
- <!-- Log output file name -->
- <property name="FILE_NAME">galaxy-report-service</property>
- <!-- Log message format -->
- <property name="console_pattern_layout">
- [%d{yyyy-MM-dd HH:mm:ss}] [%p] [Thread:%t] %l %x - %m%n
- </property>
- <property name="pattern_layout">
- [%d{yyyy-MM-dd HH:mm:ss}] [%p] [Thread:%t] %l %x - %m%n
- </property>
+ <!-- Roll and compress the log file every 5M -->
+ <property name="LOG_SIZE">5M</property>
+ <!-- Keep at most 10 compressed files -->
+ <property name="LOG_NUMS">10</property>
+ <!-- Log level -->
+ <property name="LOG_LEVEL">info</property>
+ <!-- Log file path -->
+ <property name="LOG_PATH">logs</property>
+ <!-- Log file name -->
+ <property name="LOG_FILE_NAME">galaxy-report-service</property>
+ <!-- Log output pattern -->
+ <property name="LOG_PATTERN">[%d{yyyy-MM-dd HH:mm:ss}] [%p] [Thread:%t] %l %x - %m%n</property>
</Properties>
- <!-- Appender configuration -->
- <Appenders>
- <!-- Console output -->
- <Console name="Console" target="SYSTEM_OUT">
- <!-- ThresholdFilter: log filtering.
- An event at or above the configured level is a match; otherwise a mismatch.
- ACCEPT: emit the event; DENY: drop it and stop filtering; NEUTRAL: do not emit, run the next filter -->
- <!--<ThresholdFilter level="INFO" onMatch="ACCEPT" onMismatch="DENY"/>-->
- <!-- Output pattern:
- %d{yyyy-MM-dd HH:mm:ss, SSS} : event timestamp, down to the millisecond
- %-5p (level) : log level, left-aligned, padded to a fixed width of 5 characters
- %c (logger) : logger name (%logger)
- %t (thread) : current thread name
- %m : log message, i.e. logger.info("message")
- %n : line separator
- %C : Java class name (%F)
- %L : line number
- %M : method name
- %l : location of the logging statement: class, method, file and line
- hostName : local host name
- hostAddress : local IP address
- -->
- <PatternLayout pattern="${console_pattern_layout}"/>
+ <appenders>
+ <Console name="consoleSystemOutAppender" target="SYSTEM_OUT">
+ <ThresholdFilter level="DEBUG" onMatch="ACCEPT" onMismatch="DENY"/>
+ <PatternLayout pattern="${LOG_PATTERN}"/>
</Console>
- <!-- File output: logs everything; the file is cleared on each run (controlled by the append attribute), suited to temporary testing -->
- <!--<File name="log" fileName="log/test.log" append="false">
- <PatternLayout pattern="%d{HH:mm:ss.SSS} %-5level %class{36} %L %M - %msg%xEx%n"/>
- </File>-->
-
- <!--
- Rolling log file configuration: once the current log file exceeds the threshold, a new file is started.
- Logs everything; each chunk of `size` bytes is archived under a year-month folder and compressed.
-
- fileName : location and name of the current log file
- filePattern : how the file is moved and renamed on rollover
- SizeBasedTriggeringPolicy : triggers rollover when the file exceeds the given size
- DefaultRolloverStrategy : maximum number of files to keep
- TimeBasedTriggeringPolicy : used together with filePattern.
- Note the rename rule in filePattern is ${FILE_NAME}_%d{yyyy-MM-dd}_%i; the smallest time unit is dd (day),
- so with a TimeBasedTriggeringPolicy interval of 1 a new file is created each day
- -->
- <RollingRandomAccessFile name="ALL"
- fileName="${LOG_HOME}/${FILE_NAME}.log"
- filePattern="${LOG_HOME}/${FILE_NAME}.log.%d{yyyy-MM-dd}_%i">
- <!--<Filters>
- <ThresholdFilter level="INFO" onMatch="ACCEPT" onMismatch="DENY"/>
- </Filters>-->
- <PatternLayout pattern="${pattern_layout}"/>
+ <RollingFile name="rollingFileAllAppender"
+ fileName="${LOG_PATH}/${LOG_FILE_NAME}.log"
+ filePattern="${LOG_PATH}/history/$${date:yyyy-MM-dd}/${LOG_FILE_NAME}-%d{yyyy-MM-dd}-%i.log.gz">
+ <PatternLayout pattern="${LOG_PATTERN}"/>
<Policies>
- <TimeBasedTriggeringPolicy interval="1"/>
- <!--<SizeBasedTriggeringPolicy size="100MB"/>-->
+ <SizeBasedTriggeringPolicy size="${LOG_SIZE}"/>
+ <TimeBasedTriggeringPolicy interval="1" modulate="true"/>
</Policies>
- <DefaultRolloverStrategy max="20"/>
- </RollingRandomAccessFile>
-
- <!-- Async logging configuration -->
- <Async name="AsyncAll">
- <AppenderRef ref="Console"/>
- <AppenderRef ref="ALL"/>
- </Async>
-
- </Appenders>
-
- <!-- Logger configuration -->
- <Loggers>
-
- <Logger name="druid.sql" level="INFO" additivity="false">
- <AppenderRef ref="AsyncAll"/>
- </Logger>
- <Logger name="druid.sql.Connection" level="INFO" additivity="false">
- <AppenderRef ref="AsyncAll"/>
- </Logger>
- <Logger name="net.sf.ehcache" level="INFO" additivity="false">
- <AppenderRef ref="AsyncAll"/>
- </Logger>
-
- <!--
- The Logger node configures logging for a specific scope, e.g. a different level for classes under a given package.
- level : output level; eight levels from low to high: All < Trace < Debug < Info < Warn < Error < Fatal < OFF.
- name : the class or package (full path) this Logger applies to; inherits from the Root node.
- AppenderRef : child node naming the Appender this Logger writes to; if omitted, it inherits from Root.
- If one is named, output goes to both that Appender and Root's Appenders;
- set additivity="false" on the Logger to write only to the custom Appender.
- -->
- <Root level="INFO">
- <AppenderRef ref="Console"/>
- <AppenderRef ref="ALL"/>
- </Root>
- </Loggers>
-
-</Configuration>
\ No newline at end of file
+ <Filters>
+ <ThresholdFilter level="all" onMatch="ACCEPT" onMismatch="DENY"/>
+ </Filters>
+ <DefaultRolloverStrategy max="${LOG_NUMS}">
+ <Delete basePath="${LOG_PATH}/history" maxDepth="1">
+ <IfFileName glob="*.log.gz">
+ <IfLastModified age="90d">
+ <IfAny>
+ <IfAccumulatedFileSize exceeds="200 GB" />
+ </IfAny>
+ </IfLastModified>
+ </IfFileName>
+ </Delete>
+ </DefaultRolloverStrategy>
+ </RollingFile>
+ </appenders>
+ <loggers>
+ <root level="${LOG_LEVEL}">
+ <appender-ref ref="consoleSystemOutAppender"/>
+ <appender-ref ref="rollingFileAllAppender"/>
+ </root>
+ </loggers>
+</configuration>
\ No newline at end of file