| | | |
|---|---|---|
| author | liuyongqiang <[email protected]> | 2020-12-10 11:42:40 +0800 |
| committer | liuyongqiang <[email protected]> | 2020-12-10 11:42:40 +0800 |
| commit | 2fae39946874049bfa11c7372b9f1cdef20ec89c (patch) | |
| tree | d8ce952800eebb092edaa9a0bf2ba1dde95d7e0c | |
| parent | 1c4c5f826211e9572202ce7b9085dad7cd5a61dc (diff) | |
galaxy-hos-service migration completed
80 files changed, 7638 insertions, 11 deletions
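The commit adds an S3-compatible object service backed by HBase. A minimal client sketch, based only on the defaults visible in this diff (the `http://127.0.0.1:9098/hos` endpoint, `default/default` credentials, and the `x-hos-meta-message` metadata header used by the added MonitorController self-test), might look like the following; all of these values are deployment-specific assumptions, not fixed by the service:

```java
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class HosClientExample {
    public static void main(String[] args) throws Exception {
        // Defaults taken from the diff; adjust endpoint and credentials for a real deployment.
        String endpoint = "http://127.0.0.1:9098/hos";
        BasicAWSCredentials credentials = new BasicAWSCredentials("default", "default");

        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withCredentials(new AWSStaticCredentialsProvider(credentials))
                .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, "us-east-1"))
                .withChunkedEncodingDisabled(true) // mirrors the client settings used by MonitorController in this diff
                .build();

        s3.createBucket("hostest");

        // hos.metaHeader (x-hos-meta-message) carries the object's user metadata.
        try (InputStream body = new ByteArrayInputStream("hello".getBytes(StandardCharsets.UTF_8))) {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setHeader("x-hos-meta-message", "example metadata");
            s3.putObject(new PutObjectRequest("hostest", "example-object", body, metadata));
        }
    }
}
```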
diff --git a/galaxy-hos-service/config/application.yml b/galaxy-hos-service/config/application.yml
index 7f87392..e7fbed0 100644
--- a/galaxy-hos-service/config/application.yml
+++ b/galaxy-hos-service/config/application.yml
@@ -4,6 +4,10 @@ server:
 spring:
   application:
     name: galaxy-hos-service
+  servlet: # maximum size of uploaded files
+    multipart:
+      max-file-size: 1024MB
+      max-request-size: 1024MB
   cloud:
     consul:
       host: 192.168.44.12
@@ -22,6 +26,42 @@ management:
   endpoint:
     health:
       show-details: always
+## tomcat config
+tomcat:
+  cacheMaxSize: 10000000 # Tomcat cache size in KB; the system default is 10 MB, set here to 10 GB
 ## log file path config
 logging:
-  config: ./config/logback-spring.xml
\ No newline at end of file
+  config: ./config/logback-spring.xml
+## auth config
+auth:
+  open: 0 # whether authentication is enabled; 0 = enabled, in which case requests must carry an S3 signature or a token
+## hbase config
+hbase:
+  client_keyvalue_maxsize: 1024000000
+  client_retries_number: 1
+  client_write_buffer: 10485760
+  connect_pool: 300
+  filename_head: 1,2,3,4,5,6,7,8,9,a,b,c,d,e,f
+  filename_index_table_prefix: index_filename_
+  hadoopConf: /home/tsg/galaxy/hadoop-2.7.1/etc/hadoop/ # directory holding the Hadoop config files; not needed in standalone mode
+  hbasePath: /hbase/hbase-2.2.3 # directory used to determine storage usage (file sizes)
+  namenodes: 192.168.44.11,192.168.44.14
+  partfile_index_table_prefix: index_partfile_
+  region_start_key: 1,2,3,4,5,6,7,8,9,a,b,c,d,e,f # split keys used to pre-split regions when creating tables; leave empty for no pre-splitting
+  rpc_timeout: 100000
+  standone: 1 # 1 = cluster, 0 = standalone; mainly determines how storage quota usage is obtained
+  time_index_table_prefix: index_time_
+  zookeeper_property_clientPort: 2181
+  zookeeper_quorum: 192.168.44.11
+  zookeeper_znode_parent: /hbase
+## hos config
+hos:
+  deleteMultipleNumber: 1000 # maximum number of objects per bulk delete
+  fileSizeValve: 100000
+  maxPartNumber: 1000 # maximum number of parts in a multipart upload
+  maxPoolNumber: 600 # number of file-upload threads
+  maxPosition: 10000 # maximum number of append operations per object
+  maxResultLimit: 10000 # maximum result size for list-objects and similar operations
+  metaHeader: x-hos-meta-message # request header carrying the object's user metadata
+  simple: 0 # whether the list-objects feature is enabled; 1 = enabled
+  users: default # user whitelist, comma-separated
\ No newline at end of file diff --git a/galaxy-hos-service/config/logback-spring.xml b/galaxy-hos-service/config/logback-spring.xml index 96695e3..2b93f8f 100644 --- a/galaxy-hos-service/config/logback-spring.xml +++ b/galaxy-hos-service/config/logback-spring.xml @@ -38,6 +38,9 @@ <level>DEBUG</level> </filter> </appender> + + <logger name="org.apache.hadoop.util.Shell" level="OFF"/> + <root level="info"> <appender-ref ref="ALL_FILE" /> <appender-ref ref="STDOUT" /> diff --git a/galaxy-hos-service/pom.xml b/galaxy-hos-service/pom.xml index a41fca1..6361f4d 100644 --- a/galaxy-hos-service/pom.xml +++ b/galaxy-hos-service/pom.xml @@ -31,6 +31,74 @@ <groupId>org.springframework.cloud</groupId> <artifactId>spring-cloud-starter-consul-discovery</artifactId> </dependency> + <dependency> + <groupId>org.springframework.boot</groupId> + <artifactId>spring-boot-configuration-processor</artifactId> + <optional>true</optional> + </dependency> + <dependency> + <groupId>com.fasterxml.jackson.dataformat</groupId> + <artifactId>jackson-dataformat-xml</artifactId> + </dependency> + <dependency> + <groupId>org.apache.hbase</groupId> + <artifactId>hbase-client</artifactId> + <version>${hbase.client.version}</version> + <exclusions> + <exclusion> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-log4j12</artifactId> + </exclusion> + </exclusions> + </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-client</artifactId> + <version>${hadoop.client.version}</version> + <exclusions> + <exclusion> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-log4j12</artifactId> + </exclusion> + </exclusions> + </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-common</artifactId> + <version>${hadoop.client.version}</version> + <exclusions> + <exclusion> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-log4j12</artifactId> + </exclusion> + </exclusions> + </dependency> + <dependency> + <groupId>org.apache.hadoop</groupId> + <artifactId>hadoop-hdfs</artifactId> + <version>${hadoop.client.version}</version> + <exclusions> + <exclusion> + <groupId>org.slf4j</groupId> + <artifactId>slf4j-log4j12</artifactId> + </exclusion> + </exclusions> + </dependency> + <dependency> + <groupId>com.alibaba</groupId> + <artifactId>fastjson</artifactId> + <version>${fastjson.version}</version> + </dependency> + <dependency> + <groupId>com.amazonaws</groupId> + <artifactId>aws-java-sdk</artifactId> + <version>1.11.700</version> + </dependency> + <dependency> + <groupId>commons-io</groupId> + <artifactId>commons-io</artifactId> + <version>2.5</version> + </dependency> </dependencies> <build> diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/GalaxyHosService.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/GalaxyHosService.java index 299f72a..7608265 100644 --- a/galaxy-hos-service/src/main/java/com/mesalab/hos/GalaxyHosService.java +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/GalaxyHosService.java @@ -1,9 +1,15 @@ package com.mesalab.hos; +import io.micrometer.core.instrument.MeterRegistry; +import org.springframework.beans.factory.annotation.Value; import org.springframework.boot.SpringApplication; +import org.springframework.boot.actuate.autoconfigure.metrics.MeterRegistryCustomizer; import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.boot.web.servlet.ServletComponentScan; import org.springframework.cloud.client.discovery.EnableDiscoveryClient; +import 
org.springframework.context.annotation.Bean; +@ServletComponentScan @EnableDiscoveryClient @SpringBootApplication public class GalaxyHosService { @@ -11,4 +17,9 @@ public class GalaxyHosService { public static void main(String[] args) { SpringApplication.run(GalaxyHosService.class); } + + @Bean + MeterRegistryCustomizer<MeterRegistry> configurer(@Value("${spring.application.name}") String applicationName){ + return registry -> registry.config().commonTags("application", applicationName); + } } diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/config/HadoopFactory.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/HadoopFactory.java new file mode 100644 index 0000000..ad413d3 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/HadoopFactory.java @@ -0,0 +1,50 @@ +package com.mesalab.hos.config; + +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.BufferedMutator; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +/** + * Created by Administrator on 2020/3/10. + */ +@Configuration +@EnableConfigurationProperties(com.mesalab.hos.config.HbaseProperties.class) +@ConditionalOnProperty(value = "hbase.standone", havingValue = "1") +public class HadoopFactory { + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + + private final com.mesalab.hos.config.HbaseProperties hbproperties; + + @Autowired + public HadoopFactory(com.mesalab.hos.config.HbaseProperties hbproperties) { + this.hbproperties = hbproperties; + } + + @Bean(name = "hadoopConfiguration") + public org.apache.hadoop.conf.Configuration getHadoopConfiguration() { + org.apache.hadoop.conf.Configuration config = new org.apache.hadoop.conf.Configuration(); + config.set("fs.defaultFS","hdfs://ns1"); + config.set("hadoop.proxyuser.root.hosts","*"); + config.set("hadoop.proxyuser.root.groups","*"); + config.set("ha.zookeeper.quorum",hbproperties.getZookeeper_quorum()+":"+hbproperties.getZookeeper_property_clientPort()); + config.set("dfs.nameservices","ns1"); + config.set("dfs.ha.namenodes.ns1","nn1,nn2"); + config.set("dfs.namenode.rpc-address.ns1.nn1",hbproperties.getNameNodes().get(0)+":9000"); + config.set("dfs.namenode.rpc-address.ns1.nn2",hbproperties.getNameNodes().get(1)+":9000"); + config.set("dfs.namenode.http-address.ns1.nn1",hbproperties.getNameNodes().get(0)+":50070"); + config.set("dfs.namenode.http-address.ns1.nn2",hbproperties.getNameNodes().get(1)+":50070"); + config.set("dfs.client.failover.proxy.provider.ns1","org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"); + return config; + } + + + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/config/HbaseFactory.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/HbaseFactory.java new file mode 100644 index 0000000..b2650f4 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/HbaseFactory.java @@ -0,0 +1,72 @@ +package 
com.mesalab.hos.config; + +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.client.BufferedMutator; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.boot.context.properties.EnableConfigurationProperties; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +import java.io.IOException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +/** + * Created by Administrator on 2020/3/10. + */ +@Configuration +@EnableConfigurationProperties(HbaseProperties.class) +public class HbaseFactory { + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + + private final HbaseProperties hbproperties; + + @Autowired + public HbaseFactory(HbaseProperties hbproperties) { + this.hbproperties = hbproperties; + } + + @Bean(name = "hbaseConfiguration") + public org.apache.hadoop.conf.Configuration getConfiguration() { + org.apache.hadoop.conf.Configuration conf = HBaseConfiguration.create(); + conf.set("hbase.zookeeper.quorum", hbproperties.getZookeeper_quorum()); + conf.set("hbase.zookeeper.property.clientPort", hbproperties.getZookeeper_property_clientPort()); + conf.set("zookeeper.znode.parent", hbproperties.getZookeeper_znode_parent()); + conf.set("hbase.client.retries.number", hbproperties.getClient_retries_number()); + conf.set("hbase.rpc.timeout", hbproperties.getRpc_timeout()); + conf.set("hbase.client.keyvalue.maxsize", hbproperties.getClient_keyvalue_maxsize()); + conf.set("hbase.client.write.buffer", hbproperties.getClient_write_buffer()); + conf.set("hbase.client.ipc.pool.size", hbproperties.getConnect_pool().toString()); + return conf; + } + + @Bean(name = "hbaseConnection") + public Connection getConnection(@Qualifier("hbaseConfiguration") org.apache.hadoop.conf.Configuration conf) { + Connection conn = null; + try { + conn = ConnectionFactory.createConnection(conf); + } catch (Exception e) { + logger.error(e.toString()); + } + return conn; + } + + @Bean(name = "hbaseExceptionListener") + public BufferedMutator.ExceptionListener getLishtener(@Qualifier("hbaseConnection") Connection conn) { + + BufferedMutator.ExceptionListener listener = new BufferedMutator.ExceptionListener() { + + public void onException(RetriesExhaustedWithDetailsException e, BufferedMutator mutator) { + for (int i = 0; i < e.getNumExceptions(); i++) { + logger.error("Failed to sent put " + e.getRow(i) + "."); + } + } + }; + return listener; + } + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/config/HbaseProperties.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/HbaseProperties.java new file mode 100644 index 0000000..e2da442 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/HbaseProperties.java @@ -0,0 +1,192 @@ +package com.mesalab.hos.config; + +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.context.properties.ConfigurationProperties; + +import java.util.List; + +/** + * Created by Administrator on 2020/3/10. 
+ */ + +@ConfigurationProperties(prefix = "hbase") +public class HbaseProperties { + + private String zookeeper_quorum; + + private String zookeeper_property_clientPort; + + private String zookeeper_znode_parent; + + private String client_pause; + + private String client_retries_number; + + private String rpc_timeout; + + private Integer connect_pool; + + private String time_index_table_prefix; + + private String filename_index_table_prefix; + + private String partfile_index_table_prefix; + + + private String region_start_key; + + private String client_write_buffer; + private String client_keyvalue_maxsize; + + private List<String> filename_head; + + private String hadoopConf; + private String hbasePath; + private int standone; + private List<String> nameNodes; + + public String getZookeeper_quorum() { + return zookeeper_quorum; + } + + public void setZookeeper_quorum(String zookeeper_quorum) { + this.zookeeper_quorum = zookeeper_quorum; + } + + public String getZookeeper_property_clientPort() { + return zookeeper_property_clientPort; + } + + public void setZookeeper_property_clientPort(String zookeeper_property_clientPort) { + this.zookeeper_property_clientPort = zookeeper_property_clientPort; + } + + public String getZookeeper_znode_parent() { + return zookeeper_znode_parent; + } + + public void setZookeeper_znode_parent(String zookeeper_znode_parent) { + this.zookeeper_znode_parent = zookeeper_znode_parent; + } + + public String getClient_pause() { + return client_pause; + } + + public void setClient_pause(String client_pause) { + this.client_pause = client_pause; + } + + public String getClient_retries_number() { + return client_retries_number; + } + + public void setClient_retries_number(String client_retries_number) { + this.client_retries_number = client_retries_number; + } + + public String getRpc_timeout() { + return rpc_timeout; + } + + public void setRpc_timeout(String rpc_timeout) { + this.rpc_timeout = rpc_timeout; + } + + public Integer getConnect_pool() { + return connect_pool; + } + + public void setConnect_pool(Integer connect_pool) { + this.connect_pool = connect_pool; + } + + public String getTime_index_table_prefix() { + return time_index_table_prefix; + } + + public void setTime_index_table_prefix(String time_index_table_prefix) { + this.time_index_table_prefix = time_index_table_prefix; + } + + public String getFilename_index_table_prefix() { + return filename_index_table_prefix; + } + + public void setFilename_index_table_prefix(String filename_index_table_prefix) { + this.filename_index_table_prefix = filename_index_table_prefix; + } + + public String getPartfile_index_table_prefix() { + return partfile_index_table_prefix; + } + + public void setPartfile_index_table_prefix(String partfile_index_table_prefix) { + this.partfile_index_table_prefix = partfile_index_table_prefix; + } + + public String getRegion_start_key() { + return region_start_key; + } + + public void setRegion_start_key(String region_start_key) { + this.region_start_key = region_start_key; + } + + + public String getClient_write_buffer() { + return client_write_buffer; + } + + public void setClient_write_buffer(String client_write_buffer) { + this.client_write_buffer = client_write_buffer; + } + + public String getClient_keyvalue_maxsize() { + return client_keyvalue_maxsize; + } + + public void setClient_keyvalue_maxsize(String client_keyvalue_maxsize) { + this.client_keyvalue_maxsize = client_keyvalue_maxsize; + } + + public List<String> getFilename_head() { + return filename_head; + } + + public 
void setFilename_head(List<String> filename_head) { + this.filename_head = filename_head; + } + + public String getHadoopConf() { + return hadoopConf; + } + + public void setHadoopConf(String hadoopConf) { + this.hadoopConf = hadoopConf; + } + + public String getHbasePath() { + return hbasePath; + } + + public void setHbasePath(String hbasePath) { + this.hbasePath = hbasePath; + } + + public int getStandone() { + return standone; + } + + public void setStandone(int standone) { + this.standone = standone; + } + + public List<String> getNameNodes() { + return nameNodes; + } + + public void setNameNodes(List<String> nameNodes) { + this.nameNodes = nameNodes; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/config/HosInterceptor.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/HosInterceptor.java new file mode 100644 index 0000000..a0e206c --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/HosInterceptor.java @@ -0,0 +1,152 @@ +package com.mesalab.hos.config; + +import com.mesalab.hos.entity.ErrorResponse; +import com.mesalab.hos.util.ResponseMessageUtil; +import com.mesalab.hos.util.XmlUtil; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.lang.Nullable; +import org.springframework.stereotype.Component; +import org.springframework.web.servlet.HandlerInterceptor; +import org.springframework.web.servlet.ModelAndView; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.io.PrintWriter; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static com.mesalab.hos.util.AwsUtil.calculateSignatureV2; +import static com.mesalab.hos.util.AwsUtil.calculateSignatureV4; +import static com.mesalab.hos.util.PublicUtil.getUUID; + +/** + * Created by wk1 on 2020/5/25. 
+ */ +@Component +public class HosInterceptor implements HandlerInterceptor { + + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + @Value("${auth.open}") + Integer tag; + + @Override + public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) { + + + String accessKey = "default"; + String secretKey = "default"; + String userToken = "c21f969b5f03d33d43e04f8f136e7682"; + + String uuid = getUUID(); + String hostId = HosProperties.ip; + boolean isPass = false; + + if (tag == 0) { + try { + String Token = request.getHeader("token"); + if (Token == null) { + String auth = request.getHeader("Authorization");// 从 http 请求头中取出 token + if (auth != null && auth.contains("AWS4-HMAC-SHA256")) { + Pattern pDateRelativePars3e = Pattern.compile("\\bCredential=(.*?),\\s*\\bSignedHeaders=(.*?),\\s*\\bSignature=(.*)", Pattern.CASE_INSENSITIVE); + Matcher matcherParse = pDateRelativePars3e.matcher(auth); + if (matcherParse.find()) { + String Credential = matcherParse.group(1); + String signHeaders = matcherParse.group(2); + String signKey = matcherParse.group(3); + String[] credentialMessage = Credential.split("/"); + accessKey = credentialMessage[0]; + String Signature = calculateSignatureV4(request, signHeaders, Credential, accessKey, secretKey); + if (Signature.equals(signKey)) { + isPass = true; + } + } + } else if (auth != null && auth.contains("AWS")) { + Pattern pDateRelativePars3e = Pattern.compile("\\b(.*?):(.*)", Pattern.CASE_INSENSITIVE); + Matcher matcherParse = pDateRelativePars3e.matcher(auth); + if (matcherParse.find()) { + accessKey = matcherParse.group(1); + String Signature = matcherParse.group(2); + String preSignature = calculateSignatureV2(request, secretKey); + if (Signature.equals(preSignature)) { + isPass = true; + } + } + } + } else { + + if (userToken.equals(Token)) { + isPass = true; + } + } + } catch (Exception e) { + logger.error(e.toString()); + } + if (isPass) { + request.setAttribute("username", accessKey); + request.setAttribute("requestId", uuid); + request.setAttribute("hostId", hostId); + return true; + } else { + ErrorResponse errorResponse = ResponseMessageUtil.getResponseMessage("SignatureDoesNotMatch", hostId, uuid, "", "", response); + returnXML(response, XmlUtil.convertToXml(errorResponse)); + return false; + } + } else { + request.setAttribute("username", accessKey); + request.setAttribute("requestId", uuid); + request.setAttribute("hostId", hostId); + return true; + + } + + } + + + private void returnXML(HttpServletResponse response, String message) { + PrintWriter writer = null; + response.setCharacterEncoding("UTF-8"); + response.setStatus(403); + response.setContentType("application/xml;charset=UTF-8"); + try { + writer = response.getWriter(); + writer.print(message); + + } catch (IOException e) { + logger.error("response error", e); + } finally { + if (writer != null) { + writer.close(); + } + } + } + + /** + * 处理请求完成后视图渲染之前的处理操作 + * + * @param request + * @param response + * @param handler + * @param modelAndView + * @throws Exception + */ + @Override + public void postHandle(HttpServletRequest request, HttpServletResponse response, Object handler, @Nullable ModelAndView modelAndView) throws Exception { + } + + /** + * 视图渲染之后的操作 + * + * @param request + * @param response + * @param handler + * @param ex + * @throws Exception + */ + @Override + public void afterCompletion(HttpServletRequest request, HttpServletResponse response, Object handler, @Nullable Exception ex) 
throws Exception { + } + + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/config/HosProperties.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/HosProperties.java new file mode 100644 index 0000000..e313fcb --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/HosProperties.java @@ -0,0 +1,147 @@ +package com.mesalab.hos.config; + +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.context.annotation.Configuration; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.List; + +/** + * Created by wk on 2020/4/20. + */ +@Configuration +@ConfigurationProperties(prefix = "hos") +public class HosProperties { + + public static String ip; + public static int maxPoolNumber; + public static Double smallFileCount=0.0; + public static Double middleFileCount=0.0; + public static Double bigFileCount=0.0; + public static Double errorCount=0.0; + public static Double successCount=0.0; + public static Double sumPacket=0.0; + + static { + try { + InetAddress inetAddress = InetAddress.getLocalHost(); + ip = inetAddress.getHostAddress();//获得本机Ip + } catch (UnknownHostException e) { + ip = ""; + } + } + + private int maxResultLimit; + private int fileSizeValve; + private int deleteMultipleNumber; + private int maxPartNumber; + private int maxPosition; + private String coprocessorPath; + private String metaHeader; + private int simple; + private List<String> users; + + @Value("${server.port}") + private Integer server_port; + + public String getIp() { + return ip; + } + + public void setIp(String ip) { + HosProperties.ip = ip; + } + + public int getMaxPoolNumber() { + return maxPoolNumber; + } + + public void setMaxPoolNumber(int maxPoolNumber) { + HosProperties.maxPoolNumber = maxPoolNumber; + } + + public int getFileSizeValve() { + return fileSizeValve; + } + + public void setFileSizeValve(int fileSizeValve) { + this.fileSizeValve = fileSizeValve; + } + + public int getDeleteMultipleNumber() { + return deleteMultipleNumber; + } + + public void setDeleteMultipleNumber(int deleteMultipleNumber) { + this.deleteMultipleNumber = deleteMultipleNumber; + } + + public String getCoprocessorPath() { + return coprocessorPath; + } + + public void setCoprocessorPath(String coprocessorPath) { + this.coprocessorPath = coprocessorPath; + } + + public int getMaxResultLimit() { + return maxResultLimit; + } + + public void setMaxResultLimit(int maxResultLimit) { + this.maxResultLimit = maxResultLimit; + } + + public String getMetaHeader() { + return metaHeader; + } + + public void setMetaHeader(String metaHeader) { + this.metaHeader = metaHeader; + } + + public Integer getServer_port() { + return server_port; + } + + public void setServer_port(Integer server_port) { + this.server_port = server_port; + } + + public int getMaxPartNumber() { + return maxPartNumber; + } + + public void setMaxPartNumber(int maxPartNumber) { + this.maxPartNumber = maxPartNumber; + } + + public int getMaxPosition() { + return maxPosition; + } + + public void setMaxPosition(int maxPosition) { + this.maxPosition = maxPosition; + } + + public int getSimple() { + return simple; + } + + public void setSimple(int simple) { + this.simple = simple; + } + + public List<String> getUsers() { + return users; + } + + public void setUsers(List<String> users) { + this.users = users; + } + + + +} diff --git 
a/galaxy-hos-service/src/main/java/com/mesalab/hos/config/LogChartMetricsFilter.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/LogChartMetricsFilter.java new file mode 100644 index 0000000..b7313a7 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/LogChartMetricsFilter.java @@ -0,0 +1,101 @@ +package com.mesalab.hos.config; + +import io.micrometer.core.instrument.*; +import org.apache.commons.collections.map.HashedMap; +import org.springframework.stereotype.Component; +import org.springframework.util.ObjectUtils; +import org.springframework.web.servlet.HandlerInterceptor; + +import javax.annotation.Resource; +import javax.servlet.*; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +@Component +public class LogChartMetricsFilter implements Filter { + + + private MeterRegistry registry; + Map<String, Double> dashboardMap = new HashMap<String, Double>(); + + + + public LogChartMetricsFilter(MeterRegistry registry) { + this.registry = registry; + } + + + @Override + public void init(FilterConfig filterConfig) throws ServletException { + + } + + @Override + public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse, FilterChain filterChain) throws IOException, ServletException { + HttpServletRequest req = (HttpServletRequest) servletRequest; + filterChain.doFilter(servletRequest, servletResponse); + if (req.getRequestURI().contains("/prometheus")) { + registryDashboardInfo(); + } + } + + @Override + public void destroy() { + + } + + + + + + /** + * 更新info指标 + */ + private void registryDashboardInfo() { + //去除容器中值防止影响 + Double lc =HosProperties.smallFileCount; + Double mc =HosProperties.middleFileCount; + Double bc=HosProperties.bigFileCount; + Double ec=HosProperties.errorCount; + Double sc=HosProperties.successCount; + + Double sp =HosProperties.sumPacket; + HosProperties.smallFileCount=0.0; + HosProperties.middleFileCount=0.0; + HosProperties.bigFileCount=0.0; + HosProperties.errorCount=0.0; + HosProperties.successCount=0.0; + HosProperties.sumPacket=0.0; + dashboardMap.put("smallFileCount", lc); + dashboardMap.put("middleFileCount",mc); + dashboardMap.put("bigFileCount", bc); + dashboardMap.put("errorCount", ec); + dashboardMap.put("successCount", sc); + dashboardMap.put("sumPacket", sp); + registryMetrics(dashboardMap); + + } + + + + private void registryMetrics(Map<String, Double> map) { + if (!ObjectUtils.isEmpty(map)) { + for (Map.Entry<String, Double> entry : map.entrySet()) { + //去除容器中值防止影响 + + + Gauge.builder("dashInfo", map, x -> x.get(entry.getKey())) + .tags("id", entry.getKey(), "name", entry.getKey(), "severity", entry.getKey()) + .description("info") + .register(registry); + } + } + + } + +}
\ No newline at end of file diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/config/WebConfig.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/WebConfig.java new file mode 100644 index 0000000..d70e7b9 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/WebConfig.java @@ -0,0 +1,34 @@ +package com.mesalab.hos.config; + +import org.apache.catalina.Context; +import org.apache.catalina.webresources.StandardRoot; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.boot.web.embedded.tomcat.TomcatServletWebServerFactory; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +/** + * Created by wk1 on 2020/4/29. + */ +@Configuration +public class WebConfig { + + @Value("${tomcat.cacheMaxSize}") + Integer cacheMaxSize; + + + @Bean + public TomcatServletWebServerFactory servletContainer() { + return new TomcatServletWebServerFactory() { + @Override + protected void postProcessContext(Context context) { + + StandardRoot standardRoot = new StandardRoot(context); + standardRoot.setCacheMaxSize(cacheMaxSize); + context.setResources(standardRoot); + } + }; + } + + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/config/WebFilterConfig.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/WebFilterConfig.java new file mode 100644 index 0000000..998bf3f --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/config/WebFilterConfig.java @@ -0,0 +1,24 @@ +package com.mesalab.hos.config; + + +import org.springframework.context.annotation.Configuration; +import org.springframework.web.servlet.config.annotation.InterceptorRegistry; +import org.springframework.web.servlet.config.annotation.WebMvcConfigurer; + +import javax.annotation.Resource; + +/** + * Created by wk1 on 2020/5/25. + */ +@Configuration +public class WebFilterConfig implements WebMvcConfigurer { + //实现拦截器 要拦截的路径以及不拦截的路径 + @Resource + private HosInterceptor hosInterceptor; + + @Override + public void addInterceptors(InterceptorRegistry registry) { + //注册自定义拦截器,添加拦截路径和排除拦截路径 + registry.addInterceptor(hosInterceptor).addPathPatterns("/hos/**").excludePathPatterns("/admin/login"); + } +}
\ No newline at end of file diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/controller/AdminController.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/controller/AdminController.java new file mode 100644 index 0000000..796750c --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/controller/AdminController.java @@ -0,0 +1,60 @@ +package com.mesalab.hos.controller; + +import com.mesalab.hos.config.HbaseProperties; +import com.mesalab.hos.config.HosProperties; +import com.mesalab.hos.entity.DiskResponse; +import com.mesalab.hos.service.DiskService; +import com.mesalab.hos.util.XmlUtil; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsStatus; +import org.apache.hadoop.fs.Path; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.http.MediaType; +import org.springframework.stereotype.Component; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RestController; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.util.List; + +/** + * Created by wk on 2020/11/30. + */ +@RestController +@Component +public class AdminController { + + @Autowired + private HosProperties hosProperties; + @Autowired + private DiskService diskService; + @Autowired + private HbaseProperties hbaseProperties; + + + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + + @GetMapping(value = "/admin/diskspace", produces = MediaType.APPLICATION_XML_VALUE) + public String getDiskSpace() { + + DiskResponse diskResponse=new DiskResponse(); + try { + List<String> users = hosProperties.getUsers(); + String path = hbaseProperties.getHbasePath(); + diskResponse = diskService.getHBaseSize(users, path); + } + catch (Exception e){ + diskResponse.setHosUsed(-1L); + diskResponse.setHosCapacity(-1L); + logger.error(e); + } + return XmlUtil.convertToXml(diskResponse); + } + + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/controller/BucketController.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/controller/BucketController.java new file mode 100644 index 0000000..fdef0f6 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/controller/BucketController.java @@ -0,0 +1,118 @@ +package com.mesalab.hos.controller; + +import com.mesalab.hos.entity.ErrorResponse; +import com.mesalab.hos.entity.ResEntity; +import com.mesalab.hos.entity.lifecycle.LifecycleRequest; +import com.mesalab.hos.service.BucketService; +import com.mesalab.hos.util.ResponseMessageUtil; +import com.mesalab.hos.util.XmlUtil; +import org.apache.commons.io.IOUtils; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.stereotype.Component; +import org.springframework.util.ObjectUtils; +import org.springframework.util.StringUtils; +import org.springframework.web.bind.annotation.*; + +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.io.InputStream; + +import static com.mesalab.hos.util.XmlUtil.convertXmlStrToObject; + +@RestController +@Component +@RequestMapping("/hos") +public class BucketController { + private final org.apache.log4j.Logger logger = 
org.apache.log4j.Logger.getLogger(this.getClass()); + + @Autowired + private BucketService bucketService; + + @PutMapping(value = "/{bucket}", produces = MediaType.APPLICATION_XML_VALUE) + public String createOrChangeTTL(@PathVariable("bucket") String bucket, String lifecycle, HttpServletRequest request, HttpServletResponse response, @RequestAttribute("username") String username, @RequestAttribute("requestId") String requestId, @RequestAttribute("hostId") String hostId) { + ResEntity resEntity; + ErrorResponse errorResponse = null; + if (lifecycle != null) { + try (InputStream inputStream = request.getInputStream()) { + String xmlBody = IOUtils.toString(inputStream, "UTF-8"); + LifecycleRequest lr = (LifecycleRequest) convertXmlStrToObject(LifecycleRequest.class, xmlBody); + if (lr != null) { + resEntity = bucketService.changeTTL(lr.getRuleObj().getExpirationObj().getDays(), username, bucket); + if (!resEntity.getIsSuccess()) { + errorResponse = ResponseMessageUtil.getResponseMessage(resEntity.getCode(), hostId, requestId, bucket, "", response); + } + } else { + errorResponse = ResponseMessageUtil.getResponseMessage("MalformedXML", hostId, requestId, bucket, "", response); + } + + } catch (IOException e) { + logger.error(requestId + " " + e.toString()); + errorResponse = ResponseMessageUtil.getResponseMessage("MalformedXML", hostId, requestId, bucket, "", response); + } catch (Exception e) { + logger.error(requestId + " " + e.toString()); + errorResponse = ResponseMessageUtil.getResponseMessage("InternalError", hostId, requestId, bucket, "", response); + } + } else { + resEntity = bucketService.createBucket(username, bucket); + if (resEntity.getIsSuccess()) { + response.setHeader("Location", "/" + bucket); + } else { + errorResponse = ResponseMessageUtil.getResponseMessage(resEntity.getCode(), hostId, requestId, bucket, "", response); + } + } + if (errorResponse == null) { + return ""; + } else { + return XmlUtil.convertToXml(errorResponse); + } + } + + @DeleteMapping(value = "/{bucket}", produces = MediaType.APPLICATION_XML_VALUE) + public String truncateOrDelete(@PathVariable("bucket") String bucket, String truncate, HttpServletResponse response, @RequestAttribute("username") String username, @RequestAttribute("requestId") String requestId, @RequestAttribute("hostId") String hostId) { + ResEntity resEntity; + ErrorResponse errorResponse = null; + try { + if (truncate != null) { + resEntity = bucketService.truncateData(username, bucket); + if (!resEntity.getIsSuccess()) { + errorResponse = ResponseMessageUtil.getResponseMessage(resEntity.getCode(), hostId, requestId, bucket, "", response); + } + }else { + resEntity = bucketService.deleteBucket(username, bucket); + if (!resEntity.getIsSuccess()) { + errorResponse = ResponseMessageUtil.getResponseMessage(resEntity.getCode(), hostId, requestId, bucket, "", response); + } else { + response.setStatus(204); + } + } + } catch (Exception e) { + logger.error(requestId + " " + e.toString()); + errorResponse = ResponseMessageUtil.getResponseMessage("InternalError", hostId, requestId, bucket, "", response); + } + if (errorResponse == null) { + return ""; + } else { + return XmlUtil.convertToXml(errorResponse); + } + } + + @GetMapping(value = "/", produces = MediaType.APPLICATION_XML_VALUE) + public String getBucketList(HttpServletResponse response, @RequestAttribute("username") String username, @RequestAttribute("requestId") String requestId, @RequestAttribute("hostId") String hostId) { + ResEntity resEntity; + ErrorResponse errorResponse; + try { + 
resEntity = bucketService.getBucketList(username); + if (resEntity.getIsSuccess()) { + return XmlUtil.convertToXml(resEntity.getReturnEntity()); + } else { + errorResponse = ResponseMessageUtil.getResponseMessage(resEntity.getCode(), hostId, requestId, "", "", response); + } + } catch (Exception e) { + logger.error(requestId + " " + e.toString()); + errorResponse = ResponseMessageUtil.getResponseMessage("InternalError", hostId, requestId, "", "", response); + } + return XmlUtil.convertToXml(errorResponse); + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/controller/MonitorController.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/controller/MonitorController.java new file mode 100644 index 0000000..440b413 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/controller/MonitorController.java @@ -0,0 +1,348 @@ +package com.mesalab.hos.controller; + +import com.alibaba.fastjson.JSONArray; +import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; +import com.amazonaws.services.s3.model.*; +import com.amazonaws.util.IOUtils; +import com.mesalab.hos.config.HosProperties; +import com.mesalab.hos.util.PublicUtil; + +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.NamespaceNotFoundException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.util.Bytes; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; +import org.springframework.web.bind.annotation.GetMapping; +import org.springframework.web.bind.annotation.RestController; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.*; + + +/** + * Created by wk on 2020/4/16. 
+ */ +@RestController +@Component +public class MonitorController { + + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + + @Autowired + private Connection hbaseConnection; + @Autowired + private HosProperties hosProperties; + + private String endpoint = "http://127.0.0.1:9098/hos"; + private String accessKey = "default"; + private String secretKey = "default"; + private String user = "default"; + private String bucketName = "hostest"; + private String objectName = "aa"; + private String url = endpoint + "/" + bucketName + "/" + objectName; + private String data = "aaaa";//文件数据 + private String appendData = "bbbb";//文件追加的数据 + private String message = "aa";//文件元数据 + private String appendMessage = "bb";//追加的元数据 + private int lifecycleDays = 10;//设置生命周期天数 + + private BasicAWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey); + private AmazonS3 s3 = AmazonS3ClientBuilder.standard() + .withCredentials(new AWSStaticCredentialsProvider(awsCredentials)) + .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, "us-east-1")) + .withChunkedEncodingDisabled(true) + .build(); + + @GetMapping(value = "/monitor") + public String getReportStatus() { + String json = ""; + try { + + Map checkmap = new TreeMap(); + checkmap.put("CreateBucket", testS3CreateBucket()); + checkmap.put("AppendFile", testS3AppendFile()); + checkmap.put("UploadFile", testS3UploadFile()); + checkmap.put("ChangeLifecycle", testS3ChangeLifecycle()); + checkmap.put("GetBucketList", testS3GetBucketList()); + if (hosProperties.getSimple() == 1) { + checkmap.put("GetObjectList", testS3GetObjectList()); + } + checkmap.put("DeleteBucket", testS3DeleteBucket()); + + Object obj = JSONArray.toJSON(checkmap); + json = obj.toString(); + } catch (Exception e) { + logger.error(e.toString()); + } + return json; + } + + + public String testS3CreateBucket() { + + String message = ""; + try { + s3.createBucket(bucketName); + try (Admin admin = hbaseConnection.getAdmin()) { + admin.getNamespaceDescriptor(user); + TableDescriptor descriptor = admin.getDescriptor(TableName.valueOf(user + ":" + bucketName)); + message = "创建桶验证成功"; + } catch (NamespaceNotFoundException e) { + message = "创建桶验证失败:用户不存在"; + } catch (TableNotFoundException e) { + message = "创建桶验证失败:桶不存在"; + } catch (Exception e) { + message = "创建桶验证失败" + e.toString(); + } + } catch (Exception e) { + message = "创建桶失败" + e.toString(); + } + return message; + } + + + public String testS3UploadFile() { + + String message = ""; + try (InputStream input = new ByteArrayInputStream(data.getBytes()); + Table table = hbaseConnection.getTable(TableName.valueOf(user + ":" + bucketName)); + ) { + ObjectMetadata metadata = new ObjectMetadata(); + metadata.setHeader("x-hos-meta-message", message); + PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, objectName, input, metadata); + s3.putObject(putObjectRequest); + Result result = table.get(new Get(PublicUtil.getRowKey(objectName).getBytes())); + String stringData = Bytes.toString(CellUtil.cloneValue(result.getColumnLatestCell("data".getBytes(), "file".getBytes()))); + String fileMessage = Bytes.toString(CellUtil.cloneValue(result.getColumnLatestCell("meta".getBytes(), "message".getBytes()))); + if (data.equals(stringData)) { + if (message.equals(fileMessage)) { + message = "上传功能验证成功"; + } else { + message = "上传失败:元信息内容与上传的元信息内容不符"; + } + } else { + message = "上传失败:文件内容与上传内容不符"; + } + } catch (Exception e) { + + message = "上传失败 " + 
e.toString(); + } + return message; + + } + + + public String testS3AppendFile() { + + String resultmessage = "追加功能验证成功"; + + try (Table table = hbaseConnection.getTable(TableName.valueOf(user + ":" + bucketName))) { + Result result; + for (int i = 1; i <= 5; i++) { + try (InputStream input = new ByteArrayInputStream((appendData + i).getBytes())) { + ObjectMetadata metadata = new ObjectMetadata(); + metadata.setHeader("x-hos-upload-type", "append"); + metadata.setHeader("x-hos-meta-message", appendMessage + i); + metadata.setHeader("x-hos-position", i); + PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, objectName, input, metadata); + s3.putObject(putObjectRequest); + result = table.get(new Get(PublicUtil.getRowKey(objectName + "|" + i).getBytes())); + String stringData = Bytes.toString(CellUtil.cloneValue(result.getColumnLatestCell("data".getBytes(), "file".getBytes()))); + if (!stringData.equals(appendData + i)) { + resultmessage = "文件内容追加失败"; + } + result = table.get(new Get(PublicUtil.getRowKey(objectName + "|" + i).getBytes())); + + } catch (Exception e) { + + resultmessage = "文件追加失败" + e.toString(); + } + } + S3Object object = s3.getObject(bucketName, objectName); + + byte[] bytesData = IOUtils.toByteArray(object.getObjectContent()); + + String fileMessage = object.getObjectMetadata().getRawMetadataValue("x-hos-meta-message").toString(); + + String stringData = new String(bytesData); + StringBuffer fileData = new StringBuffer(); + StringBuffer fm = new StringBuffer(); + for (int i = 1; i <= 5; i++) { + fileData.append(appendData).append(i); + fm.append(appendMessage).append(i); + } + for (int i = 1; i <= 5; i++) { + } + + if (!(fileData.toString()).equals(stringData)) { + resultmessage = "文件内容与预期不符"; + } + if (!(fileMessage).equals(fm.toString())) { + resultmessage = "文件元数据与预期不符"; + } + s3.deleteObject(bucketName, objectName); + boolean exists = table.exists(new Get(PublicUtil.getRowKey(objectName).getBytes())); + if (exists) { + resultmessage = "删除追加文件失败"; + } + } catch (Exception e) { + + resultmessage = "文件追加验证失败" + e.toString(); + } + return resultmessage; + } + + public String testS3ChangeLifecycle() { + + String message = ""; + try { + BucketLifecycleConfiguration lifecycleConfiguration = new BucketLifecycleConfiguration(); + BucketLifecycleConfiguration.Rule rule = new BucketLifecycleConfiguration.Rule(); + rule.setId("1"); + rule.setExpirationInDays(lifecycleDays); + rule.setStatus("Enabled"); + rule.setPrefix(""); + ArrayList<BucketLifecycleConfiguration.Rule> rules = new ArrayList<>(); + rules.add(rule); + lifecycleConfiguration.setRules(rules); + s3.setBucketLifecycleConfiguration(bucketName, lifecycleConfiguration); + message = "更改桶TTl验证成功"; + } catch (Exception e) { + message = "更改ttl功能失败" + e.toString(); + } + + int dataTTL = 0; + int metaTTL = 0; + try (Admin admin = hbaseConnection.getAdmin()) { + TableDescriptor tableDescriptor = admin.getDescriptor(TableName.valueOf(bucketName)); + ColumnFamilyDescriptor dataColumnFamily = tableDescriptor.getColumnFamily("data".getBytes()); + ColumnFamilyDescriptor metaColumnFamily = tableDescriptor.getColumnFamily("meta".getBytes()); + dataTTL = dataColumnFamily.getTimeToLive(); + metaTTL = metaColumnFamily.getTimeToLive(); + if (dataTTL != lifecycleDays * 24 * 60 * 60) { + message = "生命周期设置失败:桶的生命周期和预期值不同"; + } + if (metaTTL != lifecycleDays * 24 * 60 * 60) { + message = "生命周期设置失败:桶的生命周期和预期值不同"; + } + } catch (IOException e) { + message = "生命周期设置失败 " + e.toString(); + } + return message; + } + + public String 
testS3GetBucketList() { + + String message = ""; + try { + List<Bucket> buckets = s3.listBuckets(new ListBucketsRequest()); + ArrayList<String> bucketNameList = new ArrayList<>(); + ArrayList<String> bucketNameList1 = new ArrayList<>(); + for (Bucket bucket : buckets) { + bucketNameList.add(bucket.getName()); + } + message = "获取桶列表功能验证成功"; + try (Admin admin = hbaseConnection.getAdmin()) { + TableName[] tableNames = admin.listTableNamesByNamespace(user); + for (TableName tableName : tableNames) { + bucketNameList1.add(Bytes.toString(tableName.getName())); + } + if (bucketNameList.retainAll(bucketNameList1)) { + message = "获取桶列表错误:获取的桶列表不正确"; + } + } catch (Exception e) { + message = "获取桶列表错误" + e.toString(); + } + + } catch (Exception e) { + message = "获取桶列表错误" + e.toString(); + } + return message; + } + + + public String testS3DeleteBucket() { + String message = ""; + s3.deleteBucket(bucketName); + message = "删除桶功能验证成功"; + try (Admin admin = hbaseConnection.getAdmin()) { + if (admin.tableExists(TableName.valueOf(user + ":" + bucketName))) { + message = "桶删除失败"; + } + } catch (Exception e) { + message = "桶删除失败" + e.toString(); + } + return message; + } + + public String testS3GetObjectList() { + String message; + try { + List<String> putObjs = new ArrayList<>(); + long startTime = System.currentTimeMillis() / 1000 - 100; + putObjs.add("b-1"); + putObjs.add("b-2"); + putObjs.add("b-3"); + for (String obj : putObjs) { + uploadObj(obj); + } + long endTime = System.currentTimeMillis() / 1000 + 100; + String startAfter = startTime + "_" + endTime; + + //测试按时间+前缀查询列表 + ArrayList<String> objectNames = new ArrayList<>(); + ListObjectsV2Request listObjectsV2Request = new ListObjectsV2Request(); + listObjectsV2Request.withMaxKeys(10) + .withStartAfter(startAfter) + .withBucketName(bucketName) + .withPrefix("b"); + ListObjectsV2Result listObjectsV2Result = s3.listObjectsV2(listObjectsV2Request); + List<S3ObjectSummary> S3ObjectSummarylist = listObjectsV2Result.getObjectSummaries(); + for (S3ObjectSummary S3Object : S3ObjectSummarylist) { + String filename = S3Object.getKey(); + objectNames.add(filename); + } + if (!objectNames.containsAll(putObjs)) { + return "按时间+文件名前缀获取对象列表错误,和预期列表不符"; + } + //测试按时间查询列表 + objectNames = new ArrayList<>(); + listObjectsV2Request = new ListObjectsV2Request(); + listObjectsV2Request.withMaxKeys(10) + .withStartAfter(startAfter) + .withBucketName(bucketName); + listObjectsV2Result = s3.listObjectsV2(listObjectsV2Request); + S3ObjectSummarylist = listObjectsV2Result.getObjectSummaries(); + for (S3ObjectSummary S3Object : S3ObjectSummarylist) { + String filename = S3Object.getKey(); + objectNames.add(filename); + } + if (!objectNames.containsAll(putObjs)) { + return "按时间获取对象列表错误,和预期列表不符"; + } + message = "获取对象列表功能验证成功"; + } catch (Exception e) { + message = "获取对象列表失败" + e.toString(); + } + return message; + } + + public void uploadObj(String object) throws IOException { + InputStream input = new ByteArrayInputStream(data.getBytes()); + ObjectMetadata metadata = new ObjectMetadata(); + metadata.setHeader("x-hos-meta-message", message); + PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, object, input, metadata); + s3.putObject(putObjectRequest); + input.close(); + } + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/controller/ObjectController.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/controller/ObjectController.java new file mode 100644 index 0000000..9d49184 --- /dev/null +++ 
b/galaxy-hos-service/src/main/java/com/mesalab/hos/controller/ObjectController.java @@ -0,0 +1,380 @@ +package com.mesalab.hos.controller; + +import com.mesalab.hos.config.HosProperties; +import com.mesalab.hos.config.LogChartMetricsFilter; +import com.mesalab.hos.entity.ErrorResponse; +import com.mesalab.hos.entity.FileEntity; +import com.mesalab.hos.entity.MultipartUpload.CompleteMultipartUpload; +import com.mesalab.hos.entity.MultipartUpload.CompleteMultipartUploadResult; +import com.mesalab.hos.entity.MultipartUpload.InitiateMultipartUploadResult; +import com.mesalab.hos.entity.MultipartUpload.Part; +import com.mesalab.hos.entity.ResEntity; +import com.mesalab.hos.entity.ColumnEntity; +import com.mesalab.hos.entity.delete.DeleteObj; +import com.mesalab.hos.entity.delete.DeleteRequest; +import com.mesalab.hos.entity.delete.DeleteResponse; +import com.mesalab.hos.service.ObjectService; +import com.mesalab.hos.util.ResponseMessageUtil; +import com.mesalab.hos.util.XmlUtil; +import org.apache.commons.io.IOUtils; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.http.MediaType; +import org.springframework.scheduling.annotation.EnableScheduling; +import org.springframework.stereotype.Component; +import org.springframework.web.bind.annotation.*; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.util.List; + +import static com.mesalab.hos.util.PublicUtil.getRowKey; +import static com.mesalab.hos.util.XmlUtil.convertXmlStrToObject; + + +/** + * Created by wk on 2020/4/16. + */ +@RestController +@Component +@RequestMapping("/hos") +public class ObjectController { + + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + @Autowired + private ObjectService objectService; + @Autowired + private HosProperties hosProperties; + + //上传追加接口 + @PutMapping(value = "/{bucket}/{filename}", produces = MediaType.APPLICATION_XML_VALUE) + public String uploadOrAppend(@PathVariable("bucket") String bucket, @PathVariable("filename") String fileName, String append, @RequestHeader(name = "x-hos-position", defaultValue = "0") int position, @RequestHeader(name = "Content-Type", defaultValue = "application/octet-stream") String filetype, @RequestHeader(name = "x-hos-meta-message", defaultValue = "") String message, @RequestHeader(name = "x-hos-upload-type", defaultValue = "update") String uploadType, HttpServletRequest request, HttpServletResponse response, @RequestAttribute("username") String username, @RequestAttribute("requestId") String requestId, @RequestAttribute("hostId") String hostId, @RequestParam(value = "partNumber", defaultValue = "0") int PartNumber, @RequestParam(value = "uploadId", defaultValue = "") String UploadId, @RequestHeader(name = "x-hos-time-lastmodified", defaultValue = "0") long lastModiFiedTime) { + ResEntity resEntity = null; + ErrorResponse errorResponse = null; + String responseMessage=""; + byte[] byteFile; + long timestamp; + if(lastModiFiedTime==0){ + timestamp = System.currentTimeMillis() / 1000 * 1000; + } + else{ + timestamp = lastModiFiedTime; + } + try (InputStream inputStream = request.getInputStream()) { + byteFile = IOUtils.toByteArray(inputStream); + FileEntity fileEntity = new FileEntity(); + fileEntity.setFileName(fileName); + fileEntity.setMessage(message); + fileEntity.setFile(byteFile); + 
fileEntity.setUser(username); + fileEntity.setBucket(bucket); + fileEntity.setFileType(filetype); + fileEntity.setTimestamp(timestamp); + fileEntity.setIsParent(0); + fileEntity.setIndexFileNameKey(""); + fileEntity.setIndexTimeKey(""); + fileEntity.setPartName(""); + fileEntity.setFileSize(byteFile.length); + if(PartNumber > hosProperties.getMaxPartNumber()){ + errorResponse = ResponseMessageUtil.getResponseMessage("InvalidRange", hostId, requestId, bucket, fileName, response); + } else if (PartNumber > 0 && !"".equals(UploadId)) { + fileEntity.setPartNumber(PartNumber); + fileEntity.setUploadId(UploadId); + resEntity = objectService.putFilePart(fileEntity); + if (resEntity.getIsSuccess()) { + String Etag = (String) resEntity.getReturnEntity(); + response.setHeader("ETag", Etag); + } else { + errorResponse = ResponseMessageUtil.getResponseMessage("InternalError", hostId, requestId, bucket, fileName, response); + } + } else { + if (append == null && uploadType.equals("update")) { + logger.info("put object key="+fileName+" filesize="+byteFile.length+" bucket="+bucket); + resEntity = objectService.putFile(fileEntity); + } else { + logger.info("append object key="+fileName+" position="+position+" filesize="+byteFile.length+" bucket="+bucket); + if(position<=0){ + errorResponse = ResponseMessageUtil.getResponseMessage("PositionNotEqualToLength", hostId, requestId, bucket, fileName, response); + }else if(position > hosProperties.getMaxPosition()){ + errorResponse = ResponseMessageUtil.getResponseMessage("InvalidRange", hostId, requestId, bucket, fileName, response); + }else { + fileEntity.setPartName(position+""); + fileEntity.setIsParent(1); + resEntity = objectService.appendFile(fileEntity); + } + } + if (resEntity != null && !resEntity.getIsSuccess()) { + errorResponse = ResponseMessageUtil.getResponseMessage(resEntity.getCode(), hostId, requestId, bucket, fileName, response); + } + } + + try { + if (fileEntity.getFileSize() < 1024 * 1024) { + HosProperties.smallFileCount++; + } else if (fileEntity.getFileSize() >= 1024 * 1024 && fileEntity.getFileSize() <= 10 * 1024 * 1024) { + HosProperties.middleFileCount++; + } else { + HosProperties.bigFileCount++; + } + HosProperties.sumPacket = HosProperties.sumPacket + fileEntity.getFileSize(); + } + catch (Exception e){ + logger.error(requestId + " " + e.toString()+"monitor error"); + } + } catch (Exception e) { + logger.error(requestId + " " + e.toString()); + errorResponse = ResponseMessageUtil.getResponseMessage("InternalError", hostId, requestId, bucket, fileName, response); + } + try { + if (errorResponse == null) { + HosProperties.successCount++; + } else { + responseMessage =XmlUtil.convertToXml(errorResponse); + HosProperties.errorCount++; + } + + } + catch (Exception e){ + logger.error(requestId + " " + e.toString()+"monitor error"); + } + return responseMessage; + + } + + + @PostMapping(value = "/{bucket}/{filename}", produces = MediaType.APPLICATION_XML_VALUE) + public String MultipartUpload(@PathVariable("bucket") String bucket, @PathVariable("filename") String fileName, String uploads, HttpServletRequest request, HttpServletResponse response, @RequestAttribute("username") String username, @RequestAttribute("requestId") String requestId, @RequestAttribute("hostId") String hostId, @RequestHeader(name = "Content-Type", defaultValue = "application/octet-stream") String fileType, @RequestHeader(name = "x-hos-meta-message", defaultValue = "") String message, @RequestParam(value = "uploadId", defaultValue = "") String uploadId) { + 
ErrorResponse errorResponse; + long timestamp = System.currentTimeMillis() / 1000 * 1000; + if (uploads != null) { + try { + FileEntity fileEntity = new FileEntity(); + fileEntity.setFileName(fileName); + fileEntity.setMessage(message); + fileEntity.setUser(username); + fileEntity.setBucket(bucket); + fileEntity.setFileType(fileType); + fileEntity.setTimestamp(timestamp); + fileEntity.setIsParent(1); + fileEntity.setIndexFileNameKey(""); + fileEntity.setIndexTimeKey(""); + uploadId = getRowKey(fileName); + ResEntity resEntity = objectService.createMultipartUpload(fileEntity); + if (resEntity.getIsSuccess()) { + InitiateMultipartUploadResult initiateMultipartUploadResult = new InitiateMultipartUploadResult(bucket, fileName, uploadId); + return XmlUtil.convertToXml(initiateMultipartUploadResult); + } else { + errorResponse = ResponseMessageUtil.getResponseMessage(resEntity.getCode(), hostId, requestId, bucket, fileName, response); + } + } catch (Exception e) { + logger.error(requestId + " " + e.toString()); + errorResponse = ResponseMessageUtil.getResponseMessage("InternalError", hostId, requestId, bucket, "", response); + } + } else if (!"".equals(uploadId)) { + String xmlBody; + try (InputStream inputStream = request.getInputStream()) { + xmlBody = IOUtils.toString(inputStream, StandardCharsets.UTF_8); + CompleteMultipartUpload cmu = (CompleteMultipartUpload) convertXmlStrToObject(CompleteMultipartUpload.class, xmlBody); + if (cmu != null) { + List<Part> completeListObjects = cmu.getParts(); + ResEntity resEntity = objectService.completeMultipartUpload(bucket, username, fileName, completeListObjects, timestamp); + if (!resEntity.getIsSuccess()) { + errorResponse = ResponseMessageUtil.getResponseMessage(resEntity.getCode(), hostId, requestId, bucket, "", response); + } else { + CompleteMultipartUploadResult output = new CompleteMultipartUploadResult(); + output.setLocation("http://" + hostId + ":" + hosProperties.getServer_port() + "/hos/" + bucket + "/" + fileName); + output.setKey(fileName); + output.setBucket(bucket); + output.setETag('"' + getRowKey(fileName) + '"'); + return XmlUtil.convertToXml(output); + } + } else { + errorResponse = ResponseMessageUtil.getResponseMessage("MalformedXML", hostId, requestId, bucket, "", response); + } + } catch (IOException e) { + logger.error(requestId + " " + e.toString()); + errorResponse = ResponseMessageUtil.getResponseMessage("MalformedXML", hostId, requestId, bucket, "", response); + } catch (Exception e) { + logger.error(requestId + " " + e.toString()); + errorResponse = ResponseMessageUtil.getResponseMessage("InternalError", hostId, requestId, bucket, "", response); + } + } else { + response.setStatus(405); + return ""; + } + return XmlUtil.convertToXml(errorResponse); + } + + //下载文件信息接口 + @GetMapping(value = "/{bucket}/{filename}", produces = MediaType.APPLICATION_XML_VALUE) + public String downloadObj(@PathVariable("bucket") String bucket, @PathVariable("filename") String fileName, String metaData, @RequestParam(name = "uploadId", defaultValue = "") String uploadId, @RequestParam(name = "max-parts", defaultValue = "1000") int maxParts, @RequestParam(name = "part-number-marker", defaultValue = "") String partNumberMarker, HttpServletResponse response, @RequestAttribute("username") String username, @RequestAttribute("requestId") String requestId, @RequestAttribute("hostId") String hostId) { + ResEntity resEntity; + ErrorResponse errorResponse = null; + response.setHeader("Content-Disposition", "attachment;fileName=" + fileName);// 设置文件名 + if 
(!"".equals(uploadId)) { + if (maxParts > hosProperties.getMaxResultLimit()) { + errorResponse = ResponseMessageUtil.getResponseMessage("InventoryFull", hostId, requestId, bucket, "", response); + } else if (maxParts < 0) { + errorResponse = ResponseMessageUtil.getResponseMessage("InvalidArgument", hostId, requestId, bucket, "", response); + } else { + resEntity = objectService.getObjectMultipartUploadParts(bucket, maxParts, username, fileName, partNumberMarker, uploadId); + if (resEntity.getIsSuccess()) { + return XmlUtil.convertToXml(resEntity.getReturnEntity()); + } else { + errorResponse = ResponseMessageUtil.getResponseMessage(resEntity.getCode(), hostId, requestId, bucket, "", response); + } + } + } else { + if (metaData == null) { + resEntity = objectService.getFile(username, bucket, fileName); + FileEntity fileEntity = (FileEntity) resEntity.getReturnEntity(); + if (resEntity.getIsSuccess()) { + try (OutputStream outputStream = response.getOutputStream()) { + response.setContentLength(fileEntity.getFile().length); + response.setHeader(hosProperties.getMetaHeader(), fileEntity.getMessage());// 设置文件名 + response.setContentType(fileEntity.getFileType()); + IOUtils.write(fileEntity.getFile(), outputStream); + } catch (Exception e) { + logger.error(requestId + " " + e.toString()); + errorResponse = ResponseMessageUtil.getResponseMessage("InternalError", hostId, requestId, bucket, fileName, response); + } + } else { + errorResponse = ResponseMessageUtil.getResponseMessage(resEntity.getCode(), hostId, requestId, bucket, fileName, response); + } + } else { + resEntity = objectService.getColumn(username, bucket, getRowKey(fileName), "message"); + if (resEntity.getIsSuccess()) { + ColumnEntity ce = (ColumnEntity) resEntity.getReturnEntity(); + response.setHeader(hosProperties.getMetaHeader(), ce.getColumn()); + } else { + errorResponse = ResponseMessageUtil.getResponseMessage(resEntity.getCode(), hostId, requestId, bucket, fileName, response); + } + } + } + if (errorResponse == null) { + return ""; + } else { + return XmlUtil.convertToXml(errorResponse); + } + } + + @DeleteMapping(value = "/{bucket}/{filename}", produces = MediaType.APPLICATION_XML_VALUE) + public String deleteObj(@PathVariable("bucket") String bucket, @PathVariable("filename") String fileName, @RequestParam(value = "uploadId", defaultValue = "") String uploadId, HttpServletResponse response, @RequestAttribute("username") String username, @RequestAttribute("requestId") String requestId, @RequestAttribute("hostId") String hostId) { + ResEntity resEntity; + ErrorResponse errorResponse = null; + FileEntity fileEntity = new FileEntity(); + fileEntity.setFileName(fileName); + fileEntity.setBucket(bucket); + fileEntity.setUser(username); + try { + if ("".equals(uploadId)) { + resEntity = objectService.deleteFile(fileEntity); + if (!resEntity.getIsSuccess()) { + errorResponse = ResponseMessageUtil.getResponseMessage(resEntity.getCode(), hostId, requestId, bucket, fileName, response); + } else { + response.setStatus(204); + } + } else { + resEntity = objectService.abortMultipartUpload(username, bucket, fileName); + if (!resEntity.getIsSuccess()) { + errorResponse = ResponseMessageUtil.getResponseMessage("InternalError", hostId, requestId, bucket, fileName, response); + }else { + response.setStatus(204); + } + } + } catch (Exception e) { + logger.error(requestId + " " + e.toString()); + errorResponse = ResponseMessageUtil.getResponseMessage("InternalError", hostId, requestId, bucket, fileName, response); + } + if (errorResponse == 
null) { + return ""; + } else { + return XmlUtil.convertToXml(errorResponse); + } + } + + @PostMapping(value = "/{bucket}", produces = MediaType.APPLICATION_XML_VALUE) + public String deleteMultipleObj(@PathVariable("bucket") String bucket, String delete, HttpServletRequest request, HttpServletResponse response, @RequestAttribute("username") String username, @RequestAttribute("requestId") String requestId, @RequestAttribute("hostId") String hostId) { + ResEntity resEntity; + ErrorResponse errorResponse; + if (delete != null) { + String xmlBody; + try (InputStream inputStream = request.getInputStream()) { + xmlBody = IOUtils.toString(inputStream, "UTF-8"); + DeleteRequest deleteRequest = (DeleteRequest) convertXmlStrToObject(DeleteRequest.class, xmlBody); + if (deleteRequest != null) { + List<DeleteObj> deleteListObjects = deleteRequest.getObjects(); + if (deleteListObjects.size() > hosProperties.getDeleteMultipleNumber()) { + errorResponse = ResponseMessageUtil.getResponseMessage("InventoryFull", hostId, requestId, bucket, "", response); + } else { + resEntity = objectService.deleteFileList(bucket, username, deleteListObjects); + if (!resEntity.getIsSuccess()) { + return XmlUtil.convertToXml(resEntity.getReturnEntity()); + } else { + DeleteResponse deleteResponse = new DeleteResponse(); + return XmlUtil.convertToXml(deleteResponse); + } + } + } else { + errorResponse = ResponseMessageUtil.getResponseMessage("MalformedXML", hostId, requestId, bucket, "", response); + } + } catch (IOException e) { + logger.error(requestId + " " + e.toString()); + errorResponse = ResponseMessageUtil.getResponseMessage("MalformedXML", hostId, requestId, bucket, "", response); + } catch (Exception e) { + logger.error(requestId + " " + e.toString()); + errorResponse = ResponseMessageUtil.getResponseMessage("InternalError", hostId, requestId, bucket, "", response); + } + } else { + response.setStatus(405); + return ""; + } + return XmlUtil.convertToXml(errorResponse); + } + + + @GetMapping(value = "/{bucket}/", produces = MediaType.APPLICATION_XML_VALUE) + public String getObjectList(HttpServletResponse response, String uploads, @RequestParam(name = "max-uploads", defaultValue = "1000") int maxUploads, @RequestParam(name = "delimiter", defaultValue = "") String delimiter, @RequestParam(name = "key-marker", defaultValue = "") String keyMarker, @RequestParam(name = "prefix", defaultValue = "") String uploadsPrefix, @RequestParam(name = "upload-id-marker", defaultValue = "") String uploadIdMarker, @PathVariable("bucket") String bucket, @RequestParam(name = "max-keys", defaultValue = "1000") int maxKeys, @RequestParam(name = "prefix", defaultValue = "") String prefix, @RequestParam(name = "start-after", defaultValue = "0_0") String startAfter, @RequestAttribute("username") String username, @RequestAttribute("requestId") String requestId, @RequestAttribute("hostId") String hostId) { + + ResEntity resEntity; + ErrorResponse errorResponse; + try { + if (uploads != null) { + if (maxUploads > hosProperties.getMaxResultLimit()) { + errorResponse = ResponseMessageUtil.getResponseMessage("InventoryFull", hostId, requestId, bucket, "", response); + } else if (maxUploads < 0) { + errorResponse = ResponseMessageUtil.getResponseMessage("InvalidArgument", hostId, requestId, bucket, "", response); + } else { + resEntity = objectService.getMultipartUploadObjects(bucket, maxUploads, username, uploadsPrefix); + if (resEntity.getIsSuccess()) { + return XmlUtil.convertToXml(resEntity.getReturnEntity()); + } else { + errorResponse = 
ResponseMessageUtil.getResponseMessage(resEntity.getCode(), hostId, requestId, bucket, "", response); + } + } + } else { + if (maxKeys < hosProperties.getMaxResultLimit()) { + String[] timeRange = startAfter.split("_"); + String startTime = timeRange[0]; + String endTime = timeRange[1]; + resEntity = objectService.getObjectList(username, bucket, maxKeys, prefix, startTime, endTime); + if (resEntity.getIsSuccess()) { + return XmlUtil.convertToXml(resEntity.getReturnEntity()); + } else { + errorResponse = ResponseMessageUtil.getResponseMessage(resEntity.getCode(), hostId, requestId, bucket, "", response); + } + } else { + errorResponse = ResponseMessageUtil.getResponseMessage("InlineDataTooLarge", hostId, requestId, bucket, "", response); + } + } + } catch (Exception e) { + logger.error(requestId + " " + e.toString()); + errorResponse = ResponseMessageUtil.getResponseMessage("InternalError", hostId, requestId, bucket, "", response); + } + return XmlUtil.convertToXml(errorResponse); + } + + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/BucketDao.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/BucketDao.java new file mode 100644 index 0000000..a6edac6 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/BucketDao.java @@ -0,0 +1,28 @@ +package com.mesalab.hos.dao; + +import com.mesalab.hos.entity.ResEntity; + +/** + * Created by wk on 2020/4/20. + */ + +public interface BucketDao { + + ResEntity createDataTable(String tableName); + + ResEntity createIndexTable(String tableName); + + ResEntity deleteTable(String tableName); + + ResEntity truncateTable(String tableName); + + ResEntity changeDataTableTTL(int day, String tableName); + + ResEntity changeIndexTableTTL(int day, String tableName); + + ResEntity getTableListForNs(String user); + + Boolean ifExistTable(String tableName); + + long getBucketCreateTime(String bucket); +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/DiskDao.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/DiskDao.java new file mode 100644 index 0000000..16cddbf --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/DiskDao.java @@ -0,0 +1,18 @@ +package com.mesalab.hos.dao; + +import com.mesalab.hos.entity.ResEntity; +import org.apache.hadoop.conf.Configuration; + +import java.io.IOException; + +/** + * Created by wk1 on 2020/7/14. 
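A minimal standalone client for the PUT object endpoint defined in ObjectController above could look like the sketch below. The host, port, bucket and object key are placeholders (the service port is not visible in this diff), the auth filter is assumed to be open (auth.open: 0), and only the headers relevant to a plain upload are set.

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class HosPutExample {
        public static void main(String[] args) throws Exception {
            byte[] body = "hello hos".getBytes(StandardCharsets.UTF_8);
            URL url = new URL("http://127.0.0.1:8080/hos/mybucket/hello.txt");    // placeholder endpoint
            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
            conn.setRequestMethod("PUT");
            conn.setDoOutput(true);
            conn.setRequestProperty("Content-Type", "text/plain");                 // stored as meta:contenttype
            conn.setRequestProperty("x-hos-meta-message", "uploaded-by=example");  // stored as meta:message
            try (OutputStream out = conn.getOutputStream()) {
                out.write(body);
            }
            System.out.println("HTTP " + conn.getResponseCode());                  // 200 with an empty XML body on success
            conn.disconnect();
        }
    }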
+ */ + +public interface DiskDao { + + Long getHBaseCurrSize(String path) throws Exception; + + Long getHBaseMaxSize(String path) throws Exception; + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/BucketDaoImpl.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/BucketDaoImpl.java new file mode 100644 index 0000000..3c5c633 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/BucketDaoImpl.java @@ -0,0 +1,251 @@ +package com.mesalab.hos.dao.Impl; + +import com.mesalab.hos.config.HbaseProperties; +import com.mesalab.hos.dao.BucketDao; +import com.mesalab.hos.entity.ResEntity; +import com.mesalab.hos.entity.bucketlist.Bucket; +import com.mesalab.hos.entity.bucketlist.BucketListResponse; +import com.mesalab.hos.entity.bucketlist.BucketOwner; +import com.mesalab.hos.util.PublicUtil; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.util.Bytes; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; +import org.springframework.util.DigestUtils; +import org.springframework.util.StringUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * Created by wk1 on 2020/7/14. + */ +@Component +public class BucketDaoImpl implements BucketDao { + + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + @Autowired + private Connection hbaseConnection; + @Autowired + HbaseProperties hbaseProperties; + + @Override + public ResEntity createDataTable(String tableName) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try (Admin admin = hbaseConnection.getAdmin()) { + List<ColumnFamilyDescriptor> families = new ArrayList<>(); + ColumnFamilyDescriptor dataColumnFamily = ColumnFamilyDescriptorBuilder.newBuilder("data".getBytes()) + .setValue(ColumnFamilyDescriptorBuilder.IS_MOB, "true") + .setValue(ColumnFamilyDescriptorBuilder.MOB_THRESHOLD, 0 + "") + .setCompressionType(Compression.Algorithm.GZ) + .build(); + ColumnFamilyDescriptor metaColumnFamily = ColumnFamilyDescriptorBuilder.newBuilder("meta".getBytes()) + .setCompressionType(Compression.Algorithm.GZ) + .build(); + families.add(dataColumnFamily); + families.add(metaColumnFamily); + TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) + .setColumnFamilies(families) + .build(); + if (StringUtils.isEmpty(hbaseProperties.getRegion_start_key())) { + admin.createTable(tableDescriptor); + } else { + byte[][] regionParts = PublicUtil.stringToByteArray(hbaseProperties.getRegion_start_key()); + admin.createTable(tableDescriptor, regionParts); + } + resEntity.setIsSuccess(true); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity createIndexTable(String tableName) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try (Admin admin = hbaseConnection.getAdmin()) { + List<ColumnFamilyDescriptor> indexTableFamilies = new ArrayList<>(); + ColumnFamilyDescriptor indexTableColumnFamily = ColumnFamilyDescriptorBuilder.newBuilder("meta".getBytes()) + .setCompressionType(Compression.Algorithm.GZ) + 
.build(); + indexTableFamilies.add(indexTableColumnFamily); + TableDescriptor indexTableDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName)) + .setColumnFamilies(indexTableFamilies) + .build(); + if (StringUtils.isEmpty(hbaseProperties.getRegion_start_key())) { + admin.createTable(indexTableDescriptor); + } else { + byte[][] regionParts = PublicUtil.stringToByteArray(hbaseProperties.getRegion_start_key()); + admin.createTable(indexTableDescriptor, regionParts); + } + resEntity.setIsSuccess(true); + } catch (Exception e) { + logger.error("create indextable "+tableName+"error "+e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity deleteTable(String tableName) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try (Admin admin = hbaseConnection.getAdmin()) { + if (admin.tableExists(TableName.valueOf(tableName))){ + if (admin.isTableEnabled(TableName.valueOf(tableName))) { + admin.disableTable(TableName.valueOf(tableName)); + } + admin.deleteTable(TableName.valueOf(tableName)); + } + resEntity.setIsSuccess(true); + } catch (TableNotFoundException e) { + logger.error(e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity truncateTable(String tableName) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try (Admin admin = hbaseConnection.getAdmin()) { + if (admin.isTableEnabled(TableName.valueOf(tableName))) { + admin.disableTable(TableName.valueOf(tableName)); + } + admin.truncateTable(TableName.valueOf(tableName), true); + resEntity.setIsSuccess(true); + } catch (TableNotFoundException e) { + logger.error(e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity changeDataTableTTL(int day, String tableName) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try (Admin admin = hbaseConnection.getAdmin()) { + TableDescriptor descriptor = admin.getDescriptor(TableName.valueOf(tableName)); + ColumnFamilyDescriptor columnFamily = descriptor.getColumnFamily("data".getBytes()); + long mobThreshold = columnFamily.getMobThreshold(); + ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder("data".getBytes()) + .setValue(ColumnFamilyDescriptorBuilder.TTL, Integer.toString(day * 24 * 60 * 60)) + .setValue(ColumnFamilyDescriptorBuilder.IS_MOB, "true") + .setValue(ColumnFamilyDescriptorBuilder.MOB_THRESHOLD, mobThreshold + "") + .setCompressionType(Compression.Algorithm.GZ) + .build(); + admin.modifyColumnFamily(TableName.valueOf(tableName), columnFamilyDescriptor); + columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder("meta".getBytes()) + .setValue(ColumnFamilyDescriptorBuilder.TTL, Integer.toString(day * 24 * 60 * 60)) + .setCompressionType(Compression.Algorithm.GZ) + .build(); + admin.modifyColumnFamily(TableName.valueOf(tableName), columnFamilyDescriptor); + resEntity.setIsSuccess(true); + } catch (TableNotFoundException e) { + logger.error(e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override 
+ public ResEntity changeIndexTableTTL(int day, String tableName) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try (Admin admin = hbaseConnection.getAdmin()) { + ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder.newBuilder("meta".getBytes()) + .setValue(ColumnFamilyDescriptorBuilder.TTL, Integer.toString(day * 24 * 60 * 60)) + .setCompressionType(Compression.Algorithm.GZ) + .build(); + admin.modifyColumnFamily(TableName.valueOf(tableName), columnFamilyDescriptor); + resEntity.setIsSuccess(true); + } catch (TableNotFoundException e) { + logger.error("change ttl index_table"+tableName+"error +"+e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error("change ttl index_table"+tableName+"error +"+e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity getTableListForNs(String user) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + BucketListResponse bucketListResponse = new BucketListResponse(); + try (Admin admin = hbaseConnection.getAdmin()) { + TableName[] tableNames = admin.listTableNamesByNamespace(user); + ArrayList<Bucket> bucketList = new ArrayList<>(); + for (TableName tableName : tableNames) { + String table = Bytes.toString(tableName.getName()); + if (table.contains(":")) { + table = table.split(":")[1]; + } + if (!table.startsWith(hbaseProperties.getTime_index_table_prefix()) && !table.startsWith(hbaseProperties.getFilename_index_table_prefix()) && !table.startsWith(hbaseProperties.getPartfile_index_table_prefix())) { + bucketList.add(new Bucket(PublicUtil.getUTCTime(getBucketCreateTime(Bytes.toString(tableName.getName()))), table)); + } + } + bucketListResponse.setBucket(bucketList); + bucketListResponse.setOwner(new BucketOwner(user, DigestUtils.md5DigestAsHex(user.getBytes()))); + resEntity.setReturnEntity(bucketListResponse); + resEntity.setIsSuccess(true); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + + @Override + public Boolean ifExistTable(String tableName) { + boolean ifExistTable = false; + try (Admin admin = hbaseConnection.getAdmin()) { + ifExistTable = admin.tableExists(TableName.valueOf(tableName)); + } catch (Exception e) { + logger.error(e.toString()); + } + return ifExistTable; + } + + @Override + public long getBucketCreateTime(String bucket) { + long timestamp = 0; + try (Table table = hbaseConnection.getTable(TableName.valueOf("hbase:meta"))) { + Get get = new Get(Bytes.toBytes(bucket)); + Result result = table.get(get); + Cell cell = result.getColumnLatestCell(Bytes.toBytes("table"), Bytes.toBytes("state")); + timestamp = cell.getTimestamp(); + } catch (IOException e) { + logger.error(e.toString()); + } + return timestamp; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/DiskDaoClusterImpl.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/DiskDaoClusterImpl.java new file mode 100644 index 0000000..699513f --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/DiskDaoClusterImpl.java @@ -0,0 +1,61 @@ +package com.mesalab.hos.dao.Impl; + +import com.mesalab.hos.dao.DiskDao; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsStatus; +import org.apache.hadoop.fs.Path; +import 
org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.stereotype.Component; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; + + +/** + * Created by wk on 2020/4/20. + */ +@Component +@ConditionalOnProperty(value = "hbase.standone", havingValue = "1") +public class DiskDaoClusterImpl implements DiskDao { + + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + @Autowired + @Qualifier("hadoopConfiguration") + private Configuration hadoopConfiguration; + @Override + public Long getHBaseCurrSize(String path) throws Exception { + + Long used; + try (FileSystem fs = FileSystem.get(hadoopConfiguration)) { + Path filenamePath = new Path(path); + ContentSummary in = fs.getContentSummary(filenamePath); + used=in.getSpaceConsumed(); + } catch (IOException e) { + logger.error(e); + throw e; + } + return used; + } + + @Override + public Long getHBaseMaxSize(String path) throws Exception { + Long capacity ; + try (FileSystem fs = FileSystem.get(hadoopConfiguration)) { + FsStatus fsStatus = fs.getStatus(); + capacity = fsStatus.getCapacity(); //Configured Capacity + + } catch (IOException e) { + logger.error(e); + throw e; + } + return capacity; + } + + + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/DiskDaoStandoneImpl.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/DiskDaoStandoneImpl.java new file mode 100644 index 0000000..886f2bf --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/DiskDaoStandoneImpl.java @@ -0,0 +1,74 @@ +package com.mesalab.hos.dao.Impl; + +import com.mesalab.hos.dao.DiskDao; +import com.mesalab.hos.dao.UserDao; +import com.mesalab.hos.entity.ResEntity; +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FsStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.stereotype.Component; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; + + +/** + * Created by wk on 2020/4/20. 
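DiskDaoClusterImpl above derives usage from HDFS (ContentSummary.getSpaceConsumed() against hbasePath, FsStatus.getCapacity() for the total), and the standalone implementation that follows shells out to du/df instead. A caller-side sketch of turning those two numbers into a utilisation check; the helper and its 90% threshold are editorial assumptions, only the DiskDao interface comes from this commit.

    static boolean nearCapacity(com.mesalab.hos.dao.DiskDao diskDao, String hbasePath) throws Exception {
        long usedBytes = diskDao.getHBaseCurrSize(hbasePath);   // space already consumed under hbasePath
        long maxBytes  = diskDao.getHBaseMaxSize(hbasePath);    // capacity of the backing filesystem
        return maxBytes > 0 && (double) usedBytes / maxBytes > 0.9;
    }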
+ */ +@Component +@ConditionalOnProperty(value = "hbase.standone", havingValue = "0") +public class DiskDaoStandoneImpl implements DiskDao { + + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + + + @Override + public Long getHBaseCurrSize(String path) throws InterruptedException, IOException { + + String result = ""; + Process catCommands = null; + Long resultsize ; + catCommands = Runtime.getRuntime() + .exec(new String[]{"/bin/sh", "-c", "du -s " + path + " | awk '{print $1}'"}); + try (InputStream in = catCommands.getInputStream()) { + result = org.apache.commons.io.IOUtils.toString(in, StandardCharsets.UTF_8); + catCommands.waitFor(); + resultsize = Long.parseLong(result.trim()) * 1024L; + } catch (IOException e) { + throw e; + } catch (InterruptedException e) { + throw e; + } + return resultsize; + } + + @Override + public Long getHBaseMaxSize(String path) throws IOException, InterruptedException { + + String result = ""; + Process catCommands = null; + Long resultsize; + catCommands = Runtime.getRuntime().exec( + new String[]{"/bin/sh", "-c", "df " + path + " | sed -n '2,2p' | awk '{print $2}'"}); + try (InputStream in = catCommands.getInputStream()) { + result = IOUtils.toString(in, StandardCharsets.UTF_8); + catCommands.waitFor(); + resultsize = Long.parseLong(result.trim()) * 1024L; + } catch (IOException e) { + throw e; + } catch (InterruptedException e) { + throw e; + } + return resultsize; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/IndexFileNameDaoImpl.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/IndexFileNameDaoImpl.java new file mode 100644 index 0000000..31f7f71 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/IndexFileNameDaoImpl.java @@ -0,0 +1,132 @@ +package com.mesalab.hos.dao.Impl; + +import com.mesalab.hos.config.HosProperties; +import com.mesalab.hos.dao.IndexFileNameDao; +import com.mesalab.hos.entity.ColumnEntity; +import com.mesalab.hos.entity.select.Content; +import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.filter.BinaryComparator; +import org.apache.hadoop.hbase.filter.FamilyFilter; +import org.apache.hadoop.hbase.util.Bytes; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +import java.time.Instant; +import java.util.Comparator; +import java.util.LinkedList; +import java.util.List; + +import static com.mesalab.hos.util.PublicUtil.stringTonextAscii; + + +/** + * Created by wk on 2020/4/20. 
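The standalone implementation above multiplies the du and df output by 1024 because, with GNU coreutils defaults, both report 1 KiB blocks. For comparison, the same capacity figure can be read without spawning a shell; this is an editor's sketch, not code from the commit.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    class LocalDiskSize {
        // Capacity in bytes of the filesystem holding hbasePath (what `df` column 2 * 1024 reports above).
        static long capacityBytes(String hbasePath) throws IOException {
            return Files.getFileStore(Paths.get(hbasePath)).getTotalSpace();
        }
    }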
+ */ +@Component +public class IndexFileNameDaoImpl implements IndexFileNameDao { + + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + @Autowired + private Connection hbaseConnection; + @Autowired + private HosProperties hosProperties; + + @Override + public Boolean putFile(String tableName, String rowKey,String fileRowKey,String timestamp) { + boolean isSuccess = false; + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Put put = new Put(Bytes.toBytes(rowKey)); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("filename"), Bytes.toBytes(fileRowKey)); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("timestamp"), Bytes.toBytes(timestamp)); + table.put(put); + isSuccess = true; + } catch (Exception e) { + logger.error(e.toString()); + } + return isSuccess; + } + + @Override + public ColumnEntity getColumnWithTime(String tableName, String rowKey, String column) { + String message; + ColumnEntity ce = new ColumnEntity(); + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Get get = new Get(rowKey.getBytes()); + Result result = table.get(get); + message = Bytes.toString(result.getValue(Bytes.toBytes("meta"), Bytes.toBytes(column))); + long timestamp = result.getColumnLatestCell(Bytes.toBytes("meta"), Bytes.toBytes(column)).getTimestamp(); + ce.setTimestamp(timestamp); + ce.setColumn(message); + if (message == null) { + logger.error("column not exit"); + ce = null; + } + } catch (Exception e) { + logger.error(e.toString()); + ce = null; + } + return ce; + } + + + @Override + public Boolean deleteFile(String tableName, String rowKey) { + boolean issuccess = false; + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Delete delete = new Delete(rowKey.getBytes()); + table.delete(delete); + issuccess = true; + } catch (Exception e) { + logger.error(e.toString()); + } + return issuccess; + } + + @Override + public Boolean ifExistRowKey(String tableName, String rowKey) { + boolean ifExist = false; + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Get get = new Get(Bytes.toBytes(rowKey)); + ifExist = table.exists(get); + } catch (Exception e) { + logger.error(e.toString()); + } + return ifExist; + } + + @Override + public List<Content> getObjectListWithPrefixFilter(String tableName, int maxKeys, String startTime, String endTime, String prefix) throws Exception { + List<Content> contents = new LinkedList<>(); + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Scan scan = new Scan(); + scan.withStartRow(Bytes.toBytes(prefix), false); + scan.withStopRow(Bytes.toBytes(stringTonextAscii(prefix)), false); + scan.setLimit(hosProperties.getMaxResultLimit()); + scan.setTimeRange(Long.parseLong(startTime + "000"), Long.parseLong(endTime + "999")); + FamilyFilter filter = new FamilyFilter(CompareOperator.EQUAL, new BinaryComparator(Bytes.toBytes("meta"))); + ResultScanner resultScanner = table.getScanner(scan.setFilter(filter)); + for (Result result : resultScanner) { + String filename = new String(result.getValue(Bytes.toBytes("meta"), Bytes.toBytes("filename"))); + String time = new String(result.getValue(Bytes.toBytes("meta"), Bytes.toBytes("timestamp"))); + Content ct = new Content(); + Instant timestamp = Instant.ofEpochMilli(Long.parseLong(time)); + ct.setKey(filename); + ct.setLastModified(timestamp.toString()); + //t.setSize(fileSize); + contents.add(ct); + } + + return contents; + + } catch 
(TableNotFoundException e) { + logger.error(e.toString()); + throw new TableNotFoundException(); + } catch (Exception e) { + logger.error(e.toString()); + throw new Exception(); + } + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/IndexTimeDaoImpl.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/IndexTimeDaoImpl.java new file mode 100644 index 0000000..9ecc210 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/IndexTimeDaoImpl.java @@ -0,0 +1,120 @@ +package com.mesalab.hos.dao.Impl; + +import com.mesalab.hos.dao.IndexTimeDao; +import com.mesalab.hos.entity.ColumnEntity; +import com.mesalab.hos.entity.select.Content; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.util.Bytes; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +import java.time.Instant; +import java.util.LinkedList; +import java.util.List; + + +/** + * Created by wk on 2020/4/20. + */ +@Component +public class IndexTimeDaoImpl implements IndexTimeDao { + + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + @Autowired + private Connection hbaseConnection; + + @Override + public Boolean putFile(String tableName, String rowKey, String fileName,String timestamp) { + boolean isSuccess = false; + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Put put = new Put(Bytes.toBytes(rowKey)); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("filename"), Bytes.toBytes(fileName)); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("timestamp"), Bytes.toBytes(timestamp)); + table.put(put); + isSuccess = true; + } catch (Exception e) { + logger.error(e.toString()); + } + return isSuccess; + } + + @Override + public ColumnEntity getColumnWithTime(String tableName, String rowKey, String column) { + String message; + ColumnEntity ce = new ColumnEntity(); + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Get get = new Get(Bytes.toBytes(rowKey)); + Result result = table.get(get); + message = Bytes.toString(result.getValue(Bytes.toBytes("meta"), Bytes.toBytes(column))); + long timestamp = result.getColumnLatestCell(Bytes.toBytes("meta"), Bytes.toBytes(column)).getTimestamp(); + ce.setTimestamp(timestamp); + ce.setColumn(message); + if (message == null) { + logger.error("column not exist"); + ce = null; + } + } catch (Exception e) { + logger.error(e.toString()); + ce = null; + } + return ce; + } + + @Override + public Boolean deleteFile(String tableName, String rowKey) { + boolean isSuccess = false; + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Delete delete = new Delete(rowKey.getBytes()); + table.delete(delete); + isSuccess = true; + } catch (Exception e) { + logger.error(e.toString()); + } + return isSuccess; + } + + @Override + public Boolean ifExistRowKey(String tableName, String rowKey) { + boolean ifExist = false; + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Get get = new Get(Bytes.toBytes(rowKey)); + ifExist = table.exists(get); + } catch (Exception e) { + logger.error(e.toString()); + } + return ifExist; + } + + @Override + public List<Content> getObjectListWithTimeFilter(String tableName, int maxKeys, String startTime, String endTime, String prefix) throws Exception { + List<Content> contents = 
new LinkedList<>(); + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Scan scan = new Scan(); + scan.withStartRow(Bytes.toBytes(endTime + "000"), true); + scan.withStopRow(Bytes.toBytes(startTime + "999"), false); + scan.setLimit(maxKeys); + scan.setReversed(true); + ResultScanner resultScanner = table.getScanner(scan); + for (Result result : resultScanner) { + String[] rowKey = new String(result.getRow(), "UTF-8").split("\\|"); + String filename = new String(result.getValue(Bytes.toBytes("meta"), Bytes.toBytes("filename"))); + String time = new String(result.getValue(Bytes.toBytes("meta"), Bytes.toBytes("timestamp"))); + Content ct = new Content(); + Instant timestamp = Instant.ofEpochMilli(Long.parseLong(time)); + ct.setKey(filename); + ct.setLastModified(timestamp.toString()); + //ct.setSize(fileSize); + contents.add(ct); + } + } catch (TableNotFoundException e) { + logger.error(e.toString()); + throw new TableNotFoundException(); + } catch (Exception e) { + logger.error(e.toString()); + throw new Exception(); + } + return contents; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/ObjectDaoImpl.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/ObjectDaoImpl.java new file mode 100644 index 0000000..ce2d5f6 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/ObjectDaoImpl.java @@ -0,0 +1,378 @@ +package com.mesalab.hos.dao.Impl; + +import com.mesalab.hos.dao.ObjectDao; +import com.mesalab.hos.entity.FileEntity; +import com.mesalab.hos.entity.ResEntity; +import com.mesalab.hos.entity.ColumnEntity; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.util.Bytes; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +import java.util.*; + +import static com.mesalab.hos.util.PublicUtil.byteMerger; +import static com.mesalab.hos.util.PublicUtil.getRowKey; + + +/** + * Created by wk on 2020/4/20. 
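getObjectListWithTimeFilter above receives the listing window as two epoch-second values, pads them to milliseconds, and walks the time-index table newest-first. Spelled out with concrete values (a fragment reusing the Scan and Bytes imports of the surrounding DAOs; the assumption that time-index row keys begin with the padded timestamp is an inference from this hunk, not stated in it):

    Scan scan = new Scan()
            .withStartRow(Bytes.toBytes("1600086400" + "000"), true)   // endTime padded; reversed scans begin at the larger key
            .withStopRow(Bytes.toBytes("1600000000" + "999"), false)   // startTime padded, exclusive
            .setReversed(true)                                         // newest rows are returned first
            .setLimit(1000);                                           // maxKeys from the request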
+ */ +@Component +public class ObjectDaoImpl implements ObjectDao { + + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + @Autowired + private Connection hbaseConnection; + + @Override + public ResEntity putFile(FileEntity fileEntity) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + resEntity.setReturnEntity(getRowKey(fileEntity.getFileName())); + try (Table table = hbaseConnection.getTable(TableName.valueOf(fileEntity.getUser() + ":" + fileEntity.getBucket()))) { + Put put = new Put(Bytes.toBytes(getRowKey(fileEntity.getFileName()))); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("message"), Bytes.toBytes(fileEntity.getMessage())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("contenttype"), Bytes.toBytes(fileEntity.getFileType())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("filesize"), Bytes.toBytes(String.valueOf(fileEntity.getFileSize()))); + //put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("filename"), Bytes.toBytes(fileEntity.getFileName())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("isparent"), Bytes.toBytes(fileEntity.getIsParent())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("filenamekey"), Bytes.toBytes(fileEntity.getIndexFileNameKey())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("timekey"), Bytes.toBytes(fileEntity.getIndexTimeKey())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("partname"),Bytes.toBytes(fileEntity.getPartName())); + + put.addColumn(Bytes.toBytes("data"), Bytes.toBytes("file"), fileEntity.getFile()); + table.put(put); + resEntity.setIsSuccess(true); + } catch (TableNotFoundException e) { + logger.error(e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity putAppendFileMeta(FileEntity fileEntity) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try (Table table = hbaseConnection.getTable(TableName.valueOf(fileEntity.getUser() + ":" + fileEntity.getBucket()))) { + Put put = new Put(Bytes.toBytes(getRowKey(fileEntity.getFileName()))); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("message"), Bytes.toBytes(fileEntity.getMessage())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("contenttype"), Bytes.toBytes(fileEntity.getFileType())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("filesize"), Bytes.toBytes(String.valueOf(fileEntity.getFileSize()))); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("filename"), Bytes.toBytes(fileEntity.getFileName())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("isparent"), Bytes.toBytes(fileEntity.getIsParent())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("filenamekey"), Bytes.toBytes(fileEntity.getIndexFileNameKey())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("timekey"), Bytes.toBytes(fileEntity.getIndexTimeKey())); + Append append = new Append(Bytes.toBytes(getRowKey(fileEntity.getFileName()))); + append.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("partname"),Bytes.toBytes(fileEntity.getPartName())); + /// append.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("filenamekey"), Bytes.toBytes(fileEntity.getIndexFileNameKey())); + // append.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("timekey"), Bytes.toBytes(fileEntity.getIndexTimeKey())); + table.put(put); + table.append(append); + 
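+            // The Put rewrites the fixed meta columns, while the Append concatenates the new part name onto
+            // meta:partname, so the parent row accumulates every appended position.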
resEntity.setIsSuccess(true); + } catch (TableNotFoundException e) { + logger.error(e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; } + + @Override + public ResEntity putColumn(String tableName, String rowKey, String column, String columnData) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Put put = new Put(Bytes.toBytes(rowKey)); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes(column),Bytes.toBytes(columnData)); + table.put(put); + resEntity.setIsSuccess(true); + } catch (TableNotFoundException e) { + logger.error(e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + + + + @Override + public ResEntity putAllFileMeta(FileEntity fileEntity) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try (Table table = hbaseConnection.getTable(TableName.valueOf(fileEntity.getUser() + ":" + fileEntity.getBucket()))) { + Put put = new Put(Bytes.toBytes(getRowKey(fileEntity.getFileName()))); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("message"), Bytes.toBytes(fileEntity.getMessage())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("contenttype"), Bytes.toBytes(fileEntity.getFileType())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("filename"), Bytes.toBytes(fileEntity.getFileName())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("isparent"), Bytes.toBytes(fileEntity.getIsParent())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("filesize"), Bytes.toBytes(String.valueOf(fileEntity.getFileSize()))); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("filenamekey"), Bytes.toBytes(fileEntity.getIndexFileNameKey())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("timekey"), Bytes.toBytes(fileEntity.getIndexTimeKey())); + if(fileEntity.getPartName()!=null) { + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("partname"), Bytes.toBytes(fileEntity.getPartName())); + } + else{ + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("partname"), Bytes.toBytes("")); + } + table.put(put); + resEntity.setIsSuccess(true); + } catch (TableNotFoundException e) { + logger.error(e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity getAllFileMeta(String tableName, String fileName) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + FileEntity fileEntity = new FileEntity(); + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Get get = new Get(Bytes.toBytes(fileName)); + Result result = table.get(get); + if (result.isEmpty()) { + resEntity.setCode("NoSuchKey"); + }else { + String message = Bytes.toString(CellUtil.cloneValue(result.getColumnLatestCell(Bytes.toBytes("meta"), Bytes.toBytes("message")))); + String contentType = Bytes.toString(CellUtil.cloneValue(result.getColumnLatestCell(Bytes.toBytes("meta"), Bytes.toBytes("contenttype")))); + String fileSize = Bytes.toString(CellUtil.cloneValue(result.getColumnLatestCell(Bytes.toBytes("meta"), Bytes.toBytes("filesize")))); + String partname = 
Bytes.toString(CellUtil.cloneValue(result.getColumnLatestCell(Bytes.toBytes("meta"), Bytes.toBytes("partname")))); + String fileNameKey = Bytes.toString(CellUtil.cloneValue(result.getColumnLatestCell(Bytes.toBytes("meta"), Bytes.toBytes("filenamekey")))); + String timeKey = Bytes.toString(CellUtil.cloneValue(result.getColumnLatestCell(Bytes.toBytes("meta"), Bytes.toBytes("timekey")))); + int isParent = Bytes.toInt(CellUtil.cloneValue(result.getColumnLatestCell(Bytes.toBytes("meta"), Bytes.toBytes("isparent")))); + fileEntity.setMessage(message); + fileEntity.setFileType(contentType); + fileEntity.setFileSize(Long.parseLong(fileSize)); + fileEntity.setPartName(partname); + fileEntity.setIsParent(isParent); + fileEntity.setIndexFileNameKey(fileNameKey); + fileEntity.setIndexTimeKey(timeKey); + resEntity.setReturnEntity(fileEntity); + resEntity.setIsSuccess(true); + } + } catch (TableNotFoundException e) { + logger.error(e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public Boolean ifMultiFile(String tableName, String rowKey) { + boolean isMulti = false; + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Get get = new Get(Bytes.toBytes(rowKey)); + Result result = table.get(get); + int partStatus = Bytes.toInt(result.getValue(Bytes.toBytes("meta"), Bytes.toBytes("isparent"))); + if (partStatus > 0) { + isMulti = true; + } + } catch (Exception e) { + logger.error(e.toString()); + } + return isMulti; + } + + @Override + public ResEntity appendColumn(String tableName, String rowKey, String column,String columnData) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Append append = new Append(Bytes.toBytes(rowKey)); + append.addColumn(Bytes.toBytes("meta"), Bytes.toBytes(column),columnData.getBytes()); + table.append(append); + resEntity.setIsSuccess(true); + } catch (TableNotFoundException e) { + logger.error(e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity appendIndexKey(FileEntity fileEntity) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try (Table table = hbaseConnection.getTable(TableName.valueOf(fileEntity.getUser() + ":" + fileEntity.getBucket()))) { + Append append = new Append(Bytes.toBytes(getRowKey(fileEntity.getFileName()))); + append.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("filenamekey"), Bytes.toBytes(fileEntity.getIndexFileNameKey()+",")); + append.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("timekey"), Bytes.toBytes(fileEntity.getIndexTimeKey()+",")); + table.append(append); + resEntity.setIsSuccess(true); + } catch (TableNotFoundException e) { + logger.error(e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity getFile(String tableName, String fileName) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + FileEntity fileEntity = new FileEntity(); + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Get get = new 
Get(Bytes.toBytes(getRowKey(fileName))); + Result result = table.get(get); + if(result.isEmpty()){ + resEntity.setCode("NoSuchKey"); + }else { + String message = ""; + String fileType = Bytes.toString(result.getValue(Bytes.toBytes("meta"), Bytes.toBytes("contenttype"))); + int isParent = Bytes.toInt(result.getValue(Bytes.toBytes("meta"), Bytes.toBytes("isparent"))); + byte[] file = new byte[0]; + if(isParent==1){ + String partFile = Bytes.toString(result.getValue(Bytes.toBytes("meta"), Bytes.toBytes("partname"))); + String[] partsArray = partFile.split(","); + TreeSet<Integer> partsSet = new TreeSet<>(); + StringBuffer messageBuffer = new StringBuffer(); + for (String s : partsArray) { + partsSet.add(Integer.parseInt(s)); + } + for (int part : partsSet) { + Get getPart = new Get(Bytes.toBytes(getRowKey(fileName+"|"+part))); + Result partResult = table.get(getPart); + byte[] partByte = partResult.getValue(Bytes.toBytes("data"), Bytes.toBytes("file")); + String partMessage = Bytes.toString(partResult.getValue(Bytes.toBytes("meta"), Bytes.toBytes("message"))); + messageBuffer.append(partMessage); + file = byteMerger(file, partByte); + } + message = messageBuffer.toString(); + } else{ + file = result.getValue(Bytes.toBytes("data"), Bytes.toBytes("file")); + message = Bytes.toString(result.getValue(Bytes.toBytes("meta"), Bytes.toBytes("message"))); + + } + if (file == null) { + resEntity.setCode("NoSuchKey"); + } else { + fileEntity.setFileName(fileName); + fileEntity.setMessage(message); + fileEntity.setFile(file); + fileEntity.setFileType(fileType); + resEntity.setIsSuccess(true); + } + } + resEntity.setReturnEntity(fileEntity); + } catch (TableNotFoundException e) { + logger.error(e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity getColumn(String tableName, String fileName, String column) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + String message; + ColumnEntity ce = new ColumnEntity(); + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Get get = new Get(Bytes.toBytes(fileName)); + Result result = table.get(get); + if (result.isEmpty()) { + resEntity.setCode("NoSuchKey"); + }else { + message = Bytes.toString(CellUtil.cloneValue(result.getColumnLatestCell(Bytes.toBytes("meta"), Bytes.toBytes(column)))); + long timestamp = result.getColumnLatestCell(Bytes.toBytes("meta"), Bytes.toBytes(column)).getTimestamp(); + ce.setTimestamp(timestamp); + ce.setColumn(message); + resEntity.setReturnEntity(ce); + resEntity.setIsSuccess(true); + } + } catch (TableNotFoundException e) { + logger.error(e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity deleteFile(String tableName, String fileName) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Delete delete = new Delete(fileName.getBytes()); + table.delete(delete); + resEntity.setIsSuccess(true); + } catch (TableNotFoundException e) { + logger.error(e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override 
+ public ResEntity deleteMultipartUploadFile(String tableName, String filename) { + ResEntity resEntity = getColumn(tableName, getRowKey(filename), "partname"); + if (!resEntity.getIsSuccess()){ + return resEntity; + } + ColumnEntity returnEntity = (ColumnEntity) resEntity.getReturnEntity(); + String[] partNames = returnEntity.getColumn().split(","); + for (String partName : partNames){ + resEntity = deleteFile(tableName, getRowKey(filename+"|"+partName)); + if (!resEntity.getIsSuccess()){ + return resEntity; + } + } + return deleteFile(tableName,getRowKey(filename)); + } + + @Override + public Boolean ifExistRowKey(String tableName, String fileName) { + boolean ifExist = false; + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Get get = new Get(Bytes.toBytes(fileName)); + ifExist = table.exists(get); + } catch (Exception e) { + logger.error(e.toString()); + } + return ifExist; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/PartfileDaoImpl.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/PartfileDaoImpl.java new file mode 100644 index 0000000..0d965eb --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/PartfileDaoImpl.java @@ -0,0 +1,240 @@ +package com.mesalab.hos.dao.Impl; + +import com.mesalab.hos.config.HbaseProperties; +import com.mesalab.hos.dao.PartFileDao; +import com.mesalab.hos.entity.FileEntity; +import com.mesalab.hos.entity.ResEntity; +import com.mesalab.hos.entity.getMultipartUpload.ListPartsResponse; +import com.mesalab.hos.entity.getMultipartUpload.Part; +import com.mesalab.hos.entity.getMultipartUpload.Upload; +import com.mesalab.hos.entity.partEntity; +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.client.*; +import org.apache.hadoop.hbase.filter.RegexStringComparator; +import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; +import org.apache.hadoop.hbase.util.Bytes; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + +import java.util.ArrayList; +import java.util.List; + +import static com.mesalab.hos.util.PublicUtil.getRowKey; + + +/** + * Created by wk on 2020/4/20. 
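deleteMultipartUploadFile above and the multipart branch of getFile rely on one row-key convention: each part's bytes live in a row keyed by getRowKey(fileName + "|" + partNumber), and the parent row's meta:partname column holds the comma-separated part list that both methods split on. A sketch with invented values (getRowKey is the static helper from PublicUtil already imported in these DAOs):

    String parentRow = getRowKey("video.mp4");            // parent object row; meta:partname = "1,2"
    String part1Row  = getRowKey("video.mp4" + "|" + 1);  // row holding the bytes of part 1
    String part2Row  = getRowKey("video.mp4" + "|" + 2);  // row holding the bytes of part 2
    // a full delete removes every part row first and the parent row last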
+ */ +@Component +public class PartfileDaoImpl implements PartFileDao { + + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + @Autowired + private Connection hbaseConnection; + @Autowired + HbaseProperties hbaseProperties; + + @Override + public ResEntity putFile(FileEntity fileEntity) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + resEntity.setReturnEntity(getRowKey(fileEntity.getFileName())); + try (Table table = hbaseConnection.getTable(TableName.valueOf(fileEntity.getUser() + ":index_partfile_" + fileEntity.getBucket()))) { + Put put = new Put(Bytes.toBytes(getRowKey(fileEntity.getFileName()))); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("message"), Bytes.toBytes(fileEntity.getMessage())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("contenttype"), Bytes.toBytes(fileEntity.getFileType())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("filename"), Bytes.toBytes(fileEntity.getFileName())); + put.addColumn(Bytes.toBytes("meta"), Bytes.toBytes("partname"), Bytes.toBytes("")); + table.put(put); + resEntity.setIsSuccess(true); + } catch (TableNotFoundException e) { + logger.error("part object error"+e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error("part object error"+e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity appendColumn(String tableName, String fileName, String columnName, String columnValue) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Append append = new Append(Bytes.toBytes(fileName)); + append.addColumn(Bytes.toBytes("meta"), Bytes.toBytes(columnName), Bytes.toBytes(columnValue)); + table.append(append); + resEntity.setIsSuccess(true); + } catch (TableNotFoundException e) { + logger.error("part object error"+e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error("part object error"+e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity getFile(String tableName, String key) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + partEntity partFile = new partEntity(); + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Get get = new Get(Bytes.toBytes(key)); + Result result = table.get(get); + String filename = Bytes.toString(result.getValue(Bytes.toBytes("meta"), Bytes.toBytes("filename"))); + String message = Bytes.toString(result.getValue(Bytes.toBytes("meta"), Bytes.toBytes("message"))); + String fileType = Bytes.toString(result.getValue(Bytes.toBytes("meta"), Bytes.toBytes("contenttype"))); + String partName = Bytes.toString(result.getValue(Bytes.toBytes("meta"), Bytes.toBytes("partname"))); + + partFile.setFileName(filename); + partFile.setFileType(fileType); + partFile.setMessage(message); + partFile.setUploadId(key); + partFile.setPartName(partName); + resEntity.setIsSuccess(true); + } catch (TableNotFoundException e) { + logger.error("part object error"+e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error("part object error"+e.toString()); + resEntity.setCode("InternalError"); + } + resEntity.setReturnEntity(partFile); + return resEntity; + } + + @Override + public ResEntity 
deleteFile(String tableName, String filename) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Delete delete = new Delete(filename.getBytes()); + table.delete(delete); + resEntity.setIsSuccess(true); + } catch (Exception e) { + logger.error("part object error"+e.toString()); + } + return resEntity; + } + + @Override + public Boolean ifExistRowKey(String tableName, String key) { + boolean ifExist = false; + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))) { + Get get = new Get(Bytes.toBytes(key)); + ifExist = table.exists(get); + } catch (Exception e) { + logger.error("part object error"+e.toString()); + } + return ifExist; + } + + @Override + public ResEntity getMultipartUploadObjects(String tableName,ResEntity resEntity) { + List<Upload> uploadList = new ArrayList<>(); + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))){ + Scan scan = new Scan(); + ResultScanner scanner = table.getScanner(scan); + for (Result rss : scanner) { + Upload upload = new Upload(); + String uploadId = Bytes.toString(rss.getRow()); + Cell fileName = rss.getColumnLatestCell("meta".getBytes(), "filename".getBytes()); + long initiated = fileName.getTimestamp(); + String key = Bytes.toString(CellUtil.cloneValue(fileName)); + upload.setKey(key); + upload.setInitiated(String.valueOf(initiated)); + upload.setUploadId(uploadId); + uploadList.add(upload); + } + resEntity.setReturnEntity(uploadList); + }catch (TableNotFoundException e) { + logger.error("part object error"+e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error("part object error"+e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity getMultipartUploadObjectsByPrefix(String tableName, ResEntity resEntity, String prefix) { + List<Upload> uploadList = new ArrayList<>(); + try (Table table = hbaseConnection.getTable(TableName.valueOf(tableName))){ + Scan scan = new Scan(); + SingleColumnValueFilter singleColumnValueFilter = new SingleColumnValueFilter( + "meta".getBytes(), + "filename".getBytes(), + CompareOperator.EQUAL, + new RegexStringComparator(prefix+".*")); + singleColumnValueFilter.setFilterIfMissing(true); + scan.setFilter(singleColumnValueFilter); + ResultScanner scanner = table.getScanner(scan); + for (Result rss : scanner) { + Upload upload = new Upload(); + String uploadId = Bytes.toString(rss.getRow()); + Cell fileName = rss.getColumnLatestCell("meta".getBytes(), "filename".getBytes()); + long initiated = fileName.getTimestamp(); + String key = Bytes.toString(CellUtil.cloneValue(fileName)); + upload.setKey(key); + upload.setInitiated(String.valueOf(initiated)); + upload.setUploadId(uploadId); + uploadList.add(upload); + } + resEntity.setReturnEntity(uploadList); + }catch (TableNotFoundException e) { + logger.error("part object error"+e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error("part object error"+e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity getObjectParts(String user,String bucket, String object, ResEntity resEntity) { + List<Part> partList = new ArrayList<>(); + ListPartsResponse listPartsResponse = (ListPartsResponse)resEntity.getReturnEntity(); + try (Table partTable = hbaseConnection.getTable(TableName.valueOf(user + ":"+ 
hbaseProperties.getPartfile_index_table_prefix() + bucket)); + Table dataTable = hbaseConnection.getTable(TableName.valueOf(user + ":" + bucket))){ + Get get = new Get(getRowKey(object).getBytes()); + + Result partResult = partTable.get(get); + String uploadId = Bytes.toString(partResult.getRow()); + listPartsResponse.setUploadId(uploadId); + String objectPart = Bytes.toString(CellUtil.cloneValue(partResult.getColumnLatestCell("meta".getBytes(), "partname".getBytes()))); + String[] objectPartArr = objectPart.split(","); + for (String s : objectPartArr) { + Part part = new Part(); + Get dataGet = new Get(getRowKey(object+"|"+s).getBytes()); + Result dataResult = dataTable.get(dataGet); + Cell fileSizeCell = dataResult.getColumnLatestCell("meta".getBytes(), "filesize".getBytes()); + long timeStamp = fileSizeCell.getTimestamp(); + String partEtag = Bytes.toString(dataResult.getRow()); + String size = Bytes.toString(CellUtil.cloneValue(fileSizeCell)); + part.setPartNumber(Integer.parseInt(s)); + part.setETag(partEtag); + part.setSize(Integer.parseInt(size)); + part.setLastModified(timeStamp + ""); + partList.add(part); + } + listPartsResponse.setPart(partList); + resEntity.setReturnEntity(listPartsResponse); + }catch (TableNotFoundException e) { + logger.error("part object error"+e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error("part object error"+e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/UserDaoImpl.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/UserDaoImpl.java new file mode 100644 index 0000000..ab7a8b1 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/Impl/UserDaoImpl.java @@ -0,0 +1,44 @@ +package com.mesalab.hos.dao.Impl; + +import com.mesalab.hos.dao.UserDao; +import com.mesalab.hos.entity.ResEntity; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; + + +/** + * Created by wk on 2020/4/20. 
+ */ +@Component +public class UserDaoImpl implements UserDao { + + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + @Autowired + private Connection hbaseConnection; + + @Override + public ResEntity createUser(String user) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try (Admin admin = hbaseConnection.getAdmin()) { + // Check all existing namespaces first; creating inside the loop would attempt one create per namespace. + boolean userExists = false; + for (NamespaceDescriptor desc : admin.listNamespaceDescriptors()) { + userExists = userExists || user.equals(desc.getName()); + } + if (userExists) { + resEntity.setCode("UserAlreadyExists"); + } else { + NamespaceDescriptor namespaceDescriptor = NamespaceDescriptor.create(user).build(); + admin.createNamespace(namespaceDescriptor); + resEntity.setIsSuccess(true); + } + } catch (Exception e) { + logger.error(e.toString()); + } + return resEntity; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/IndexFileNameDao.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/IndexFileNameDao.java new file mode 100644 index 0000000..520cf60 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/IndexFileNameDao.java @@ -0,0 +1,23 @@ +package com.mesalab.hos.dao; + +import com.mesalab.hos.entity.ColumnEntity; +import com.mesalab.hos.entity.select.Content; + +import java.util.List; + +/** + * Created by wk on 2020/4/20. + */ +public interface IndexFileNameDao { + + Boolean putFile(String tableName, String rowKey, String fileRowkey, String timestamp); + + ColumnEntity getColumnWithTime(String tableName, String rowKey, String column); + + Boolean deleteFile(String tableName, String rowKey); + + Boolean ifExistRowKey(String tableName, String rowKey); + + List<Content> getObjectListWithPrefixFilter(String tableName, int maxKeys, String startTime, String endTime, String prefix) throws Exception; + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/IndexTimeDao.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/IndexTimeDao.java new file mode 100644 index 0000000..2a677f4 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/IndexTimeDao.java @@ -0,0 +1,23 @@ +package com.mesalab.hos.dao; + +import com.mesalab.hos.entity.ColumnEntity; +import com.mesalab.hos.entity.select.Content; + +import java.util.List; + +/** + * Created by wk on 2020/4/20. + */ +public interface IndexTimeDao { + + Boolean putFile(String tableName, String rowKey, String filename, String timestamp); + + ColumnEntity getColumnWithTime(String tableName, String rowKey, String column); + + Boolean deleteFile(String tableName, String rowKey); + + Boolean ifExistRowKey(String tableName, String rowKey); + + List<Content> getObjectListWithTimeFilter(String tableName, int maxKeys, String startTime, String endTime, String prefix) throws Exception; + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/ObjectDao.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/ObjectDao.java new file mode 100644 index 0000000..6736a2d --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/ObjectDao.java @@ -0,0 +1,36 @@ +package com.mesalab.hos.dao; + +import com.mesalab.hos.entity.FileEntity; +import com.mesalab.hos.entity.ResEntity; + +/** + * Created by wk on 2020/4/20.
+ */ +public interface ObjectDao { + + ResEntity putFile(FileEntity fileEntity); + + ResEntity putAppendFileMeta(FileEntity fileEntity); + + ResEntity putAllFileMeta(FileEntity fileEntity); + + ResEntity appendColumn(String tableName, String rowKey,String column,String columnData); + + ResEntity appendIndexKey(FileEntity fileEntity); + + ResEntity putColumn(String tableName, String rowKey,String column,String columnData); + + ResEntity getFile(String tableName, String fileName); + + ResEntity getAllFileMeta(String tableName, String fileName); + + ResEntity getColumn(String tableName, String fileName, String column); + + ResEntity deleteFile(String tableName, String filename); + + ResEntity deleteMultipartUploadFile(String tableName, String filename); + + Boolean ifExistRowKey(String tableName, String rowKey); + + Boolean ifMultiFile(String tableName, String rowKey); +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/PartFileDao.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/PartFileDao.java new file mode 100644 index 0000000..fbc951a --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/PartFileDao.java @@ -0,0 +1,27 @@ +package com.mesalab.hos.dao; + +import com.mesalab.hos.entity.FileEntity; +import com.mesalab.hos.entity.ResEntity; + +/** + * Created by wk on 2020/4/20. + */ +public interface PartFileDao { + + ResEntity putFile(FileEntity fileEntity); + + ResEntity appendColumn(String tableName, String fileName, String columnName, String columnValue); + + ResEntity getFile(String tableName, String fileName); + + ResEntity deleteFile(String tableName, String filename); + + Boolean ifExistRowKey(String tableName, String rowKey); + + ResEntity getMultipartUploadObjects(String tableName,ResEntity resEntity); + + ResEntity getMultipartUploadObjectsByPrefix(String tableName,ResEntity resEntity,String prefix); + + ResEntity getObjectParts(String user,String bucket,String object,ResEntity resEntity); + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/UserDao.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/UserDao.java new file mode 100644 index 0000000..74bbe68 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/dao/UserDao.java @@ -0,0 +1,13 @@ +package com.mesalab.hos.dao; + +import com.mesalab.hos.entity.ResEntity; + +/** + * Created by wk1 on 2020/7/14. + */ + +public interface UserDao { + + ResEntity createUser(String user); + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/ColumnEntity.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/ColumnEntity.java new file mode 100644 index 0000000..53adaf8 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/ColumnEntity.java @@ -0,0 +1,27 @@ +package com.mesalab.hos.entity; + +/** + * Created by wk1 on 2020/7/13. 
+ */ +public class ColumnEntity { + + private String column; + + private Long timestamp; + + public String getColumn() { + return column; + } + + public void setColumn(String column) { + this.column = column; + } + + public Long getTimestamp() { + return timestamp; + } + + public void setTimestamp(Long timestamp) { + this.timestamp = timestamp; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/DiskResponse.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/DiskResponse.java new file mode 100644 index 0000000..73f3890 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/DiskResponse.java @@ -0,0 +1,58 @@ +package com.mesalab.hos.entity; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlType; +import java.io.Serializable; + +/** + * Created by wk on 2020/4/23. + */ +@XmlAccessorType(XmlAccessType.FIELD) + +// Root element of the XML document +@XmlRootElement(name = "HosSpace") +// Controls the ordering of properties and fields in the JAXB binding class + +@XmlType(propOrder = { + "HosCapacity", + "HosUsed" +}) +public class DiskResponse implements Serializable { + + private static final long serialVersionUID = 2L; + + private Long HosCapacity; + private Long HosUsed; + + public DiskResponse() { + super(); + } + + + public DiskResponse(Long hosCapacity, Long hosUsed) { + super(); + this.HosCapacity = hosCapacity; + this.HosUsed = hosUsed; + + } + + + + public Long getHosCapacity() { + return HosCapacity; + } + + public void setHosCapacity(Long hosCapacity) { + HosCapacity = hosCapacity; + } + + public Long getHosUsed() { + return HosUsed; + } + + public void setHosUsed(Long hosUsed) { + HosUsed = hosUsed; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/ErrorResponse.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/ErrorResponse.java new file mode 100644 index 0000000..2468942 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/ErrorResponse.java @@ -0,0 +1,94 @@ +package com.mesalab.hos.entity; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlType; +import java.io.Serializable; + +/** + * Created by wk on 2020/4/23.
+ */ +@XmlAccessorType(XmlAccessType.FIELD) + +// Root element of the XML document +@XmlRootElement(name = "Error") +// Controls the ordering of properties and fields in the JAXB binding class + +@XmlType(propOrder = { + "Code", + "Message", + "RequestId", + "Resource", + "HostId" +}) +public class ErrorResponse implements Serializable { + + private static final long serialVersionUID = 2L; + + private String Code; + private String Message; + private String RequestId; + private String Resource; + private String HostId; + + public ErrorResponse() { + super(); + } + + + public ErrorResponse(String code, String message, String requestId, String resource, String hostId) { + super(); + this.Code = code; + this.Message = message; + this.RequestId = requestId; + this.Resource = resource; + this.HostId = hostId; + } + + public static long getSerialVersionUID() { + return serialVersionUID; + } + + public String getCode() { + return Code; + } + + public void setCode(String code) { + Code = code; + } + + public String getMessage() { + return Message; + } + + public void setMessage(String message) { + Message = message; + } + + public String getRequestId() { + return RequestId; + } + + public void setRequestId(String requestId) { + RequestId = requestId; + } + + public String getResource() { + return Resource; + } + + public void setResource(String resource) { + Resource = resource; + } + + public String getHostId() { + return HostId; + } + + public void setHostId(String hostId) { + HostId = hostId; + } + + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/FileEntity.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/FileEntity.java new file mode 100644 index 0000000..708dc54 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/FileEntity.java @@ -0,0 +1,138 @@ +package com.mesalab.hos.entity; + +/** + * Created by wk on 2020/4/20.
+ */ +public class FileEntity { + + private String fileName; + private String message; + private long fileSize; + private byte[] file; + private String user; + private String bucket; + private String fileType; + private long timestamp; + private int partNumber; + private String uploadId; + private int isParent; + private String partName; + private String indexFileNameKey; + private String indexTimeKey; + + public FileEntity() { + } + + public String getFileName() { + return fileName; + } + + public void setFileName(String fileName) { + this.fileName = fileName; + } + + public String getMessage() { + return message; + } + + public void setMessage(String message) { + this.message = message; + } + + public long getFileSize() { + return fileSize; + } + + public void setFileSize(long fileSize) { + this.fileSize = fileSize; + } + + public byte[] getFile() { + return file; + } + + public void setFile(byte[] file) { + this.file = file; + } + + public String getUser() { + return user; + } + + public void setUser(String user) { + this.user = user; + } + + public String getBucket() { + return bucket; + } + + public void setBucket(String bucket) { + this.bucket = bucket; + } + + public String getFileType() { + return fileType; + } + + public void setFileType(String fileType) { + this.fileType = fileType; + } + + public long getTimestamp() { + return timestamp; + } + + public void setTimestamp(long timestamp) { + this.timestamp = timestamp; + } + + + public int getPartNumber() { + return partNumber; + } + + public void setPartNumber(int partNumber) { + this.partNumber = partNumber; + } + + public String getUploadId() { + return uploadId; + } + + public void setUploadId(String uploadId) { + this.uploadId = uploadId; + } + + public int getIsParent() { + return isParent; + } + + public void setIsParent(int isParent) { + this.isParent = isParent; + } + + public String getPartName() { + return partName; + } + + public void setPartName(String partName) { + this.partName = partName; + } + + public String getIndexFileNameKey() { + return indexFileNameKey; + } + + public void setIndexFileNameKey(String indexFileNameKey) { + this.indexFileNameKey = indexFileNameKey; + } + + public String getIndexTimeKey() { + return indexTimeKey; + } + + public void setIndexTimeKey(String indexTimeKey) { + this.indexTimeKey = indexTimeKey; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/MultipartUpload/CompleteMultipartUpload.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/MultipartUpload/CompleteMultipartUpload.java new file mode 100644 index 0000000..7c1d287 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/MultipartUpload/CompleteMultipartUpload.java @@ -0,0 +1,28 @@ +package com.mesalab.hos.entity.MultipartUpload; + +import com.mesalab.hos.entity.delete.DeleteObj; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; +import java.util.List; + +/** + * Created by wk on 2020/4/23. 
+ */ +// Root element of the XML document +@XmlRootElement(name = "CompleteMultipartUpload") +// Controls the ordering of properties and fields in the JAXB binding class +public class CompleteMultipartUpload { + + + private List<Part> parts; + + @XmlElement(name = "Part") + public List<Part> getParts() { + return parts; + } + + public void setParts(List<Part> parts) { + this.parts = parts; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/MultipartUpload/CompleteMultipartUploadResult.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/MultipartUpload/CompleteMultipartUploadResult.java new file mode 100644 index 0000000..a699f72 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/MultipartUpload/CompleteMultipartUploadResult.java @@ -0,0 +1,76 @@ +package com.mesalab.hos.entity.MultipartUpload; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlType; +import java.io.Serializable; + +/** + * Created by wk1 on 2020/7/20. + */ + +@XmlAccessorType(XmlAccessType.FIELD) + +// Root element of the XML document +@XmlRootElement(name = "CompleteMultipartUploadResult") +// Controls the ordering of properties and fields in the JAXB binding class + +@XmlType(propOrder = { + "Location", + "Bucket", + "Key", + "ETag" +}) +public class CompleteMultipartUploadResult implements Serializable { + + private static final long serialVersionUID = 1L; + + private String Location; + private String Bucket; + private String Key; + private String ETag; + + public CompleteMultipartUploadResult(String location, String bucket, String key, String ETag) { + Location = location; + Bucket = bucket; + Key = key; + this.ETag = ETag; + } + + public CompleteMultipartUploadResult() { + super(); + } + + public String getLocation() { + return Location; + } + + public void setLocation(String location) { + Location = location; + } + + public String getBucket() { + return Bucket; + } + + public void setBucket(String bucket) { + Bucket = bucket; + } + + public String getKey() { + return Key; + } + + public void setKey(String key) { + Key = key; + } + + public String getETag() { + return ETag; + } + + public void setETag(String ETag) { + this.ETag = ETag; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/MultipartUpload/InitiateMultipartUploadResult.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/MultipartUpload/InitiateMultipartUploadResult.java new file mode 100644 index 0000000..9c50235 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/MultipartUpload/InitiateMultipartUploadResult.java @@ -0,0 +1,63 @@ +package com.mesalab.hos.entity.MultipartUpload; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlType; +import java.io.Serializable; + +/** + * Created by wk1 on 2020/7/20.
+ */ + +@XmlAccessorType(XmlAccessType.FIELD) + +// Root element of the XML document +@XmlRootElement(name = "InitiateMultipartUploadResult") +// Controls the ordering of properties and fields in the JAXB binding class + +@XmlType(propOrder = { + "Bucket", + "Key", + "UploadId" +}) +public class InitiateMultipartUploadResult implements Serializable { + + private static final long serialVersionUID = 1L; + + private String Bucket; + private String Key; + private String UploadId; + + public InitiateMultipartUploadResult(String bucket, String key, String uploadId) { + Bucket = bucket; + Key = key; + UploadId = uploadId; + } + public InitiateMultipartUploadResult() { + super(); + } + public String getBucket() { + return Bucket; + } + + public void setBucket(String bucket) { + Bucket = bucket; + } + + public String getKey() { + return Key; + } + + public void setKey(String key) { + Key = key; + } + + public String getUploadId() { + return UploadId; + } + + public void setUploadId(String uploadId) { + UploadId = uploadId; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/MultipartUpload/Part.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/MultipartUpload/Part.java new file mode 100644 index 0000000..48e78dd --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/MultipartUpload/Part.java @@ -0,0 +1,39 @@ +package com.mesalab.hos.entity.MultipartUpload; + +import javax.xml.bind.annotation.XmlElement; + +/** + * Created by wk on 2020/5/11. + */ + +public class Part { + + private String ETag; + private int PartNumber; + + public Part(String ETag, int partNumber) { + this.ETag = ETag; + PartNumber = partNumber; + } + + public Part() { + } + + @XmlElement(name = "ETag") + public String getETag() { + return ETag; + } + + public void setETag(String ETag) { + this.ETag = ETag; + } + + @XmlElement(name = "PartNumber") + public int getPartNumber() { + return PartNumber; + } + + public void setPartNumber(int partNumber) { + PartNumber = partNumber; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/ResEntity.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/ResEntity.java new file mode 100644 index 0000000..69603bc --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/ResEntity.java @@ -0,0 +1,47 @@ +package com.mesalab.hos.entity; + +/** + * Created by wk on 2020/5/12.
+ */ +public class ResEntity { + + private String code; + + private Boolean isSuccess; + + private Object returnEntity; + + + public ResEntity() { + } + + public ResEntity(String code, Boolean isSuccess, Object returnEntity) { + this.code = code; + this.isSuccess = isSuccess; + this.returnEntity = returnEntity; + } + + public String getCode() { + return code; + } + + public void setCode(String code) { + this.code = code; + } + + public Boolean getIsSuccess() { + return isSuccess; + } + + public void setIsSuccess(Boolean isSuccess) { + this.isSuccess = isSuccess; + } + + public Object getReturnEntity() { + return returnEntity; + } + + public void setReturnEntity(Object returnEntity) { + this.returnEntity = returnEntity; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/bucketlist/Bucket.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/bucketlist/Bucket.java new file mode 100644 index 0000000..a46779b --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/bucketlist/Bucket.java @@ -0,0 +1,39 @@ +package com.mesalab.hos.entity.bucketlist; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlType; + + +@XmlRootElement +@XmlType(propOrder = {"name", "creationDate"}) +public class Bucket { + private String creationDate; + private String name; + + public Bucket() { + } + + public Bucket(String creationDate, String name) { + this.creationDate = creationDate; + this.name = name; + } + + @XmlElement(name = "CreationDate") + public String getCreationDate() { + return creationDate; + } + + public void setCreationDate(String creationDate) { + this.creationDate = creationDate; + } + + @XmlElement(name = "Name") + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/bucketlist/BucketListResponse.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/bucketlist/BucketListResponse.java new file mode 100644 index 0000000..a548d3a --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/bucketlist/BucketListResponse.java @@ -0,0 +1,42 @@ +package com.mesalab.hos.entity.bucketlist; + + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlElementWrapper; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlType; +import java.util.List; + +@XmlRootElement(name = "ListAllMyBucketsResult") +@XmlType(propOrder = {"owner", "bucket"}) +public class BucketListResponse { + private BucketOwner owner; + private List<Bucket> bucket; + + public BucketListResponse() { + } + + public BucketListResponse(BucketOwner owner, List<Bucket> bucket) { + this.owner = owner; + this.bucket = bucket; + } + + @XmlElement(name = "Owner") + public BucketOwner getOwner() { + return owner; + } + + public void setOwner(BucketOwner owner) { + this.owner = owner; + } + + @XmlElementWrapper(name = "Buckets") + @XmlElement(name = "Bucket") + public List<Bucket> getBucket() { + return bucket; + } + + public void setBucket(List<Bucket> bucket) { + this.bucket = bucket; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/bucketlist/BucketOwner.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/bucketlist/BucketOwner.java new file mode 100644 index 0000000..6896256 --- /dev/null +++ 
b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/bucketlist/BucketOwner.java @@ -0,0 +1,36 @@ +package com.mesalab.hos.entity.bucketlist; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlType; + +@XmlType(propOrder = {"id", "displayName"}) +public class BucketOwner { + private String displayName; + private String id; + + public BucketOwner() { + } + + public BucketOwner(String displayName, String id) { + this.displayName = displayName; + this.id = id; + } + + @XmlElement(name = "DisplayName") + public String getDisplayName() { + return displayName; + } + + public void setDisplayName(String displayName) { + this.displayName = displayName; + } + + @XmlElement(name = "ID") + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/delete/DeleteError.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/delete/DeleteError.java new file mode 100644 index 0000000..b980bed --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/delete/DeleteError.java @@ -0,0 +1,53 @@ +package com.mesalab.hos.entity.delete; + +import javax.xml.bind.annotation.XmlElement; + +/** + * Created by wk on 2020/5/11. + */ +//@XmlRootElement(name = "Error") +public class DeleteError { + + private String Key; + + private String Code; + + private String Message; + + + public DeleteError(String key, String code, String message) { + Key = key; + Code = code; + Message = message; + } + + public DeleteError() { + } + + @XmlElement(name = "Key") + public String getKey() { + return Key; + } + + public void setKey(String key) { + Key = key; + } + + @XmlElement(name = "Code") + public String getCode() { + return Code; + } + + public void setCode(String code) { + Code = code; + } + + @XmlElement(name = "Message") + public String getMessage() { + return Message; + } + + public void setMessage(String message) { + Message = message; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/delete/DeleteObj.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/delete/DeleteObj.java new file mode 100644 index 0000000..9ceac47 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/delete/DeleteObj.java @@ -0,0 +1,28 @@ +package com.mesalab.hos.entity.delete; + +import javax.xml.bind.annotation.XmlElement; + +/** + * Created by wk on 2020/5/11. + */ + +public class DeleteObj { + + private String key; + + public DeleteObj(String key) { + this.key = key; + } + + public DeleteObj() { + } + + @XmlElement(name = "Key") + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/delete/DeleteRequest.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/delete/DeleteRequest.java new file mode 100644 index 0000000..ea6252b --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/delete/DeleteRequest.java @@ -0,0 +1,37 @@ +package com.mesalab.hos.entity.delete; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; +import java.util.List; + +/** + * Created by wk on 2020/4/23. 
+ */ +// Root element of the XML document +@XmlRootElement(name = "Delete") +// Controls the ordering of properties and fields in the JAXB binding class +public class DeleteRequest { + + + private List<DeleteObj> Objects; + private String Quiet; + + @XmlElement(name = "Object") + public List<DeleteObj> getObjects() { + return Objects; + } + + public void setObjects(List<DeleteObj> objects) { + Objects = objects; + } + + @XmlElement(name = "Quiet") + public String getQuiet() { + return Quiet; + } + + public void setQuiet(String quiet) { + Quiet = quiet; + } + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/delete/DeleteResponse.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/delete/DeleteResponse.java new file mode 100644 index 0000000..a8a7534 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/delete/DeleteResponse.java @@ -0,0 +1,28 @@ +package com.mesalab.hos.entity.delete; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; +import java.util.List; + +/** + * Created by wk on 2020/4/23. + */ +// Root element of the XML document +@XmlRootElement(name = "DeleteResult") +// Controls the ordering of properties and fields in the JAXB binding class +public class DeleteResponse { + + + private List<DeleteError> errors; + + @XmlElement(name = "Error") + public List<DeleteError> getErrors() { + return errors; + } + + public void setErrors(List<DeleteError> errors) { + this.errors = errors; + } + + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/download/DownloadResponse.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/download/DownloadResponse.java new file mode 100644 index 0000000..ce2d0b0 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/download/DownloadResponse.java @@ -0,0 +1,82 @@ +package com.mesalab.hos.entity.download; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlType; +import java.io.Serializable; + +/** + * Created by wk on 2020/4/23.
+ */ + +@XmlAccessorType(XmlAccessType.FIELD) +// Root element of the XML document +@XmlRootElement(name = "Error") +// Controls the ordering of properties and fields in the JAXB binding class + +@XmlType(propOrder = { + "Code", + "Message", + "RequestId", + "HostId", + +}) +public class DownloadResponse implements Serializable { + + private static final long serialVersionUID = 2L; + + private String Code; + private String Message; + private String RequestId; + private String HostId; + + public DownloadResponse() { + super(); + } + + + public DownloadResponse(String code, String message, String requestId, String hostId) { + super(); + this.Code = code; + this.Message = message; + this.RequestId = requestId; + this.HostId = hostId; + } + + public static long getSerialVersionUID() { + return serialVersionUID; + } + + public String getCode() { + return Code; + } + + public void setCode(String code) { + Code = code; + } + + public String getMessage() { + return Message; + } + + public void setMessage(String message) { + Message = message; + } + + public String getRequestId() { + return RequestId; + } + + public void setRequestId(String requestId) { + RequestId = requestId; + } + + public String getHostId() { + return HostId; + } + + public void setHostId(String hostId) { + HostId = hostId; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/CommonPrefixes.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/CommonPrefixes.java new file mode 100644 index 0000000..dc49672 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/CommonPrefixes.java @@ -0,0 +1,21 @@ +package com.mesalab.hos.entity.getMultipartUpload; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "CommonPrefixes") +public class CommonPrefixes { + private String prefix; + + public CommonPrefixes() { + } + + @XmlElement(name = "Prefix") + public String getPrefix() { + return prefix; + } + + public void setPrefix(String prefix) { + this.prefix = prefix; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/Initiator.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/Initiator.java new file mode 100644 index 0000000..690a877 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/Initiator.java @@ -0,0 +1,36 @@ +package com.mesalab.hos.entity.getMultipartUpload; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "Initiator") +public class Initiator { + private String displayName; + private String id; + + public Initiator() { + } + + public Initiator(String displayName, String id) { + this.displayName = displayName; + this.id = id; + } + + @XmlElement(name = "DisplayName") + public String getDisplayName() { + return displayName; + } + + public void setDisplayName(String displayName) { + this.displayName = displayName; + } + + @XmlElement(name = "ID") + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/ListMultipartUploadsResponse.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/ListMultipartUploadsResponse.java new file mode 100644 index 0000000..c317497 --- /dev/null +++
b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/ListMultipartUploadsResponse.java @@ -0,0 +1,137 @@ +package com.mesalab.hos.entity.getMultipartUpload; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlType; +import java.util.List; + +@XmlAccessorType(XmlAccessType.FIELD) +@XmlRootElement(name = "ListMultipartUploadsResult") +@XmlType(propOrder = { + "Bucket", + "KeyMarker", + "UploadIdMarker", + "NextKeyMarker", + "NextUploadIdMarker", + "Delimiter", + "Prefix", + "MaxUploads", + "IsTruncated", + "Upload", + "CommonPrefixes", + "EncodingType" +}) +public class ListMultipartUploadsResponse { + private String Bucket; + private String KeyMarker; + private String UploadIdMarker; + private String NextKeyMarker; + private String Prefix; + private String Delimiter; + private String NextUploadIdMarker; + private int MaxUploads; + private boolean IsTruncated; + private List<Upload> Upload; + private List<CommonPrefixes> CommonPrefixes; + private String EncodingType; + + public ListMultipartUploadsResponse() { + } + + public String getBucket() { + return Bucket; + } + + public void setBucket(String bucket) { + Bucket = bucket; + } + + public String getKeyMarker() { + return KeyMarker; + } + + public void setKeyMarker(String keyMarker) { + KeyMarker = keyMarker; + } + + public String getUploadIdMarker() { + return UploadIdMarker; + } + + public void setUploadIdMarker(String uploadIdMarker) { + UploadIdMarker = uploadIdMarker; + } + + public String getNextKeyMarker() { + return NextKeyMarker; + } + + public void setNextKeyMarker(String nextKeyMarker) { + NextKeyMarker = nextKeyMarker; + } + + public String getPrefix() { + return Prefix; + } + + public void setPrefix(String prefix) { + Prefix = prefix; + } + + public String getDelimiter() { + return Delimiter; + } + + public void setDelimiter(String delimiter) { + Delimiter = delimiter; + } + + public String getNextUploadIdMarker() { + return NextUploadIdMarker; + } + + public void setNextUploadIdMarker(String nextUploadIdMarker) { + NextUploadIdMarker = nextUploadIdMarker; + } + + public int getMaxUploads() { + return MaxUploads; + } + + public void setMaxUploads(int maxUploads) { + MaxUploads = maxUploads; + } + + public boolean isTruncated() { + return IsTruncated; + } + + public void setTruncated(boolean truncated) { + IsTruncated = truncated; + } + + public List<com.mesalab.hos.entity.getMultipartUpload.Upload> getUpload() { + return Upload; + } + + public void setUpload(List<com.mesalab.hos.entity.getMultipartUpload.Upload> upload) { + Upload = upload; + } + + public List<com.mesalab.hos.entity.getMultipartUpload.CommonPrefixes> getCommonPrefixes() { + return CommonPrefixes; + } + + public void setCommonPrefixes(List<com.mesalab.hos.entity.getMultipartUpload.CommonPrefixes> commonPrefixes) { + CommonPrefixes = commonPrefixes; + } + + public String getEncodingType() { + return EncodingType; + } + + public void setEncodingType(String encodingType) { + EncodingType = encodingType; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/ListPartsResponse.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/ListPartsResponse.java new file mode 100644 index 0000000..fd04a6f --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/ListPartsResponse.java @@ -0,0 +1,142 @@ +package 
com.mesalab.hos.entity.getMultipartUpload; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlType; +import java.util.List; + +@XmlAccessorType(XmlAccessType.FIELD) +@XmlRootElement(name = "ListPartsResult") +@XmlType(propOrder = { + "Bucket", + "Key", + "UploadId", + "Initiator", + "Owner", + "StorageClass", + "PartNumberMarker", + "NextPartNumberMarker", + "MaxParts", + "IsTruncated", + "Part" +}) +public class ListPartsResponse { + private String Bucket; + private String Key; + private String UploadId; + private Initiator Initiator; + private Owner Owner; + private String StorageClass; + private String PartNumberMarker; + private String NextPartNumberMarker; + private int MaxParts; + private boolean IsTruncated; + private List<Part> Part; + + public ListPartsResponse() { + } + + public String getBucket() { + return Bucket; + } + + public void setBucket(String bucket) { + Bucket = bucket; + } + + public String getKey() { + return Key; + } + + public void setKey(String key) { + Key = key; + } + + public String getUploadId() { + return UploadId; + } + + public void setUploadId(String uploadId) { + UploadId = uploadId; + } + + public Initiator getInitiator() { + return Initiator; + } + + public void setInitiator(Initiator initiator) { + Initiator = initiator; + } + + public Owner getOwner() { + return Owner; + } + + public void setOwner(Owner owner) { + Owner = owner; + } + + public String getStorageClass() { + return StorageClass; + } + + public void setStorageClass(String storageClass) { + StorageClass = storageClass; + } + + + public List<Part> getPart() { + return Part; + } + + public void setPart(List<Part> part) { + Part = part; + } + + public int getMaxParts() { + return MaxParts; + } + + public void setMaxParts(int maxParts) { + MaxParts = maxParts; + } + + public boolean isTruncated() { + return IsTruncated; + } + + public void setTruncated(boolean truncated) { + IsTruncated = truncated; + } + + public String getPartNumberMarker() { + return PartNumberMarker; + } + + public void setPartNumberMarker(String partNumberMarker) { + PartNumberMarker = partNumberMarker; + } + + public String getNextPartNumberMarker() { + return NextPartNumberMarker; + } + + public void setNextPartNumberMarker(String nextPartNumberMarker) { + NextPartNumberMarker = nextPartNumberMarker; + } + + public ListPartsResponse(String bucket, String key, String uploadId, com.mesalab.hos.entity.getMultipartUpload.Initiator initiator, com.mesalab.hos.entity.getMultipartUpload.Owner owner, String storageClass, String partNumberMarker, String nextPartNumberMarker, int maxParts, boolean isTruncated, List<com.mesalab.hos.entity.getMultipartUpload.Part> part) { + Bucket = bucket; + Key = key; + UploadId = uploadId; + Initiator = initiator; + Owner = owner; + StorageClass = storageClass; + PartNumberMarker = partNumberMarker; + NextPartNumberMarker = nextPartNumberMarker; + MaxParts = maxParts; + IsTruncated = isTruncated; + Part = part; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/Owner.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/Owner.java new file mode 100644 index 0000000..1f4d8da --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/Owner.java @@ -0,0 +1,37 @@ +package com.mesalab.hos.entity.getMultipartUpload; + +import 
javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "Owner") +public class Owner { + private String displayName; + private String id; + + public Owner() { + } + + public Owner(String displayName, String id) { + this.displayName = displayName; + this.id = id; + } + + @XmlElement(name = "DisplayName") + public String getDisplayName() { + return displayName; + } + + public void setDisplayName(String displayName) { + this.displayName = displayName; + } + + @XmlElement(name = "ID") + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/Part.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/Part.java new file mode 100644 index 0000000..1b57b21 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/Part.java @@ -0,0 +1,65 @@ +package com.mesalab.hos.entity.getMultipartUpload; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlType; + +@XmlAccessorType(XmlAccessType.FIELD) +@XmlRootElement(name = "Part") +@XmlType(propOrder = { + "PartNumber", + "LastModified", + "ETag", + "Size" +}) +public class Part { + private int PartNumber; + private String LastModified; + private String ETag; + private int Size; + + public Part() { + } + + + + public String getLastModified() { + return LastModified; + } + + public void setLastModified(String lastModified) { + LastModified = lastModified; + } + + public String getETag() { + return ETag; + } + + public void setETag(String ETag) { + this.ETag = ETag; + } + + public int getPartNumber() { + return PartNumber; + } + + public void setPartNumber(int partNumber) { + PartNumber = partNumber; + } + + public int getSize() { + return Size; + } + + public void setSize(int size) { + Size = size; + } + + public Part(int partNumber, String lastModified, String ETag, int size) { + PartNumber = partNumber; + LastModified = lastModified; + this.ETag = ETag; + Size = size; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/Upload.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/Upload.java new file mode 100644 index 0000000..441d068 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/getMultipartUpload/Upload.java @@ -0,0 +1,79 @@ +package com.mesalab.hos.entity.getMultipartUpload; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlType; + +@XmlAccessorType(XmlAccessType.FIELD) +@XmlRootElement(name = "Upload") +@XmlType(propOrder = { + "Initiated", + "Initiator", + "Key", + "Owner", + "StorageClass", + "UploadId" +}) +public class Upload{ + private String Initiated; + private Initiator Initiator; + private String Key; + private Owner Owner; + private String StorageClass; + private String UploadId; + + public Upload() { + } + + public String getInitiated() { + return Initiated; + } + + public void setInitiated(String initiated) { + Initiated = initiated; + } + + public com.mesalab.hos.entity.getMultipartUpload.Initiator getInitiator() { + return Initiator; + } + + public void setInitiator(com.mesalab.hos.entity.getMultipartUpload.Initiator 
initiator) { + Initiator = initiator; + } + + public String getKey() { + return Key; + } + + public void setKey(String key) { + Key = key; + } + + public com.mesalab.hos.entity.getMultipartUpload.Owner getOwner() { + return Owner; + } + + public void setOwner(com.mesalab.hos.entity.getMultipartUpload.Owner owner) { + Owner = owner; + } + + public String getStorageClass() { + return StorageClass; + } + + public void setStorageClass(String storageClass) { + StorageClass = storageClass; + } + + public String getUploadId() { + return UploadId; + } + + public void setUploadId(String uploadId) { + UploadId = uploadId; + } + + public void setUploadId() { + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/lifecycle/ExpirationObj.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/lifecycle/ExpirationObj.java new file mode 100644 index 0000000..b216c5f --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/lifecycle/ExpirationObj.java @@ -0,0 +1,26 @@ +package com.mesalab.hos.entity.lifecycle; + +import javax.xml.bind.annotation.XmlElement; + +public class ExpirationObj { + private int days; + + public ExpirationObj() { + } + + @XmlElement(name = "Days") + public int getDays() { + return days; + } + + public void setDays(int days) { + this.days = days; + } + + @Override + public String toString() { + return "ExpirationObj{" + + "days=" + days + + '}'; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/lifecycle/LifecycleRequest.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/lifecycle/LifecycleRequest.java new file mode 100644 index 0000000..40ca015 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/lifecycle/LifecycleRequest.java @@ -0,0 +1,28 @@ +package com.mesalab.hos.entity.lifecycle; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +@XmlRootElement(name = "LifecycleConfiguration") +public class LifecycleRequest { + private RuleObj ruleObj; + + public LifecycleRequest() { + } + + @XmlElement(name = "Rule") + public RuleObj getRuleObj() { + return ruleObj; + } + + public void setRuleObj(RuleObj ruleObj) { + this.ruleObj = ruleObj; + } + + @Override + public String toString() { + return "LifecycleRequest{" + + "ruleObj=" + ruleObj + + '}'; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/lifecycle/RuleObj.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/lifecycle/RuleObj.java new file mode 100644 index 0000000..39bf9d2 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/lifecycle/RuleObj.java @@ -0,0 +1,60 @@ +package com.mesalab.hos.entity.lifecycle; + +import javax.xml.bind.annotation.XmlElement; + + +public class RuleObj { + private String id; + private String prefix; + private String status; + private ExpirationObj expirationObj; + + public RuleObj() { + } + + @XmlElement(name = "ID") + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + @XmlElement(name = "Prefix") + public String getPrefix() { + return prefix; + } + + public void setPrefix(String prefix) { + this.prefix = prefix; + } + + @XmlElement(name = "Status") + public String getStatus() { + return status; + } + + public void setStatus(String status) { + this.status = status; + } + + @XmlElement(name = "Expiration") + public ExpirationObj getExpirationObj() { + return expirationObj; + } + + public void setExpirationObj(ExpirationObj expirationObj) { + 
this.expirationObj = expirationObj; + } + + @Override + public String toString() { + return "RuleObj{" + + "id='" + id + '\'' + + ", prefix='" + prefix + '\'' + + ", status='" + status + '\'' + + ", expirationObj=" + expirationObj + + '}'; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/partEntity.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/partEntity.java new file mode 100644 index 0000000..076ea07 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/partEntity.java @@ -0,0 +1,55 @@ +package com.mesalab.hos.entity; + +/** + * Created by wk on 2020/4/20. + */ +public class partEntity { + + private String fileName; + private String message; + private String fileType; + private String partName; + private String uploadId; + + + public String getFileName() { + return fileName; + } + + public void setFileName(String fileName) { + this.fileName = fileName; + } + + public String getMessage() { + return message; + } + + public void setMessage(String message) { + this.message = message; + } + + public String getFileType() { + return fileType; + } + + public void setFileType(String fileType) { + this.fileType = fileType; + } + + public String getPartName() { + return partName; + } + + public void setPartName(String partName) { + this.partName = partName; + } + + + public String getUploadId() { + return uploadId; + } + + public void setUploadId(String uploadId) { + this.uploadId = uploadId; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/query/Bucket.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/query/Bucket.java new file mode 100644 index 0000000..d20b690 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/query/Bucket.java @@ -0,0 +1,40 @@ +package com.mesalab.hos.entity.query; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +/** + * Created by wk on 2020/5/12. + */ +@XmlRootElement(name = "Bucket") + +public class Bucket { + private String CreationDate; + private String Name; + + public Bucket() { + } + + public Bucket(String creationDate, String name) { + CreationDate = creationDate; + Name = name; + } + + @XmlElement(name = "CreationDate") + public String getCreationDate() { + return CreationDate; + } + + public void setCreationDate(String creationDate) { + CreationDate = creationDate; + } + + @XmlElement(name = "Name") + public String getName() { + return Name; + } + + public void setName(String name) { + Name = name; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/query/BucketOwner.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/query/BucketOwner.java new file mode 100644 index 0000000..886b729 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/query/BucketOwner.java @@ -0,0 +1,39 @@ +package com.mesalab.hos.entity.query; + +import javax.xml.bind.annotation.XmlElement; + +/** + * Created by wk on 2020/5/12. 
+ */ +public class BucketOwner { + + private String DisplayName; + private String ID; + + public BucketOwner(String displayName, String ID) { + DisplayName = displayName; + this.ID = ID; + } + + public BucketOwner() { + + } + + @XmlElement(name = "DisplayName") + public String getDisplayName() { + return DisplayName; + } + + public void setDisplayName(String displayName) { + DisplayName = displayName; + } + + @XmlElement(name = "ID") + public String getID() { + return ID; + } + + public void setID(String ID) { + this.ID = ID; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/query/ListBuckets.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/query/ListBuckets.java new file mode 100644 index 0000000..f990c4d --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/query/ListBuckets.java @@ -0,0 +1,41 @@ +package com.mesalab.hos.entity.query; + +import com.mesalab.hos.entity.ResEntity; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlElementWrapper; +import javax.xml.bind.annotation.XmlRootElement; +import java.util.List; + +/** + * Created by wk on 2020/5/12. + */ + +@XmlRootElement(name = "ListBucketsOutput") + +public class ListBuckets extends ResEntity { + + private List<Bucket> Buckets; + private BucketOwner Owner; + + + @XmlElementWrapper(name = "Buckets") + @XmlElement(name = "Bucket") + public List<Bucket> getBuckets() { + return Buckets; + } + + public void setBuckets(List<Bucket> buckets) { + Buckets = buckets; + } + + + @XmlElement(name = "Owner") + public BucketOwner getOwner() { + return Owner; + } + + public void setOwner(BucketOwner owner) { + Owner = owner; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/select/Content.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/select/Content.java new file mode 100644 index 0000000..48fb407 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/select/Content.java @@ -0,0 +1,75 @@ +package com.mesalab.hos.entity.select; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; + +/** + * Created by wk on 2020/5/12. 
+ */ +@XmlRootElement(name = "Contents") + +public class Content { + + private String Key; + private String LastModified; + private String ETag; + private String Size; + private String StorageClass; + + public Content(String key, String lastModified, String ETag, String size, String storageClass) { + Key = key; + LastModified = lastModified; + this.ETag = ETag; + Size = size; + StorageClass = storageClass; + } + + public Content() { + } + + @XmlElement(name = "Key") + public String getKey() { + return Key; + } + + public void setKey(String key) { + Key = key; + } + + @XmlElement(name = "LastModified") + public String getLastModified() { + return LastModified; + } + + public void setLastModified(String lastModified) { + LastModified = lastModified; + } + + @XmlElement(name = "ETag") + public String getETag() { + return ETag; + } + + public void setETag(String ETag) { + this.ETag = ETag; + } + + @XmlElement(name = "Size") + public String getSize() { + return Size; + } + + public void setSize(String size) { + Size = size; + } + + @XmlElement(name = "StorageClass") + public String getStorageClass() { + return StorageClass; + } + + public void setStorageClass(String storageClass) { + StorageClass = storageClass; + } + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/select/ListObjects.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/select/ListObjects.java new file mode 100644 index 0000000..f646e00 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/select/ListObjects.java @@ -0,0 +1,78 @@ +package com.mesalab.hos.entity.select; + +import com.mesalab.hos.entity.ResEntity; + +import javax.xml.bind.annotation.XmlElement; +import javax.xml.bind.annotation.XmlRootElement; +import java.util.List; + +/** + * Created by wk on 2020/5/12. 
+ */ + +@XmlRootElement(name = "ListBucketResult") + + +public class ListObjects extends ResEntity { + + private String Name; + private String Prefix; + private String KeyCount; + private String MaxKeys; + private Boolean IsTruncated; + private List<Content> Contents; + + @XmlElement(name = "Name") + public String getName() { + return Name; + } + + public void setName(String name) { + Name = name; + } + + @XmlElement(name = "Prefix") + public String getPrefix() { + return Prefix; + } + + public void setPrefix(String prefix) { + Prefix = prefix; + } + + @XmlElement(name = "KeyCount") + public String getKeyCount() { + return KeyCount; + } + + public void setKeyCount(String keyCount) { + KeyCount = keyCount; + } + + @XmlElement(name = "MaxKeys") + public String getMaxKeys() { + return MaxKeys; + } + + public void setMaxKeys(String maxKeys) { + MaxKeys = maxKeys; + } + + @XmlElement(name = "IsTruncated") + public Boolean getTruncated() { + return IsTruncated; + } + + public void setTruncated(Boolean truncated) { + IsTruncated = truncated; + } + + @XmlElement(name = "Contents") + public List<Content> getContents() { + return Contents; + } + + public void setContents(List<Content> contents) { + Contents = contents; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/upload/UploadResponse.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/upload/UploadResponse.java new file mode 100644 index 0000000..e4769a6 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/entity/upload/UploadResponse.java @@ -0,0 +1,81 @@ +package com.mesalab.hos.entity.upload; + +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; +import javax.xml.bind.annotation.XmlType; +import java.io.Serializable; + +/** + * Created by wk on 2020/4/23. 
+ */ + +@XmlAccessorType(XmlAccessType.FIELD) +// Root element name of the XML document +@XmlRootElement(name = "Error") +// Controls the ordering of properties and fields in the JAXB binding class + +@XmlType(propOrder = { + "Code", + "Message", + "Resource", + "RequestId" +}) +public class UploadResponse implements Serializable { + + private static final long serialVersionUID = 1L; + + private String Code; + private String Message; + private String Resource; + private String RequestId; + + public UploadResponse() { + super(); + } + + + public UploadResponse(String code, String message, String resource, String requestId) { + super(); + this.Code = code; + this.Message = message; + this.Resource = resource; + this.RequestId = requestId; + } + + public static long getSerialVersionUID() { + return serialVersionUID; + } + + public String getCode() { + return Code; + } + + public void setCode(String code) { + Code = code; + } + + public String getMessage() { + return Message; + } + + public void setMessage(String message) { + Message = message; + } + + public String getResource() { + return Resource; + } + + public void setResource(String resource) { + Resource = resource; + } + + public String getRequestId() { + return RequestId; + } + + public void setRequestId(String requestId) { + RequestId = requestId; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/service/BucketService.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/BucketService.java new file mode 100644 index 0000000..0e0bad9 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/BucketService.java @@ -0,0 +1,21 @@ +package com.mesalab.hos.service; + +import com.mesalab.hos.entity.ResEntity; + +/** + * Created by wk on 2020/4/20. + */ + +public interface BucketService { + + ResEntity changeTTL(int day, String user, String bucket); + + ResEntity truncateData(String user, String bucket); + + ResEntity getBucketList(String user); + + ResEntity createBucket(String user, String bucket); + + ResEntity deleteBucket(String user, String bucket); + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/service/DiskService.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/DiskService.java new file mode 100644 index 0000000..e25964f --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/DiskService.java @@ -0,0 +1,18 @@ +package com.mesalab.hos.service; + +import com.mesalab.hos.entity.DiskResponse; +import com.mesalab.hos.entity.ResEntity; +import org.apache.hadoop.conf.Configuration; + +import java.util.List; + +/** + * Created by wk on 2020/4/20. + */ + +public interface DiskService { + + + DiskResponse getHBaseSize(List<String> users, String path); + + } diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/service/ObjectService.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/ObjectService.java new file mode 100644 index 0000000..c052833 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/ObjectService.java @@ -0,0 +1,42 @@ +package com.mesalab.hos.service; + +import com.mesalab.hos.entity.FileEntity; +import com.mesalab.hos.entity.MultipartUpload.Part; +import com.mesalab.hos.entity.ResEntity; +import com.mesalab.hos.entity.delete.DeleteObj; + +import java.util.List; + + +/** + * Created by wk on 2020/4/20. 
+ */ +public interface ObjectService { + ResEntity putFile(FileEntity fileEntity); + + ResEntity createMultipartUpload(FileEntity fileEntity); + + ResEntity putFilePart(FileEntity fileEntity); + + ResEntity appendFile(FileEntity fileEntity); + + ResEntity getFile(String user, String bucket, String fileName); + + ResEntity getColumn(String user, String bucket, String fileName, String column); + + ResEntity deleteFile(FileEntity fileEntity); + + ResEntity deleteFileList(String bucket, String user, List<DeleteObj> dr); + + ResEntity getObjectList(String username, String bucket, int maxKeys, String prefix, String starttime, String endtime); + + ResEntity abortMultipartUpload(String user,String bucket,String filename); + + ResEntity completeMultipartUpload(String bucket, String user,String filename, List<Part> parts,long timestamp); + + ResEntity getMultipartUploadObjects(String bucket, int maxUploads,String user,String uploadsPrefix); + + ResEntity getObjectMultipartUploadParts(String bucket, int maxUploads,String user,String fileName,String partNumberMarker,String uploadId); + + long getFileSize(String bucket,String filename); +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/service/impl/BucketServiceImpl.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/impl/BucketServiceImpl.java new file mode 100644 index 0000000..a99e2e5 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/impl/BucketServiceImpl.java @@ -0,0 +1,170 @@ +package com.mesalab.hos.service.impl; + +import com.mesalab.hos.config.HbaseProperties; +import com.mesalab.hos.dao.BucketDao; +import com.mesalab.hos.entity.ResEntity; +import com.mesalab.hos.service.BucketService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.stereotype.Service; + +/** + * Created by wk1 on 2020/7/14. 
+ */ +@Service +@ConditionalOnProperty(value = "hos.simple", havingValue = "1") +public class BucketServiceImpl implements BucketService { + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + + @Autowired + private BucketDao bucketDao; + @Autowired + HbaseProperties hbaseProperties; + + @Override + public ResEntity changeTTL(int day, String user, String bucket) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + String tableName = user + ":" + bucket; + String indexTimeTableName = user + ":" + hbaseProperties.getTime_index_table_prefix() + bucket; + String indexFilenameTableName = user + ":" + hbaseProperties.getFilename_index_table_prefix() + bucket; + String partFileTableName = user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket; + try { + if (!bucketDao.ifExistTable(tableName) || !bucketDao.ifExistTable(indexTimeTableName) || !bucketDao.ifExistTable(indexFilenameTableName) || !bucketDao.ifExistTable(partFileTableName)) { + return new ResEntity("NoSuchBucket", false, ""); + } + resEntity = bucketDao.changeDataTableTTL(day, tableName); + if (!resEntity.getIsSuccess()) { + return resEntity; + } + resEntity = bucketDao.changeIndexTableTTL(day, indexTimeTableName); + if (!resEntity.getIsSuccess()) { + return resEntity; + } + resEntity = bucketDao.changeIndexTableTTL(day, indexFilenameTableName); + if (!resEntity.getIsSuccess()) { + return resEntity; + } + resEntity = bucketDao.changeIndexTableTTL(day, partFileTableName); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setIsSuccess(false); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity truncateData(String user, String bucket) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + String tableName = user + ":" + bucket; + String indexTimeTableName = user + ":" + hbaseProperties.getTime_index_table_prefix() + bucket; + String indexFilenameTableName = user + ":" + hbaseProperties.getFilename_index_table_prefix() + bucket; + String partFileTableName = user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket; + try { + if (!bucketDao.ifExistTable(tableName) || !bucketDao.ifExistTable(indexTimeTableName) || !bucketDao.ifExistTable(indexFilenameTableName) || !bucketDao.ifExistTable(partFileTableName)) { + return new ResEntity("NoSuchBucket", false, ""); + } + resEntity = bucketDao.truncateTable(tableName); + if (!resEntity.getIsSuccess()) { + return resEntity; + } + resEntity = bucketDao.truncateTable(indexTimeTableName); + if (!resEntity.getIsSuccess()) { + return resEntity; + } + resEntity = bucketDao.truncateTable(indexFilenameTableName); + if (!resEntity.getIsSuccess()) { + return resEntity; + } + resEntity = bucketDao.truncateTable(partFileTableName); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setIsSuccess(false); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity getBucketList(String user) { + return bucketDao.getTableListForNs(user); + } + + @Override + public ResEntity createBucket(String user, String bucket) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + String tableName = user + ":" + bucket; + String indexTimeTableName = user + ":" + hbaseProperties.getTime_index_table_prefix() + bucket; + String indexFilenameTableName = user + ":" + 
hbaseProperties.getFilename_index_table_prefix() + bucket; + String partFileTableName = user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket; + try { + if (bucketDao.ifExistTable(tableName) || bucketDao.ifExistTable(indexTimeTableName) || bucketDao.ifExistTable(indexFilenameTableName) || bucketDao.ifExistTable(partFileTableName)) { + return new ResEntity("BucketAlreadyOwnedByYou", false, ""); + } + resEntity = bucketDao.createDataTable(tableName); + if (resEntity.getIsSuccess()) { + resEntity = bucketDao.createIndexTable(indexTimeTableName); + if (resEntity.getIsSuccess()) { + resEntity = bucketDao.createIndexTable(indexFilenameTableName); + if (resEntity.getIsSuccess()) { + resEntity = bucketDao.createIndexTable(partFileTableName); + if (!resEntity.getIsSuccess()) { + bucketDao.deleteTable(tableName); + bucketDao.deleteTable(indexTimeTableName); + bucketDao.deleteTable(indexFilenameTableName); + } + } else { + bucketDao.deleteTable(tableName); + bucketDao.deleteTable(indexTimeTableName); + } + } else { + bucketDao.deleteTable(tableName); + } + } + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setIsSuccess(false); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity deleteBucket(String user, String bucket) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + String tableName = user + ":" + bucket; + String indexTimeTableName = user + ":" + hbaseProperties.getTime_index_table_prefix() + bucket; + String indexFilenameTableName = user + ":" + hbaseProperties.getFilename_index_table_prefix() + bucket; + String partFileTableName = user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket; + try { + resEntity = bucketDao.deleteTable(tableName); + if (!resEntity.getIsSuccess()) { + return resEntity; + } + resEntity = bucketDao.deleteTable(indexTimeTableName); + if (!resEntity.getIsSuccess()) { + return resEntity; + } + resEntity = bucketDao.deleteTable(indexFilenameTableName); + if (!resEntity.getIsSuccess()) { + return resEntity; + } + resEntity = bucketDao.deleteTable(partFileTableName); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setIsSuccess(false); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + +}
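For reference, a caller of BucketService only sees the ResEntity success flag and code; the controller layer is not part of this hunk, so the request mapping, package name, and error mapping below are illustrative assumptions, not this commit's actual controller — a minimal sketch of how the interface could be consumed:

package com.mesalab.hos.controller; // hypothetical package, for illustration only

import com.mesalab.hos.entity.ResEntity;
import com.mesalab.hos.service.BucketService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PutMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class BucketControllerSketch {

    @Autowired
    private BucketService bucketService;

    // Hypothetical endpoint: PUT /{user}/{bucket} creates the bucket via
    // whichever BucketService implementation Spring activates for the
    // configured hos.simple value.
    @PutMapping("/{user}/{bucket}")
    public ResponseEntity<String> createBucket(@PathVariable String user,
                                               @PathVariable String bucket) {
        ResEntity res = bucketService.createBucket(user, bucket);
        if (res.getIsSuccess()) {
            return ResponseEntity.ok("");
        }
        // BucketServiceImpl reports failures through ResEntity.getCode(),
        // e.g. "BucketAlreadyOwnedByYou" or "InternalError".
        HttpStatus status = "InternalError".equals(res.getCode())
                ? HttpStatus.INTERNAL_SERVER_ERROR
                : HttpStatus.CONFLICT;
        return ResponseEntity.status(status).body(res.getCode());
    }
}

Note the rollback pattern in createBucket above: if creating a later index table fails, the tables already created for the bucket are deleted again, so a bucket never ends up with only part of its data and index tables.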
\ No newline at end of file diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/service/impl/BucketSimpleServiceImpl.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/impl/BucketSimpleServiceImpl.java new file mode 100644 index 0000000..622e68f --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/impl/BucketSimpleServiceImpl.java @@ -0,0 +1,121 @@ +package com.mesalab.hos.service.impl; + +import com.mesalab.hos.config.HbaseProperties; +import com.mesalab.hos.config.HosProperties; +import com.mesalab.hos.dao.BucketDao; +import com.mesalab.hos.entity.ResEntity; +import com.mesalab.hos.service.BucketService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.stereotype.Service; + +@Service +@ConditionalOnProperty(value = "hos.simple", havingValue = "0") +public class BucketSimpleServiceImpl implements BucketService { + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + @Autowired + private BucketDao bucketDao; + @Autowired + HbaseProperties hbaseProperties; + + @Override + public ResEntity changeTTL(int day, String user, String bucket) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + String tableName = user + ":" + bucket; + String partFileTableName = user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket; + try { + if (!bucketDao.ifExistTable(tableName) || !bucketDao.ifExistTable(partFileTableName)) { + return new ResEntity("NoSuchBucket", false, ""); + } + resEntity = bucketDao.changeDataTableTTL(day, tableName); + if (!resEntity.getIsSuccess()) { + return resEntity; + } + resEntity = bucketDao.changeIndexTableTTL(day, partFileTableName); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setIsSuccess(false); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity truncateData(String user, String bucket) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + String tableName = user + ":" + bucket; + String partFileTableName = user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket; + try { + if (!bucketDao.ifExistTable(tableName) || !bucketDao.ifExistTable(partFileTableName)) { + return new ResEntity("NoSuchBucket", false, ""); + } + resEntity = bucketDao.truncateTable(tableName); + if (!resEntity.getIsSuccess()) { + return resEntity; + } + resEntity = bucketDao.truncateTable(partFileTableName); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setIsSuccess(false); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity getBucketList(String user) { + return bucketDao.getTableListForNs(user); + } + + @Override + public ResEntity createBucket(String user, String bucket) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + String tableName = user + ":" + bucket; + String partFileTableName = user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket; + try { + if (bucketDao.ifExistTable(tableName) || bucketDao.ifExistTable(partFileTableName)) { + return new ResEntity("BucketAlreadyOwnedByYou", false, ""); + } + resEntity = bucketDao.createDataTable(tableName); + if (resEntity.getIsSuccess()) { + resEntity = bucketDao.createIndexTable(partFileTableName); + if 
(!resEntity.getIsSuccess()) { + bucketDao.deleteTable(tableName); + } + } + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setIsSuccess(false); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity deleteBucket(String user, String bucket) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + String tableName = user + ":" + bucket; + String partFileTableName = user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket; + try { + resEntity = bucketDao.deleteTable(tableName); + if (!resEntity.getIsSuccess()) { + return resEntity; + } + resEntity = bucketDao.deleteTable(partFileTableName); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setIsSuccess(false); + resEntity.setCode("InternalError"); + } + return resEntity; + } + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/service/impl/DiskServiceImpl.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/impl/DiskServiceImpl.java new file mode 100644 index 0000000..5c5ece9 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/impl/DiskServiceImpl.java @@ -0,0 +1,46 @@ +package com.mesalab.hos.service.impl; + +import com.mesalab.hos.config.HbaseProperties; +import com.mesalab.hos.dao.BucketDao; +import com.mesalab.hos.dao.DiskDao; +import com.mesalab.hos.entity.DiskResponse; +import com.mesalab.hos.entity.ResEntity; +import com.mesalab.hos.service.BucketService; +import com.mesalab.hos.service.DiskService; +import org.apache.hadoop.conf.Configuration; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.stereotype.Service; + +import java.util.List; + +/** + * Created by wk1 on 2020/7/14. + */ +@Service +public class DiskServiceImpl implements DiskService { + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + + @Autowired + private DiskDao diskDao; + + + public DiskResponse getHBaseSize(List<String> users, String path) { + DiskResponse diskResponse = new DiskResponse(); + try { + diskResponse.setHosCapacity(diskDao.getHBaseMaxSize(path)); + Long used = 0L; + for (String user : users) { + used += diskDao.getHBaseCurrSize(path + "/data/" + user); + used += diskDao.getHBaseCurrSize(path + "/mobdir/data/" + user); + } + diskResponse.setHosUsed(used); + }catch (Exception e){ + diskResponse.setHosUsed(-1L); + diskResponse.setHosCapacity(-1L); + logger.error(e); + } + return diskResponse; + } + +}
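getHBaseSize above sums, per user, the size of the normal data directory (path + "/data/" + user) and the MOB data directory (path + "/mobdir/data/" + user). DiskDao itself is not included in this hunk; the sketch below is only an assumption about how such numbers could be obtained from the Hadoop FileSystem API, with a made-up class name, and is not the project's actual DAO:

package com.mesalab.hos.dao; // illustrative placement only; the real DiskDao is defined elsewhere

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

public class HdfsSizeSketch {

    private final Configuration conf;

    public HdfsSizeSketch(Configuration conf) {
        // conf is assumed to point at the same HDFS cluster that backs HBase
        this.conf = conf;
    }

    // Total capacity of the filesystem hosting the given HBase root path.
    public long getHBaseMaxSize(String path) throws IOException {
        try (FileSystem fs = FileSystem.newInstance(conf)) {
            return fs.getStatus(new Path(path)).getCapacity();
        }
    }

    // Bytes currently stored under one directory,
    // e.g. <hbasePath>/data/<user> or <hbasePath>/mobdir/data/<user>.
    public long getHBaseCurrSize(String dir) throws IOException {
        try (FileSystem fs = FileSystem.newInstance(conf)) {
            return fs.getContentSummary(new Path(dir)).getLength();
        }
    }
}

Whatever the DAO does internally, DiskServiceImpl above packages the results as DiskResponse.hosCapacity and hosUsed, and falls back to -1 for both values when the lookup throws.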
\ No newline at end of file diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/service/impl/ObjectServiceImpl.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/impl/ObjectServiceImpl.java new file mode 100644 index 0000000..66828fa --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/impl/ObjectServiceImpl.java @@ -0,0 +1,648 @@ +package com.mesalab.hos.service.impl; + +import com.mesalab.hos.config.HbaseProperties; +import com.mesalab.hos.dao.*; +import com.mesalab.hos.entity.FileEntity; +import com.mesalab.hos.entity.MultipartUpload.Part; +import com.mesalab.hos.entity.ResEntity; +import com.mesalab.hos.entity.ColumnEntity; +import com.mesalab.hos.entity.delete.DeleteError; +import com.mesalab.hos.entity.delete.DeleteObj; +import com.mesalab.hos.entity.delete.DeleteResponse; +import com.mesalab.hos.entity.getMultipartUpload.*; +import com.mesalab.hos.entity.partEntity; +import com.mesalab.hos.entity.select.Content; +import com.mesalab.hos.entity.select.ListObjects; +import com.mesalab.hos.service.ObjectService; +import com.mesalab.hos.util.PublicUtil; +import com.mesalab.hos.util.ResponseMessageUtil; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.stereotype.Service; +import org.springframework.util.DigestUtils; + +import java.time.Instant; +import java.util.*; +import java.util.stream.Collectors; + +import static com.mesalab.hos.util.PublicUtil.getRandomHead; +import static com.mesalab.hos.util.PublicUtil.getRowKey; + +/** + * Created by wk on 2020/4/20. + */ +@Service +@ConditionalOnProperty(value = "hos.simple", havingValue = "1") +public class ObjectServiceImpl implements ObjectService { + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + + @Autowired + private IndexFileNameDao indexFileNameDao; + @Autowired + private IndexTimeDao indexTimeDao; + @Autowired + private ObjectDao objectDao; + @Autowired + private PartFileDao partFileDao; + @Autowired + HbaseProperties hbaseProperties; + + @Override + public ResEntity putFile(FileEntity fileEntity) { + + try { + String randomHead = getRandomHead(hbaseProperties.getFilename_head()); + fileEntity.setIndexTimeKey(randomHead + "|" + fileEntity.getTimestamp() + "|" + getRowKey(fileEntity.getFileName())); + fileEntity.setIndexFileNameKey(randomHead + "|" + fileEntity.getFileName()); + indexTimeDao.putFile(fileEntity.getUser() + ":" + hbaseProperties.getTime_index_table_prefix() + fileEntity.getBucket(), fileEntity.getIndexTimeKey(), fileEntity.getFileName(), String.valueOf(fileEntity.getTimestamp())); + indexFileNameDao.putFile(fileEntity.getUser() + ":" + hbaseProperties.getFilename_index_table_prefix() + fileEntity.getBucket(), fileEntity.getIndexFileNameKey(), fileEntity.getFileName(), String.valueOf(fileEntity.getTimestamp())); + } catch (Exception e) { + logger.error(e.toString()); + } + return objectDao.putFile(fileEntity); + } + + @Override + public ResEntity putFilePart(FileEntity fileEntity) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try { + Boolean isExit = partFileDao.ifExistRowKey(fileEntity.getUser() + ":" + hbaseProperties.getPartfile_index_table_prefix() + fileEntity.getBucket(), getRowKey(fileEntity.getFileName())); + if (isExit) { + String partFilename = fileEntity.getFileName() + "|" + 
fileEntity.getPartNumber(); + partFileDao.appendColumn(fileEntity.getUser() + ":" + hbaseProperties.getPartfile_index_table_prefix() + fileEntity.getBucket(), getRowKey(fileEntity.getFileName()), "partname", fileEntity.getPartNumber() + ","); + fileEntity.setFileName(partFilename); + resEntity = objectDao.putFile(fileEntity); + } else { + logger.error("NoSuchUpload ! not create multipart upload"); + resEntity.setCode("NoSuchUpload"); + } + } catch (Exception e) { + resEntity.setCode("InternalError"); + logger.error(e.toString()); + } + return resEntity; + } + + @Override + public ResEntity createMultipartUpload(FileEntity fileEntity) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try { + Boolean isPartFileExit = partFileDao.ifExistRowKey(fileEntity.getUser() + ":" + hbaseProperties.getPartfile_index_table_prefix() + fileEntity.getBucket(), getRowKey(fileEntity.getFileName())); + Boolean isDataFileExit = objectDao.ifExistRowKey(fileEntity.getUser() + ":" + fileEntity.getBucket(), getRowKey(fileEntity.getFileName())); + if (isPartFileExit || isDataFileExit) { + resEntity.setCode("InvalidObjectState"); + } else { + resEntity = partFileDao.putFile(fileEntity); + } + } catch (Exception e) { + resEntity.setCode("InternalError"); + logger.error(e.toString()); + } + return resEntity; + } + + @Override + public ResEntity appendFile(FileEntity fileEntity) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + String tableName = fileEntity.getUser() + ":" + fileEntity.getBucket(); + String partfileIndexTableName = fileEntity.getUser() + ":" + hbaseProperties.getPartfile_index_table_prefix() + fileEntity.getBucket(); + String fileName = fileEntity.getFileName(); + String partName = fileEntity.getPartName(); + fileEntity.setPartName(partName + ","); + + try { + if (objectDao.ifExistRowKey(partfileIndexTableName, getRowKey(fileName))) { + resEntity.setCode("ObjectNotAppendable"); + } else { + + Boolean ifExist =objectDao.ifExistRowKey(tableName, getRowKey(fileName)); + if (ifExist) { + resEntity = objectDao.getAllFileMeta(tableName, getRowKey(fileName)); + FileEntity fileMeta = (FileEntity) resEntity.getReturnEntity(); + if (fileMeta.getIsParent() < 1) { + resEntity.setIsSuccess(false); + resEntity.setCode("ObjectNotAppendable"); + return resEntity; + } else { + fileEntity.setFileName(fileName + "|" + partName); + objectDao.putFile(fileEntity); + resEntity = objectDao.appendColumn(tableName, getRowKey(fileName), "partname", fileEntity.getPartName()); + } + if (partName.equals("1")) { + String randomHead = getRandomHead(hbaseProperties.getFilename_head()); + fileEntity.setIndexTimeKey(randomHead + "|" + fileEntity.getTimestamp() + "|" + getRowKey(fileName)); + fileEntity.setIndexFileNameKey(randomHead + "|" + fileName); + fileEntity.setFileName(fileName); + objectDao.appendIndexKey(fileEntity); + indexTimeDao.putFile(fileEntity.getUser() + ":" + hbaseProperties.getTime_index_table_prefix() + fileEntity.getBucket(), fileEntity.getIndexTimeKey(), fileName, String.valueOf(fileEntity.getTimestamp())); + indexFileNameDao.putFile(fileEntity.getUser() + ":" + hbaseProperties.getFilename_index_table_prefix() + fileEntity.getBucket(), fileEntity.getIndexFileNameKey(), fileName, String.valueOf(fileEntity.getTimestamp())); + } + } else { + fileEntity.setFileName(fileName + "|" + partName); + resEntity = objectDao.putFile(fileEntity); + fileEntity.setFileName(fileName); + fileEntity.setFileSize(0); + 
fileEntity.setIsParent(1); + if (partName.equals("1")) { + String randomHead = getRandomHead(hbaseProperties.getFilename_head()); + fileEntity.setIndexTimeKey(randomHead + "|" + fileEntity.getTimestamp() + "|" + getRowKey(fileName)); + fileEntity.setIndexFileNameKey(randomHead + "|" + fileName); + resEntity = objectDao.putAppendFileMeta(fileEntity); + //objectDao.appendIndexKey(fileEntity); + indexTimeDao.putFile(fileEntity.getUser() + ":" + hbaseProperties.getTime_index_table_prefix() + fileEntity.getBucket(), fileEntity.getIndexTimeKey(), fileName, String.valueOf(fileEntity.getTimestamp())); + indexFileNameDao.putFile(fileEntity.getUser() + ":" + hbaseProperties.getFilename_index_table_prefix() + fileEntity.getBucket(), fileEntity.getIndexFileNameKey(), fileName, String.valueOf(fileEntity.getTimestamp())); + + } else { + resEntity = objectDao.putAppendFileMeta(fileEntity); + } + } + } + } catch (Exception e) { + resEntity.setCode("InternalError"); + logger.error(e.toString()); + } + return resEntity; + } + + @Override + public ResEntity getFile(String user, String bucket, String fileName) { + return objectDao.getFile(user + ":" + bucket, fileName); + } + + @Override + public ResEntity getColumn(String user, String bucket, String fileName, String column) { + return objectDao.getColumn(user + ":" + bucket, getRowKey(fileName), column); + } + + @Override + public ResEntity deleteFile(FileEntity fileEntity) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try { + if (objectDao.ifExistRowKey(fileEntity.getUser() + ":" + fileEntity.getBucket(), getRowKey(fileEntity.getFileName()))) { + ResEntity re = objectDao.getAllFileMeta(fileEntity.getUser() + ":" + fileEntity.getBucket(), getRowKey(fileEntity.getFileName())); + FileEntity fileMeta = (FileEntity) re.getReturnEntity(); + String[] indexTimeKeys = fileMeta.getIndexTimeKey().split(","); + Boolean isDeleteIndexTime = false; + for (String indexTimeKey : indexTimeKeys) { + isDeleteIndexTime = indexTimeDao.deleteFile(fileEntity.getUser() + ":" + hbaseProperties.getTime_index_table_prefix() + fileEntity.getBucket(), indexTimeKey); + if (!isDeleteIndexTime) { + return resEntity; + } + } + if (isDeleteIndexTime) { + String[] indexFileNameKeys = fileMeta.getIndexFileNameKey().split(","); + Boolean isDeleteIndexFilename = false; + for (String indexFileNameKey : indexFileNameKeys) { + isDeleteIndexFilename = indexFileNameDao.deleteFile(fileEntity.getUser() + ":" + hbaseProperties.getFilename_index_table_prefix() + fileEntity.getBucket(), indexFileNameKey); + if (!isDeleteIndexFilename) { + return resEntity; + } + } + if (isDeleteIndexFilename) { + /*if (objectDao.ifMultiFile(fileEntity.getUser() + ":" + fileEntity.getBucket(), getRowKey(fileEntity.getFileName()))) { + resEntity = objectDao.deleteMultipartUploadFile(fileEntity.getUser() + ":" + fileEntity.getBucket(), fileEntity.getFileName()); + } else {*/ + resEntity = objectDao.deleteFile(fileEntity.getUser() + ":" + fileEntity.getBucket(), getRowKey(fileEntity.getFileName())); + + /* if (!resEntity.getIsSuccess()) { + indexTimeDao.putFile(fileEntity.getUser() + ":" + hbaseProperties.getTime_index_table_prefix() + fileEntity.getBucket(), String.valueOf(timestamp) + "|" + getRowKey(fileEntity.getFileName()), fileEntity.getFileName()); + indexFileNameDao.putFile(fileEntity.getUser() + ":" + hbaseProperties.getFilename_index_table_prefix() + fileEntity.getBucket(), fileEntity.getFileName()); + logger.error("interrupt delete ! 
delete file error rollback 2 table"); + }*/ + } else { + // indexFileNameDao.putFile(fileEntity.getUser() + ":" + hbaseProperties.getFilename_index_table_prefix() + fileEntity.getBucket(), fileEntity.getFileName()); + logger.error("interrupt delete ! delete filename_table error rollback 1 table"); + } + } else { + logger.error("interrupt delete ! delete time_table error"); + } + } else { + logger.error("interrupt delete ! get columnEntity from filname_table error"); + } + resEntity.setIsSuccess(true); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity deleteFileList(String bucket, String user, List<DeleteObj> dr) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + List<DeleteError> errors = new ArrayList<>(); + try { + for (DeleteObj deleteObj : dr) { + FileEntity fileEntity = new FileEntity(); + fileEntity.setBucket(bucket); + fileEntity.setUser(user); + fileEntity.setFileName(deleteObj.getKey()); + ResEntity re = deleteFile(fileEntity); + if (!re.getIsSuccess()) { + DeleteError deleteError = new DeleteError(); + deleteError.setKey(deleteObj.getKey()); + deleteError.setCode(re.getCode()); + deleteError.setMessage(ResponseMessageUtil.getResponseMessage(resEntity.getCode())); + errors.add(deleteError); + } + } + if (errors.size() > 0) { + DeleteResponse deleteResponse = new DeleteResponse(); + deleteResponse.setErrors(errors); + resEntity.setReturnEntity(deleteResponse); + } else { + resEntity.setIsSuccess(true); + } + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity getObjectList(String user, String bucket, int maxKeys, String prefix, String starttime, String endtime) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + ListObjects listObjects = new ListObjects(); + List<Content> listContents = new ArrayList<>(); + try { + if (prefix.equals("")) { + for (String head : hbaseProperties.getFilename_head()) { + listContents.addAll(indexTimeDao.getObjectListWithTimeFilter(user + ":" + hbaseProperties.getTime_index_table_prefix() + bucket, maxKeys, head + "|" + starttime, head + "|" + endtime, prefix)); + } + } else { + for (String head : hbaseProperties.getFilename_head()) { + listContents.addAll(indexFileNameDao.getObjectListWithPrefixFilter(user + ":" + hbaseProperties.getFilename_index_table_prefix() + bucket, maxKeys, starttime, endtime, head + "|" + prefix)); + } + } + + /*Set<Content> treeSet = new TreeSet<Content>(new Comparator<Content>(){ + @Override + public int compare(Content o1, Content o2) { + return (int) (Instant.parse(o2.getLastModified()).toEpochMilli() - Instant.parse(o1.getLastModified()).toEpochMilli()); + } + }); + treeSet.addAll(listContents); + List<Content> list = new LinkedList<Content>(treeSet);*/ + + listContents.sort(new Comparator<Content>() { + @Override + public int compare(Content o1, Content o2) { + return (int) (Instant.parse(o2.getLastModified()).toEpochMilli() - Instant.parse(o1.getLastModified()).toEpochMilli()); + } + }); + listContents = listContents.stream().collect(Collectors.collectingAndThen(Collectors.toCollection(() -> new TreeSet<>(Comparator.comparing(Content::getKey))), ArrayList::new)); + + + if (listContents.size() > maxKeys && maxKeys != 0) { + listContents.subList(0, maxKeys); + } + for (Content content:listContents){ + long size = 
getFileSize(user+":"+bucket,content.getKey()); + content.setSize(String.valueOf(size)); + } + listObjects.setContents(listContents); + listObjects.setKeyCount(String.valueOf(listContents.size())); + listObjects.setMaxKeys(String.valueOf(maxKeys)); + listObjects.setTruncated(false); + listObjects.setPrefix(prefix); + listObjects.setName(bucket); + resEntity.setReturnEntity(listObjects); + resEntity.setIsSuccess(true); + } catch (TableNotFoundException e) { + logger.error(e.toString()); + resEntity.setCode("NoSuchBucket"); + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity abortMultipartUpload(String user, String bucket, String filename) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try { + ResEntity ce = partFileDao.getFile(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, getRowKey(filename)); + if (ce.getIsSuccess()) { + partEntity pe = (partEntity) ce.getReturnEntity(); + if (!"".equals(pe.getFileName()) && pe.getFileName() != null) { + String partFile = pe.getPartName(); + String[] partArray = partFile.split(","); + for (String part : partArray) { + if (!"".equals(part)) { + resEntity = objectDao.deleteFile(user + ":" + bucket, getRowKey(filename + "|" + part)); + if (!resEntity.getIsSuccess()) { + break; + } + } + } + resEntity = partFileDao.deleteFile(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, getRowKey(filename)); + } + } else { + resEntity.setCode("NoSuchKey"); + logger.error("interrupt Multipart delete ! get partfile error"); + } + resEntity.setIsSuccess(true); + } catch (Exception e) { + resEntity.setCode("InternalError"); + logger.error(e.toString()); + } + return resEntity; + } + + @Override + public ResEntity completeMultipartUpload(String bucket, String user, String filename, List<Part> parts, long timestamp) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + long filesize = 0; + int i = 1; + try { + ResEntity ce = partFileDao.getFile(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, getRowKey(filename)); + if (ce.getIsSuccess()) { + partEntity pEntity = (partEntity) ce.getReturnEntity(); + String partFile = pEntity.getPartName(); + String[] partArray = partFile.split(","); + Map partNames = new HashMap<>(); + for (String part : partArray) { + if (!"".equals(part)) { + partNames.put(getRowKey(filename + "|" + part), ""); + } + } + if (partNames.size() != parts.size()) { + logger.error("parts size not equal exist partfile size"); + resEntity.setCode("InvalidPart"); + } else { + StringBuilder partFileName = new StringBuilder(); + for (Part part : parts) { + String key = part.getETag(); + String rowKey = getRowKey(filename + "|" + i); + if (key.equals(rowKey) && partNames.containsKey(key)) { + ResEntity res = objectDao.getColumn(user + ":" + bucket, key, "filesize"); + if (!res.getIsSuccess()) { + logger.error("get part key error"); + resEntity.setCode("InvalidPart"); + return resEntity; + } + partFileName.append(i).append(","); + ColumnEntity columnEntity = (ColumnEntity) res.getReturnEntity(); + long partSize = Long.parseLong(columnEntity.getColumn()); + filesize = filesize + partSize; + } else { + logger.error("partkey not equal partfile"); + resEntity.setCode("InvalidPart"); + return resEntity; + } + i++; + } + FileEntity fileEntity = new FileEntity(); + 
fileEntity.setFileName(pEntity.getFileName()); + fileEntity.setUser(user); + fileEntity.setBucket(bucket); + fileEntity.setMessage(pEntity.getMessage()); + fileEntity.setTimestamp(timestamp); + fileEntity.setFileSize(filesize); + fileEntity.setIsParent(1); + fileEntity.setFileType(pEntity.getFileType()); + fileEntity.setPartName(partFileName.toString()); + String randomHead = getRandomHead(hbaseProperties.getFilename_head()); + fileEntity.setIndexTimeKey(randomHead + "|" + fileEntity.getTimestamp() + "|" + getRowKey(fileEntity.getFileName())); + fileEntity.setIndexFileNameKey(randomHead + "|" + fileEntity.getFileName()); + objectDao.putAllFileMeta(fileEntity); + //objectDao.appendIndexKey(fileEntity); + indexTimeDao.putFile(fileEntity.getUser() + ":" + hbaseProperties.getTime_index_table_prefix() + fileEntity.getBucket(), fileEntity.getIndexTimeKey(), fileEntity.getFileName(), String.valueOf(fileEntity.getTimestamp())); + indexFileNameDao.putFile(fileEntity.getUser() + ":" + hbaseProperties.getFilename_index_table_prefix() + fileEntity.getBucket(), fileEntity.getIndexFileNameKey(), fileEntity.getFileName(), String.valueOf(fileEntity.getTimestamp())); + partFileDao.deleteFile(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, pEntity.getUploadId()); + resEntity.setIsSuccess(true); + } + } + } catch (Exception e) { + logger.error(e.toString()); + resEntity.setCode("InternalError"); + } + return resEntity; + } + + @Override + public ResEntity getMultipartUploadObjects(String bucket, int maxUploads, String user, String uploadsPrefix) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try { + ListMultipartUploadsResponse listMultipartUploadsResponse = new ListMultipartUploadsResponse(); + listMultipartUploadsResponse.setTruncated(false); + listMultipartUploadsResponse.setBucket(bucket); + listMultipartUploadsResponse.setMaxUploads(maxUploads); + if (!"".equals(uploadsPrefix)) { + resEntity = partFileDao.getMultipartUploadObjectsByPrefix(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, resEntity, uploadsPrefix); + listMultipartUploadsResponse.setPrefix(uploadsPrefix); + } else { + resEntity = partFileDao.getMultipartUploadObjects(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, resEntity); + } + Upload upload; + List<Upload> maxUploadsList = new ArrayList<>(); + List<Upload> allUploadList = (ArrayList<Upload>) resEntity.getReturnEntity(); + if (allUploadList.size() > 0) { + allUploadList.sort(new Comparator<Upload>() { + @Override + public int compare(Upload o1, Upload o2) { + return Long.valueOf(o1.getInitiated()).compareTo(Long.valueOf(o2.getInitiated())); + } + }); + if (maxUploads < allUploadList.size()) { + listMultipartUploadsResponse.setTruncated(true); + for (int i = 0; i < maxUploads; i++) { + upload = allUploadList.get(i); + upload.setStorageClass("STANDARD"); + upload.setInitiator(new Initiator(user, DigestUtils.md5DigestAsHex(user.getBytes()))); + upload.setOwner(new Owner(user, DigestUtils.md5DigestAsHex(user.getBytes()))); + long timestamp = Long.parseLong(upload.getInitiated()); + upload.setInitiated(PublicUtil.getUTCTime(timestamp)); + maxUploadsList.add(i, upload); + } + listMultipartUploadsResponse.setUpload(maxUploadsList); + Upload nextUpload = allUploadList.get(maxUploads); + String nextKeyMarker = nextUpload.getKey(); + String nextUploadIdMarker = nextUpload.getUploadId(); + listMultipartUploadsResponse.setNextKeyMarker(nextKeyMarker); + 
listMultipartUploadsResponse.setNextUploadIdMarker(nextUploadIdMarker); + } else { + listMultipartUploadsResponse.setTruncated(false); + for (int i = 0; i < allUploadList.size(); i++) { + upload = allUploadList.get(i); + upload.setStorageClass("STANDARD"); + upload.setInitiator(new Initiator(user, DigestUtils.md5DigestAsHex(user.getBytes()))); + upload.setOwner(new Owner(user, DigestUtils.md5DigestAsHex(user.getBytes()))); + long timestamp = Long.parseLong(upload.getInitiated()); + upload.setInitiated(PublicUtil.getUTCTime(timestamp)); + allUploadList.set(i, upload); + } + listMultipartUploadsResponse.setUpload(allUploadList); + } + } + resEntity.setReturnEntity(listMultipartUploadsResponse); + resEntity.setIsSuccess(true); + } catch (Exception e) { + resEntity.setCode("InternalError"); + logger.error(e.toString()); + } + return resEntity; + } + + @Override + public ResEntity getObjectMultipartUploadParts(String bucket, int maxUploads, String user, String fileName, String partNumberMarker, String uploadId) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try { + ListPartsResponse listPartsResponse = new ListPartsResponse(); + listPartsResponse.setBucket(bucket); + listPartsResponse.setKey(fileName); + listPartsResponse.setInitiator(new Initiator(user, DigestUtils.md5DigestAsHex(user.getBytes()))); + listPartsResponse.setOwner(new Owner(user, DigestUtils.md5DigestAsHex(user.getBytes()))); + listPartsResponse.setStorageClass("STANDARD"); + listPartsResponse.setTruncated(false); + listPartsResponse.setMaxParts(maxUploads); + if (partFileDao.ifExistRowKey(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, PublicUtil.getRowKey(fileName)) && partFileDao.ifExistRowKey(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, uploadId)) { + resEntity.setReturnEntity(listPartsResponse); + resEntity = partFileDao.getObjectParts(user, bucket, fileName, resEntity); + listPartsResponse = (ListPartsResponse) resEntity.getReturnEntity(); + List<com.mesalab.hos.entity.getMultipartUpload.Part> partList = listPartsResponse.getPart(); + partList.sort(new Comparator<com.mesalab.hos.entity.getMultipartUpload.Part>() { + @Override + public int compare(com.mesalab.hos.entity.getMultipartUpload.Part o1, com.mesalab.hos.entity.getMultipartUpload.Part o2) { + return o1.getPartNumber() - (o2.getPartNumber()); + } + }); + int objectCount = partList.size(); + List<com.mesalab.hos.entity.getMultipartUpload.Part> maxPartsList = new ArrayList<>(); + if (!"".equals(partNumberMarker)) { + int intPartNumberMarker = Integer.parseInt(partNumberMarker); + if (intPartNumberMarker >= 0) { + listPartsResponse.setPartNumberMarker(partNumberMarker); + if (maxUploads + intPartNumberMarker < objectCount) { + listPartsResponse.setTruncated(true); + for (int i = intPartNumberMarker; i < maxUploads + intPartNumberMarker; i++) { + com.mesalab.hos.entity.getMultipartUpload.Part part = partList.get(i); + long timestamp = Long.parseLong(part.getLastModified()); + part.setLastModified(PublicUtil.getUTCTime(timestamp)); + maxPartsList.add(part); + } + listPartsResponse.setPart(maxPartsList); + com.mesalab.hos.entity.getMultipartUpload.Part nextPart = partList.get(maxUploads + intPartNumberMarker); + int partNumber = nextPart.getPartNumber(); + listPartsResponse.setNextPartNumberMarker(partNumber + ""); + } else { + listPartsResponse.setTruncated(false); + for (int i = intPartNumberMarker; i < objectCount; i++) { + 
com.mesalab.hos.entity.getMultipartUpload.Part part = partList.get(i); + long timestamp = Long.parseLong(part.getLastModified()); + part.setLastModified(PublicUtil.getUTCTime(timestamp)); + maxPartsList.add(part); + } + listPartsResponse.setPart(maxPartsList); + } + resEntity.setReturnEntity(listPartsResponse); + } else { + resEntity.setCode("InvalidArgument"); + return resEntity; + } + } else { + if (maxUploads < objectCount) { + listPartsResponse.setTruncated(true); + for (int i = 0; i < maxUploads; i++) { + com.mesalab.hos.entity.getMultipartUpload.Part part = partList.get(i); + long timestamp = Long.parseLong(part.getLastModified()); + part.setLastModified(PublicUtil.getUTCTime(timestamp)); + maxPartsList.add(i, part); + } + listPartsResponse.setPart(maxPartsList); + com.mesalab.hos.entity.getMultipartUpload.Part nextPart = partList.get(maxUploads); + int nextPartNumber = nextPart.getPartNumber(); + listPartsResponse.setNextPartNumberMarker(nextPartNumber + ""); + } else { + listPartsResponse.setTruncated(false); + for (int i = 0; i < objectCount; i++) { + com.mesalab.hos.entity.getMultipartUpload.Part part = partList.get(i); + long timestamp = Long.parseLong(part.getLastModified()); + part.setLastModified(PublicUtil.getUTCTime(timestamp)); + maxPartsList.add(i, part); + } + listPartsResponse.setPart(maxPartsList); + } + resEntity.setReturnEntity(listPartsResponse); + } + resEntity.setIsSuccess(true); + } else { + resEntity.setCode("NoSuchUpload"); + return resEntity; + } + } catch (NumberFormatException e) { + resEntity.setCode("InvalidArgument"); + logger.error(e.toString()); + } catch (Exception e) { + resEntity.setCode("InternalError"); + logger.error(e.toString()); + } + return resEntity; + } + + @Override + public long getFileSize(String bucket, String filename) { + ResEntity resEntity =objectDao.getAllFileMeta(bucket,getRowKey(filename)); + long filesize = 0; + try { + if(resEntity.getIsSuccess()){ + + FileEntity fileEntity = (FileEntity)resEntity.getReturnEntity(); + if(fileEntity.getIsParent()< 1){ + + filesize=fileEntity.getFileSize(); + } + else{ + + String partFile=fileEntity.getPartName(); + String[] partsArray = partFile.split(","); + TreeSet<Integer> partsSet = new TreeSet<>(); + StringBuffer messageBuffer = new StringBuffer(); + for (String s : partsArray) { + partsSet.add(Integer.parseInt(s)); + } + for (int part : partsSet) { + ResEntity re= objectDao.getColumn(bucket,getRowKey(filename+"|"+part),"filesize"); + ColumnEntity ce = (ColumnEntity) re.getReturnEntity(); + long partsize = Long.parseLong(ce.getColumn()); + filesize+=partsize; + } + + } + + + } + return filesize; + }catch (Exception e){ + + logger.error(e.toString()); + return filesize; + + } + + } +} + diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/service/impl/ObjectSimpleServiceImpl.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/impl/ObjectSimpleServiceImpl.java new file mode 100644 index 0000000..78aa4c8 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/service/impl/ObjectSimpleServiceImpl.java @@ -0,0 +1,471 @@ +package com.mesalab.hos.service.impl; + +import com.mesalab.hos.config.HbaseProperties; +import com.mesalab.hos.dao.*; +import com.mesalab.hos.entity.FileEntity; +import com.mesalab.hos.entity.MultipartUpload.Part; +import com.mesalab.hos.entity.ResEntity; +import com.mesalab.hos.entity.ColumnEntity; +import com.mesalab.hos.entity.delete.DeleteError; +import com.mesalab.hos.entity.delete.DeleteObj; +import 
com.mesalab.hos.entity.delete.DeleteResponse; +import com.mesalab.hos.entity.getMultipartUpload.*; +import com.mesalab.hos.entity.partEntity; +import com.mesalab.hos.service.ObjectService; +import com.mesalab.hos.util.PublicUtil; +import com.mesalab.hos.util.ResponseMessageUtil; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.stereotype.Service; +import org.springframework.util.DigestUtils; +import org.springframework.util.StringUtils; + +import java.util.*; + +import static com.mesalab.hos.util.PublicUtil.getRowKey; + +@Service +@ConditionalOnProperty(value = "hos.simple", havingValue = "0") +public class ObjectSimpleServiceImpl implements ObjectService { + private final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(this.getClass()); + + @Autowired + private ObjectDao objectDao; + @Autowired + private PartFileDao partFileDao; + @Autowired + HbaseProperties hbaseProperties; + + @Override + public ResEntity putFile(FileEntity fileEntity) { + return objectDao.putFile(fileEntity); + } + + @Override + public ResEntity putFilePart(FileEntity fileEntity) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try { + Boolean isExit = partFileDao.ifExistRowKey(fileEntity.getUser() + ":" + hbaseProperties.getPartfile_index_table_prefix() + fileEntity.getBucket(), getRowKey(fileEntity.getFileName())); + if (isExit) { + String partFilename = fileEntity.getFileName() + "|" + fileEntity.getPartNumber(); + partFileDao.appendColumn(fileEntity.getUser() + ":" + hbaseProperties.getPartfile_index_table_prefix() + fileEntity.getBucket(), getRowKey(fileEntity.getFileName()), "partname", fileEntity.getPartNumber() + ","); + fileEntity.setFileName(partFilename); + resEntity = objectDao.putFile(fileEntity); + } else { + logger.error("NoSuchUpload ! 
not create multipart upload"); + resEntity.setCode("NoSuchUpload"); + } + } catch (Exception e) { + resEntity.setCode("InternalError"); + logger.error(e.toString()); + } + return resEntity; + } + + @Override + public ResEntity createMultipartUpload(FileEntity fileEntity) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try { + Boolean isPartFileExit = partFileDao.ifExistRowKey(fileEntity.getUser() + ":" + hbaseProperties.getPartfile_index_table_prefix() + fileEntity.getBucket(), getRowKey(fileEntity.getFileName())); + Boolean isDataFileExit = objectDao.ifExistRowKey(fileEntity.getUser() + ":" + fileEntity.getBucket(), getRowKey(fileEntity.getFileName())); + if (isPartFileExit || isDataFileExit) { + resEntity.setCode("InvalidObjectState"); + } else { + resEntity = partFileDao.putFile(fileEntity); + } + } catch (Exception e) { + resEntity.setCode("InternalError"); + logger.error(e.toString()); + } + return resEntity; + } + + @Override + public ResEntity appendFile(FileEntity fileEntity) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + String tableName = fileEntity.getUser() + ":" + fileEntity.getBucket(); + String partfileIndexTableName = fileEntity.getUser() + ":" + hbaseProperties.getPartfile_index_table_prefix() + fileEntity.getBucket(); + String fileName = fileEntity.getFileName(); + String partName = fileEntity.getPartName(); + fileEntity.setPartName(partName + ","); + + try { + if (objectDao.ifExistRowKey(partfileIndexTableName, getRowKey(fileName))) { + resEntity.setCode("ObjectNotAppendable"); + } else { + if (objectDao.ifExistRowKey(tableName, getRowKey(fileName))) { + resEntity = objectDao.getAllFileMeta(tableName, getRowKey(fileName)); + FileEntity fileMeta = (FileEntity) resEntity.getReturnEntity(); + if (fileMeta.getIsParent() < 1) { + resEntity.setIsSuccess(false); + resEntity.setCode("ObjectNotAppendable"); + return resEntity; + } + else{ + fileEntity.setFileName(fileName + "|" + partName); + resEntity = putFile(fileEntity); + resEntity = objectDao.appendColumn(tableName, getRowKey(fileName), "partname", fileEntity.getPartName()); + } + } + else{ + fileEntity.setFileName(fileName + "|" + partName); + resEntity = putFile(fileEntity); + fileEntity.setFileName(fileName); + fileEntity.setFileSize(0); + fileEntity.setIsParent(1); + resEntity = objectDao.putAppendFileMeta(fileEntity); + } + } + } catch (Exception e) { + resEntity.setCode("InternalError"); + logger.error(e.toString()); + } + return resEntity; + } + + @Override + public ResEntity getFile(String user, String bucket, String fileName) { + return objectDao.getFile(user + ":" + bucket, fileName); + } + + @Override + public ResEntity getColumn(String user, String bucket, String fileName, String column) { + return objectDao.getColumn(user + ":" + bucket, getRowKey(fileName), column); + } + + @Override + public ResEntity deleteFile(FileEntity fileEntity) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try { + if (objectDao.ifMultiFile(fileEntity.getUser() + ":" + fileEntity.getBucket(), getRowKey(fileEntity.getFileName()))) { + resEntity = objectDao.deleteMultipartUploadFile(fileEntity.getUser() + ":" + fileEntity.getBucket(), fileEntity.getFileName()); + } else { + resEntity = objectDao.deleteFile(fileEntity.getUser() + ":" + fileEntity.getBucket(), getRowKey(fileEntity.getFileName())); + } + } catch (Exception e) { + resEntity.setCode("InternalError"); + 
logger.error(e.toString()); + } + return resEntity; + } + + @Override + public ResEntity deleteFileList(String bucket, String user, List<DeleteObj> dr) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + List<DeleteError> errors = new ArrayList<>(); + try { + for (DeleteObj deleteObj : dr) { + FileEntity fileEntity = new FileEntity(); + fileEntity.setBucket(bucket); + fileEntity.setUser(user); + fileEntity.setFileName(deleteObj.getKey()); + resEntity = deleteFile(fileEntity); + if (!resEntity.getIsSuccess()) { + DeleteError deleteError = new DeleteError(); + deleteError.setKey(deleteObj.getKey()); + deleteError.setCode(resEntity.getCode()); + deleteError.setMessage(ResponseMessageUtil.getResponseMessage(resEntity.getCode())); + errors.add(deleteError); + } + } + if (errors.size() > 0) { + DeleteResponse deleteResponse = new DeleteResponse(); + deleteResponse.setErrors(errors); + resEntity.setReturnEntity(deleteResponse); + } else { + resEntity.setIsSuccess(true); + } + } catch (Exception e) { + resEntity.setCode("InternalError"); + logger.error(e.toString()); + } + return resEntity; + } + + @Override + public ResEntity getObjectList(String user, String bucket, int maxKeys, String prefix, String starttime, String endtime) { + return null; + } + + @Override + public ResEntity abortMultipartUpload(String user, String bucket, String filename) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try { + ResEntity ce = partFileDao.getFile(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, getRowKey(filename)); + if (ce.getIsSuccess()) { + partEntity pe = (partEntity) ce.getReturnEntity(); + if (!"".equals(pe.getFileName()) && pe.getFileName() != null) { + String partFile = pe.getPartName(); + String[] partArray = partFile.split(","); + for (String part : partArray) { + if (!"".equals(part)) { + resEntity = objectDao.deleteFile(user + ":" + bucket, getRowKey(filename + "|" + part)); + if (!resEntity.getIsSuccess()) { + break; + } + } + } + resEntity = partFileDao.deleteFile(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, getRowKey(filename)); + } + } else { + resEntity.setCode("NoSuchKey"); + logger.error("interrupt Multipart delete ! 
get partfile error"); + } + resEntity.setIsSuccess(true); + } catch (Exception e) { + resEntity.setCode("InternalError"); + logger.error(e.toString()); + } + return resEntity; + } + + @Override + public ResEntity completeMultipartUpload(String bucket, String user, String filename, List<Part> parts, long timestamp) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + long fileSize = 0; + int i = 1; + try { + ResEntity ce = partFileDao.getFile(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, getRowKey(filename)); + if (ce.getIsSuccess()) { + partEntity pEntity = (partEntity) ce.getReturnEntity(); + String partFile = pEntity.getPartName(); + String[] partArray = partFile.split(","); + Map partNames = new HashMap<>(); + for (String part : partArray) { + if (!"".equals(part)) { + partNames.put(getRowKey(filename + "|" + part), ""); + } + } + if (partNames.size() != parts.size()) { + logger.error("parts size not equal exist partfile size"); + resEntity.setCode("InvalidPart"); + } else { + StringBuilder partFileName = new StringBuilder(); + for (Part part : parts) { + String key = part.getETag(); + String rowKey = getRowKey(filename + "|" + i); + if (key.equals(rowKey) && partNames.containsKey(key)) { + ResEntity res = objectDao.getColumn(user + ":" + bucket, key, "filesize"); + if (!res.getIsSuccess()) { + logger.error("get part key error"); + resEntity.setCode("InvalidPart"); + return resEntity; + } + partFileName.append(i).append(","); + ColumnEntity columnEntity = (ColumnEntity) res.getReturnEntity(); + long partSize = Long.parseLong(columnEntity.getColumn()); + fileSize = fileSize + partSize; + } else { + logger.error("partkey not equal partfile"); + resEntity.setCode("InvalidPart"); + return resEntity; + } + i++; + } + FileEntity fileEntity = new FileEntity(); + fileEntity.setFileName(pEntity.getFileName()); + fileEntity.setUser(user); + fileEntity.setBucket(bucket); + fileEntity.setMessage(pEntity.getMessage()); + fileEntity.setTimestamp(timestamp); + fileEntity.setFileSize(fileSize); + fileEntity.setIsParent(1); + fileEntity.setFileType(pEntity.getFileType()); + fileEntity.setPartName(partFileName.toString()); + fileEntity.setIndexFileNameKey(""); + fileEntity.setIndexTimeKey(""); + objectDao.putAllFileMeta(fileEntity); + partFileDao.deleteFile(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, pEntity.getUploadId()); + resEntity.setIsSuccess(true); + } + } + } catch (Exception e) { + resEntity.setCode("InternalError"); + logger.error(e.toString()); + } + return resEntity; + } + + @Override + public ResEntity getMultipartUploadObjects(String bucket, int maxUploads, String user, String uploadsPrefix) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try { + ListMultipartUploadsResponse listMultipartUploadsResponse = new ListMultipartUploadsResponse(); + listMultipartUploadsResponse.setTruncated(false); + listMultipartUploadsResponse.setBucket(bucket); + listMultipartUploadsResponse.setMaxUploads(maxUploads); + if (!"".equals(uploadsPrefix)) { + resEntity = partFileDao.getMultipartUploadObjectsByPrefix(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, resEntity, uploadsPrefix); + listMultipartUploadsResponse.setPrefix(uploadsPrefix); + } else { + resEntity = partFileDao.getMultipartUploadObjects(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, resEntity); + } + Upload upload; + List<Upload> 
maxUploadsList = new ArrayList<>(); + List<Upload> allUploadList = (ArrayList<Upload>) resEntity.getReturnEntity(); + if (allUploadList.size() > 0) { + allUploadList.sort(new Comparator<Upload>() { + @Override + public int compare(Upload o1, Upload o2) { + return Long.valueOf(o1.getInitiated()).compareTo(Long.valueOf(o2.getInitiated())); + } + }); + if (maxUploads < allUploadList.size()) { + listMultipartUploadsResponse.setTruncated(true); + for (int i = 0; i < maxUploads; i++) { + upload = allUploadList.get(i); + upload.setStorageClass("STANDARD"); + upload.setInitiator(new Initiator(user, DigestUtils.md5DigestAsHex(user.getBytes()))); + upload.setOwner(new Owner(user, DigestUtils.md5DigestAsHex(user.getBytes()))); + long timestamp = Long.parseLong(upload.getInitiated()); + upload.setInitiated(PublicUtil.getUTCTime(timestamp)); + maxUploadsList.add(i, upload); + } + listMultipartUploadsResponse.setUpload(maxUploadsList); + Upload nextUpload = allUploadList.get(maxUploads); + String nextKeyMarker = nextUpload.getKey(); + String nextUploadIdMarker = nextUpload.getUploadId(); + listMultipartUploadsResponse.setNextKeyMarker(nextKeyMarker); + listMultipartUploadsResponse.setNextUploadIdMarker(nextUploadIdMarker); + } else { + listMultipartUploadsResponse.setTruncated(false); + for (int i = 0; i < allUploadList.size(); i++) { + upload = allUploadList.get(i); + upload.setStorageClass("STANDARD"); + upload.setInitiator(new Initiator(user, DigestUtils.md5DigestAsHex(user.getBytes()))); + upload.setOwner(new Owner(user, DigestUtils.md5DigestAsHex(user.getBytes()))); + long timestamp = Long.parseLong(upload.getInitiated()); + upload.setInitiated(PublicUtil.getUTCTime(timestamp)); + allUploadList.set(i, upload); + } + listMultipartUploadsResponse.setUpload(allUploadList); + } + } + resEntity.setReturnEntity(listMultipartUploadsResponse); + resEntity.setIsSuccess(true); + } catch (Exception e) { + resEntity.setCode("InternalError"); + logger.error(e.toString()); + } + return resEntity; + } + + @Override + public ResEntity getObjectMultipartUploadParts(String bucket, int maxUploads, String user, String fileName, String partNumberMarker, String uploadId) { + ResEntity resEntity = new ResEntity(); + resEntity.setCode(""); + resEntity.setIsSuccess(false); + try { + ListPartsResponse listPartsResponse = new ListPartsResponse(); + listPartsResponse.setBucket(bucket); + listPartsResponse.setKey(fileName); + listPartsResponse.setInitiator(new Initiator(user, DigestUtils.md5DigestAsHex(user.getBytes()))); + listPartsResponse.setOwner(new Owner(user, DigestUtils.md5DigestAsHex(user.getBytes()))); + listPartsResponse.setStorageClass("STANDARD"); + listPartsResponse.setTruncated(false); + listPartsResponse.setMaxParts(maxUploads); + if (partFileDao.ifExistRowKey(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, PublicUtil.getRowKey(fileName)) && partFileDao.ifExistRowKey(user + ":" + hbaseProperties.getPartfile_index_table_prefix() + bucket, uploadId)) { + resEntity.setReturnEntity(listPartsResponse); + resEntity = partFileDao.getObjectParts(user, bucket, fileName, resEntity); + listPartsResponse = (ListPartsResponse) resEntity.getReturnEntity(); + List<com.mesalab.hos.entity.getMultipartUpload.Part> partList = listPartsResponse.getPart(); + partList.sort(new Comparator<com.mesalab.hos.entity.getMultipartUpload.Part>() { + @Override + public int compare(com.mesalab.hos.entity.getMultipartUpload.Part o1, com.mesalab.hos.entity.getMultipartUpload.Part o2) { + return o1.getPartNumber() - 
(o2.getPartNumber()); + } + }); + int objectCount = partList.size(); + List<com.mesalab.hos.entity.getMultipartUpload.Part> maxPartsList = new ArrayList<>(); + if (!"".equals(partNumberMarker)) { + int intPartNumberMarker = Integer.parseInt(partNumberMarker); + if (intPartNumberMarker >= 0) { + listPartsResponse.setPartNumberMarker(partNumberMarker); + if (maxUploads + intPartNumberMarker < objectCount) { + listPartsResponse.setTruncated(true); + for (int i = intPartNumberMarker; i < maxUploads + intPartNumberMarker; i++) { + com.mesalab.hos.entity.getMultipartUpload.Part part = partList.get(i); + long timestamp = Long.parseLong(part.getLastModified()); + part.setLastModified(PublicUtil.getUTCTime(timestamp)); + maxPartsList.add(part); + } + listPartsResponse.setPart(maxPartsList); + com.mesalab.hos.entity.getMultipartUpload.Part nextPart = partList.get(maxUploads + intPartNumberMarker); + int partNumber = nextPart.getPartNumber(); + listPartsResponse.setNextPartNumberMarker(partNumber + ""); + } else { + listPartsResponse.setTruncated(false); + for (int i = intPartNumberMarker; i < objectCount; i++) { + com.mesalab.hos.entity.getMultipartUpload.Part part = partList.get(i); + long timestamp = Long.parseLong(part.getLastModified()); + part.setLastModified(PublicUtil.getUTCTime(timestamp)); + maxPartsList.add(part); + } + listPartsResponse.setPart(maxPartsList); + } + resEntity.setReturnEntity(listPartsResponse); + } else { + resEntity.setCode("InvalidArgument"); + return resEntity; + } + } else { + if (maxUploads < objectCount) { + listPartsResponse.setTruncated(true); + for (int i = 0; i < maxUploads; i++) { + com.mesalab.hos.entity.getMultipartUpload.Part part = partList.get(i); + long timestamp = Long.parseLong(part.getLastModified()); + part.setLastModified(PublicUtil.getUTCTime(timestamp)); + maxPartsList.add(i, part); + } + listPartsResponse.setPart(maxPartsList); + com.mesalab.hos.entity.getMultipartUpload.Part nextPart = partList.get(maxUploads); + int nextPartNumber = nextPart.getPartNumber(); + listPartsResponse.setNextPartNumberMarker(nextPartNumber + ""); + } else { + listPartsResponse.setTruncated(false); + for (int i = 0; i < objectCount; i++) { + com.mesalab.hos.entity.getMultipartUpload.Part part = partList.get(i); + long timestamp = Long.parseLong(part.getLastModified()); + part.setLastModified(PublicUtil.getUTCTime(timestamp)); + maxPartsList.add(i, part); + } + listPartsResponse.setPart(maxPartsList); + } + resEntity.setReturnEntity(listPartsResponse); + } + resEntity.setIsSuccess(true); + } else { + resEntity.setCode("NoSuchUpload"); + return resEntity; + } + } catch (NumberFormatException e) { + resEntity.setCode("InvalidArgument"); + logger.error(e.toString()); + } catch (Exception e) { + resEntity.setCode("InternalError"); + logger.error(e.toString()); + } + return resEntity; + } + + @Override + public long getFileSize(String bucket, String filename) { + return 0; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/util/AwsUtil.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/util/AwsUtil.java new file mode 100644 index 0000000..6c1110e --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/util/AwsUtil.java @@ -0,0 +1,151 @@ +package com.mesalab.hos.util; + +import com.amazonaws.util.BinaryUtils; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; +import javax.servlet.http.HttpServletRequest; +import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; +import 
java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.security.SignatureException; +import java.util.Map; +import java.util.TreeMap; + +/** + * Created by wk1 on 2020/5/26. + */ +public class AwsUtil { + private static final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(AwsUtil.class); + + private final static char[] hexArray = "0123456789ABCDEF".toCharArray(); + private static final String HMAC_SHA256_ALGORITHM = "HmacSHA256"; + private static final String HMAC_SHA1_ALGORITHM = "HmacSHA1"; + + public static String calculateSignatureV4(HttpServletRequest request, String signHeaders, String credential, String accessKey, String secretKey) throws Exception { + String[] signArray = credential.split("/"); + StringBuilder canonicalURL = new StringBuilder(""); + /* Step 1.1 Start with the HTTP method (GET, PUT, POST, etc.), followed by a newline. */ + canonicalURL.append(request.getMethod()).append("\n"); + /* Step 1.2 Append the request URI, followed by a newline. */ + canonicalURL.append(request.getRequestURI()).append("\n"); + Map<String, String[]> paraMap = new TreeMap<String, String[]>(request.getParameterMap()); + if (paraMap.size() == 0) { + canonicalURL.append("\n"); + } else { + StringBuilder queryString = new StringBuilder(""); + for (Map.Entry<String, String[]> entry : paraMap.entrySet()) { + queryString.append(entry.getKey()).append("=").append(entry.getValue()[0]).append("&"); + } + String paramString = queryString.toString(); + paramString = paramString.substring(0, paramString.length() - 1); + canonicalURL.append(paramString).append("\n"); + } + String[] shs = signHeaders.split(";"); + + for (String sh : shs) { + request.getHeader(sh); + canonicalURL.append(sh).append(":").append(request.getHeader(sh)).append("\n"); + } + + canonicalURL.append("\n"); + canonicalURL.append(signHeaders).append("\n"); + + String hashcode = request.getHeader("x-amz-content-sha256"); + if (hashcode == null) { + hashcode = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; + } + canonicalURL.append(hashcode); + + byte[] kSigning = ("AWS4" + secretKey).getBytes(StandardCharsets.UTF_8); + + StringBuilder sign = new StringBuilder(""); + for (int i = 1; i < signArray.length; i++) { + kSigning = HmacSHA256(kSigning, signArray[i]); + if (i == signArray.length - 1) { + sign.append(signArray[i]); + } else { + sign.append(signArray[i]).append("/"); + } + } + String StringToSign = "AWS4-HMAC-SHA256" + "\n" + request.getHeader("x-amz-date") + "\n" + sign + "\n" + generateHex(canonicalURL.toString()); + return bytesToHex(HmacSHA256(kSigning, StringToSign)); + } + + + public static String calculateSignatureV2(HttpServletRequest request, String SecretKey) throws Exception { + StringBuilder stringToSign = new StringBuilder(""); + stringToSign.append(request.getMethod()).append("\n"); + String md5 = request.getHeader("Content-MD5"); + if (md5 == null) { + stringToSign.append("\n"); + } else { + stringToSign.append(md5).append("\n"); + } + String ctype = request.getHeader("Content-Type"); + if (ctype == null) { + stringToSign.append("\n"); + } else { + stringToSign.append(ctype).append("\n"); + } + String date = request.getHeader("Date"); + stringToSign.append(date).append("\n"); + stringToSign.append(request.getRequestURI()); + + String queryString = request.getQueryString(); + + if (queryString != null && !queryString.contains("=")) { + stringToSign.append(queryString); + } + return calculateV2HMAC(stringToSign.toString(), SecretKey); + } + + public static byte[] HmacSHA256(byte[] key, String data) throws Exception { +
String algorithm = "HmacSHA256"; + Mac mac = Mac.getInstance(algorithm); + mac.init(new SecretKeySpec(key, algorithm)); + return mac.doFinal(data.getBytes(StandardCharsets.UTF_8)); + } + + public static String bytesToHex(byte[] bytes) { + char[] hexChars = new char[bytes.length * 2]; + for (int j = 0; j < bytes.length; j++) { + int v = bytes[j] & 0xFF; + hexChars[j * 2] = hexArray[v >>> 4]; + hexChars[j * 2 + 1] = hexArray[v & 0x0F]; + } + return new String(hexChars).toLowerCase(); + } + + private static String generateHex(String data) throws NoSuchAlgorithmException, UnsupportedEncodingException { + MessageDigest messageDigest; + try { + messageDigest = MessageDigest.getInstance("SHA-256"); + messageDigest.update(data.getBytes(StandardCharsets.UTF_8)); + byte[] digest = messageDigest.digest(); + return String.format("%064x", new java.math.BigInteger(1, digest)); + } catch (NoSuchAlgorithmException e) { + logger.error(e.toString()); + } + return null; + } + + private static String calculateV2HMAC(String data, String key) throws java.security.SignatureException { + String result; + try { + // Get an HMAC-SHA1 key from the raw key bytes (AWS signature version 2 uses HMAC-SHA1). + SecretKeySpec signingKey = new SecretKeySpec(key.getBytes(StandardCharsets.UTF_8), HMAC_SHA1_ALGORITHM); + // Get an HMAC-SHA1 Mac instance and initialize it with the signing key. + Mac mac = Mac.getInstance(HMAC_SHA1_ALGORITHM); + mac.init(signingKey); + // Compute the hmac on input data bytes. + byte[] rawHmac = mac.doFinal(data.getBytes(StandardCharsets.UTF_8)); + // Base64-encode the hmac by using the utility in the SDK + result = BinaryUtils.toBase64(rawHmac); + } catch (Exception e) { + throw new SignatureException("Failed to generate HMAC : " + e.getMessage()); + } + return result; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/util/ContentTypeUtil.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/util/ContentTypeUtil.java new file mode 100644 index 0000000..2349a70 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/util/ContentTypeUtil.java @@ -0,0 +1,382 @@ +package com.mesalab.hos.util; + +import java.util.HashMap; +import java.util.Map; + +/** + * Created by wk on 2020/5/8.
+ */ +public class ContentTypeUtil { + + + private static Map<String, String> map = new HashMap<String, String>(); + + static { + map.put("anno", "application/octet-stream"); + map.put("tif", "image/tiff"); + map.put("0.001", "application/x-001"); + map.put("0.301", "application/x-301"); + map.put("0.323", "text/h323"); + map.put("0.906", "application/x-906"); + map.put("0.907", "drawing/907"); + map.put("a11", "application/x-a11"); + map.put("acp", "audio/x-mei-aac"); + map.put("ai", "application/postscript"); + map.put("aif", "audio/aiff"); + map.put("aifc", "audio/aiff"); + map.put("aiff", "audio/aiff"); + map.put("anv", "application/x-anv"); + map.put("asa", "text/asa"); + map.put("asf", "video/x-ms-asf"); + map.put("asp", "text/asp"); + map.put("asx", "video/x-ms-asf"); + map.put("au", "audio/basic"); + map.put("avi", "video/avi"); + map.put("awf", "application/vnd.adobe.workflow"); + map.put("biz", "text/xml"); + map.put("bmp", "application/x-bmp"); + map.put("bot", "application/x-bot"); + map.put("c4t", "application/x-c4t"); + map.put("c90", "application/x-c90"); + map.put("cal", "application/x-cals"); + map.put("cat", "application/vnd.ms-pki.seccat"); + map.put("cdf", "application/x-netcdf"); + map.put("cdr", "application/x-cdr"); + map.put("cel", "application/x-cel"); + map.put("cer", "application/x-x509-ca-cert"); + map.put("cg4", "application/x-g4"); + map.put("cgm", "application/x-cgm"); + map.put("cit", "application/x-cit"); + map.put("class", "java/"); + map.put("cml", "text/xml"); + map.put("cmp", "application/x-cmp"); + map.put("cmx", "application/x-cmx"); + map.put("cot", "application/x-cot"); + map.put("crl", "application/pkix-crl"); + map.put("crt", "application/x-x509-ca-cert"); + map.put("csi", "application/x-csi"); + map.put("css", "text/css"); + map.put("cut", "application/x-cut"); + map.put("dbf", "application/x-dbf"); + map.put("dbm", "application/x-dbm"); + map.put("dbx", "application/x-dbx"); + map.put("dcd", "text/xml"); + map.put("dcx", "application/x-dcx"); + map.put("der", "application/x-x509-ca-cert"); + map.put("dgn", "application/x-dgn"); + map.put("dib", "application/x-dib"); + map.put("dll", "application/x-msdownload"); + map.put("doc", "application/msword"); + map.put("docx", "application/vnd.openxmlformats-officedocument.wordprocessingml.document"); + map.put("dot", "application/msword"); + + map.put("drw", "application/x-drw"); + map.put("dtd", "text/xml"); + map.put("dwf", "Model/vnd.dwf"); + //map.put("dwf","application/x-dwf"); + map.put("dwg", "application/x-dwg"); + map.put("dxb", "application/x-dxb"); + map.put("dxf", "application/x-dxf"); + map.put("edn", "application/vnd.adobe.edn"); + map.put("emf", "application/x-emf"); + map.put("eml", "message/rfc822"); + map.put("ent", "text/xml"); + map.put("epi", "application/x-epi"); + // map.put("eps","application/x-ps"); + map.put("eps", "application/postscript"); + map.put("etd", "application/x-ebx"); + map.put("exe", "application/x-msdownload"); + map.put("fax", "image/fax"); + map.put("fdf", "application/vnd.fdf"); + map.put("fif", "application/fractals"); + map.put("fo", "text/xml"); + map.put("frm", "application/x-frm"); + map.put("g4", "application/x-g4"); + map.put("gbr", "application/x-gbr"); + map.put("", "application/x-"); + map.put("gif", "image/gif"); + map.put("gl2", "application/x-gl2"); + map.put("gp4", "application/x-gp4"); + map.put("hgl", "application/x-hgl"); + map.put("hmr", "application/x-hmr"); + map.put("hpg", "application/x-hpgl"); + map.put("hpl", "application/x-hpl"); + 
map.put("hqx", "application/mac-binhex40"); + map.put("hrf", "application/x-hrf"); + map.put("hta", "application/hta"); + map.put("htc", "text/x-component"); + map.put("htm", "text/html"); + map.put("html", "text/html"); + map.put("htt", "text/webviewhtml"); + map.put("htx", "text/html"); + map.put("icb", "application/x-icb"); + map.put("ico", "image/x-icon"); + //map.put("ico","application/x-ico"); + map.put("iff", "application/x-iff"); + map.put("ig4", "application/x-g4"); + map.put("igs", "application/x-igs"); + map.put("iii", "application/x-iphone"); + map.put("img", "application/x-img"); + map.put("ins", "application/x-internet-signup"); + map.put("isp", "application/x-internet-signup"); + map.put("IVF", "video/x-ivf"); + map.put("java", "java/*"); + map.put("jfif", "image/jpeg"); + map.put("jpe", "image/jpeg"); + map.put("jpe", "application/x-jpe"); + map.put("jpeg", "image/jpeg"); + map.put("jpg", "image/jpeg"); + //map.put("jpg","application/x-jpg"); + map.put("js", "application/x-javascript"); + map.put("jsp", "text/html"); + map.put("la1", "audio/x-liquid-file"); + map.put("lar", "application/x-laplayer-reg"); + map.put("latex", "application/x-latex"); + map.put("lavs", "audio/x-liquid-secure"); + map.put("lbm", "application/x-lbm"); + map.put("lmsff", "audio/x-la-lms"); + map.put("ls", "application/x-javascript"); + map.put("ltr", "application/x-ltr"); + map.put("m1v", "video/x-mpeg"); + map.put("m2v", "video/x-mpeg"); + map.put("m3u", "audio/mpegurl"); + map.put("m4e", "video/mpeg4"); + map.put("mac", "application/x-mac"); + map.put("man", "application/x-troff-man"); + map.put("math", "text/xml"); + //map.put("mdb","application/msaccess"); + map.put("mdb", "application/x-mdb"); + map.put("mfp", "application/x-shockwave-flash"); + map.put("mht", "message/rfc822"); + map.put("mhtml", "message/rfc822"); + map.put("mi", "application/x-mi"); + map.put("mid", "audio/mid"); + map.put("midi", "audio/mid"); + map.put("mil", "application/x-mil"); + map.put("mml", "text/xml"); + map.put("mnd", "audio/x-musicnet-download"); + map.put("mns", "audio/x-musicnet-stream"); + map.put("mocha", "application/x-javascript"); + map.put("movie", "video/x-sgi-movie"); + map.put("mp1", "audio/mp1"); + map.put("mp2", "audio/mp2"); + map.put("mp2v", "video/mpeg"); + map.put("mp3", "audio/mp3"); + map.put("mp4", "video/mpeg4"); + map.put("mpa", "video/x-mpg"); + map.put("mpd", "application/vnd.ms-project"); + map.put("mpe", "video/x-mpeg"); + map.put("mpeg", "video/mpg"); + map.put("mpg", "video/mpg"); + map.put("mpga", "audio/rn-mpeg"); + map.put("mpp", "application/vnd.ms-project"); + map.put("mps", "video/x-mpeg"); + map.put("mpt", "application/vnd.ms-project"); + map.put("mpv", "video/mpg"); + map.put("mpv2", "video/mpeg"); + map.put("mpw", "application/vnd.ms-project"); + map.put("mpx", "application/vnd.ms-project"); + map.put("mtx", "text/xml"); + map.put("mxp", "application/x-mmxp"); + map.put("net", "image/pnetvue"); + map.put("nrf", "application/x-nrf"); + map.put("nws", "message/rfc822"); + map.put("odc", "text/x-ms-odc"); + map.put("out", "application/x-out"); + map.put("p10", "application/pkcs10"); + map.put("p12", "application/x-pkcs12"); + map.put("p7b", "application/x-pkcs7-certificates"); + map.put("p7c", "application/pkcs7-mime"); + map.put("p7m", "application/pkcs7-mime"); + map.put("p7r", "application/x-pkcs7-certreqresp"); + map.put("p7s", "application/pkcs7-signature"); + map.put("pc5", "application/x-pc5"); + map.put("pci", "application/x-pci"); + map.put("pcl", "application/x-pcl"); 
+ map.put("pcx", "application/x-pcx"); + map.put("pdf", "application/pdf"); + map.put("pdf", "application/pdf"); + map.put("pdx", "application/vnd.adobe.pdx"); + map.put("pfx", "application/x-pkcs12"); + map.put("pgl", "application/x-pgl"); + map.put("pic", "application/x-pic"); + map.put("pko", "application/vnd.ms-pki.pko"); + map.put("pl", "application/x-perl"); + map.put("plg", "text/html"); + map.put("pls", "audio/scpls"); + map.put("plt", "application/x-plt"); + map.put("png", "image/png"); + // map.put("png","application/x-png"); + map.put("pot", "application/vnd.ms-powerpoint"); + map.put("ppa", "application/vnd.ms-powerpoint"); + map.put("ppm", "application/x-ppm"); + map.put("pps", "application/vnd.ms-powerpoint"); + map.put("ppt", "application/vnd.ms-powerpoint"); + // map.put("ppt","application/x-ppt"); + map.put("pr", "application/x-pr"); + map.put("prf", "application/pics-rules"); + map.put("prn", "application/x-prn"); + map.put("prt", "application/x-prt"); + map.put("ps", "application/x-ps"); + map.put("ps", "application/postscript"); + map.put("ptn", "application/x-ptn"); + map.put("pwz", "application/vnd.ms-powerpoint"); + map.put("r3t", "text/vnd.rn-realtext3d"); + map.put("ra", "audio/vnd.rn-realaudio"); + map.put("ram", "audio/x-pn-realaudio"); + map.put("ras", "application/x-ras"); + map.put("rat", "application/rat-file"); + map.put("rdf", "text/xml"); + map.put("rec", "application/vnd.rn-recording"); + map.put("red", "application/x-red"); + map.put("rgb", "application/x-rgb"); + map.put("rjs", "application/vnd.rn-realsystem-rjs"); + map.put("rjt", "application/vnd.rn-realsystem-rjt"); + map.put("rlc", "application/x-rlc"); + map.put("rle", "application/x-rle"); + map.put("rm", "application/vnd.rn-realmedia"); + map.put("rmf", "application/vnd.adobe.rmf"); + map.put("rmi", "audio/mid"); + map.put("rmj", "application/vnd.rn-realsystem-rmj"); + map.put("rmm", "audio/x-pn-realaudio"); + map.put("rmp", "application/vnd.rn-rn_music_package"); + map.put("rms", "application/vnd.rn-realmedia-secure"); + map.put("rmvb", "application/vnd.rn-realmedia-vbr"); + map.put("rmx", "application/vnd.rn-realsystem-rmx"); + map.put("rnx", "application/vnd.rn-realplayer"); + map.put("rp", "image/vnd.rn-realpix"); + map.put("rpm", "audio/x-pn-realaudio-plugin"); + map.put("rsml", "application/vnd.rn-rsml"); + map.put("rt", "text/vnd.rn-realtext"); + map.put("rtf", "application/msword"); + //map.put("rtf","application/x-rtf"); + map.put("rv", "video/vnd.rn-realvideo"); + map.put("sam", "application/x-sam"); + map.put("sat", "application/x-sat"); + map.put("sdp", "application/sdp"); + map.put("sdw", "application/x-sdw"); + map.put("sit", "application/x-stuffit"); + map.put("slb", "application/x-slb"); + map.put("sld", "application/x-sld"); + map.put("slk", "drawing/x-slk"); + map.put("smi", "application/smil"); + map.put("smil", "application/smil"); + map.put("smk", "application/x-smk"); + map.put("snd", "audio/basic"); + map.put("sol", "text/plain"); + map.put("sor", "text/plain"); + map.put("spc", "application/x-pkcs7-certificates"); + map.put("spl", "application/futuresplash"); + map.put("spp", "text/xml"); + map.put("ssm", "application/streamingmedia"); + map.put("sst", "application/vnd.ms-pki.certstore"); + map.put("stl", "application/vnd.ms-pki.stl"); + map.put("stm", "text/html"); + map.put("sty", "application/x-sty"); + map.put("svg", "text/xml"); + map.put("swf", "application/x-shockwave-flash"); + map.put("tdf", "application/x-tdf"); + map.put("tg4", "application/x-tg4"); + 
map.put("tga", "application/x-tga"); + map.put("tif", "image/tiff"); + map.put("tif", "application/x-tif"); + map.put("tiff", "image/tiff"); + map.put("tld", "text/xml"); + map.put("top", "drawing/x-top"); + map.put("torrent", "application/x-bittorrent"); + map.put("tsd", "text/xml"); + map.put("txt", "text/plain"); + map.put("uin", "application/x-icq"); + map.put("uls", "text/iuls"); + map.put("vcf", "text/x-vcard"); + map.put("vda", "application/x-vda"); + map.put("vdx", "application/vnd.visio"); + map.put("vml", "text/xml"); + map.put("vpg", "application/x-vpeg005"); + map.put("vsd", "application/vnd.visio"); + map.put("vsd", "application/x-vsd"); + map.put("vss", "application/vnd.visio"); + map.put("vst", "application/vnd.visio"); + map.put("vst", "application/x-vst"); + map.put("vsw", "application/vnd.visio"); + map.put("vsx", "application/vnd.visio"); + map.put("vtx", "application/vnd.visio"); + map.put("vxml", "text/xml"); + map.put("wav", "audio/wav"); + map.put("wax", "audio/x-ms-wax"); + map.put("wb1", "application/x-wb1"); + map.put("wb2", "application/x-wb2"); + map.put("wb3", "application/x-wb3"); + map.put("wbmp", "image/vnd.wap.wbmp"); + map.put("wiz", "application/msword"); + map.put("wk3", "application/x-wk3"); + map.put("wk4", "application/x-wk4"); + map.put("wkq", "application/x-wkq"); + map.put("wks", "application/x-wks"); + map.put("wm", "video/x-ms-wm"); + map.put("wma", "audio/x-ms-wma"); + map.put("wmd", "application/x-ms-wmd"); + map.put("wmf", "application/x-wmf"); + map.put("wml", "text/vnd.wap.wml"); + map.put("wmv", "video/x-ms-wmv"); + map.put("wmx", "video/x-ms-wmx"); + map.put("wmz", "application/x-ms-wmz"); + map.put("wp6", "application/x-wp6"); + map.put("wpd", "application/x-wpd"); + map.put("wpg", "application/x-wpg"); + map.put("wpl", "application/vnd.ms-wpl"); + map.put("wq1", "application/x-wq1"); + map.put("wr1", "application/x-wr1"); + map.put("wri", "application/x-wri"); + map.put("wrk", "application/x-wrk"); + map.put("ws", "application/x-ws"); + map.put("ws2", "application/x-ws"); + map.put("wsc", "text/scriptlet"); + map.put("wsdl", "text/xml"); + map.put("wvx", "video/x-ms-wvx"); + map.put("xdp", "application/vnd.adobe.xdp"); + map.put("xdr", "text/xml"); + map.put("xfd", "application/vnd.adobe.xfd"); + map.put("xfdf", "application/vnd.adobe.xfdf"); + map.put("xhtml", "text/html"); + map.put("xls", "application/vnd.ms-excel"); + // map.put("xls","application/x-xls"); + map.put("xlw", "application/x-xlw"); + map.put("xml", "text/xml"); + map.put("xpl", "audio/scpls"); + map.put("xq", "text/xml"); + map.put("xql", "text/xml"); + map.put("xquery", "text/xml"); + map.put("xsd", "text/xml"); + map.put("xsl", "text/xml"); + map.put("xslt", "text/xml"); + map.put("xwd", "application/x-xwd"); + map.put("x_b", "application/x-x_b"); + map.put("sis", "application/vnd.symbian.install"); + map.put("sisx", "application/vnd.symbian.install"); + map.put("x_t", "application/x-x_t"); + map.put("ipa", "application/vnd.iphone"); + map.put("apk", "application/vnd.android.package-archive"); + map.put("xap", "application/x-silverlight-app"); + } + + /** + * @return + */ + public static String getContentType(String filename) { + + String result = "application/octet-stream"; + + if (filename != null) { + String fileExt = ""; + if (filename.lastIndexOf(".") != -1 && filename.lastIndexOf(".") != 0) { + fileExt = filename.substring(filename.lastIndexOf(".") + 1); + } + if (map.containsKey(fileExt)) { + result = map.get(fileExt); + } + } + return result; + } + +} diff 
--git a/galaxy-hos-service/src/main/java/com/mesalab/hos/util/PublicUtil.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/util/PublicUtil.java new file mode 100644 index 0000000..abcd293 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/util/PublicUtil.java @@ -0,0 +1,81 @@ +package com.mesalab.hos.util; + +import java.math.BigInteger; +import java.security.MessageDigest; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.List; +import java.util.TimeZone; +import java.util.UUID; + +public class PublicUtil { + private static final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(PublicUtil.class); + + public static String getUUID() { + return UUID.randomUUID().toString().replace("-", "").toLowerCase(); + } + + public static String stringTonextAscii(String value) { + StringBuilder sbu = new StringBuilder(); + char[] chars = value.toCharArray(); + for (int i = 0; i < chars.length; i++) { + if (i != chars.length - 1) { + sbu.append((int) chars[i]).append(","); + } else { + sbu.append((int) chars[i] + 1); + } + } + StringBuilder ats = new StringBuilder(); + String[] atsChars = sbu.toString().split(","); + for (int i = 0; i < chars.length; i++) { + ats.append((char) Integer.parseInt(atsChars[i])); + } + return ats.toString(); + } + + public static String getRowKey(String filename) { + String newFilename; + try { + MessageDigest md = MessageDigest.getInstance("MD5"); + // compute the MD5 digest of the file name + md.update(filename.getBytes()); + newFilename = new BigInteger(1, md.digest()).toString(16).substring(8, 24); + } catch (Exception e) { + newFilename = filename; + logger.error(e.toString()); + } + return newFilename; + } + + public static byte[] byteMerger(byte[] bt1, byte[] bt2){ + byte[] bt3 = new byte[bt1.length+bt2.length]; + System.arraycopy(bt1, 0, bt3, 0, bt1.length); + System.arraycopy(bt2, 0, bt3, bt1.length, bt2.length); + return bt3; + } + + public static String getUTCTime(long timeStamp){ + Date date = new Date(timeStamp); + TimeZone tz = TimeZone.getTimeZone("UTC"); + SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); + df.setTimeZone(tz); + return df.format(date); + } + + public static byte[][] stringToByteArray(String regions){ + String[] regionStartKey = regions.split(","); + byte[][] regionParts = new byte[regionStartKey.length][1]; + for (int i = 0; i < regionStartKey.length; i++) { + regionParts[i][0] = regionStartKey[i].getBytes()[0]; + } + return regionParts; + } + + public static String getRandomHead(List<String> list){ + int max = list.size(); + int ran = (int) (Math.random() * max); + return list.get(ran); + } + + +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/util/ResponseMessageUtil.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/util/ResponseMessageUtil.java new file mode 100644 index 0000000..e82b4a2 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/util/ResponseMessageUtil.java @@ -0,0 +1,139 @@ +package com.mesalab.hos.util; + +import com.mesalab.hos.entity.ErrorResponse; + +import javax.servlet.http.HttpServletResponse; + +public class ResponseMessageUtil { + + public static ErrorResponse getResponseMessage(String code, String hostId, String requestId, String bucketName, String objectName, HttpServletResponse response) { + ErrorResponse errorResponse = new ErrorResponse(); + String resource = "/" + bucketName + "/" + objectName; + errorResponse.setHostId(hostId); + errorResponse.setRequestId(requestId); +
errorResponse.setResource(resource); + errorResponse.setCode(code); + switch (code) { + case "SignatureDoesNotMatch": + response.setStatus(403); + errorResponse.setMessage("The request signature we calculated does not match the signature you provided. Check your AWS secret access key and signing method."); + break; + case "InvalidAccessKeyId": + response.setStatus(403); + errorResponse.setMessage("The access key ID you provided does not exist in our records."); + break; + case "NoSuchBucket": + response.setStatus(404); + errorResponse.setMessage("The specified bucket does not exist."); + break; + case "InventoryFull": + response.setStatus(400); + errorResponse.setMessage("The list of tasks has reached its limit.Limit your list to 1000 tasks."); + break; + case "InternalError": + response.setStatus(500); + errorResponse.setMessage("We encountered an internal error. Please try again."); + break; + case "NoSuchKey": + response.setStatus(404); + errorResponse.setMessage("The specified key does not exist."); + break; + case "MalformedXML": + response.setStatus(400); + errorResponse.setMessage("The XML you provided was not well-formed or did not validate against our published schema."); + break; + case "BucketAlreadyOwnedByYou": + response.setStatus(409); + errorResponse.setMessage("The bucket you tried to create already exists, and you own it."); + break; + case "InvalidPart": + response.setStatus(400); + errorResponse.setMessage("One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag."); + break; + case "InvalidObjectState": + response.setStatus(403); + errorResponse.setMessage("The operation is not valid for the current state of the object.the multipart file already exist.If you want to overwrite please delete it first."); + break; + case "InlineDataTooLarge": + response.setStatus(400); + errorResponse.setMessage("Inline data exceeds the maximum allowed size."); + break; + case "InvalidRange": + response.setStatus(416); + errorResponse.setMessage("The requested range cannot be satisfied."); + break; + case "NoSuchUpload": + response.setStatus(404); + errorResponse.setMessage("The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed."); + break; + case "InvalidArgument": + response.setStatus(400); + errorResponse.setMessage("Argument must be an integer between 0 and 2147483647."); + break; + case "ObjectNotAppendable": + response.setStatus(409); + errorResponse.setMessage("This object can not be appended."); + break; + case "PositionNotEqualToLength": + response.setStatus(409); + errorResponse.setMessage("Appends position error."); + break; + default: + response.setStatus(500); + errorResponse.setMessage("Unknow Error."); + + } + return errorResponse; + } + + public static String getResponseMessage(String code) { + String message; + switch (code) { + case "SignatureDoesNotMatch": + message = "The request signature we calculated does not match the signature you provided. Check your AWS secret access key and signing method."; + break; + case "InvalidAccessKeyId": + message = "The access key ID you provided does not exist in our records."; + break; + case "NoSuchBucket": + message = "The specified bucket does not exist."; + break; + case "InventoryFull": + message = "The list of tasks has reached its limit.Limit your list to 1000 tasks."; + break; + case "InternalError": + message = "We encountered an internal error. 
Please try again."; + break; + case "NoSuchKey": + message = "The specified key does not exist."; + break; + case "MalformedXML": + message = "The XML you provided was not well-formed or did not validate against our published schema."; + break; + case "BucketAlreadyOwnedByYou": + message = "The bucket you tried to create already exists, and you own it."; + break; + case "InvalidPart": + message = "One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag."; + break; + case "InvalidObjectState": + message = "The operation is not valid for the current state of the object. The multipart file already exists. If you want to overwrite it, please delete it first."; + break; + case "InlineDataTooLarge": + message = "Inline data exceeds the maximum allowed size."; + break; + case "InvalidRange": + message = "The requested range cannot be satisfied."; + break; + case "NoSuchUpload": + message = "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed."; + break; + case "InvalidArgument": + message = "Argument must be an integer between 0 and 2147483647."; + break; + default: + message = "Unknown Error."; + } + return message; + } +} diff --git a/galaxy-hos-service/src/main/java/com/mesalab/hos/util/XmlUtil.java b/galaxy-hos-service/src/main/java/com/mesalab/hos/util/XmlUtil.java new file mode 100644 index 0000000..ae58a26 --- /dev/null +++ b/galaxy-hos-service/src/main/java/com/mesalab/hos/util/XmlUtil.java @@ -0,0 +1,112 @@ +package com.mesalab.hos.util; + +/** + * Created by wk on 2020/4/27. + */ + +import javax.xml.bind.JAXBContext; +import javax.xml.bind.JAXBException; +import javax.xml.bind.Marshaller; +import javax.xml.bind.Unmarshaller; +import java.io.*; + +public class XmlUtil { + private static final org.apache.log4j.Logger logger = org.apache.log4j.Logger.getLogger(XmlUtil.class); + + /** + * Convert an object directly to an XML string. + * + * @param obj + * @return + */ + public static String convertToXml(Object obj) { + // create the output writer + StringWriter sw = new StringWriter(); + try { + // use the JAXB classes bundled with the JDK + JAXBContext context = JAXBContext.newInstance(obj.getClass()); + + Marshaller marshaller = context.createMarshaller(); + // pretty-print the XML output + + marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, + Boolean.TRUE); + // marshal the object into the writer as XML + marshaller.marshal(obj, sw); + } catch (JAXBException e) { + return "JAXBException error"; + } + return sw.toString().replace(" standalone=\"yes\"", ""); + + } + + /** + * Write an object to an XML file at the given path. + * + * @param obj + * @param path + * @return + */ + public static void convertToXml(Object obj, String path) { + try { + // use the JAXB classes bundled with the JDK + JAXBContext context = JAXBContext.newInstance(obj.getClass()); + + Marshaller marshaller = context.createMarshaller(); + // pretty-print the XML output + marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, + Boolean.TRUE); + // marshal the object into the writer as XML + // create the output writer + FileWriter fw = null; + try { + fw = new FileWriter(path); + } catch (IOException e) { + logger.error(e.toString()); + } + marshaller.marshal(obj, fw); + } catch (JAXBException e) { + logger.error(e.toString()); + } + } + + @SuppressWarnings("unchecked") + /** + * Convert an XML string into an object. + */ + public static Object convertXmlStrToObject(Class clazz, String xmlStr) { + Object xmlObject = null; + try { + JAXBContext context = JAXBContext.newInstance(clazz); + // the core interface that turns XML into an object + Unmarshaller unmarshaller = context.createUnmarshaller(); + StringReader sr
= new StringReader(xmlStr); + xmlObject = unmarshaller.unmarshal(sr); + } catch (JAXBException e) { + logger.error(e.toString()); + } + return xmlObject; + } + + @SuppressWarnings("unchecked") + /** + * 将file类型的xml转换成对象 + */ + public static Object convertXmlFileToObject(Class clazz, String xmlPath) { + Object xmlObject = null; + try { + JAXBContext context = JAXBContext.newInstance(clazz); + Unmarshaller unmarshaller = context.createUnmarshaller(); + FileReader fr = null; + try { + fr = new FileReader(xmlPath); + } catch (FileNotFoundException e) { + logger.error(e.toString()); + } + xmlObject = unmarshaller.unmarshal(fr); + } catch (JAXBException e) { + logger.error(e.toString()); + } + return xmlObject; + } +} diff --git a/galaxy-report-service/pom.xml b/galaxy-report-service/pom.xml index 26158d7..81e2541 100644 --- a/galaxy-report-service/pom.xml +++ b/galaxy-report-service/pom.xml @@ -53,22 +53,22 @@ <dependency> <groupId>com.alibaba</groupId> <artifactId>fastjson</artifactId> - <version>1.2.69</version> - </dependency> - <dependency> - <groupId>ru.yandex.clickhouse</groupId> - <artifactId>clickhouse-jdbc</artifactId> - <version>0.1.51</version> + <version>${fastjson.version}</version> </dependency> <dependency> <groupId>org.apache.httpcomponents</groupId> <artifactId>httpclient</artifactId> - <version>4.5.2</version> + <version>${httpclient.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-lang3</artifactId> - <version>3.5</version> + <version>${commons.lang3.version}</version> + </dependency> + <dependency> + <groupId>ru.yandex.clickhouse</groupId> + <artifactId>clickhouse-jdbc</artifactId> + <version>0.1.51</version> </dependency> <dependency> <groupId>org.apache.curator</groupId> @@ -117,7 +117,7 @@ <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-client</artifactId> - <version>2.7.1</version> + <version>${hadoop.client.version}</version> <exclusions> <exclusion> <groupId>org.slf4j</groupId> @@ -8,7 +8,7 @@ <packaging>pom</packaging> <!--nexus中央仓库--> - <repositories> + <repositories> <repository> <id>nexus</id> <name>Team Nexus Repository</name> @@ -64,6 +64,7 @@ <zdjizhi.version>1.0.4</zdjizhi.version> <jakarta.version>3.0.0</jakarta.version> <commonsio.version>2.2</commonsio.version> + <fastjson.version>1.2.69</fastjson.version> <joda.time.version>2.6</joda.time.version> <jsqlparser.version>3.0</jsqlparser.version> <jsqlparser.version>3.0</jsqlparser.version> @@ -79,6 +80,7 @@ <maven.compiler.target>1.8</maven.compiler.target> <commons.lang3.version>3.5</commons.lang3.version> <hbase.client.version>2.2.3</hbase.client.version> + <hadoop.client.version>2.7.1</hadoop.client.version> <calcite.core.version>1.22.0</calcite.core.version> <netty-all.version>4.1.50.Final</netty-all.version> <active.record.version>4.9.01</active.record.version> |
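Note: the patch above adds an S3-compatible object service (multipart initiate/upload/complete/abort and upload listing) plus AWS signature V2/V4 verification, and the pom pulls in aws-java-sdk 1.11.700. As a rough orientation for how the new endpoints are meant to be driven, here is a minimal client-side sketch using that same SDK. It is not part of the commit: the endpoint URL and port, region string, bucket, key, credentials, and the class name are hypothetical placeholders; whether a request must carry a V2/V4 signature depends on how the service's authentication is configured.

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;

import java.io.ByteArrayInputStream;
import java.util.ArrayList;
import java.util.List;

public class HosMultipartUploadSketch {

    public static void main(String[] args) {
        // Placeholder endpoint and credentials; substitute the real HOS host/port and keys.
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withEndpointConfiguration(
                        new AwsClientBuilder.EndpointConfiguration("http://192.168.44.12:8080", "us-east-1"))
                .withPathStyleAccessEnabled(true) // assume buckets are addressed in the URI path
                .withCredentials(new AWSStaticCredentialsProvider(
                        new BasicAWSCredentials("hypothetical-access-key", "hypothetical-secret-key")))
                .build();

        String bucket = "demo-bucket";
        String key = "demo/object.bin";

        // 1. Initiate the multipart upload and keep the upload id the service returns.
        String uploadId = s3.initiateMultipartUpload(
                new InitiateMultipartUploadRequest(bucket, key)).getUploadId();

        // 2. Upload two small parts (real parts would normally be much larger).
        List<PartETag> partETags = new ArrayList<>();
        byte[] payload = "hello hos".getBytes();
        for (int partNumber = 1; partNumber <= 2; partNumber++) {
            UploadPartResult result = s3.uploadPart(new UploadPartRequest()
                    .withBucketName(bucket)
                    .withKey(key)
                    .withUploadId(uploadId)
                    .withPartNumber(partNumber)
                    .withInputStream(new ByteArrayInputStream(payload))
                    .withPartSize(payload.length));
            // The ETag returned for each part is passed back unchanged when completing.
            partETags.add(result.getPartETag());
        }

        // 3. Complete the upload; the service stitches the parts into one object.
        s3.completeMultipartUpload(
                new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
    }
}

Path-style access is enabled on the assumption that the gateway resolves buckets from the request URI rather than from virtual-host names; drop that flag if the deployment expects virtual-host-style addressing.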
