author    wangchengcheng <[email protected]>  2023-07-27 15:43:51 +0800
committer wangchengcheng <[email protected]>  2023-07-27 15:43:51 +0800
commit    124f687daace8b85e5c74abac04bcd0a92744a8d (patch)
tree      4f563326b1be67cfb51bf6a04f1ca4d953536e76 /PCAP-PIC/deployment configuration/tsg/components.yml
parent    08686ae87f9efe7a590f48db74ed133b481c85b1 (diff)

P19 23.07 online-config (P19)
Diffstat (limited to 'PCAP-PIC/deployment configuration/tsg/components.yml')
-rw-r--r--  PCAP-PIC/deployment configuration/tsg/components.yml  217
1 file changed, 217 insertions, 0 deletions
diff --git a/PCAP-PIC/deployment configuration/tsg/components.yml b/PCAP-PIC/deployment configuration/tsg/components.yml
new file mode 100644
index 0000000..7c4d1ba
--- /dev/null
+++ b/PCAP-PIC/deployment configuration/tsg/components.yml
@@ -0,0 +1,217 @@
+#===========================Apache Zookeeper configuration===============================#
+#The ZooKeeper JVM heap size, MB. The -Xmx value must be greater than or equal to 1024 considering the running capacity.
+zookeeper_java_opt: '-Xmx4096m -Xms1024m'
+
+#===========================Apache Druid configuration===============================#
+#Druid's MariaDB database name
+mariadb_druid_database: druid
+
+#The maximum size of segment data, in bytes.
+#The sum of available disk space across these locations is used as the default value for the property
+#druid.server.maxSize, which controls the total size of segment data that can be assigned by the Coordinator to a Historical.
+server_disk_maxsize: 5000000000000
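+#Illustrative example (assumed disk layout): two segment-cache disks of 2.5 TB each sum to
+#5000000000000 bytes (~5 TB), which is how a value like the one above would be derived.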
+
+#The maximum size of cached segment data, in bytes.
+#druid.segmentCache.locations specifies locations where segment data can be stored on the Historical.
+#A greater proportion of segments can be kept in memory, allowing for better query performance.
+segmentCache_max_size: 300000000000
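+#For reference, the rendered Druid property typically takes this JSON form (the path below is hypothetical):
+#druid.segmentCache.locations=[{"path":"/data/druid/segment-cache","maxSize":300000000000}]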
+
+#========historical configuration========
+#JVM heap size, MB
+historical_mem: 512
+
+#The maximum size of direct memory, MB
+historical_MaxDirectMemorySize: 512
+
+#Buffer size, bytes.
+#druid.processing.buffer.sizeBytes controls the size of the off-heap buffers allocated to the processing threads.
+#The TopN and GroupBy queries use these buffers to store intermediate computed results.
+historical_buffer_sizeBytes: 50000000
+
+#druid.processing.numMergeBuffers
+#The number of direct memory buffers available for merging query results.
+historical_numMergeBuffers: 4
+
+#druid.processing.numThreads
+#The number of processing threads to have available for parallel processing of segments.
+#It should generally be set to (number of cores - 1)
+historical_numThreads: 5
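+#Sizing sketch, based on Druid's documented direct-memory formula and the values above:
+#required direct memory >= buffer.sizeBytes * (numMergeBuffers + numThreads + 1)
+#                        = 50000000 * (4 + 5 + 1) = 500,000,000 bytes (~500 MB), which fits within historical_MaxDirectMemorySize = 512 MB.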
+
+#========middlemanager configuration========
+#A string of -X Java options to pass to the peon (worker) JVM.
+#druid.indexer.runner.javaOpts: JVM configuration for each task execution.
+middlemanager_runner_javaOpts: '-Xms512m -Xmx512m -XX:MaxDirectMemorySize=512m'
+
+#druid.processing.numMergeBuffers
+#The number of direct memory buffers available for merging query results.
+middlemanager_numMergeBuffers: 2
+
+#Buffer size, bytes.
+#druid.indexer.fork.property.druid.processing.buffer.sizeBytes controls the size of the off-heap buffers allocated to the processing threads.
+#The TopN and GroupBy queries use these buffers to store intermediate computed results.
+middlemanager_buffer_sizeBytes: 20000000
+
+#druid.indexer.fork.property.druid.processing.numThreads
+#The number of processing threads to have available for parallel processing of segments.
+#It should generally be set to (number of cores - 1)
+middlemanager_numThreads: 1
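+#Sizing sketch (same documented Druid formula, applied per peon, values above assumed):
+#20000000 * (2 + 1 + 1) = 80,000,000 bytes (~80 MB) of direct memory per task, well within the 512 MB set in middlemanager_runner_javaOpts.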
+
+#========coordinator configuration========
+#coordinator-overlord JVM heap size, MB
+coordinator_mem: 512
+
+#========broker configuration========
+#JVM heap size, MB
+broker_mem: 512
+
+#The maximum size of direct memory, MB
+broker_MaxDirectMemorySize: 1024
+
+#Buffer size, bytes.
+#druid.processing.buffer.sizeBytes controls the size of the off-heap buffers allocated to the processing threads.
+#The TopN and GroupBy queries use these buffers to store intermediate computed results.
+broker_sizeBytes: 50000000
+
+#druid.processing.numMergeBuffers
+#The number of direct memory buffers available for merging query results.
+broker_numMergeBuffers: 6
+
+#druid.processing.numThreads
+#The number of processing threads to have available for parallel processing of segments.
+#It should generally be set to (number of cores - 1)
+broker_numThreads: 1
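+#Sizing sketch (same documented Druid formula, values above assumed):
+#50000000 * (6 + 1 + 1) = 400,000,000 bytes (~400 MB) of direct memory, which fits within broker_MaxDirectMemorySize = 1024 MB.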
+
+#===========================Hadoop configuration===============================#
+#---------------------------------HDFS config----------------------------#
+#namenode JVM heap size MB
+#The -Xmx value must be greater than or equal to 512 considering the running capacity.
+namenode_java_opt: '-Xmx10240m -Xms10240m'
+
+#datanode JVM heap size MB
+#The -Xmx value must be greater than or equal to 512 considering the running capacity.
+datanode_java_opt: '-Xmx5120m -Xms5120m'
+
+#journalnode JVM heap size MB
+#The -Xmx value must be greater than or equal to 1024 considering the running capacity.
+journal_java_opt: '-Xmx1024m -Xms1024m'
+
+#zkfc JVM heap size MB
+#The -Xmx value must be greater than or equal to 512 considering the running capacity.
+zkfc_java_opt: '-Xmx1024m -Xms1024m'
+
+#The number of server threads for the namenode.
+#dfs.namenode.handler.count
+#It should generally be set to 20*log2(cluster size)
+namenode_handlers: 30
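+#Worked example of the 20*log2(cluster size) rule of thumb (cluster sizes here are illustrative):
+#a 3-DataNode cluster gives 20*log2(3) ~ 32, so a value around 30 is reasonable; an 8-node cluster would give 20*log2(8) = 60.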
+
+#The number of server threads for the datanode
+#dfs.datanode.handler.count
+datanode_handlers: 40
+
+#---------------------------------Yarn config----------------------------#
+#ResourceManager JVM heap size, unit in megabytes (MB).
+resource_manager_java_opt: '-Xmx2048m -Xms1024m'
+
+#NodeManager JVM heap size, unit in megabytes (MB).
+node_manager_java_opt: '-Xmx2048m -Xms1024m'
+
+#The maximum amount of memory that the NodeManager can allocate for a single container request. Unit in megabytes (MB).
+nodemanager_mem: 61440
+
+#The maximum number of virtual CPU cores that can be allocated for a single container request.
+#It's generally equal to the number of CPU cores.
+nodemanager_cores: 48
+
+#This parameter specifies the maximum percentage of cluster resources available for applications.
+#It limits the resource percentage that can be used by the ApplicationMaster (AM).
+resource_scheduler_capacity_percent: 0.5
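+#Illustrative reading (cluster size assumed): with 0.5, ApplicationMasters may together use at most half of the
+#cluster's resources; e.g. a 4-node cluster with 61440 MB per NodeManager allows roughly 4 * 61440 * 0.5 = 122880 MB for AM containers.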
+
+#===========================HBase Configuration===============================#
+#HMaster JVM heap size, MB
+hmaster_java_opt: '-Xmx2048m -Xms2048m'
+
+#HRegionServer JVM heap size, MB
+hregion_java_opt: '-Xmx20480m -Xms20480m -Xmn128m'
+
+#The number of server threads for the regionserver
+#It should generally be set to (number of cores - 1)
+#hbase.regionserver.handler.count
+regionserverhandlers: 40
+
+#Maximum HStoreFile size.
+#If any one of a column family's HStoreFiles grows to exceed this value, the hosting HRegion is split in two.
+hregion_max_filesize: 10737418240
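+#(10737418240 bytes = 10 * 1024^3 = 10 GiB.)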
+
+#The HBase resource isolation (RSGroup) feature is used to group tables for storage.
+#Enable: 1, disable: 0
+hbase_enable_rsgroup: 0
+
+#===========================Apache Kafka Configuration===============================#
+#Kafka JVM heap size, MB
+kafka_java_opt: '-Xmx16384m -Xms4096m'
+
+#The minimum age of a log file to be eligible for deletion due to age
+#Hours, default 168 hours.
+log_reten_hours: 168
+
+#A size-based retention policy for logs. Segments are pruned from the log unless the remaining
+# segments drop below topic_max_bytes. Functions independently of log_reten_hours.
+topic_max_bytes: 10737418240
+
+#Number of partitions for the record topic; the default value is the number of nodes in the cluster.
+#A value greater than the default and an integer multiple of it can also be used as the record topic partition number.
+record_topic_partition: 24
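+#Example (cluster size assumed): with a 6-node cluster the default is 6, and 12, 18 or 24 partitions
+#are also valid, since they are integer multiples of the default.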
+
+#===========================MariaDB configuration===============================#
+#Buffer pool size, MB
+mariadb_innodb_buffer_pool_size: 2048
+
+#mariadb port
+galaxy_mariadb_port: 3306
+
+#Mariadb username
+galaxy_mariadb_username: root
+
+#===========================Spark configuration===============================#
+#Spark worker JVM heap size, MB
+spark_worker_mem: 1024
+
+#The number of processing threads for the worker
+spark_worker_cores: 30
+
+#===========================Nacos Configuration===============================#
+#A string of -X Java options to pass to Nacos.
+nacos_java_opt: '-Xmx1024m -Xms1024m -Xmn256m'
+
+#Nacos's MariaDB database name
+mariadb_nacos_database: nacos
+
+#===========================Flink Configuration================================#
+#JobManager JVM heap size, MB
+jobmanager_memory_size: 1024
+
+#TaskManager network buffer size, MB
+taskmanager_memory_network_min: 64
+taskmanager_memory_network_max: 128
+
+#TaskManager managed (off-heap) memory, MB
+taskmanager_memory_managed_size: 10
+
+#TaskManager JVM heap size, MB
+taskmanager_memory_size: 1024
+
+#TaskManager JVM metaspace size, MB
+taskmanager_jvm_metaspace_size: 384
+
+#TaskManager framework off-heap memory, MB
+taskmanager_memory_framework_offheap_size: 128
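+#Rough TaskManager memory sketch under Flink's memory model (JVM overhead and any task off-heap memory come on top):
+#heap (1024) + managed (10) + network (64-128) + framework off-heap (128) + metaspace (384) ~ 1610-1674 MB plus overhead.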
+
+#The number of task slots per TaskManager
+taskmanager_numberOfTaskSlots: 1
+
+#===========================Clickhouse Configuration================================#
+#ClickHouse node maximum memory use, KB
+clickhouse_max_memory: 150000000000
+
+#Number of threads performing background operations (merges and mutations) in MergeTree-family table engines
+clickhouse_background_pool_size: 16
+