Diffstat (limited to 'kubernetes/clamp/charts')
7 files changed, 44 insertions, 50 deletions
diff --git a/kubernetes/clamp/charts/clamp-dash-es/resources/config/elasticsearch.yml b/kubernetes/clamp/charts/clamp-dash-es/resources/config/elasticsearch.yml
index d631f44f34..ab3ec43eba 100644
--- a/kubernetes/clamp/charts/clamp-dash-es/resources/config/elasticsearch.yml
+++ b/kubernetes/clamp/charts/clamp-dash-es/resources/config/elasticsearch.yml
@@ -86,7 +86,6 @@ network.host: 0.0.0.0
 # By default, Elasticsearch will bind to the available loopback addresses and will scan ports 9300 to 9305 to try
 # to connect to other nodes running on the same server.
 #
-#discovery.zen.ping.unicast.hosts: ["elasticsearch.{{.Values.nsPrefix}}" #$discovery.zen.ping.unicast.hosts
 #
 # This setting tells Elasticsearch to not elect a master unless there are enough master-eligible nodes
diff --git a/kubernetes/clamp/charts/clamp-dash-es/values.yaml b/kubernetes/clamp/charts/clamp-dash-es/values.yaml
index 83fb73e9a8..19e85fde7a 100644
--- a/kubernetes/clamp/charts/clamp-dash-es/values.yaml
+++ b/kubernetes/clamp/charts/clamp-dash-es/values.yaml
@@ -32,8 +32,8 @@ busyboxImage: library/busybox:latest
 
 # application image
 loggingRepository: docker.elastic.co
-image: elasticsearch/elasticsearch:5.6.8
-pullPolicy: IfNotPresent
+image: elasticsearch/elasticsearch:5.6.9
+pullPolicy: Always
 
 # flag to enable debugging - application support required
 debugEnabled: false
@@ -81,6 +81,7 @@ persistence:
   size: 4Gi
   mountPath: /dockerdata-nfs
   mountSubPath: clamp/dashboard-elasticsearch/data
+  mountSubPathLogs: clamp
 
 service:
   type: ClusterIP
@@ -96,7 +97,7 @@ service:
 ingress:
   enabled: false
 
-resources: {}
+#resources: {}
 # We usually recommend not to specify default resources and to leave this as a conscious
 # choice for the user. This also increases chances charts run on environments with little
 # resources, such as Minikube. If you do want to specify resources, uncomment the following
@@ -107,10 +108,10 @@ resources: {}
 # ref: http://kubernetes.io/docs/user-guide/compute-resources/
 # Minimum memory for development is 2 CPU cores and 4GB memory
 # Minimum memory for production is 4 CPU cores and 8GB memory
-#resources:
-#  limits:
-#    cpu: 2
-#    memory: 4Gi
-#  requests:
-#    cpu: 2
-#    memory: 4Gi
+resources:
+  limits:
+    cpu: 1
+    memory: 4Gi
+  requests:
+    cpu: 10m
+    memory: 2.5Gi
diff --git a/kubernetes/clamp/charts/clamp-dash-kibana/values.yaml b/kubernetes/clamp/charts/clamp-dash-kibana/values.yaml
index 64b515c1d5..e4987b093a 100644
--- a/kubernetes/clamp/charts/clamp-dash-kibana/values.yaml
+++ b/kubernetes/clamp/charts/clamp-dash-kibana/values.yaml
@@ -39,8 +39,8 @@ busyboxImage: library/busybox:latest
 
 # application image
 loggingRepository: docker.elastic.co
-image: kibana/kibana:5.6.8
-pullPolicy: IfNotPresent
+image: kibana/kibana:5.6.9
+pullPolicy: Always
 
 # flag to enable debugging - application support required
 debugEnabled: false
@@ -82,7 +82,7 @@ service:
 ingress:
   enabled: false
 
-resources: {}
+#resources: {}
 # We usually recommend not to specify default resources and to leave this as a conscious
 # choice for the user. This also increases chances charts run on environments with little
 # resources, such as Minikube. If you do want to specify resources, uncomment the following
@@ -93,10 +93,11 @@ resources: {}
 # ref: http://kubernetes.io/docs/user-guide/compute-resources/
 # Minimum memory for development is 2 CPU cores and 4GB memory
 # Minimum memory for production is 4 CPU cores and 8GB memory
-#resources:
-#  limits:
-#    cpu: 2
-#    memory: 4Gi
-#  requests:
-#    cpu: 2
-#    memory: 4Gi
+resources:
+  limits:
+    cpu: 1
+    memory: 2Gi
+  requests:
+    cpu: 10m
+    memory: 750Mi
+
diff --git a/kubernetes/clamp/charts/clamp-dash-logstash/values.yaml b/kubernetes/clamp/charts/clamp-dash-logstash/values.yaml
index e3463e91ab..904798077a 100644
--- a/kubernetes/clamp/charts/clamp-dash-logstash/values.yaml
+++ b/kubernetes/clamp/charts/clamp-dash-logstash/values.yaml
@@ -30,8 +30,8 @@ global:
 
 # application image
 loggingRepository: docker.elastic.co
-image: logstash/logstash:5.6.8
-pullPolicy: IfNotPresent
+image: logstash/logstash:5.6.9
+pullPolicy: Always
 
 # flag to enable debugging - application support required
 debugEnabled: false
@@ -80,7 +80,7 @@ service:
 ingress:
   enabled: false
 
-resources: {}
+#resources: {}
 # We usually recommend not to specify default resources and to leave this as a conscious
 # choice for the user. This also increases chances charts run on environments with little
 # resources, such as Minikube. If you do want to specify resources, uncomment the following
@@ -91,10 +91,10 @@ resources: {}
 # ref: http://kubernetes.io/docs/user-guide/compute-resources/
 # Minimum memory for development is 2 CPU cores and 4GB memory
 # Minimum memory for production is 4 CPU cores and 8GB memory
-#resources:
-#  limits:
-#    cpu: 2
-#    memory: 4Gi
-#  requests:
-#    cpu: 2
-#    memory: 4Gi
+resources:
+  limits:
+    cpu: 1
+    memory: 1.3Gi
+  requests:
+    cpu: 10m
+    memory: 750Mi
diff --git a/kubernetes/clamp/charts/mariadb/resources/config/mariadb/conf.d/conf1/my.cnf b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/conf.d/conf1/my.cnf
index 0be1bd7aa3..c6631fb4ef 100644
--- a/kubernetes/clamp/charts/mariadb/resources/config/mariadb/conf.d/conf1/my.cnf
+++ b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/conf.d/conf1/my.cnf
@@ -141,10 +141,10 @@ binlog_format=row
 ##innodb_log_group_home_dir = //opt/app/mysql/iblogs
 # You can set .._buffer_pool_size up to 50 - 80 %
 # of RAM but beware of setting memory usage too high
-innodb_buffer_pool_size = 6380M
+innodb_buffer_pool_size = 128M
 #innodb_additional_mem_pool_size = 2M
 # Set .._log_file_size to 25 % of buffer pool size
-innodb_log_file_size = 150M
+innodb_log_file_size = 10M
 innodb_log_files_in_group = 3
 innodb_log_buffer_size = 8M
 #innodb_flush_log_at_trx_commit = 1
@@ -156,7 +156,7 @@ transaction-isolation=READ-COMMITTED
 ####### Galera parameters #######
 ## Galera Provider configuration
 wsrep_provider=/usr/lib/galera/libgalera_smm.so
-wsrep_provider_options="gcache.size=2G; gcache.page_size=1G"
+wsrep_provider_options="gcache.size=128M; gcache.page_size=10M"
 ## Galera Cluster configuration
 wsrep_cluster_name="MSO-automated-tests-cluster"
 wsrep_cluster_address="gcomm://"
@@ -169,7 +169,7 @@ wsrep_sst_method=rsync
 ## Galera Node configuration
 wsrep_node_name="mariadb1"
 ##wsrep_node_address="192.169.3.184"
-wsrep_on=ON
+wsrep_on=OFF
 ## Status notification
 #wsrep_notify_cmd=/opt/app/mysql/bin/wsrep_notify
 #######
diff --git a/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql
index 7530d5a161..308ec7da62 100644
--- a/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql
+++ b/kubernetes/clamp/charts/mariadb/resources/config/mariadb/docker-entrypoint-initdb.d/bulkload/clds-create-db-objects.sql
@@ -17,13 +17,6 @@
 # Create CLDS database objects (tables, etc.)
 #
 #
-CREATE DATABASE `camundabpm`;
-USE `camundabpm`;
-DROP USER 'camunda';
-CREATE USER 'camunda';
-GRANT ALL on camundabpm.* to 'camunda' identified by 'ndMSpw4CAM' with GRANT OPTION;
-FLUSH PRIVILEGES;
-
 CREATE DATABASE `cldsdb4`;
 USE `cldsdb4`;
 DROP USER 'clds';
diff --git a/kubernetes/clamp/charts/mariadb/values.yaml b/kubernetes/clamp/charts/mariadb/values.yaml
index da62319cb4..459040549d 100644
--- a/kubernetes/clamp/charts/mariadb/values.yaml
+++ b/kubernetes/clamp/charts/mariadb/values.yaml
@@ -85,7 +85,7 @@
 ingress:
   enabled: false
 
-resources: {}
+#resources: {}
 # We usually recommend not to specify default resources and to leave this as a conscious
 # choice for the user. This also increases chances charts run on environments with little
 # resources, such as Minikube. If you do want to specify resources, uncomment the following
@@ -96,10 +96,10 @@
 # ref: http://kubernetes.io/docs/user-guide/compute-resources/
 # Minimum memory for development is 2 CPU cores and 4GB memory
 # Minimum memory for production is 4 CPU cores and 8GB memory
-#resources:
-#  limits:
-#    cpu: 2
-#    memory: 4Gi
-#  requests:
-#    cpu: 2
-#    memory: 4Gi
+resources:
+  limits:
+    cpu: 1
+    memory: 500Mi
+  requests:
+    cpu: 10m
+    memory: 200Mi
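Note on the values.yaml changes above: each chart replaces the empty `resources: {}` stub with explicit limits and requests, and switches the image pull policy to `Always` along with the 5.6.9 images. Below is a minimal sketch of how such a block is typically consumed by a chart's deployment template; the fragment and helper names (e.g. `common.name`) are assumptions for illustration, not taken from this commit.

```yaml
# Hypothetical deployment template fragment (not from this commit): the
# resources block defined in values.yaml is rendered directly into the
# container spec, so tuning limits/requests only needs a values.yaml change.
      containers:
        - name: {{ include "common.name" . }}
          image: "{{ .Values.loggingRepository }}/{{ .Values.image }}"
          imagePullPolicy: {{ .Values.pullPolicy }}
          resources:
{{ toYaml .Values.resources | indent 12 }}
```

With `pullPolicy: Always`, the kubelet re-pulls the referenced image tag every time a container starts, which avoids running a stale cached copy of a reused tag.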
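The new `mountSubPathLogs: clamp` key in the clamp-dash-es values only takes effect if a template references it. A plausible wiring, again only a sketch with assumed volume name and mount paths, is a second subPath-based mount on the shared persistent volume:

```yaml
# Hypothetical volumeMounts for the Elasticsearch container (volume name and
# mount paths are assumptions): data and logs share one PV, split by subPath.
          volumeMounts:
            - name: clamp-dash-es-data
              mountPath: /usr/share/elasticsearch/data
              subPath: {{ .Values.persistence.mountSubPath }}
            - name: clamp-dash-es-data
              mountPath: /usr/share/elasticsearch/logs
              subPath: {{ .Values.persistence.mountSubPathLogs }}
```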