author    Timoney, Dan (dt5972) <dtimoney@att.com>  2019-08-21 16:50:54 -0400
committer Timoney, Dan (dt5972) <dtimoney@att.com>  2019-08-22 13:41:48 -0400
commit    58774777260192181015ad388fa2b446fd7ca31b
tree      863cc872e83ea77a0eca569025f1e633a5e6153f
parent    86fe16201c5ce7682db873caeafbbb28f0f4dd0e
Tune OpenDaylight parameters
Add files to tune akka settings in OpenDaylight. Also add setenv to tune Java settings.

Change-Id: I2071fceb48f990929cd9f4c885bd0c9f7db0263c
Issue-ID: SDNC-858
Signed-off-by: Timoney, Dan (dt5972) <dtimoney@att.com>
Diffstat (limited to 'kubernetes/sdnc/resources/config/conf/org.opendaylight.controller.cluster.datastore.cfg')
-rw-r--r--  kubernetes/sdnc/resources/config/conf/org.opendaylight.controller.cluster.datastore.cfg  107
1 file changed, 107 insertions, 0 deletions
diff --git a/kubernetes/sdnc/resources/config/conf/org.opendaylight.controller.cluster.datastore.cfg b/kubernetes/sdnc/resources/config/conf/org.opendaylight.controller.cluster.datastore.cfg
new file mode 100644
index 0000000000..29dd0e54dd
--- /dev/null
+++ b/kubernetes/sdnc/resources/config/conf/org.opendaylight.controller.cluster.datastore.cfg
@@ -0,0 +1,107 @@
+# This file specifies property settings for the clustered data store to control its behavior. A
+# property may be applied to every data store type ("config" and "operational") or can be customized
+# for each data store type by prefixing the property name with the data store type + '.'. For example,
+# specifying the "shard-election-timeout-factor" property applies to both data stores, whereas
+# specifying "operational.shard-election-timeout-factor" applies only to the "operational" data store.
+# Similarly, specifying "config.shard-election-timeout-factor" applies only to the "config" data store.
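+#
+# For example, the following forms are all valid (the value shown is illustrative):
+#   shard-election-timeout-factor=20                 applies to both data stores
+#   operational.shard-election-timeout-factor=20     applies to the "operational" data store only
+#   config.shard-election-timeout-factor=20          applies to the "config" data store only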
+
+# The multiplication factor used to determine the shard election timeout. The shard election timeout
+# is determined by multiplying shard-heartbeat-interval-in-millis by shard-election-timeout-factor.
+shard-election-timeout-factor=20
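+# Example: with the default shard-heartbeat-interval-in-millis of 500, the factor of 20 above yields
+# an election timeout of 500 ms * 20 = 10 seconds.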
+
+# The interval at which a shard will send a heartbeat message to its remote shard.
+#shard-heartbeat-interval-in-millis=500
+
+# The amount by which to divide the election timeout when a shard is a candidate. This serves as a
+# counter-balance to shard-election-timeout-factor. The default value is 1, i.e. the election timeout
+# is the same in all situations.
+#shard-candidate-election-timeout-divisor=1
+
+# The maximum amount of time to wait for a shard to elect a leader before failing an operation (e.g. transaction create).
+#shard-leader-election-timeout-in-seconds=30
+
+# Enable or disable data persistence.
+#persistent=true
+
+# Disable persistence for the operational data store by default.
+operational.persistent=false
+
+# The maximum amount of time a shard transaction can be idle without receiving any messages before it self-destructs.
+#shard-transaction-idle-timeout-in-minutes=10
+
+# The maximum amount of time a shard transaction three-phase commit can be idle without receiving the
+# next message before it aborts the transaction.
+#shard-transaction-commit-timeout-in-seconds=30
+
+# The maximum allowed capacity for each shard's transaction commit queue.
+#shard-transaction-commit-queue-capacity=20000
+
+# The maximum amount of time to wait for a shard to initialize from persistence on startup before
+# failing an operation (e.g. transaction create or change listener registration).
+#shard-initialization-timeout-in-seconds=300
+
+# The minimum number of entries to be present in the in-memory journal log before a snapshot is to be taken.
+#shard-snapshot-batch-count=20000
+
+# The percentage of Runtime.totalMemory() used by the in-memory journal log before a snapshot is to be taken.
+#shard-snapshot-data-threshold-percentage=12
+
+# The interval at which the leader of a shard checks whether a majority of its followers are active;
+# if not, the leader considers itself isolated.
+#shard-isolated-leader-check-interval-in-millis=5000
+
+# The number of transaction modification operations (put, merge, delete) to batch before sending to the
+# shard transaction actor. Batching improves performance because fewer modification messages are sent to
+# the actor, which lessens the chance that the transaction actor's mailbox queue fills up.
+#shard-batched-modification-count=1000
+
+# The maximum amount of time for akka operations (remote or local) to complete before failing.
+#operation-timeout-in-seconds=5
+
+# The initial number of transactions per second that are allowed before the data store begins
+# applying back pressure. This number is only used as initial guidance; subsequently, the data store
+# measures the latency of each commit and auto-adjusts the rate limit.
+#transaction-creation-initial-rate-limit=100
+
+# The maximum thread pool size for each shard's data store data change notification executor.
+#max-shard-data-change-executor-pool-size=20
+
+# The maximum queue size for each shard's data store data change notification executor.
+#max-shard-data-change-executor-queue-size=1000
+
+# The maximum queue size for each shard's data store data change listener.
+#max-shard-data-change-listener-queue-size=1000
+
+# The maximum queue size for each shard's data store executor.
+#max-shard-data-store-executor-queue-size=5000
+
+# A fully qualified Java class name. The class should implement
+# org.opendaylight.controller.cluster.raft.policy.RaftPolicy and should be
+# accessible to the distributed data store OSGi module so that it can be dynamically loaded via
+# reflection. For now, classes that customize Raft behavior are assumed to be
+# present in the distributed data store module itself. If this property is set to a class that
+# cannot be found, the default Raft policy will be applied.
+#custom-raft-policy-implementation=
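+#
+# For example, OpenDaylight provides DisableElectionsRaftPolicy, commonly used to disable leader
+# elections on single-node deployments:
+#custom-raft-policy-implementation=org.opendaylight.controller.cluster.raft.policy.DisableElectionsRaftPolicy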
+
+# When fragmenting messages through the akka remoting framework, this is the maximum size in bytes
+# for a message slice.
+#maximum-message-slice-size=20480000
+
+# Enable the tell-based protocol between the frontend (applications) and the backend (shards). Using
+# this protocol should avoid the AskTimeoutExceptions seen under heavy load. Defaults to false (ask-based protocol).
+#use-tell-based-protocol=true
+
+# The maximum number of entries a follower is allowed to lag behind the leader before it is
+# considered out-of-sync. This flag may require tuning in the face of a large number of small transactions.
+#sync-index-threshold=10
+
+# Record the allocation stack trace for new transactions, which is useful for debugging. When enabled,
+# the log includes the stack trace of the transaction's creator if an exception occurs when the
+# transaction is submitted (e.g. for a failed validation). Defaults to false due to the performance impact.
+#transaction-debug-context-enabled=true
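+
+# Backoff settings (in seconds) for restarting a persistent shard actor after a failure (interpretation
+# assumed from the property names), plus overrides for several timeouts documented above. All values
+# are supplied by the Helm chart.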
+persistent-actor-restart-min-backoff-in-seconds={{.Values.config.odl.datastore.persistentActorRestartMinBackoffInSeconds}}
+persistent-actor-restart-max-backoff-in-seconds={{.Values.config.odl.datastore.persistentActorRestartMaxBackoffInSeconds}}
+persistent-actor-restart-reset-backoff-in-seconds={{.Values.config.odl.datastore.persistentActorRestartResetBackoffInSeconds}}
+shard-transaction-commit-timeout-in-seconds={{.Values.config.odl.datastore.shardTransactionCommitTimeoutInSeconds}}
+shard-isolated-leader-check-interval-in-millis={{.Values.config.odl.datastore.shardIsolatedLeaderCheckIntervalInMillis}}
+operation-timeout-in-seconds={{.Values.config.odl.datastore.operationTimeoutInSeconds}}
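
The template references above assume matching keys under config.odl.datastore in the chart's
values.yaml. A minimal sketch of that structure (the numbers are illustrative placeholders, not
necessarily the chart's actual defaults):

    config:
      odl:
        datastore:
          persistentActorRestartMinBackoffInSeconds: 10
          persistentActorRestartMaxBackoffInSeconds: 40
          persistentActorRestartResetBackoffInSeconds: 20
          shardTransactionCommitTimeoutInSeconds: 30
          shardIsolatedLeaderCheckIntervalInMillis: 5000
          operationTimeoutInSeconds: 5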