path: root/feature-server-pool/src
Diffstat (limited to 'feature-server-pool/src')
-rw-r--r--  feature-server-pool/src/assembly/assemble_zip.xml  75
-rw-r--r--  feature-server-pool/src/main/feature/config/feature-server-pool.properties  142
-rw-r--r--  feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Bucket.java  2579
-rw-r--r--  feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Discovery.java  352
-rw-r--r--  feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Events.java  108
-rw-r--r--  feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/ExtendedObjectInputStream.java  70
-rw-r--r--  feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/FeatureServerPool.java  1027
-rw-r--r--  feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Keyword.java  500
-rw-r--r--  feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Leader.java  594
-rw-r--r--  feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/MainLoop.java  195
-rw-r--r--  feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/RestServerPool.java  437
-rw-r--r--  feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Server.java  1361
-rw-r--r--  feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/ServerPoolApi.java  82
-rw-r--r--  feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/ServerPoolProperties.java  338
-rw-r--r--  feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/TargetLock.java  2852
-rw-r--r--  feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Util.java  183
-rw-r--r--  feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/persistence/Persistence.java  899
-rw-r--r--  feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.control.api.DroolsPdpStateControlApi  1
-rw-r--r--  feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.core.PolicySessionFeatureApi  2
-rw-r--r--  feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.features.PolicyControllerFeatureApi  1
-rw-r--r--  feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.features.PolicyEngineFeatureApi  1
-rw-r--r--  feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.serverpool.ServerPoolApi  1
-rw-r--r--  feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/AdapterImpl.java  455
-rw-r--r--  feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/BucketWrapperImpl.java  172
-rw-r--r--  feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/ServerWrapperImpl.java  145
-rw-r--r--  feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/TargetLockWrapperImpl.java  197
-rw-r--r--  feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/Adapter.java  356
-rw-r--r--  feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/BlockingClassLoader.java  176
-rw-r--r--  feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/BucketWrapper.java  132
-rw-r--r--  feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/ServerWrapper.java  103
-rw-r--r--  feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/SimDmaap.java  327
-rw-r--r--  feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/TargetLockWrapper.java  98
-rw-r--r--  feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/Test1.java  916
-rw-r--r--  feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/TestDroolsObject.java  58
-rw-r--r--  feature-server-pool/src/test/resources/TestController-controller.properties  13
-rw-r--r--  feature-server-pool/src/test/resources/drools-artifact-1.1/pom.xml  31
-rw-r--r--  feature-server-pool/src/test/resources/drools-artifact-1.1/src/main/resources/META-INF/kmodule.xml  25
-rw-r--r--  feature-server-pool/src/test/resources/drools-artifact-1.1/src/main/resources/rules.drl  51
-rw-r--r--  feature-server-pool/src/test/resources/feature-server-pool-test.properties  142
-rw-r--r--  feature-server-pool/src/test/resources/logback-test.xml  32
40 files changed, 0 insertions, 15229 deletions
diff --git a/feature-server-pool/src/assembly/assemble_zip.xml b/feature-server-pool/src/assembly/assemble_zip.xml
deleted file mode 100644
index e735a8d6..00000000
--- a/feature-server-pool/src/assembly/assemble_zip.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<!--
- ============LICENSE_START=======================================================
- feature-server-pool
- ================================================================================
- Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- ================================================================================
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ============LICENSE_END=========================================================
- -->
-
-<!-- Defines how we build the .zip file which is our distribution. -->
-
-<assembly
- xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
- <id>feature-server-pool-package</id>
- <formats>
- <format>zip</format>
- </formats>
-
- <includeBaseDirectory>false</includeBaseDirectory>
-
- <fileSets>
- <fileSet>
- <directory>target</directory>
- <outputDirectory>lib/feature</outputDirectory>
- <includes>
- <include>feature-server-pool-${project.version}.jar</include>
- </includes>
- </fileSet>
- <fileSet>
- <directory>target/assembly/lib</directory>
- <outputDirectory>lib/dependencies</outputDirectory>
- <includes>
- <include>*.jar</include>
- </includes>
- </fileSet>
- <fileSet>
- <directory>src/main/feature/config</directory>
- <outputDirectory>config</outputDirectory>
- <fileMode>0644</fileMode>
- <excludes/>
- </fileSet>
- <fileSet>
- <directory>src/main/feature/bin</directory>
- <outputDirectory>bin</outputDirectory>
- <fileMode>0744</fileMode>
- <excludes/>
- </fileSet>
- <fileSet>
- <directory>src/main/feature/db</directory>
- <outputDirectory>db</outputDirectory>
- <fileMode>0744</fileMode>
- <excludes/>
- </fileSet>
- <fileSet>
- <directory>src/main/feature/install</directory>
- <outputDirectory>install</outputDirectory>
- <fileMode>0744</fileMode>
- <excludes/>
- </fileSet>
- </fileSets>
-
-</assembly>
diff --git a/feature-server-pool/src/main/feature/config/feature-server-pool.properties b/feature-server-pool/src/main/feature/config/feature-server-pool.properties
deleted file mode 100644
index 00380294..00000000
--- a/feature-server-pool/src/main/feature/config/feature-server-pool.properties
+++ /dev/null
@@ -1,142 +0,0 @@
-###
-# ============LICENSE_START=======================================================
-# feature-server-pool
-# ================================================================================
-# Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END=========================================================
-###
-
-# The following properties control the IP address and port that a given
-# server binds to. The default will bind to all interfaces on the host,
-# and choose a port number at random.
-
-server.pool.server.ipAddress=${envd:SERVER_POOL_SERVER_IP}
-server.pool.server.port=${envd:SERVER_POOL_PORT:20000}
-
-# The following properties determine whether HTTPS is used -- note that HTTPS
-# also requires that the 'javax.net.ssl.*' parameters in 'system.properties' be
-# specified, and the key store and trust store be configured, as appropriate.
-server.pool.server.https=${envd:SERVER_POOL_HTTPS}
-server.pool.server.selfSignedCerts=false
-
-# The IP address and port that servers on the geo-redundant site
-# should use to connect to servers on this site.
-server.pool.server.site.ip=${envd:SERVER_POOL_SITE_IP}
-server.pool.server.site.port=${envd:SERVER_POOL_SITE_PORT}
-
-# A comma-separated list of host names -- if an entry is found that refers
-# to an HTTP/HTTPS destination IP address, the host name will be used as the
-# destination, instead of the IP address
-server.pool.server.hostlist=${envd:SERVER_POOL_HOST_LIST}
-
-# The servers send 'pings' to each other once per main loop cycle. They
-# also measure the gap between 'pings' from each server, and calculate
-# an allowed time gap based upon this. 'server.pool.server.allowedGap' is the initial
-# allowed gap prior to receiving any pings (default=30 seconds), and
-# 'server.pool.server.adaptiveGapAdjust' is a value that is added to the calculated
-# gap "just in case" (default=5 seconds)
-
-server.pool.server.allowedGap=30000
-server.pool.server.adaptiveGapAdjust=5000
-
-# 'connectTimeout' and 'readTimeout' affect the client end of a REST
-# connection (default=10 seconds each)
-
-server.pool.server.connectTimeout=10000
-server.pool.server.readTimeout=10000
-
-# Each server has a thread pool per remote server, which is used when
-# sending HTTP REST messages -- the following parameters determine the
-# configuration.
-
-server.pool.server.threads.corePoolSize=5
-server.pool.server.threads.maximumPoolSize=10
-server.pool.server.threads.keepAliveTime=5000
-
-# The server pool members use a UEB/DMAAP topic to connect with other
-# servers in the pool. The following set of parameters are passed to
-# the CambriaClient library, and are used in setting up the consumer and
-# publisher ends of the connection. 'discovery.servers' and 'discovery.topic'
-# are the minimum values that need to be specified. The last parameter in
-# this set, 'discovery.publisherLoopCycleTime' isn't passed to the
-# CambriaClient library; instead, it controls how frequently the 'ping'
-# messages are sent out on this channel. Note that only the lead server
-# keeps this channel open long-term.
-
-server.pool.discovery.servers=${envd:SERVER_POOL_DISCOVERY_SERVERS}
-server.pool.discovery.port=${envd:SERVER_POOL_DISCOVERY_PORT:3904}
-server.pool.discovery.topic=${envd:SERVER_POOL_DISCOVERY_TOPIC:DISCOVERY-TOPIC}
-server.pool.discovery.username=${envd:SERVER_POOL_DISCOVERY_USERNAME}
-server.pool.discovery.password=${envd:SERVER_POOL_DISCOVERY_PASSWORD}
-server.pool.discovery.https=${envd:DMAAP_USE_HTTPS}
-server.pool.discovery.apiKey=
-server.pool.discovery.apiSecret=
-#server.pool.discovery.publisherSocketTimeout=5000
-#server.pool.discovery.consumerSocketTimeout=70000
-server.pool.discovery.fetchTimeout=60000
-server.pool.discovery.fetchLimit=100
-server.pool.discovery.selfSignedCertificates=false
-server.pool.discovery.publisherLoopCycleTime=5000
-
-# The 'leader.*' parameters affect behavior during an election. The value of
-# 'mainLoop.cycle' determines the actual time delay. 'leader.stableIdCycles'
-# is the required minimum number of "stable" cycles before voting can start
-# (in this case, "stable" means no servers added or failing). Once voting has
-# started, "leader.stableVotingCycles' is the minimum number of "stable"
-# cycles needed before declaring a winner (in this case, "stable" means no
-# votes changing).
-
-server.pool.leader.stableIdleCycles=5
-server.pool.leader.stableVotingCycles=5
-
-# The value of 'mainLoop.cycle' (default = 1 second) determines how frequently
-# various events occur, such as the sending of 'ping' messages, and the
-# duration of a "cycle" while voting for a lead server.
-
-server.pool.mainLoop.cycle=1000
-
-# 'keyword.path' is used when looking for "keywords" within JSON messages.
-# The first keyword located is hashed to determine which bucket to route
-# through.
-
-keyword.path=requestID,CommonHeader.RequestID,body.output.common-header.request-id,result-info.request-id:uuid
-# 'keyword.<CLASS-NAME>.lookup' is used to locate "keywords" within objects.
-# The 'value' field contains a list of method calls or field names separated
-# by '.' that are used to locate the keyword
-# (e.g. 'method1().field2.method3()')
-
-keyword.java.lang.String.lookup=toString()
-keyword.org.onap.policy.m2.base.Transaction.lookup=getRequestID()
-keyword.org.onap.policy.controlloop.ControlLoopEvent.lookup=requestID
-keyword.org.onap.policy.appclcm.LcmRequestWrapper.lookup=getBody().getCommonHeader().getRequestId()
-keyword.org.onap.policy.appclcm.LcmResponseWrapper.lookup=getBody().getCommonHeader().getRequestId()
-keyword.org.onap.policy.drools.serverpool.TargetLock.lookup=getOwnerKey()
-keyword.org.onap.policy.drools.serverpooltest.TestDroolsObject.lookup=getKey()
-keyword.org.onap.policy.drools.serverpooltest.Test1$KeywordWrapper.lookup=key
-
-# The following properties affect distributed 'TargetLock' behavior.
-#
-# server.pool.lock.ttl - controls how many hops a 'TargetLock' message can take
-# server.pool.lock.audit.period - how frequently should the audit run?
-# server.pool.lock.audit.gracePeriod - how long to wait after bucket reassignments
-# before running the audit again
-# server.pool.lock.audit.retryDelay - mismatches can occur due to the transient nature
-# of the lock state: this property controls how long to wait before
-# trying again
-
-server.pool.lock.ttl=3
-server.pool.lock.audit.period=300000
-server.pool.lock.audit.gracePeriod=60000
-server.pool.lock.audit.retryDelay=5000
\ No newline at end of file
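As a point of reference for the 'keyword.path' and 'keyword.<CLASS-NAME>.lookup' settings above: once a keyword has been extracted from a message, it is hashed into one of 1024 buckets, and the bucket's current owner handles the message. The following is a minimal standalone sketch of that keyword-to-bucket mapping, mirroring the MD5-based Bucket.bucketNumber() method that appears later in this diff; the class name and sample requestID are illustrative only.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

public class KeywordBucketSketch {
    /** Map a keyword to a bucket number in the range 0-1023. */
    public static int bucketNumber(String keyword) throws Exception {
        // MD5 is used here only to get an even distribution, not for security
        byte[] digest = MessageDigest.getInstance("MD5")
                .digest(keyword.getBytes(StandardCharsets.UTF_8));
        // keep the first two bytes of the digest, masked down to 10 bits (1024 buckets)
        return ((Byte.toUnsignedInt(digest[0]) << 8)
                | Byte.toUnsignedInt(digest[1])) & 0x3ff;
    }

    public static void main(String[] args) throws Exception {
        // e.g. a requestID pulled out of a JSON message via 'keyword.path'
        System.out.println(bucketNumber("664be3d2-6c12-4f4b-a3e7-c349acced200"));
    }
}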
diff --git a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Bucket.java b/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Bucket.java
deleted file mode 100644
index a1afebc9..00000000
--- a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Bucket.java
+++ /dev/null
@@ -1,2579 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.BUCKET_CONFIRMED_TIMEOUT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.BUCKET_TIME_TO_LIVE;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.BUCKET_UNCONFIRMED_GRACE_PERIOD;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.BUCKET_UNCONFIRMED_TIMEOUT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_BUCKET_CONFIRMED_TIMEOUT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_BUCKET_TIME_TO_LIVE;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_BUCKET_UNCONFIRMED_GRACE_PERIOD;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_BUCKET_UNCONFIRMED_TIMEOUT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.getProperty;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.io.Serializable;
-import java.net.InetSocketAddress;
-import java.nio.charset.StandardCharsets;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.util.Base64;
-import java.util.Comparator;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Queue;
-import java.util.Random;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.UUID;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.FutureTask;
-import java.util.concurrent.LinkedTransferQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import javax.ws.rs.client.Entity;
-import javax.ws.rs.client.WebTarget;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import lombok.EqualsAndHashCode;
-import lombok.Getter;
-import lombok.Setter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * The server pool uses an algorithmic way to map things like transactions
- * (identified by a 'requestID') and locks (identified by a string key)
- * into a server handling that transaction or lock. It does this by mapping
- * the string name into one of a set of predefined hash buckets, with each
- * bucket being assigned to one of the active servers.
- * In other words:
- * string key -> hash bucket (fixed mapping, known to all servers)
- * hash bucket -> server (assignments may change when servers go up or down,
- * but remains fairly static when the system is stable)
- * With this approach, there is no global dynamic table that needs to be
- * updated as transactions, or other objects come and go.
- * Each instance of class 'Bucket' corresponds to one of the hash buckets;
- * there are static methods that provide the overall abstraction, as well
- * as some supporting classes.
- */
-
-@Getter
-@Setter
-public class Bucket {
- private static Logger logger = LoggerFactory.getLogger(Bucket.class);
-
- /*
- * Listener class to handle state changes that may lead to
- * reassignments of buckets
- */
- private static EventHandler eventHandler = new EventHandler();
-
- // Used to hash keywords into buckets
- private static MessageDigest messageDigest;
-
- static {
- // register Listener class
- Events.register(eventHandler);
-
- // create MD5 MessageDigest -- used to hash keywords
- try {
- // disabling sonar, as the digest is only used to hash keywords - it isn't
- // used for security purposes
- messageDigest = MessageDigest.getInstance("MD5"); // NOSONAR
- } catch (NoSuchAlgorithmException e) {
- throw new ExceptionInInitializerError(e);
- }
- }
-
- /*
- * Values extracted from properties
- */
-
- private static String timeToLive;
- private static long confirmedTimeout;
- private static long unconfirmedTimeout;
- private static long unconfirmedGracePeriod;
-
- /*
- * Tags for encoding of bucket data
- */
- private static final int END_OF_PARAMETERS_TAG = 0;
- private static final int OWNER_UPDATE = 1;
- private static final int OWNER_NULL = 2;
- private static final int PRIMARY_BACKUP_UPDATE = 3;
- private static final int PRIMARY_BACKUP_NULL = 4;
- private static final int SECONDARY_BACKUP_UPDATE = 5;
- private static final int SECONDARY_BACKUP_NULL = 6;
-
- // This is the table itself -- the current size is fixed at 1024 buckets
- public static final int BUCKETCOUNT = 1024;
- private static Bucket[] indexToBucket = new Bucket[BUCKETCOUNT];
-
- static {
- // create hash bucket entries, but there are no assignments yet
- for (int i = 0; i < indexToBucket.length; i += 1) {
- Bucket bucket = new Bucket(i);
- indexToBucket[i] = bucket;
- }
- }
-
- // this is a list of all objects registered for the 'Backup' interface
- private static List<Backup> backupList = new LinkedList<>();
-
- // 'rebalance' is a non-null value when rebalancing is in progress
- private static Object rebalanceLock = new Object();
- private static Rebalance rebalance = null;
-
- // bucket number
- private volatile int index;
-
- // owner of the bucket -- this is the host where messages should be directed
- private volatile Server owner = null;
-
- // this host will take over as the owner if the current owner goes down,
- // and may also contain backup data to support persistence
- private volatile Server primaryBackup = null;
-
- // this is a secondary backup host, which can be used if both owner and
- // primary backup go out in quick succession
- private volatile Server secondaryBackup = null;
-
- // when we are in a transient state, certain events are forwarded to
- // this object
- private volatile State state = null;
-
- // storage for additional data
- private Map<Class<?>, Object> adjuncts = new HashMap<>();
-
- // HTTP query parameters
- private static final String QP_BUCKET = "bucket";
- private static final String QP_KEYWORD = "keyword";
- private static final String QP_DEST = "dest";
- private static final String QP_TTL = "ttl";
- private static final String OWNED_STR = "Owned";
-
- // BACKUP data (only buckets for where we are the owner, or a backup)
-
- // TBD: need fields for outgoing queues for application message transfers
-
- /**
- * This method triggers registration of 'eventHandler', and also extracts
- * property values.
- */
- static void startup() {
- int intTimeToLive =
- getProperty(BUCKET_TIME_TO_LIVE, DEFAULT_BUCKET_TIME_TO_LIVE);
- timeToLive = String.valueOf(intTimeToLive);
- confirmedTimeout =
- getProperty(BUCKET_CONFIRMED_TIMEOUT, DEFAULT_BUCKET_CONFIRMED_TIMEOUT);
- unconfirmedTimeout =
- getProperty(BUCKET_UNCONFIRMED_TIMEOUT,
- DEFAULT_BUCKET_UNCONFIRMED_TIMEOUT);
- unconfirmedGracePeriod =
- getProperty(BUCKET_UNCONFIRMED_GRACE_PERIOD,
- DEFAULT_BUCKET_UNCONFIRMED_GRACE_PERIOD);
- }
-
- /**
- * Constructor -- called when building the 'indexToBucket' table.
- *
- * @param index the bucket number
- */
- private Bucket(int index) {
- this.index = index;
- }
-
- /**
- * This method converts a String keyword into the corresponding bucket
- * number.
- *
- * @param value the keyword to be converted
- * @return the bucket number
- */
- public static int bucketNumber(String value) {
- /*
-         * It would be possible to create a new 'MessageDigest' instance each
- * time this method is called, and avoid the need for synchronization.
- * However, past experience has taught me that this might involve a
- * considerable amount of computation, due to internal table
- * initialization, so it shouldn't be done this way for performance
- * reasons.
- * If we start running into blocking issues because there are too many
- * simultaneous calls to this method, we can initialize an array of these
- * objects, and iterate over them using an AtomicInteger index.
- */
- synchronized (messageDigest) {
- /*
- * Note that we only need the first two bytes of this, even though
- * 16 bytes are produced. There may be other operations that can be
- * used to more efficiently map keyword -> hash bucket. The only
- * issue is the same algorithm must be used on all servers, and it
- * should produce a fairly even distribution across all of the buckets.
- */
- byte[] digest = messageDigest.digest(value.getBytes());
- return ((Byte.toUnsignedInt(digest[0]) << 8)
- | Byte.toUnsignedInt(digest[1])) & 0x3ff;
- }
- }
-
- /**
- * Fetch the server associated with a particular bucket number.
- *
- * @param bucketNumber a bucket number in the range 0-1023
- * @return the Server that currently handles the bucket,
- * or 'null' if none is currently assigned
- */
- public static Server bucketToServer(int bucketNumber) {
- Bucket bucket = indexToBucket[bucketNumber];
- return bucket.getOwner();
- }
-
- /**
- * Fetch the bucket object associated with a particular bucket number.
- *
- * @param bucketNumber a bucket number in the range 0-1023
- * @return the Bucket associated with this bucket number
- */
- public static Bucket getBucket(int bucketNumber) {
- return indexToBucket[bucketNumber];
- }
-
- /**
- * Fetch the bucket object associated with a particular keyword.
- *
- * @param value the keyword to be converted
- * @return the Bucket associated with this keyword
- */
- public static Bucket getBucket(String value) {
- return indexToBucket[bucketNumber(value)];
- }
-
- /**
- * Determine if the associated key is assigned to the current server.
- *
- * @param key the keyword to be hashed
- * @return 'true' if the associated bucket is assigned to this server,
- * 'false' if not
- */
- public static boolean isKeyOnThisServer(String key) {
- int bucketNumber = bucketNumber(key);
- Bucket bucket = indexToBucket[bucketNumber];
- return bucket.getOwner() == Server.getThisServer();
- }
-
- /**
- * Handle an incoming /bucket/update REST message.
- *
- * @param data base64-encoded data, containing all bucket updates
- */
- static void updateBucket(byte[] data) {
- final byte[] packet = Base64.getDecoder().decode(data);
- Runnable task = () -> {
- try {
- /*
- * process the packet, handling any updates
- */
- if (updateBucketInternal(packet)) {
- /*
- * updates have occurred -- forward this packet to
- * all servers in the "notify list"
- */
- logger.info("One or more bucket updates occurred");
- Entity<String> entity =
- Entity.entity(new String(data, StandardCharsets.UTF_8),
- MediaType.APPLICATION_OCTET_STREAM_TYPE);
- for (Server server : Server.getNotifyList()) {
- server.post("bucket/update", entity);
- }
- }
- } catch (Exception e) {
- logger.error("Exception in Bucket.updateBucket", e);
- }
- };
- MainLoop.queueWork(task);
- }
-
- /**
- * This method supports the 'updateBucket' method, and runs entirely within
- * the 'MainLoop' thread.
- */
- private static boolean updateBucketInternal(byte[] packet) throws IOException {
- boolean changes = false;
-
- ByteArrayInputStream bis = new ByteArrayInputStream(packet);
- DataInputStream dis = new DataInputStream(bis);
-
- // the packet contains a sequence of bucket updates
- while (dis.available() != 0) {
- // first parameter = bucket number
- int index = dis.readUnsignedShort();
-
- // locate the corresponding 'Bucket' object
- Bucket bucket = indexToBucket[index];
-
- // indicates whether changes occurred to the bucket
- boolean bucketChanges = false;
-
- /*
- * the remainder of the information for this bucket consists of
- * a sequence of '<tag> [ <associated-data> ]' followed by the tag
- * value 'END_OF_PARAMETERS_TAG'.
- */
- int tag;
- while ((tag = dis.readUnsignedByte()) != END_OF_PARAMETERS_TAG) {
- switch (tag) {
- case OWNER_UPDATE:
- // <OWNER_UPDATE> <owner-uuid> -- owner UUID specified
- bucketChanges = updateBucketInternalOwnerUpdate(bucket, dis, index);
- break;
-
- case OWNER_NULL:
- // <OWNER_NULL> -- owner UUID should be set to 'null'
- bucketChanges = nullifyOwner(index, bucket, bucketChanges);
- break;
-
- case PRIMARY_BACKUP_UPDATE:
- // <PRIMARY_BACKUP_UPDATE> <primary-backup-uuid> --
- // primary backup UUID specified
- bucketChanges = updatePrimaryBackup(dis, index, bucket, bucketChanges);
- break;
-
- case PRIMARY_BACKUP_NULL:
- // <PRIMARY_BACKUP_NULL> --
- // primary backup should be set to 'null'
- bucketChanges = nullifyPrimaryBackup(index, bucket, bucketChanges);
- break;
-
- case SECONDARY_BACKUP_UPDATE:
- // <SECONDARY_BACKUP_UPDATE> <secondary-backup-uuid> --
- // secondary backup UUID specified
- bucketChanges = updateSecondaryBackup(dis, index, bucket, bucketChanges);
- break;
-
- case SECONDARY_BACKUP_NULL:
- // <SECONDARY_BACKUP_NULL> --
- // secondary backup should be set to 'null'
- bucketChanges = nullifySecondaryBackup(index, bucket, bucketChanges);
- break;
-
- default:
- logger.error("Illegal tag: {}", tag);
- break;
- }
- }
- if (bucketChanges) {
- // give audit a chance to run
- changes = true;
- bucket.stateChanged();
- }
- }
- return changes;
- }
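    /*
     * Illustrative sketch only, not from the original file: an encoder for a
     * single-bucket owner update, matching the decode logic in
     * 'updateBucketInternal' above. The two-long UUID layout (most-significant
     * bits, then least-significant bits) is an assumption about the format that
     * 'Util.readUuid' expects.
     */
    static byte[] encodeOwnerUpdateSketch(int bucketIndex, UUID newOwner) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(bos);
        dos.writeShort(bucketIndex);                        // read back via 'readUnsignedShort()'
        dos.writeByte(OWNER_UPDATE);                        // read back via 'readUnsignedByte()'
        dos.writeLong(newOwner.getMostSignificantBits());   // assumed 'Util.readUuid' layout
        dos.writeLong(newOwner.getLeastSignificantBits());
        dos.writeByte(END_OF_PARAMETERS_TAG);
        dos.flush();
        // 'updateBucket' expects the packet to arrive base64-encoded
        return Base64.getEncoder().encode(bos.toByteArray());
    }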
-
- private static boolean nullifyOwner(int index, Bucket bucket, boolean bucketChanges) {
- if (bucket.getOwner() != null) {
- logger.info("Bucket {} owner: {}->null",
- index, bucket.getOwner());
- bucketChanges = true;
- bucket.nullifyOwner();
- }
- return bucketChanges;
- }
-
- private synchronized void nullifyOwner() {
- setOwner(null);
- setState(null);
- }
-
- /**
- * Gets the set of backups.
- *
- * @return the set of backups
- */
- public synchronized Set<Server> getBackups() {
- /*
- * For some reason, the junit tests break if Set.of() is used, so we'll stick with
- * the long way for now.
- */
- Set<Server> backups = new HashSet<>();
- backups.add(getPrimaryBackup());
- backups.add(getSecondaryBackup());
- return backups;
- }
-
- private static boolean updatePrimaryBackup(DataInputStream dis, int index, Bucket bucket, boolean bucketChanges)
- throws IOException {
- Server newPrimaryBackup =
- Server.getServer(Util.readUuid(dis));
- if (bucket.primaryBackup != newPrimaryBackup) {
- logger.info("Bucket {} primary backup: {}->{}", index,
- bucket.primaryBackup, newPrimaryBackup);
- bucketChanges = true;
- bucket.primaryBackup = newPrimaryBackup;
- }
- return bucketChanges;
- }
-
- private static boolean nullifyPrimaryBackup(int index, Bucket bucket, boolean bucketChanges) {
- if (bucket.primaryBackup != null) {
- logger.info("Bucket {} primary backup: {}->null",
- index, bucket.primaryBackup);
- bucketChanges = true;
- bucket.primaryBackup = null;
- }
- return bucketChanges;
- }
-
- private static boolean updateSecondaryBackup(DataInputStream dis, int index, Bucket bucket, boolean bucketChanges)
- throws IOException {
- Server newSecondaryBackup =
- Server.getServer(Util.readUuid(dis));
- if (bucket.secondaryBackup != newSecondaryBackup) {
- logger.info("Bucket {} secondary backup: {}->{}", index,
- bucket.secondaryBackup, newSecondaryBackup);
- bucketChanges = true;
- bucket.secondaryBackup = newSecondaryBackup;
- }
- return bucketChanges;
- }
-
- private static boolean nullifySecondaryBackup(int index, Bucket bucket, boolean bucketChanges) {
- if (bucket.secondaryBackup != null) {
- logger.info("Bucket {} secondary backup: {}->null",
- index, bucket.secondaryBackup);
- bucketChanges = true;
- bucket.secondaryBackup = null;
- }
- return bucketChanges;
- }
-
- /**
- * Update bucket owner information.
- *
- * @param bucket the bucket in process
- * @param dis data input stream contains the update
- * @param index the bucket number
- * @return a value indicate bucket changes
- */
- private static boolean updateBucketInternalOwnerUpdate(Bucket bucket, DataInputStream dis,
- int index) throws IOException {
- boolean bucketChanges = false;
- Server newOwner = Server.getServer(Util.readUuid(dis));
- if (bucket.getOwner() != newOwner) {
- logger.info("Bucket {} owner: {}->{}",
- index, bucket.getOwner(), newOwner);
- bucketChanges = true;
-
- Server thisServer = Server.getThisServer();
- Server oldOwner = bucket.getOwner();
- bucket.setOwner(newOwner);
- if (thisServer == oldOwner) {
- // the current server is the old owner
- if (bucket.getState() == null) {
- bucket.state = bucket.new OldOwner(newOwner);
- }
- } else if (thisServer == newOwner) {
                // the current server is the new owner
- if (bucket.getState() == null) {
- bucket.state = bucket.new NewOwner(true, oldOwner);
- } else {
- // new owner has been confirmed
- bucket.state.newOwner();
- }
- }
- }
- return bucketChanges;
- }
-
- /**
- * Forward a message to the specified bucket number. If the bucket is
- * in a transient state (the value of 'state' is not 'null'), the handling
- * is determined by that state.
- *
- * @param bucketNumber the bucket number determined by extracting the
- * keyword from 'message'
- * @param message the message to be forwarded/processed
- * @return a value of 'true' indicates the message has been "handled"
- * (forwarded or queued), and 'false' indicates it has not, and needs
- * to be handled locally.
- */
- public static boolean forward(int bucketNumber, Message message) {
- Bucket bucket = indexToBucket[bucketNumber];
- Server server;
-
- synchronized (bucket) {
- if (bucket.state != null) {
- // we are in a transient state -- the handling is state-specific
- return bucket.state.forward(message);
- }
- server = bucket.getOwner();
- }
-
- if (server == null || server == Server.getThisServer()) {
- // this needs to be processed locally
- return false;
- } else {
- // send message to remote server
- message.sendToServer(server, bucketNumber);
- return true;
- }
- }
-
- /**
- * This is a convenience method, which forwards a message through the
- * bucket associated with the specified keyword.
- *
-     * @param keyword the keyword extracted from 'message'
- * @param message the message to be forwarded/processed
- * @return a value of 'true' indicates the message has been "handled"
- * (forwarded or queued), and 'false' indicates it has not, and needs
- * to be handled locally.
- */
- public static boolean forward(String keyword, Message message) {
- return forward(bucketNumber(keyword), message);
- }
-
- /**
- * Forward a message to the specified bucket number. If the bucket is
- * in a transient state (the value of 'state' is not 'null'), the handling
- * is determined by that state. This is a variant of the 'forward' method,
- * which handles local processing, instead of just returning 'false'.
- *
- * @param bucketNumber the bucket number determined by extracting the
- * keyword from 'message'
- * @param message the message to be forwarded/processed
- */
- public static void forwardAndProcess(int bucketNumber, Message message) {
- if (!forward(bucketNumber, message)) {
- message.process();
- }
- }
-
- /**
- * Forward a message to the specified bucket number. If the bucket is
- * in a transient state (the value of 'state' is not 'null'), the handling
- * is determined by that state. This is a variant of the 'forward' method,
- * which handles local processing, instead of just returning 'false'.
- *
-     * @param keyword the keyword extracted from 'message'
- * @param message the message to be forwarded/processed
- */
- public static void forwardAndProcess(String keyword, Message message) {
- forwardAndProcess(bucketNumber(keyword), message);
- }
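    /*
     * Illustrative sketch only, not from the original file: how a caller might
     * route an incoming message by its requestID keyword. The requestID value
     * below is hypothetical; 'message' is any 'Message' implementation.
     */
    static void routeByKeywordSketch(Message message) {
        String requestId = "664be3d2-6c12-4f4b-a3e7-c349acced200";
        if (isKeyOnThisServer(requestId)) {
            // this server owns the bucket -- no forwarding is needed
            message.process();
        } else {
            // forward to the owning server, or fall back to local processing
            forwardAndProcess(requestId, message);
        }
    }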
-
- /**
- * Handle an incoming /cmd/dumpBuckets REST message.
- *
- * @param out the 'PrintStream' to use for displaying information
- */
- public static void dumpBuckets(final PrintStream out) {
- /*
- * we aren't really doing a 'Rebalance' here, but the 'copyData' method
- * is useful for extracting the data, and determining the buckets
- * associated with each server.
- */
- Rebalance rb = new Rebalance();
- rb.copyData();
-
- /*
- * this method is not accessing anything in the 'Server' or 'Bucket'
- * table, so it doesn't need to run within the 'MainLoop' thread.
- */
- rb.dumpBucketsInternal(out);
- }
-
- /**
- * Handle an incoming /cmd/bucketMessage REST message -- this is only
- * used for testing the routing of messages between servers.
- *
- * @param out the 'PrintStream' to use for displaying information
- * @param keyword the keyword that is hashed to select the bucket number
- * @param message the message to send to the remote end
- */
- public static void bucketMessage(
- final PrintStream out, final String keyword, String message) {
-
- if (keyword == null) {
- out.println("'keyword' is mandatory");
- return;
- }
- if (message == null) {
- message = "Message generated at " + new Date();
- }
- final int bucketNumber = bucketNumber(keyword);
- Server server = bucketToServer(bucketNumber);
-
- if (server == null) {
- /*
- * selected bucket has no server assigned -- this should only be a
- * transient situation, until 'rebalance' is run.
- */
- out.format("Bucket is %d, which has no owner%n", bucketNumber);
- } else if (server == Server.getThisServer()) {
- /*
- * the selected bucket is associated with this particular server --
- * no forwarding is needed.
- */
- out.format("Bucket is %d, which is owned by this server: %s%n",
- bucketNumber, server.getUuid());
- } else {
- /*
- * the selected bucket is assigned to a different server -- forward
- * the message.
- */
- out.format("Bucket is %d: sending from%n"
- + " %s to%n"
- + " %s%n",
- bucketNumber, Server.getThisServer().getUuid(), server.getUuid());
-
-            // do a POST call of /bucket/bucketResponse to the remote server
- Entity<String> entity =
- Entity.entity(new String(message.getBytes(), StandardCharsets.UTF_8),
- MediaType.TEXT_PLAIN);
-
- /*
- * the POST itself runs in a server-specific thread, and
- * 'responseQueue' is used to pass back the response.
- */
- final LinkedTransferQueue<Response> responseQueue =
- new LinkedTransferQueue<>();
-
- server.post("bucket/bucketResponse", entity, new Server.PostResponse() {
- /**
- * {@inheritDoc}
- */
- @Override
- public WebTarget webTarget(WebTarget webTarget) {
- // we need to include the 'bucket' and 'keyword' parameters
- // in the POST that we are sending out
- return webTarget
- .queryParam(QP_BUCKET, bucketNumber)
- .queryParam(QP_KEYWORD, keyword);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void response(Response response) {
- // this is the POST response --
- // pass it back to the calling thread
- responseQueue.put(response);
- }
- });
-
- try {
- // this is the calling thread -- wait for the POST response
- Response response = responseQueue.poll(60, TimeUnit.SECONDS);
- if (response == null) {
- out.println("Timed out waiting for a response");
- } else {
- out.format("Received response code %s%nEntity = %s%n",
- response.getStatus(), response.readEntity(String.class));
- }
- } catch (InterruptedException e) {
- out.println(e);
- Thread.currentThread().interrupt();
- }
- }
- }
-
- /**
- * Handle an incoming /bucket/bucketResponse REST message -- this runs on
- * the destination host, and is the continuation of an operation triggered
- * by the /cmd/bucketMessage REST message running on the originating host.
- *
- * @param out the 'PrintStream' to use for passing back information
- * in a human-readable form
- * @param bucket the bucket number, which should be owned by this host
- * if we are in sync with the sending host, and didn't get caught
- * in a transient state
- * @param keyword the keyword selected on the originating end, which should
- * hash to 'bucket'
- * @param message the message selected on the originating end
- */
- public static void bucketResponse(
- final PrintStream out, int bucket, String keyword, byte[] message) {
-
- Server thisServer = Server.getThisServer();
- Server server = bucketToServer(bucket);
-
- if (server != thisServer) {
- /*
- * this isn't expected, and either indicates we are out-of-sync with
-             * the originating server, or this operation was triggered while in
- * a transient state.
- */
- out.println("ERROR: " + thisServer.toString() + ": bucket " + bucket
-                + " is owned by\n    " + server);
- } else {
- /*
- * As expected, we are the owner of this bucket. Print out a message,
- * which will be returned to the originating host, and displayed.
- */
- out.println(thisServer.toString() + ":\n"
- + " bucket = " + bucket
- + "\n keyword = " + keyword
- + "\n message = " + new String(message));
- }
- }
-
- /**
- * Handle an incoming /cmd/moveBucket REST message -- this is only
- * used for testing bucket migration. It only works on the lead server.
- *
- * @param out the 'PrintStream' to use for displaying information
- * @param bucketNumber the bucket number to be moved
- * @param newHostUuid the UUID of the destination host (if 'null', a
- * destination host will be chosen at random)
- */
- public static void moveBucket(PrintStream out, int bucketNumber, String newHostUuid) {
- Server leader = Leader.getLeader();
- if (leader != Server.getThisServer()) {
- out.println("This is not the lead server");
- return;
- }
-
- if (bucketNumber < 0 || bucketNumber >= indexToBucket.length) {
- out.println("Bucket number out of range");
- return;
- }
-
- Rebalance rb = new Rebalance();
- rb.copyData();
-
- TestBucket bucket = rb.buckets[bucketNumber];
- TestServer oldHost = bucket.owner;
-
- if (oldHost == rb.nullServer) {
- out.println("Bucket " + bucketNumber + " is currently unassigned");
- return;
- }
-
- TestServer newHost = null;
-
- if (newHostUuid != null) {
- // the UUID of a destination host has been specified
- newHost = rb.testServers.get(UUID.fromString(newHostUuid));
- if (newHost == null) {
- out.println("Can't locate UUID " + newHostUuid);
- return;
- }
- } else {
- /*
- * Choose a destination host at random, other than the current owner.
- * Step a random count in the range of 1 to (n-1) relative to the
- * current host.
- */
- UUID key = oldHost.uuid;
- /*
- * Disabling sonar, because this Random() is not used for security purposes.
- */
- int randomStart = new Random().nextInt(rb.testServers.size() - 1); // NOSONAR
- for (int count = randomStart; count >= 0; count -= 1) {
- key = rb.testServers.higherKey(key);
- if (key == null) {
- // wrap to the beginning of the list
- key = rb.testServers.firstKey();
- }
- }
- newHost = rb.testServers.get(key);
- }
- out.println("Moving bucket " + bucketNumber + " from "
- + oldHost + " to " + newHost);
-
- updateOwner(out, bucket, oldHost, newHost);
-
- try {
- /*
- * build a message containing all of the updated bucket
- * information, process it internally in this host
- * (lead server), and send it out to others in the
- * "notify list".
- */
- rb.generateBucketMessage();
- } catch (IOException e) {
- logger.error("Exception in Rebalance.generateBucketMessage",
- e);
- }
- }
-
- private static void updateOwner(PrintStream out, TestBucket bucket, TestServer oldHost, TestServer newHost) {
- /*
- * update the owner, and ensure that the primary and secondary backup
- * remain different from the owner.
- */
- bucket.setOwner(newHost);
- if (newHost == bucket.primaryBackup) {
-            out.println("Moving primary backup from " + newHost + " to " + oldHost);
- bucket.setPrimaryBackup(oldHost);
- } else if (newHost == bucket.secondaryBackup) {
-            out.println("Moving secondary backup from " + newHost
- + " to " + oldHost);
- bucket.setSecondaryBackup(oldHost);
- }
- }
-
- /**
- * This method is called when an incoming /bucket/sessionData message is
- * received from the old owner of the bucket, which presumably means that
- * we are the new owner of the bucket.
- *
- * @param bucketNumber the bucket number
- * @param dest the UUID of the intended destination
- * @param ttl similar to IP time-to-live -- it controls the number of hops
- * the message may take
- * @param data serialized data associated with this bucket, encoded using
- * base64
- */
-
- static void sessionData(int bucketNumber, UUID dest, int ttl, byte[] data) {
- logger.info("Bucket.sessionData: bucket={}, data length={}",
- bucketNumber, data.length);
-
- if (dest != null && !dest.equals(Server.getThisServer().getUuid())) {
- // the message needs to be forwarded to the intended destination
- Server server;
- WebTarget webTarget;
-
- ttl -= 1;
- if (ttl > 0
- && (server = Server.getServer(dest)) != null
- && (webTarget = server.getWebTarget("bucket/sessionData")) != null) {
- logger.info("Forwarding 'bucket/sessionData' to uuid {}",
- server.getUuid());
- Entity<String> entity =
- Entity.entity(new String(data, StandardCharsets.UTF_8),
- MediaType.APPLICATION_OCTET_STREAM_TYPE);
- Response response =
- webTarget
- .queryParam(QP_BUCKET, bucketNumber)
- .queryParam(QP_DEST, dest)
- .queryParam(QP_TTL, String.valueOf(ttl))
- .request().post(entity);
- logger.info("/bucket/sessionData response code = {}",
- response.getStatus());
- } else {
- logger.error("Couldn't forward 'bucket/sessionData' to uuid {}, ttl={}",
- dest, ttl);
- }
- return;
- }
-
- byte[] decodedData = Base64.getDecoder().decode(data);
- Bucket bucket = indexToBucket[bucketNumber];
-
- logger.info("Bucket.sessionData: decoded data length = {}",
- decodedData.length);
-
- if (bucket.state == null) {
- /*
- * We received the serialized data prior to being notified
- * that we are the owner -- this happens sometimes. Behave as
-             * though we are the new owner, but indicate it is unconfirmed.
- */
- logger.info("Bucket {} session data received unexpectedly",
- bucketNumber);
- bucket.state = bucket.new NewOwner(false, bucket.getOwner());
- }
- bucket.state.bulkSerializedData(decodedData);
- }
-
- /**
- * This method is called whenever the bucket's state has changed in a
- * way that it should be audited.
- */
- private synchronized void stateChanged() {
- if (state != null) {
- return;
- }
- // the audit should be run
- Server thisServer = Server.getThisServer();
- boolean isOwner = (thisServer == owner);
- boolean isBackup = (!isOwner && (thisServer == primaryBackup
- || thisServer == secondaryBackup));
-
- // invoke 'TargetLock' directly
- TargetLock.auditBucket(this, isOwner);
- for (ServerPoolApi feature : ServerPoolApi.impl.getList()) {
- feature.auditBucket(this, isOwner, isBackup);
- }
- }
-
- /**
- * Returns an adjunct of the specified class
- * (it is created if it doesn't exist).
- *
- * @param clazz this is the class of the adjunct
- * @return an adjunct of the specified class ('null' may be returned if
- * the 'newInstance' method is unable to create the adjunct)
- */
- public <T> T getAdjunct(Class<T> clazz) {
- Object adj = adjuncts.computeIfAbsent(clazz, key -> {
- try {
- // create the adjunct, if needed
- return clazz.getDeclaredConstructor().newInstance();
- } catch (Exception e) {
- logger.error("Can't create adjunct of {}", clazz, e);
- return null;
- }
- });
- return clazz.cast(adj);
- }
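    /**
     * Illustrative sketch only, not from the original file: a hypothetical
     * adjunct type. Adjunct classes need an accessible no-argument constructor,
     * because 'getAdjunct' creates them reflectively on first use, e.g.
     * 'Bucket.getBucket(key).getAdjunct(ExampleCounterAdjunct.class).increment()'.
     */
    public static class ExampleCounterAdjunct {
        private int count = 0;

        public void increment() {
            count += 1;
        }

        public int getCount() {
            return count;
        }
    }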
-
- /**
- * Returns an adjunct of the specified class.
- *
- * @param clazz this is the class of the adjunct
- * @return an adjunct of the specified class, if it exists,
- * and 'null' if it does not
- */
- public <T> T getAdjunctDontCreate(Class<T> clazz) {
- synchronized (adjuncts) {
- // look up the adjunct in the table
- return clazz.cast(adjuncts.get(clazz));
- }
- }
-
- /**
- * Explicitly create an adjunct -- this is useful when the adjunct
- * initialization requires that some parameters be passed.
- *
- * @param adj this is the adjunct to insert into the table
- * @return the previous adjunct of this type ('null' if none)
- */
- public Object putAdjunct(Object adj) {
- synchronized (adjuncts) {
- Class<?> clazz = adj.getClass();
- return adjuncts.put(clazz, adj);
- }
- }
-
- /**
- * Remove an adjunct.
- *
- * @param clazz this is the class of adjuncts to remove
- * @return the object, if found, and 'null' if not
- */
- public <T> T removeAdjunct(Class<T> clazz) {
- synchronized (adjuncts) {
- // remove the adjunct in the table
- return clazz.cast(adjuncts.remove(clazz));
- }
- }
-
- /**
- * Dump out all buckets with adjuncts.
- *
- * @param out the 'PrintStream' to use for displaying information
- */
- public static void dumpAdjuncts(PrintStream out) {
- boolean noneFound = true;
- String format = "%6s %s\n";
-
- for (Bucket bucket : indexToBucket) {
- synchronized (bucket.adjuncts) {
- if (bucket.adjuncts.size() != 0) {
- if (noneFound) {
- out.printf(format, "Bucket", "Adjunct Classes");
- out.printf(format, "------", "---------------");
- noneFound = false;
- }
- boolean first = true;
- for (Class<?> clazz : bucket.adjuncts.keySet()) {
- if (first) {
- out.printf(format, bucket.index, clazz.getName());
- first = false;
- } else {
- out.printf(format, "", clazz.getName());
- }
- }
- }
- }
- }
- }
-
- /* ============================================================ */
-
- /**
- * There is a single instance of this class (Bucket.eventHandler), which
- * is registered to listen for notifications of state transitions. Note
- * that all of these methods are running within the 'MainLoop' thread.
- */
- private static class EventHandler extends Events {
- /**
- * {@inheritDoc}
- */
- @Override
- public void serverFailed(Server server) {
- // remove this server from any bucket where it is referenced
-
- Server thisServer = Server.getThisServer();
- for (Bucket bucket : indexToBucket) {
- synchronized (bucket) {
- boolean changes = false;
- if (bucket.getOwner() == server) {
- // the failed server owns this bucket --
- // move to the primary backup
- bucket.setOwner(bucket.getPrimaryBackup());
- bucket.primaryBackup = null;
- changes = true;
-
- if (bucket.getOwner() == null) {
- // bucket owner is still null -- presumably, we had no
- // primary backup, so use the secondary backup instead
- bucket.setOwner(bucket.getSecondaryBackup());
- bucket.setSecondaryBackup(null);
- }
- }
- if (bucket.getPrimaryBackup() == server) {
- // the failed server was a primary backup to this bucket --
- // mark the entry as 'null'
- bucket.setPrimaryBackup(null);
- changes = true;
- }
- if (bucket.getSecondaryBackup() == server) {
- // the failed server was a secondary backup to this bucket --
- // mark the entry as 'null'
- bucket.setSecondaryBackup(null);
- changes = true;
- }
-
- if (bucket.owner == thisServer && bucket.state == null) {
- // the current server is the new owner
- bucket.setState(bucket.new NewOwner(false, null));
- changes = true;
- }
-
- if (changes) {
- // may give audits a chance to run
- bucket.stateChanged();
- }
- }
- }
-
- // trigger a rebalance (only happens if we are the lead server)
- rebalance();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void newLeader(Server server) {
- // trigger a rebalance (only happens if we are the new lead server)
- rebalance();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void leaderConfirmed(Server server) {
- // trigger a rebalance (only happens if we are the lead server)
- rebalance();
- }
-
- /**
- * This method is called to start a 'rebalance' operation in a background
- * thread, but it only does this on the lead server. Being balanced means
- * the following:
- * 1) Each server owns approximately the same number of buckets
- * 2) If any server were to fail, and the designated primaries take over
- * for all of that server's buckets, all remaining servers would still
- * own approximately the same number of buckets.
- * 3) If any two servers were to fail, and the designated primaries were
-         *    to take over for the failed servers' buckets (secondaries would take over
-         *    for buckets where the owner and primary are both out of service), all remaining
- * servers would still own approximately the same number of buckets.
- * 4) Each server should have approximately the same number of
- * (primary-backup + secondary-backup) buckets that it is responsible for.
- * 5) The primary backup for each bucket must be on the same site as the
- * owner, and the secondary backup must be on a different site.
- */
- private void rebalance() {
- if (Leader.getLeader() == Server.getThisServer()) {
- Rebalance rb = new Rebalance();
- synchronized (rebalanceLock) {
- // the most recent 'Rebalance' instance is the only valid one
- rebalance = rb;
- }
-
- new Thread("BUCKET REBALANCER") {
- @Override
- public void run() {
- /*
- * copy bucket and host data,
- * generating a temporary internal table.
- */
- rb.copyData();
-
- /*
- * allocate owners for all buckets without an owner,
- * and rebalance bucket owners, if necessary --
-                         * this takes care of item #1, above.
- */
- rb.allocateBuckets();
-
- /*
- * make sure that primary backups always have the same site
- * as the owner, and secondary backups always have a different
- * site -- this takes care of #5, above.
- */
- rb.checkSiteValues();
-
- /*
- * adjust primary backup lists to take care of item #2, above
- * (taking #5 into account).
- */
- rb.rebalancePrimaryBackups();
-
- /*
- * allocate secondary backups, and take care of items
- * #3 and #4, above (taking #5 into account).
- */
- rb.rebalanceSecondaryBackups();
-
- try {
- synchronized (rebalanceLock) {
- /*
- * if another 'Rebalance' instance has started in the
- * mean time, don't do the update.
- */
- if (rebalance == rb) {
- /*
- * build a message containing all of the updated bucket
- * information, process it internally in this host
- * (lead server), and send it out to others in the
- * "notify list".
- */
- rb.generateBucketMessage();
- rebalance = null;
- }
- }
- } catch (IOException e) {
- logger.error("Exception in Rebalance.generateBucketMessage",
- e);
- }
- }
- }.start();
- }
- }
- }
-
- /* ============================================================ */
-
- /**
- * Instances of this class are created as part of the 'rebalance'
- * operation on the lead server, or as part of a 'dumpBuckets' operation
- * on any server.
- * Each instance of this class corresponds to a 'Bucket' instance.
- */
- @EqualsAndHashCode
- private static class TestBucket implements Comparable<TestBucket> {
- // bucket number
- int index;
-
- // owner of the bucket
- TestServer owner;
-
- // primary backup for this bucket
-
- TestServer primaryBackup;
-
- // secondary backup for this bucket
- TestServer secondaryBackup;
-
- /**
- * Constructor -- initialize the 'TestBucket' instance.
- *
- * @param index the bucket number
- */
- TestBucket(int index) {
- this.index = index;
- }
-
- /**
- * Update the owner of a bucket, which also involves updating the
- * backward links in the 'TestServer' instances.
- *
- * @param newOwner the new owner of the bucket
- */
- void setOwner(TestServer newOwner) {
- if (owner != newOwner) {
- // the 'owner' field does need to be changed
- if (owner != null) {
- // remove this bucket from the 'buckets' list of the old owner
- owner.buckets.remove(this);
- }
- if (newOwner != null) {
- // add this bucket to the 'buckets' list of the new owner
- newOwner.buckets.add(this);
- }
- // update the 'owner' field in the bucket
- owner = newOwner;
- }
- }
-
- /**
- * Update the primary backup of a bucket, which also involves updating
- * the backward links in the 'TestServer' instances.
- *
- * @param newPrimaryBackup the new primary of the bucket
- */
- void setPrimaryBackup(TestServer newPrimaryBackup) {
- if (primaryBackup != newPrimaryBackup) {
- // the 'primaryBackup' field does need to be changed
- if (primaryBackup != null) {
- // remove this bucket from the 'buckets' list of the
- // old primary backup
- primaryBackup.primaryBackupBuckets.remove(this);
- }
- if (newPrimaryBackup != null) {
- // add this bucket to the 'buckets' list of the
- // new primary backup
- newPrimaryBackup.primaryBackupBuckets.add(this);
- }
- // update the 'primaryBackup' field in the bucket
- primaryBackup = newPrimaryBackup;
- }
- }
-
- /**
- * Update the secondary backup of a bucket, which also involves updating
- * the backward links in the 'TestServer' instances.
- *
- * @param newSecondaryBackup the new secondary of the bucket
- */
- void setSecondaryBackup(TestServer newSecondaryBackup) {
- if (secondaryBackup != newSecondaryBackup) {
- // the 'secondaryBackup' field does need to be changed
- if (secondaryBackup != null) {
- // remove this bucket from the 'buckets' list of the
- // old secondary backup
- secondaryBackup.secondaryBackupBuckets.remove(this);
- }
- if (newSecondaryBackup != null) {
- // add this bucket to the 'buckets' list of the
- // new secondary backup
- newSecondaryBackup.secondaryBackupBuckets.add(this);
- }
- // update the 'secondaryBackup' field in the bucket
- secondaryBackup = newSecondaryBackup;
- }
- }
-
- /*==================================*/
- /* Comparable<TestBucket> interface */
- /*==================================*/
-
- /**
- * Compare two 'TestBucket' instances, for use in a 'TreeSet'.
- *
- * @param other the other 'TestBucket' instance to compare to
- */
- @Override
- public int compareTo(TestBucket other) {
- return index - other.index;
- }
-
- /**
- * Return a string representation of this 'TestBucket' instance.
- *
- * @return a string representation of this 'TestBucket' instance
- */
- @Override
- public String toString() {
- return "TestBucket[" + index + "]";
- }
- }
-
- /* ============================================================ */
-
- /**
- * Instances of this class are created as part of the 'rebalance'
- * operation on the lead server, or as part of a 'dumpBuckets' operation
- * on any server.
- * Each instance of this class corresponds to a 'Server' instance.
- * Unlike the actual 'Server' instances, each 'TestServer' instance
- * contains back links to all of the buckets it is associated with.
- */
- private static class TestServer {
- // unique id for this server
- // (matches the one in the corresponding 'Server' entry)
- final UUID uuid;
-
- // site socket information (matches 'Server' entry)
- final InetSocketAddress siteSocketAddress;
-
- // the set of all 'TestBucket' instances,
- // where this 'TestServer' is listed as 'owner'
- final TreeSet<TestBucket> buckets = new TreeSet<>();
-
- // the set of all 'TestBucket' instances,
- // where this 'TestServer' is listed as 'primaryBackup'
- final TreeSet<TestBucket> primaryBackupBuckets = new TreeSet<>();
-
- // the set of all 'TestBucket' instances,
- // where this 'TestServer' is listed as 'secondaryBackup'
- final TreeSet<TestBucket> secondaryBackupBuckets = new TreeSet<>();
-
- /**
- * Constructor.
- *
- * @param uuid uuid of this 'TestServer' instance
- * @param siteSocketAddress matches the value in the corresponding 'Server'
- */
- TestServer(UUID uuid, InetSocketAddress siteSocketAddress) {
- this.uuid = uuid;
- this.siteSocketAddress = siteSocketAddress;
- }
-
- /**
- * Return a string representation of this 'TestServer' instance.
- *
- * @return a string representation of this 'TestServer' instance
- */
- @Override
- public String toString() {
- return "TestServer[" + uuid + "]";
- }
- }
-
- /* ============================================================ */
-
- /**
-     * This class supports the 'rebalance' operation. Each instance wraps a
-     * 'TestServer' instance, and reflects how that server would look if
-     * another specific server failed.
- */
- @EqualsAndHashCode
- private static class AdjustedTestServer
- implements Comparable<AdjustedTestServer> {
- TestServer server;
-
- // simulated fail on this server
- TestServer failedServer;
-
- // expected bucket count if 'failedServer' fails
- int bucketCount;
-
- // total number of primary backup buckets used by this host
- int primaryBackupBucketCount;
-
- // total number of secondary backup buckets used by this host
- int secondaryBackupBucketCount;
-
- /**
- * Constructor.
- *
- * @param server the server this 'AdjustedTestServer' instance represents
- * @param failedServer the server going through a simulated failure --
- * the 'bucketCount' value is adjusted based upon this
- */
- AdjustedTestServer(TestServer server, TestServer failedServer) {
- this.server = server;
- this.failedServer = failedServer;
-
- this.bucketCount = server.buckets.size();
- this.primaryBackupBucketCount = server.primaryBackupBuckets.size();
- this.secondaryBackupBucketCount = server.secondaryBackupBuckets.size();
-
-            // adjust 'bucketCount' for the buckets this host would take over
-            // (as primary backup) if 'failedServer' fails
- for (TestBucket bucket : server.primaryBackupBuckets) {
- if (bucket.owner == failedServer) {
- bucketCount += 1;
- // TBD: should 'primaryBackupBucketCount' be decremented?
- }
- }
-
-            // adjust 'bucketCount' for the buckets this host would take over
-            // (as secondary backup) if 'failedServer' fails
- for (TestBucket bucket : server.secondaryBackupBuckets) {
- if (bucket.owner == failedServer) {
- bucketCount += 1;
- // TBD: should 'secondaryBackupBucketCount' be decremented?
- }
- }
- }
-
- /* ****************************************** */
- /* Comparable<AdjustedTestServer> interface */
- /* ****************************************** */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public int compareTo(AdjustedTestServer other) {
- /*
- * Comparison order:
- * 1) minimal expected bucket count if current host fails
- * (differences of 1 are treated as a match)
- * 2) minimal backup bucket count
- * 3) UUID order
- */
- int rval = bucketCount - other.bucketCount;
- if (rval <= 1 && rval >= -1) {
- rval = (primaryBackupBucketCount + secondaryBackupBucketCount)
- - (other.primaryBackupBucketCount
- + other.secondaryBackupBucketCount);
- if (rval == 0) {
- rval = -Util.uuidComparator.compare(server.uuid, other.server.uuid);
- }
- }
- return rval;
- }
- }
-
- /* ============================================================ */
-
- /**
-     * This class is primarily used to do a 'Rebalance' operation on the
-     * lead server; the resulting bucket assignments are then distributed
-     * to all of the other servers.
- * Part of it is also useful for implementing the /cmd/dumpBuckets
- * REST message handler.
- */
- private static class Rebalance {
- // this table resembles the 'Bucket.indexToBucket' table
- TestBucket[] buckets = new TestBucket[indexToBucket.length];
-
- // this table resembles the 'Server.servers' table
- TreeMap<UUID, TestServer> testServers = new TreeMap<>(Util.uuidComparator);
-
- /* this is a special server instance, which is allocated any
- * owned, primary, or secondary buckets that haven't been allocated to
- * any of the real servers
- */
- TestServer nullServer = new TestServer(null, null);
-
- /**
-         * Copy the bucket data from the 'Bucket.indexToBucket' table, and
-         * build a 'TestServer' entry matching each entry in the
-         * 'Server.servers' table.
- */
- void copyData() {
- // will contain a copy of the 'Bucket' table
- final Bucket[] bucketSnapshot = new Bucket[indexToBucket.length];
-
- /*
-             * This callable runs within the 'MainLoop' thread. It builds a
-             * copy of the 'Bucket' table, as well as the list of active
-             * servers -- these can then be examined in a different thread,
-             * without potentially disrupting the 'MainLoop' thread.
- *
- * @return 0 (the return value is not significant at present)
- */
- Callable<Integer> callable = () -> {
- // copy the 'Bucket' table
- for (int i = 0; i < indexToBucket.length; i += 1) {
- // makes a snapshot of the bucket information
- Bucket bucket = indexToBucket[i];
-
- Bucket tmpBucket = new Bucket(i);
- tmpBucket.setOwner(bucket.getOwner());
- tmpBucket.setPrimaryBackup(bucket.getPrimaryBackup());
- tmpBucket.setSecondaryBackup(bucket.getSecondaryBackup());
- bucketSnapshot[i] = tmpBucket;
- }
-
- /*
- * At this point, 'bucketSnapshot' and 'servers' should be
- * complete. The next step is to create a 'TestServer' entry
- * that matches each 'Server' entry.
- */
- for (Server server : Server.getServers()) {
- UUID uuid = server.getUuid();
- testServers.put(uuid, new TestServer(uuid, server.getSiteSocketAddress()));
- }
-
- return 0;
- };
- FutureTask<Integer> ft = new FutureTask<>(callable);
- MainLoop.queueWork(ft);
- try {
- ft.get(60, TimeUnit.SECONDS);
- } catch (InterruptedException e) {
- logger.error("Interrupted", e);
- Thread.currentThread().interrupt();
- return;
- } catch (ExecutionException | TimeoutException e) {
- logger.error("Exception in Rebalance.copyData", e);
- return;
- }
-
- makeTestBucket(bucketSnapshot);
- }
-
- private void makeTestBucket(final Bucket[] bucketSnapshot) {
- /*
- * Now, create a 'TestBucket' table that mirrors the 'Bucket' table.
- * Unlike the standard 'Bucket' and 'Server' tables, the 'TestServer'
- * entries contain links referring back to the 'TestBucket' entries.
- * This information is useful when rebalancing.
- */
- for (Bucket bucket : bucketSnapshot) {
- int index = bucket.index;
- TestBucket testBucket = new TestBucket(index);
-
- // populate the 'owner' field
- if (bucket.getOwner() != null) {
- testBucket.setOwner(testServers.get(bucket.getOwner().getUuid()));
- } else {
- testBucket.setOwner(nullServer);
- }
-
- // populate the 'primaryBackup' field
- if (bucket.primaryBackup != null) {
- testBucket.setPrimaryBackup(
- testServers.get(bucket.primaryBackup.getUuid()));
- } else {
- testBucket.setPrimaryBackup(nullServer);
- }
-
- // populate the 'secondaryBackup' field
- if (bucket.secondaryBackup != null) {
- testBucket.setSecondaryBackup(
- testServers.get(bucket.secondaryBackup.getUuid()));
- } else {
- testBucket.setSecondaryBackup(nullServer);
- }
- buckets[index] = testBucket;
- }
- }
-
- /**
- * Allocate unowned 'TestBucket' entries across all of the 'TestServer'
- * entries. When every 'TestBucket' has an owner, rebalance as needed,
- * so the 'TestServer' entry with the most buckets has at most one more
- * bucket than the 'TestServer' entry with the least.
- */
- void allocateBuckets() {
- /*
- * the following 'Comparator' is used to control the order of the
- * 'needBuckets' TreeSet: those with the fewest buckets allocated are
- * at the head of the list.
- */
- Comparator<TestServer> bucketCount = (s1, s2) -> {
- int rval = s1.buckets.size() - s2.buckets.size();
- if (rval == 0) {
- rval = Util.uuidComparator.compare(s1.uuid, s2.uuid);
- }
- return rval;
- };
-
- // sort servers according to the order in which they can
- // take on ownership of buckets
- TreeSet<TestServer> needBuckets = new TreeSet<>(bucketCount);
- for (TestServer ts : testServers.values()) {
- needBuckets.add(ts);
- }
-
- // go through all of the unowned buckets, and allocate them
- for (TestBucket bucket : new LinkedList<TestBucket>(nullServer.buckets)) {
- // take first entry off of sorted server list
- TestServer ts = needBuckets.first();
- needBuckets.remove(ts);
-
- // add this bucket to the 'buckets' list
- bucket.setOwner(ts);
-
- // place it back in the list, possibly in a new position
-                // (its attributes have changed)
- needBuckets.add(ts);
- }
- nullServer.buckets.clear();
-
- // there may still be rebalancing needed -- no host should contain
- // 2 or more buckets more than any other host
- for ( ; ; ) {
- TestServer first = needBuckets.first();
- TestServer last = needBuckets.last();
-
- if (last.buckets.size() - first.buckets.size() <= 1) {
- // no more rebalancing needed
- break;
- }
-
- // remove both from sorted list
- needBuckets.remove(first);
- needBuckets.remove(last);
-
- // take one bucket from 'last', and assign it to 'first'
- last.buckets.first().setOwner(first);
-
- // place back in sorted list
- needBuckets.add(first);
- needBuckets.add(last);
- }
- }
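
As an editorial illustration (not part of the original file) of the balancing rule described above: a minimal standalone Java sketch that applies the same invariant to plain integer bucket counts, repeatedly moving one bucket from the most-loaded server to the least-loaded one until the spread is at most 1.

    static void balance(int[] counts) {
        // counts[i] = number of buckets owned by server i
        if (counts.length < 2) {
            return;
        }
        while (true) {
            java.util.Arrays.sort(counts);   // counts[0] = least loaded, counts[counts.length - 1] = most loaded
            int least = 0;
            int most = counts.length - 1;
            if (counts[most] - counts[least] <= 1) {
                return;                      // spread is at most 1 -- balanced
            }
            counts[most] -= 1;               // take one bucket from the most-loaded server
            counts[least] += 1;              // ...and give it to the least-loaded server
        }
    }
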
-
- /**
- * Make sure that the primary backups have the same site as the owner,
- * and the secondary backups have a different site.
- */
- void checkSiteValues() {
- for (TestBucket bucket : buckets) {
- if (bucket.owner != null) {
- InetSocketAddress siteSocketAddress =
- bucket.owner.siteSocketAddress;
- TestServer primaryBackup = bucket.primaryBackup;
- TestServer secondaryBackup = bucket.secondaryBackup;
-
- validateSiteOwner(bucket, siteSocketAddress,
- primaryBackup, secondaryBackup);
- }
- }
- }
-
- /**
-         * Validate that the primary and secondary backups are located at appropriate sites.
- * @param bucket TestBucket
- * @param siteSocketAddress site socket address
- * @param primaryBackup primary backups
- * @param secondaryBackup secondary backups
- */
- private void validateSiteOwner(TestBucket bucket, InetSocketAddress siteSocketAddress,
- TestServer primaryBackup, TestServer secondaryBackup) {
- if (primaryBackup != null
- && !Objects.equals(siteSocketAddress,
- primaryBackup.siteSocketAddress)) {
- /*
- * primary backup is from the wrong site -- see if we can
- * use the secondary.
- */
- if (secondaryBackup != null
- && Objects.equals(siteSocketAddress,
- secondaryBackup.siteSocketAddress)) {
- // swap primary and secondary
- bucket.setPrimaryBackup(secondaryBackup);
- bucket.setSecondaryBackup(primaryBackup);
- } else {
- // just invalidate primary backup
- bucket.setPrimaryBackup(null);
- }
- } else if (secondaryBackup != null
- && Objects.equals(siteSocketAddress,
- secondaryBackup.siteSocketAddress)) {
- // secondary backup is from the wrong site
- bucket.setSecondaryBackup(null);
- if (primaryBackup == null) {
- // we can use this as the primary
- bucket.setPrimaryBackup(secondaryBackup);
- }
- }
- }
-
- /**
- * Allocate and rebalance the primary backups.
- */
- void rebalancePrimaryBackups() {
- for (TestServer failedServer : testServers.values()) {
- /*
- * to allocate primary backups for this server,
- * simulate a failure, and balance the backup hosts
- */
-
- // get siteSocketAddress from server
- InetSocketAddress siteSocketAddress = failedServer.siteSocketAddress;
-
- // populate a 'TreeSet' of 'AdjustedTestServer' instances based
-                // on the failure of 'failedServer'
- TreeSet<AdjustedTestServer> adjustedTestServers = new TreeSet<>();
- for (TestServer server : testServers.values()) {
- if (server == failedServer
- || !Objects.equals(siteSocketAddress,
- server.siteSocketAddress)) {
- continue;
- }
- adjustedTestServers.add(new AdjustedTestServer(server, failedServer));
- }
-
- if (adjustedTestServers.isEmpty()) {
-                    // this is presumably the only server at this site -- there is
-                    // no other server to act as primary backup, and no rebalancing can occur
- continue;
- }
-
- // we need a backup host for each bucket
- for (TestBucket bucket : failedServer.buckets) {
- if (bucket.primaryBackup == null
- || bucket.primaryBackup == nullServer) {
- // need a backup host for this bucket -- remove the first
- // entry from 'adjustedTestServers', which is most favored
- AdjustedTestServer backupHost = adjustedTestServers.first();
- adjustedTestServers.remove(backupHost);
-
-                        // add this bucket to the backup host's 'primaryBackupBuckets' list
- bucket.setPrimaryBackup(backupHost.server);
-
- // update counts in 'AdjustedTestServer'
- backupHost.bucketCount += 1;
- backupHost.primaryBackupBucketCount += 1;
-
- // place it back in the table, possibly in a new position
-                        // (its attributes have changed)
- adjustedTestServers.add(backupHost);
- }
- }
-
- // TBD: Is additional rebalancing needed?
- }
- }
-
- /**
- * Allocate and rebalance the secondary backups.
- */
- void rebalanceSecondaryBackups() {
- for (TestServer failedServer : testServers.values()) {
- /*
- * to allocate secondary backups for this server,
- * simulate a failure, and balance the backup hosts
- */
-
- // get siteSocketAddress from server
- InetSocketAddress siteSocketAddress = failedServer.siteSocketAddress;
-
- // populate a 'TreeSet' of 'AdjustedTestServer' instances based
-                // on the failure of 'failedServer'
- TreeSet<AdjustedTestServer> adjustedTestServers =
- new TreeSet<>();
- for (TestServer server : testServers.values()) {
- if (server == failedServer
- || Objects.equals(siteSocketAddress,
- server.siteSocketAddress)) {
- continue;
- }
- adjustedTestServers.add(new AdjustedTestServer(server, failedServer));
- }
-
- if (adjustedTestServers.isEmpty()) {
-                    // there are no servers at any other site -- there is no server
-                    // to act as secondary backup, and no rebalancing can occur
- continue;
- }
-
- // we need a backup host for each bucket
- for (TestBucket bucket : failedServer.buckets) {
- if (bucket.secondaryBackup == null
- || bucket.secondaryBackup == nullServer) {
- // need a backup host for this bucket -- remove the first
- // entry from 'adjustedTestServers', which is most favored
- AdjustedTestServer backupHost = adjustedTestServers.first();
- adjustedTestServers.remove(backupHost);
-
-                        // add this bucket to the backup host's 'secondaryBackupBuckets' list
- bucket.setSecondaryBackup(backupHost.server);
-
- // update counts in 'AdjustedTestServer'
- backupHost.bucketCount += 1;
- backupHost.secondaryBackupBucketCount += 1;
-
- // place it back in the table, possibly in a new position
-                        // (its attributes have changed)
- adjustedTestServers.add(backupHost);
- }
- }
-
- // TBD: Is additional rebalancing needed?
- }
- }
-
- /**
- * Generate a message with all of the bucket updates, process it locally,
- * and send it to all servers in the "Notify List".
- */
- void generateBucketMessage() throws IOException {
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- DataOutputStream dos = new DataOutputStream(bos);
-
- // go through the entire 'buckets' table
- for (int i = 0; i < buckets.length; i += 1) {
- // fetch the 'TestBucket' associated with index 'i'
- TestBucket testBucket = buckets[i];
-
- /*
- * Get the UUID of the owner, primary backup, and secondary backup
- * for this bucket. If the associated value does not exist, 'null'
- * is used.
- */
- UUID newOwner = null;
- UUID newPrimary = null;
- UUID newSecondary = null;
-
- if (testBucket.owner != nullServer && testBucket.owner != null) {
- newOwner = testBucket.owner.uuid;
- }
- if (testBucket.primaryBackup != nullServer
- && testBucket.primaryBackup != null) {
- newPrimary = testBucket.primaryBackup.uuid;
- }
- if (testBucket.secondaryBackup != nullServer
- && testBucket.secondaryBackup != null) {
- newSecondary = testBucket.secondaryBackup.uuid;
- }
-
- // write bucket number
- dos.writeShort(i);
-
- // 'owner' field
- writeOwner(dos, newOwner);
-
- // 'primaryBackup' field
- writePrimary(dos, newPrimary);
-
- // 'secondaryBackup' field
- writeSecondary(dos, newSecondary);
-
- dos.writeByte(END_OF_PARAMETERS_TAG);
- }
-
- // get the unencoded 'packet'
- final byte[] packet = bos.toByteArray();
-
- // create an 'Entity' containing the encoded packet
- final Entity<String> entity =
- Entity.entity(new String(Base64.getEncoder().encode(packet),
- StandardCharsets.UTF_8), MediaType.APPLICATION_OCTET_STREAM_TYPE);
- /*
-             * This task will run within the 'MainLoop' thread.
- */
- Runnable task = () -> {
- try {
- /*
- * update the buckets on this host,
- * which is presumably the lead server.
- */
- Bucket.updateBucketInternal(packet);
- } catch (Exception e) {
- logger.error("Exception updating buckets", e);
- }
-
- // send a message to all servers on the notify list
- for (Server server : Server.getNotifyList()) {
- server.post("bucket/update", entity);
- }
- };
- MainLoop.queueWork(task);
- }
-
- private void writeOwner(DataOutputStream dos, UUID newOwner) throws IOException {
- if (newOwner != null) {
- dos.writeByte(OWNER_UPDATE);
- Util.writeUuid(dos, newOwner);
- } else {
- dos.writeByte(OWNER_NULL);
- }
- }
-
- private void writePrimary(DataOutputStream dos, UUID newPrimary) throws IOException {
- if (newPrimary != null) {
- dos.writeByte(PRIMARY_BACKUP_UPDATE);
- Util.writeUuid(dos, newPrimary);
- } else {
- dos.writeByte(PRIMARY_BACKUP_NULL);
- }
- }
-
- private void writeSecondary(DataOutputStream dos, UUID newSecondary) throws IOException {
- if (newSecondary != null) {
- dos.writeByte(SECONDARY_BACKUP_UPDATE);
- Util.writeUuid(dos, newSecondary);
- } else {
- dos.writeByte(SECONDARY_BACKUP_NULL);
- }
- }
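
To make the wire format written above concrete, a hedged sketch (not part of the original file) of a single bucket record, mirroring what 'generateBucketMessage' emits; it assumes the tag constants (OWNER_UPDATE, the *_NULL tags, END_OF_PARAMETERS_TAG) and 'Util.writeUuid' defined elsewhere in this feature, and would run inside a method that throws IOException.

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(bos);
    dos.writeShort(37);                       // hypothetical bucket number
    dos.writeByte(OWNER_UPDATE);              // owner present...
    Util.writeUuid(dos, UUID.randomUUID());   // ...followed by the owner's UUID
    dos.writeByte(PRIMARY_BACKUP_NULL);       // no primary backup assigned
    dos.writeByte(SECONDARY_BACKUP_NULL);     // no secondary backup assigned
    dos.writeByte(END_OF_PARAMETERS_TAG);     // end of this bucket's fields
    byte[] oneBucketRecord = bos.toByteArray();

The full message simply concatenates one such record per bucket, and is Base64-encoded before being processed locally and posted to 'bucket/update'.
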
-
- /**
- * Supports the '/cmd/dumpBuckets' REST message -- this isn't part of
- * a 'rebalance' operation, but it turned out to be a convenient way
- * to dump out the bucket table.
- *
- * @param out the output stream
- */
- private void dumpBucketsInternal(PrintStream out) {
- // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxx *
- // UUID Type Buckets
- String format = "%-36s %-9s %5s %s\n";
-
- int totalOwner = 0;
- int totalPrimary = 0;
- int totalSecondary = 0;
-
- out.printf(format, "UUID", "Type", "Count", "Buckets");
- out.printf(format, "----", "----", "-----", "-------");
- for (TestServer ts : testServers.values()) {
- // dump out 'owned' bucket information
- if (ts.buckets.isEmpty()) {
- // no buckets owned by this server
- out.printf(format, ts.uuid, OWNED_STR, 0, "");
- } else {
-                    // dump out owned buckets information
- totalOwner +=
- dumpBucketsSegment(out, format, ts.buckets, ts.uuid.toString(), OWNED_STR);
- }
- // optionally dump out primary buckets information
- totalPrimary +=
- dumpBucketsSegment(out, format, ts.primaryBackupBuckets, "", "Primary");
- // optionally dump out secondary buckets information
- totalSecondary +=
- dumpBucketsSegment(out, format, ts.secondaryBackupBuckets, "", "Secondary");
- }
-
- if (!nullServer.buckets.isEmpty()
- || !nullServer.primaryBackupBuckets.isEmpty()
- || !nullServer.secondaryBackupBuckets.isEmpty()) {
- /*
- * There are some owned, primary, or secondary buckets that are
-                 * unassigned. They are displayed in a manner similar to buckets that
- * do have a server, but the UUID field is marked as 'UNASSIGNED'
- * in the first line of the display.
- */
- String uuidField = "UNASSIGNED";
-
- // optionally dump out unassigned owned buckets information
- if (dumpBucketsSegment(out, format, nullServer.buckets,
- uuidField, OWNED_STR) != 0) {
- uuidField = "";
- }
- // optionally dump out unassigned primary backup buckets information
- if (dumpBucketsSegment(out, format, nullServer.primaryBackupBuckets,
- uuidField, "Primary") != 0) {
- uuidField = "";
- }
- // optionally dump out unassigned secondary backup buckets information
- dumpBucketsSegment(out, format, nullServer.secondaryBackupBuckets,
- uuidField, "Secondary");
- }
- out.println("\nTotal assigned: owner = " + totalOwner
- + ", primary = " + totalPrimary
- + ", secondary = " + totalSecondary);
- }
-
- /**
- * Supports the 'dumpBucketsInternal' command, and indirectly, the
- * '/cmd/dumpBuckets' REST message. It formats one segment of bucket data
- * (owned, primary backup, or secondary backup), and dumps out the
- * associated bucket data in segments of 8. Note: if the size of 'buckets'
- * is 0, nothing is displayed.
- *
- * @param out the output stream
- * @param format the message format string
- * @param buckets the entire set of buckets to be displayed
- * @param uuid string to display under the 'UUID' header
- * @param segmentDescription string to display under the 'Type' header
- * @return the size of the 'buckets' set
- */
- private static int dumpBucketsSegment(
- PrintStream out, String format, TreeSet<TestBucket> buckets,
- String uuid, String segmentDescription) {
-
- int size = buckets.size();
- if (size != 0) {
- // generate a linked list of the bucket data to display
- LinkedList<String> data = new LinkedList<>();
- StringBuilder sb = new StringBuilder();
- int count = 8;
-
- for (TestBucket bucket : buckets) {
- if (sb.length() != 0) {
- // this is not the first bucket in the line --
- // prepend a space
- sb.append(' ');
- }
-
- // add the bucket number
- sb.append(String.format("%4s", bucket.index));
- count -= 1;
- if (count <= 0) {
- // filled up a row --
- // add it to the list, and start a new line
- data.add(sb.toString());
- sb = new StringBuilder();
- count = 8;
- }
- }
- if (sb.length() != 0) {
- // there is a partial line remaining -- add it to the list
- data.add(sb.toString());
- }
-
- /*
- * The first line displayed includes the UUID and size information,
- * and the first line of bucket data (owned, primary, or secondary).
- * The remaining lines of bucket data are displayed alone,
- * without any UUID or size information.
- */
- out.printf(format, uuid, segmentDescription, buckets.size(),
- data.removeFirst());
- while (!data.isEmpty()) {
- out.printf(format, "", "", "", data.removeFirst());
- }
- }
- return size;
- }
- }
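
For orientation, a hypothetical example of the '/cmd/dumpBuckets' output produced by 'dumpBucketsInternal' and 'dumpBucketsSegment' (the UUID and bucket numbers are invented, and the 'Owned' label stands in for whatever OWNED_STR is defined as elsewhere in this file); each row of bucket numbers holds at most 8 entries:

    UUID                                 Type      Count Buckets
    ----                                 ----      ----- -------
    650a4a4d-1011-4a2b-8612-4e4a49690b52 Owned         9    0    3    7   12   25   38   41   56
                                                           63
                                         Primary       2    5   19
                                         Secondary     1   88

    Total assigned: owner = 9, primary = 2, secondary = 1
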
-
- /* ============================================================ */
-
- /**
- * This interface is an abstraction for all messages that are routed
- * through buckets. It exists, so that messages may be queued while
- * bucket migration is taking place, and it makes it possible to support
- * multiple types of messages (routed UEB/DMAAP messages, or lock messages)
- */
- public static interface Message {
- /**
- * Process the current message -- this may mean delivering it locally,
- * or forwarding it.
- */
- public void process();
-
- /**
- * Send the message to another host for processing.
- *
- * @param server the destination host (although it could end up being
- * forwarded again)
- * @param bucketNumber the bucket number determined by extracting the
- * current message's keyword
- */
- public void sendToServer(Server server, int bucketNumber);
- }
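
A hedged sketch (class name and payload are invented; only the 'Message' interface and 'Server' come from the original code) of what an implementation of this interface could look like:

    class ExampleMessage implements Bucket.Message {
        private final String payload;              // illustrative message content

        ExampleMessage(String payload) {
            this.payload = payload;
        }

        @Override
        public void process() {
            // deliver 'payload' locally -- e.g. hand it to the local controller or lock code
        }

        @Override
        public void sendToServer(Server server, int bucketNumber) {
            // forward 'payload' to 'server', identifying the bucket it belongs to
        }
    }
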
-
- /* ============================================================ */
-
- /**
-     * This interface represents a type of backup; for example, one
-     * implementation backs up Drools objects within sessions, and another
-     * backs up lock data.
- */
- public static interface Backup {
- /**
- * This method is called to add a 'Backup' instance to the registered list.
- *
- * @param backup an object implementing the 'Backup' interface
- */
- public static void register(Backup backup) {
- synchronized (backupList) {
- if (!backupList.contains(backup)) {
- backupList.add(backup);
- }
- }
- }
-
- /**
- * Generate Serializable backup data for the specified bucket.
- *
- * @param bucketNumber the bucket number to back up
-         * @return a Serializable object containing backup data
- */
- public Restore generate(int bucketNumber);
- }
-
- /* ============================================================ */
-
- /**
- * Objects implementing this interface may be serialized, and restored
- * on a different host.
- */
- public static interface Restore extends Serializable {
- /**
- * Restore from deserialized data.
- *
- * @param bucketNumber the bucket number being restored
- */
- void restore(int bucketNumber);
- }
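
To show how the 'Backup'/'Restore' pair is meant to be used, a hedged sketch (the class names and the per-bucket state are invented; only Bucket.Backup, Bucket.Restore, and Backup.register come from the original code):

    class CounterRestore implements Bucket.Restore {
        private static final long serialVersionUID = 1L;
        private final int value;                   // illustrative per-bucket state

        CounterRestore(int value) {
            this.value = value;
        }

        @Override
        public void restore(int bucketNumber) {
            // re-create the captured state on the new owner of 'bucketNumber'
        }
    }

    class CounterBackup implements Bucket.Backup {
        @Override
        public Bucket.Restore generate(int bucketNumber) {
            return new CounterRestore(42);         // snapshot whatever this feature keeps for the bucket
        }
    }

    // registration, typically done once when the feature starts:
    Bucket.Backup.register(new CounterBackup());
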
-
- /* ============================================================ */
-
- /**
- * This interface corresponds to a transient state within a Bucket.
- */
- private interface State {
- /**
- * This method allows state-specific handling of the
- * 'Bucket.forward()' methods
- *
- * @param message the message to be forwarded/processed
-         * @return 'true' if the message has been handled (forwarded or queued),
-         * 'false' if it has not and needs to be handled locally.
- */
- boolean forward(Message message);
-
- /**
- * This method indicates that the current server is the new owner
- * of the current bucket.
- */
- void newOwner();
-
- /**
- * This method indicates that serialized data has been received,
- * presumably from the old owner of the bucket. The data could correspond
- * to Drools objects within sessions, as well as global locks.
- *
- * @param data serialized data associated with this bucket (at present,
- * this is assumed to be complete, all within a single message)
- */
- void bulkSerializedData(byte[] data);
- }
-
- /* ============================================================ */
-
- /**
-     * Each state instance is associated with a bucket, and is used while
-     * that bucket is in a transient state where the current server is the
-     * new owner of the bucket, or is presumed to be the new owner, based
-     * upon other events that have occurred.
- */
- private class NewOwner extends Thread implements State {
- /*
- * this value is 'true' if we have explicitly received a 'newOwner'
- * indication, and 'false' if there was another trigger for entering this
- * transient state (e.g. receiving serialized data)
- */
- boolean confirmed;
-
- // when 'System.currentTimeMillis()' reaches this value, we time out
- long endTime;
-
-        // If not 'null', we are queueing messages for this bucket;
- // otherwise, we are sending them through.
- Queue<Message> messages = new ConcurrentLinkedQueue<>();
-
- // this is used to signal the thread that we have data available
- CountDownLatch dataAvailable = new CountDownLatch(1);
-
- // this is the data
- byte[] data = null;
-
- // this is the old owner of the bucket
- Server oldOwner;
-
- /**
- * Constructor - a transient state, where we are expecting to receive
- * bulk data from the old owner.
- *
- * @param confirmed 'true' if we were explicitly notified that we
- * are the new owner of the bucket, 'false' if not
- */
- NewOwner(boolean confirmed, Server oldOwner) {
- super("New Owner for Bucket " + index);
- this.confirmed = confirmed;
- this.oldOwner = oldOwner;
- if (oldOwner == null) {
- // we aren't expecting any data -- this is indicated by 0-length data
- bulkSerializedData(new byte[0]);
- }
- endTime = System.currentTimeMillis()
- + (confirmed ? confirmedTimeout : unconfirmedTimeout);
- start();
- }
-
- /**
- * Return the 'confirmed' indicator.
- *
- * @return the 'confirmed' indicator
- */
- private boolean getConfirmed() {
- synchronized (Bucket.this) {
- return confirmed;
- }
- }
-
- /**
- * This returns the timeout delay, which will always be less than or
- * equal to 1 second. This allows us to periodically check whether the
- * old server is still active.
- *
- * @return the timeout delay, which is the difference between the
- * 'endTime' value and the current time or 1 second
- * (whichever is less)
- */
- private long getTimeout() {
- long lclEndTime;
- synchronized (Bucket.this) {
- lclEndTime = endTime;
- }
- return Math.min(lclEndTime - System.currentTimeMillis(), 1000L);
- }
-
- /**
- * Return the current value of the 'data' field.
- *
- * @return the current value of the 'data' field
- */
- private byte[] getData() {
- synchronized (Bucket.this) {
- return data;
- }
- }
-
- /* ******************* */
- /* 'State' interface */
- /* ******************* */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean forward(Message message) {
- // the caller of this method is synchronized on 'Bucket.this'
- if (messages != null && Thread.currentThread() != this) {
- // just queue the message
- messages.add(message);
- return true;
- } else {
- /*
- * Either:
- *
- * 1) We are in a grace period, where 'state' is still set, but
- * we are no longer forwarding messages.
- * 2) We are calling 'message.process()' from this thread
- * in the 'finally' block of 'NewOwner.run()'.
- *
- * In either case, messages should be processed locally.
- */
- return false;
- }
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void newOwner() {
- // the caller of this method is synchronized on 'Bucket.this'
- if (!confirmed) {
- confirmed = true;
- endTime += (confirmedTimeout - unconfirmedTimeout);
- }
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void bulkSerializedData(byte[] data) {
- // the caller of this method is synchronized on 'Bucket.this'
- if (this.data == null) {
- this.data = data;
- dataAvailable.countDown();
- }
- }
-
- /* ******************** */
- /* 'Thread' interface */
- /* ******************** */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void run() {
- logger.info("{}: 'run' method invoked", this);
- try {
- byte[] lclData;
- long delay;
-
- while ((lclData = getData()) == null
- && oldOwner.isActive()
- && (delay = getTimeout()) > 0) {
- // ignore return value -- 'data' will indicate the result
- if (!dataAvailable.await(delay, TimeUnit.MILLISECONDS)) {
- logger.error("CountDownLatch await time reached");
- }
- }
- if (lclData == null) {
- // no data available -- log an error, and abort
- if (getConfirmed()) {
- // we never received the data, but we are the new owner
- logger.error("{}: never received session data", this);
- } else {
- /*
- * no data received, and it was never confirmed --
- * assume that the forwarded message that triggered this was
-                         * erroneous
- */
- logger.error("{}: no confirmation or data received -- aborting", this);
- return;
- }
- } else {
- logger.info("{}: {} bytes of data available",
- this, lclData.length);
- }
-
- // if we reach this point, this server is the new owner
- if (lclData == null || lclData.length == 0) {
- // see if any features can do the restore
- for (ServerPoolApi feature : ServerPoolApi.impl.getList()) {
- feature.restoreBucket(Bucket.this);
- }
- } else {
- // deserialize data
- Object obj = Util.deserialize(lclData);
- restoreBucketData(obj);
- }
- } catch (Exception e) {
- logger.error("Exception in {}", this, e);
- } finally {
- runCleanup();
- }
- }
-
- private void runCleanup() {
- /*
- * cleanly leave state -- we want to make sure that messages
- * are processed in order, so the queue needs to remain until
- * it is empty
- */
- logger.info("{}: entering cleanup state", this);
- for ( ; ; ) {
- Message message = messages.poll();
- if (message == null) {
- // no messages left, but this could change
- synchronized (Bucket.this) {
- message = messages.poll();
- if (message == null) {
- // no messages left
- noMoreMessages();
- break;
- }
- }
- }
- // this doesn't work -- it ends up right back in the queue
- // if 'messages' is defined
- message.process();
- }
- if (messages == null) {
-                // this indicates we need to enter a grace period before cleanup.
- sleepBeforeCleanup();
- }
- logger.info("{}: exiting cleanup state", this);
- }
-
- private void noMoreMessages() {
- if (state == this) {
- if (owner == Server.getThisServer()) {
- // we can now exit the state
- state = null;
- stateChanged();
- } else {
- /*
- * We need a grace period before we can
- * remove the 'state' value (this can happen
- * if we receive and process the bulk data
- * before receiving official confirmation
-                         * that we are owner of the bucket).
- */
- messages = null;
- }
- }
- }
-
- private void sleepBeforeCleanup() {
- try {
- logger.info("{}: entering grace period before terminating",
- this);
- Thread.sleep(unconfirmedGracePeriod);
- } catch (InterruptedException e) {
- // we are exiting in any case
- Thread.currentThread().interrupt();
- } finally {
- synchronized (Bucket.this) {
- // Do we need to confirm that we really are the owner?
- // What does it mean if we are not?
- if (state == this) {
- state = null;
- stateChanged();
- }
- }
- }
- }
-
- /**
- * Return a useful value to display in log messages.
- *
- * @return a useful value to display in log messages
- */
- public String toString() {
- return "Bucket.NewOwner(" + index + ")";
- }
-
- /**
- * Restore bucket data.
- *
- * @param obj deserialized bucket data
- */
- private void restoreBucketData(Object obj) {
- if (obj instanceof List) {
- for (Object entry : (List<?>) obj) {
- if (entry instanceof Restore) {
- // entry-specific 'restore' operation
- ((Restore) entry).restore(Bucket.this.index);
- } else {
- logger.error("{}: Expected '{}' but got '{}'",
- this, Restore.class.getName(),
- entry.getClass().getName());
- }
- }
- } else {
- logger.error("{}: expected 'List' but got '{}'",
- this, obj.getClass().getName());
- }
- }
- }
-
-
- /* ============================================================ */
-
- /**
-     * Each state instance is associated with a bucket, and is used while
-     * that bucket is in a transient state where the current server is the
-     * old owner of the bucket, and the data is being transferred to the
-     * new owner.
- */
- private class OldOwner extends Thread implements State {
- Server newOwner;
-
- OldOwner(Server newOwner) {
- super("Old Owner for Bucket " + index);
- this.newOwner = newOwner;
- start();
- }
-
- /* ******************* */
- /* 'State' interface */
- /* ******************* */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean forward(Message message) {
- // forward message to new owner
- message.sendToServer(newOwner, index);
- return true;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void newOwner() {
- // shouldn't happen -- just log an error
- logger.error("{}: 'newOwner()' shouldn't be called in this state", this);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void bulkSerializedData(byte[] data) {
- // shouldn't happen -- just log an error
- logger.error("{}: 'bulkSerializedData()' shouldn't be called in this state", this);
- }
-
- /* ******************** */
- /* 'Thread' interface */
- /* ******************** */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void run() {
- logger.info("{}: 'run' method invoked", this);
- try {
- // go through all of the entries in the list, collecting restore data
- List<Restore> restoreData = new LinkedList<>();
- for (Backup backup : backupList) {
- Restore restore = backup.generate(index);
- if (restore != null) {
- restoreData.add(restore);
- }
- }
-
- // serialize all of the objects,
- // and send what we have to the new owner
- Entity<String> entity = Entity.entity(
- new String(Base64.getEncoder().encode(Util.serialize(restoreData))),
- MediaType.APPLICATION_OCTET_STREAM_TYPE);
- newOwner.post("bucket/sessionData", entity, new Server.PostResponse() {
- @Override
- public WebTarget webTarget(WebTarget webTarget) {
- return webTarget
- .queryParam(QP_BUCKET, index)
- .queryParam(QP_DEST, newOwner.getUuid())
- .queryParam(QP_TTL, timeToLive);
- }
-
- @Override
- public void response(Response response) {
- logger.info("/bucket/sessionData response code = {}",
- response.getStatus());
- }
- });
- } catch (Exception e) {
- logger.error("Exception in {}", this, e);
- } finally {
- synchronized (Bucket.this) {
- // restore the state
- if (state == this) {
- state = null;
- stateChanged();
- }
- }
- }
- }
-
- /**
- * Return a useful value to display in log messages.
- *
- * @return a useful value to display in log messages
- */
- public String toString() {
- return "Bucket.OldOwner(" + index + ")";
- }
- }
-}
diff --git a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Discovery.java b/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Discovery.java
deleted file mode 100644
index a53fb4d1..00000000
--- a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Discovery.java
+++ /dev/null
@@ -1,352 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_DISCOVERY_FETCH_LIMIT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_DISCOVERY_FETCH_TIMEOUT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_DISCOVER_PUBLISHER_LOOP_CYCLE_TIME;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DISCOVERY_ALLOW_SELF_SIGNED_CERTIFICATES;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DISCOVERY_API_KEY;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DISCOVERY_API_SECRET;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DISCOVERY_FETCH_LIMIT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DISCOVERY_FETCH_TIMEOUT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DISCOVERY_HTTPS;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DISCOVERY_PASSWORD;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DISCOVERY_SERVERS;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DISCOVERY_TOPIC;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DISCOVERY_USERNAME;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DISCOVER_PUBLISHER_LOOP_CYCLE_TIME;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.getProperty;
-
-import com.google.gson.Gson;
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.nio.charset.StandardCharsets;
-import java.util.Base64;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import org.onap.policy.common.endpoints.event.comm.Topic.CommInfrastructure;
-import org.onap.policy.common.endpoints.event.comm.TopicEndpointManager;
-import org.onap.policy.common.endpoints.event.comm.TopicListener;
-import org.onap.policy.common.endpoints.event.comm.TopicSink;
-import org.onap.policy.common.endpoints.event.comm.TopicSource;
-import org.onap.policy.common.endpoints.properties.PolicyEndPointProperties;
-import org.onap.policy.common.utils.coder.CoderException;
-import org.onap.policy.common.utils.coder.StandardCoder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class makes use of UEB/DMAAP to discover other servers in the pool.
- * The discovery processes ordinarily run only on the lead server, but they
- * run on other servers up until the point that they determine who the
- * leader is.
- */
-public class Discovery implements TopicListener {
- private static Logger logger = LoggerFactory.getLogger(Discovery.class);
-
- // used for JSON <-> String conversion
- private static StandardCoder coder = new StandardCoder();
-
- private static Discovery discovery = null;
-
- private volatile Publisher publisherThread = null;
-
- private List<TopicSource> consumers = null;
- private List<TopicSink> publishers = null;
-
- private Discovery() {
- // we want to modify the properties we send to 'TopicManager'
- PropBuilder builder = new PropBuilder(ServerPoolProperties.getProperties());
- builder.convert(DISCOVERY_SERVERS, null,
- PolicyEndPointProperties.PROPERTY_TOPIC_SERVERS_SUFFIX);
- builder.convert(DISCOVERY_USERNAME, null,
- PolicyEndPointProperties.PROPERTY_TOPIC_AAF_MECHID_SUFFIX);
- builder.convert(DISCOVERY_PASSWORD, null,
- PolicyEndPointProperties.PROPERTY_TOPIC_AAF_PASSWORD_SUFFIX);
- builder.convert(DISCOVERY_HTTPS, null,
- PolicyEndPointProperties.PROPERTY_HTTP_HTTPS_SUFFIX);
- builder.convert(DISCOVERY_API_KEY, null,
- PolicyEndPointProperties.PROPERTY_TOPIC_API_KEY_SUFFIX);
- builder.convert(DISCOVERY_API_SECRET, null,
- PolicyEndPointProperties.PROPERTY_TOPIC_API_SECRET_SUFFIX);
- builder.convert(DISCOVERY_FETCH_TIMEOUT,
- String.valueOf(DEFAULT_DISCOVERY_FETCH_TIMEOUT),
- PolicyEndPointProperties.PROPERTY_TOPIC_SOURCE_FETCH_TIMEOUT_SUFFIX);
- builder.convert(DISCOVERY_FETCH_LIMIT,
- String.valueOf(DEFAULT_DISCOVERY_FETCH_LIMIT),
- PolicyEndPointProperties.PROPERTY_TOPIC_SOURCE_FETCH_LIMIT_SUFFIX);
- builder.convert(DISCOVERY_ALLOW_SELF_SIGNED_CERTIFICATES, null,
- PolicyEndPointProperties.PROPERTY_ALLOW_SELF_SIGNED_CERTIFICATES_SUFFIX);
- Properties prop = builder.finish();
- logger.debug("Discovery converted properties: {}", prop);
-
- consumers = TopicEndpointManager.getManager().addTopicSources(prop);
- publishers = TopicEndpointManager.getManager().addTopicSinks(prop);
-
- if (consumers.isEmpty()) {
- logger.error("No consumer topics");
- }
- if (publishers.isEmpty()) {
- logger.error("No publisher topics");
- }
- logger.debug("Discovery: {} consumers, {} publishers",
- consumers.size(), publishers.size());
- }
-
- /**
- * Start all consumers and publishers, and start the publisher thread.
- */
- static synchronized void startDiscovery() {
- if (discovery == null) {
- discovery = new Discovery();
- }
- discovery.start();
- }
-
- /**
- * Stop all consumers and publishers, and stop the publisher thread.
- */
- static synchronized void stopDiscovery() {
- if (discovery != null) {
- discovery.stop();
- }
- }
-
- /**
- * Start all consumers and publishers, and start the publisher thread.
- */
- private void start() {
- for (TopicSource consumer : consumers) {
- consumer.register(this);
- consumer.start();
- }
- for (TopicSink publisher : publishers) {
- publisher.start();
- }
- if (publisherThread == null) {
- // send thread wasn't running -- start it
- publisherThread = new Publisher();
- publisherThread.start();
- }
- }
-
- /**
- * Stop all consumers and publishers, and stop the publisher thread.
- */
- private void stop() {
- publisherThread = null;
- for (TopicSink publisher : publishers) {
- publisher.stop();
- }
- for (TopicSource consumer : consumers) {
- consumer.unregister(this);
- consumer.stop();
- }
- }
-
- /*===========================*/
- /* 'TopicListener' interface */
- /*===========================*/
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void onTopicEvent(CommInfrastructure infra, String topic, String event) {
- /*
- * a JSON message has been received -- it should contain
- * a single string parameter 'pingData', which contains the
-         * same base64-encoded message format that 'Server'
- * instances periodically exchange
- */
- try {
- @SuppressWarnings("unchecked")
- LinkedHashMap<String, String> map = coder.decode(event, LinkedHashMap.class);
- String message = map.get("pingData");
- Server.adminRequest(message.getBytes(StandardCharsets.UTF_8));
- logger.info("Received a message, server count={}", Server.getServerCount());
- } catch (CoderException e) {
- logger.error("Can't decode message", e);
- }
- }
-
- /* ============================================================ */
-
- /**
- * This class is used to convert internal 'discovery.*' properties to
- * properties that 'TopicEndpointManager' can use.
- */
- private static class PropBuilder {
- // properties being incrementally modified
- Properties prop;
-
- // value from 'discovery.topic' parameter
- String topic;
-
- // 'true' only if both 'discovery.topic' and 'discovery.servers'
-        // have been defined
- boolean doConversion = false;
-
- // contains "ueb.source.topics" or "dmaap.source.topics"
- String sourceTopicsName = null;
-
- // contains "<TYPE>.source.topics.<TOPIC>" (<TYPE> = ueb|dmaap)
- String sourcePrefix = null;
-
- // contains "ueb.sink.topics" or "dmaap.sink.topics"
- String sinkTopicsName = null;
-
- // contains "<TYPE>.sink.topics.<TOPIC>" (<TYPE> = ueb|dmaap)
- String sinkPrefix = null;
-
- /**
- * Constructor - decide whether we are going to do conversion or not,
- * and initialize accordingly.
- *
- * @param prop the initial list of properties
- */
- PropBuilder(Properties prop) {
- this.prop = new Properties(prop);
- this.topic = prop.getProperty(DISCOVERY_TOPIC);
- String servers = prop.getProperty(DISCOVERY_SERVERS);
- if (topic != null && servers != null) {
- // we do have property conversion to do
- doConversion = true;
- String type = topic.contains(".") ? "dmaap" : "ueb";
- sourceTopicsName = type + ".source.topics";
- sourcePrefix = sourceTopicsName + "." + topic;
- sinkTopicsName = type + ".sink.topics";
- sinkPrefix = sinkTopicsName + "." + topic;
- }
- }
-
- /**
- * If we are doing conversion, convert an internal property
- * to something that 'TopicEndpointManager' can use.
- *
- * @param intName server pool property name (e.g. "discovery.servers")
- * @param defaultValue value to use if property 'intName' is not specified
- * @param extSuffix TopicEndpointManager suffix, including leading "."
- */
- void convert(String intName, String defaultValue, String extSuffix) {
- if (doConversion) {
- String value = prop.getProperty(intName, defaultValue);
- if (value != null) {
- prop.setProperty(sourcePrefix + extSuffix, value);
- prop.setProperty(sinkPrefix + extSuffix, value);
- }
- }
- }
-
- /**
- * Generate/update the '*.source.topics' and '*.sink.topics' parameters.
- *
- * @return the updated properties list
- */
- Properties finish() {
- if (doConversion) {
- String currentValue = prop.getProperty(sourceTopicsName);
- if (currentValue == null) {
- // '*.source.topics' is not defined -- set it
- prop.setProperty(sourceTopicsName, topic);
- } else {
- // '*.source.topics' is defined -- append to it
- prop.setProperty(sourceTopicsName, currentValue + "," + topic);
- }
- currentValue = prop.getProperty(sinkTopicsName);
- if (currentValue == null) {
- // '*.sink.topics' is not defined -- set it
- prop.setProperty(sinkTopicsName, topic);
- } else {
- // '*.sink.topics' is defined -- append to it
- prop.setProperty(sinkTopicsName, currentValue + "," + topic);
- }
- }
- return prop;
- }
- }
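
As an editorial illustration (not part of the original file) of the conversion 'PropBuilder' performs, and assuming the 'discovery.topic'/'discovery.servers' property names mentioned above plus the standard '.servers' suffix from PolicyEndPointProperties, input such as

    discovery.topic=POOL.NOTIFICATION
    discovery.servers=dmaap-host1,dmaap-host2

would be converted (the topic name contains a '.', so the 'dmaap' prefix is chosen) into roughly

    dmaap.source.topics=POOL.NOTIFICATION
    dmaap.sink.topics=POOL.NOTIFICATION
    dmaap.source.topics.POOL.NOTIFICATION.servers=dmaap-host1,dmaap-host2
    dmaap.sink.topics.POOL.NOTIFICATION.servers=dmaap-host1,dmaap-host2
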
-
- /* ============================================================ */
-
- /**
- * This is the sender thread, which periodically sends out 'ping' messages.
- */
- private class Publisher extends Thread {
- /**
-         * Constructor -- read in the properties, and initialize 'publisher'.
- */
- Publisher() {
- super("Discovery Publisher Thread");
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void run() {
-            // this loop will terminate once 'publisherThread' is set to 'null',
- // or some other 'Publisher' instance replaces it
- long cycleTime = getProperty(DISCOVER_PUBLISHER_LOOP_CYCLE_TIME,
- DEFAULT_DISCOVER_PUBLISHER_LOOP_CYCLE_TIME);
- while (this == publisherThread) {
- try {
- // wait 5 seconds (default)
- Thread.sleep(cycleTime);
-
- // generate a 'ping' message
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- DataOutputStream dos = new DataOutputStream(bos);
-
- // write the 'ping' data for this server
- Server thisServer = Server.getThisServer();
- thisServer.writeServerData(dos);
- String encodedData =
- new String(Base64.getEncoder().encode(bos.toByteArray()));
-
- // base64-encoded value is passed as JSON parameter 'pingData'
- LinkedHashMap<String, String> map = new LinkedHashMap<>();
- map.put("pingData", encodedData);
- String jsonString = new Gson().toJson(map, Map.class);
- for (TopicSink publisher : publishers) {
- publisher.send(jsonString);
- }
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- logger.error("Exception in Discovery.Publisher.run():", e);
- return;
- } catch (Exception e) {
- logger.error("Exception in Discovery.Publisher.run():", e);
- // grace period -- we don't want to get UEB upset at us
- try {
- Thread.sleep(15000);
- } catch (InterruptedException e2) {
- Thread.currentThread().interrupt();
- logger.error("Discovery.Publisher sleep interrupted");
- }
- return;
- }
- }
- }
- }
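
The ping message assembled in 'Publisher.run()' is, in effect, a one-field JSON object; a hypothetical example (the Base64 payload is invented) looks like

    {"pingData":"AAECAwQFBgcICQ=="}

On receipt, 'onTopicEvent' above extracts 'pingData' and hands it to 'Server.adminRequest()'.
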
-}
diff --git a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Events.java b/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Events.java
deleted file mode 100644
index d2ea1a5c..00000000
--- a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Events.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import java.util.Collection;
-import java.util.Queue;
-import java.util.concurrent.ConcurrentLinkedQueue;
-
-/**
- * This class is used to distribute notifications of various system
- * events, such as new 'Server' instances, or a server failing.
- */
-public class Events {
- // set of listeners receiving event notifications
- private static final Queue<Events> listeners =
- new ConcurrentLinkedQueue<>();
-
- /**
- * add a listener to the set of listeners receiving events.
- *
- * @param handler the listener
- */
- public static void register(Events handler) {
- // if it is already here, remove it first
- listeners.remove(handler);
-
- // add to the end of the queue
- listeners.add(handler);
- }
-
- /**
- * remove a listener from the set of listeners.
- */
- public static boolean unregister(Events handler) {
- return listeners.remove(handler);
- }
-
- public static Collection<Events> getListeners() {
- return listeners;
- }
-
- /* ============================================================ */
-
- /**
- * Notification that a new server has been discovered.
- *
- * @param server this is the new server
- */
- public void newServer(Server server) {
- // do nothing
- }
-
- /**
- * Notification that a server has failed.
- *
- * @param server this is the server that failed
- */
- public void serverFailed(Server server) {
- // do nothing
- }
-
- /**
- * Notification that a new lead server has been selected.
- *
- * @param server this is the new lead server
- */
- public void newLeader(Server server) {
- // do nothing
- }
-
- /**
- * Notification that the lead server has gone down.
- *
- * @param server the lead server that failed
- */
- public void leaderFailed(Server server) {
- // do nothing
- }
-
- /**
- * Notification that a new selection just completed, but the same
- * leader has been chosen (this may be in response to a new server
- * joining earlier).
- *
- * @param server the current leader, which has been confirmed
- */
- public void leaderConfirmed(Server server) {
- // do nothing
- }
-}
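
A hedged usage sketch (the anonymous listener is illustrative, not part of the original file) showing how a component subscribes to these notifications:

    Events.register(new Events() {
        @Override
        public void newLeader(Server server) {
            // react to a leader election; the other callbacks keep their default no-op behavior
        }
    });
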
diff --git a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/ExtendedObjectInputStream.java b/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/ExtendedObjectInputStream.java
deleted file mode 100644
index 5ec6f341..00000000
--- a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/ExtendedObjectInputStream.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.ObjectInputStream;
-import java.io.ObjectStreamClass;
-
-/**
- * This class provides an 'ObjectInputStream' variant that uses the
- * specified 'ClassLoader' instance.
- */
-public class ExtendedObjectInputStream extends ObjectInputStream {
- // the 'ClassLoader' to use when doing class lookups
- private ClassLoader classLoader;
-
- /**
- * Constructor -- invoke the superclass, and save the 'ClassLoader'.
- *
- * @param in input stream to read from
- * @param classLoader 'ClassLoader' to use when doing class lookups
- */
- public ExtendedObjectInputStream(InputStream in, ClassLoader classLoader) throws IOException {
- super(in);
- this.classLoader = classLoader;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- protected Class<?> resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException {
-
- // Standard ClassLoader implementations first attempt to load classes
-        // via the parent class loader, and then attempt to load them using the
- // current class loader if that fails. For some reason, Drools container
- // class loaders define a different order -- in theory, this is only a
- // problem if different versions of the same class are accessible through
-        // different class loaders, which is exactly what happens in some JUnit
- // tests.
- //
- // This change restores the order, at least when deserializing objects
- // into a Drools container.
- try {
- // try the parent class loader first
- return classLoader.getParent().loadClass(desc.getName());
- } catch (ClassNotFoundException e) {
- return classLoader.loadClass(desc.getName());
- }
- }
-}
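
A hedged usage sketch (variable names are placeholders, and the fragment is assumed to sit in a method that declares IOException and ClassNotFoundException):

    // 'data' holds a serialized object graph, 'containerLoader' is the Drools
    // container's class loader -- both are placeholders for this sketch
    try (ExtendedObjectInputStream ois =
            new ExtendedObjectInputStream(new java.io.ByteArrayInputStream(data), containerLoader)) {
        Object restored = ois.readObject();   // classes resolve via the parent loader first
    }
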
diff --git a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/FeatureServerPool.java b/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/FeatureServerPool.java
deleted file mode 100644
index 064af79e..00000000
--- a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/FeatureServerPool.java
+++ /dev/null
@@ -1,1027 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * Modifications Copyright (C) 2020 Nordix Foundation
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.BUCKET_DROOLS_TIMEOUT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.BUCKET_TIME_TO_LIVE;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_BUCKET_DROOLS_TIMEOUT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_BUCKET_TIME_TO_LIVE;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.getProperty;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.Serializable;
-import java.nio.charset.StandardCharsets;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Base64;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Properties;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import javax.ws.rs.client.Entity;
-import javax.ws.rs.client.WebTarget;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import lombok.AllArgsConstructor;
-import org.apache.commons.lang3.tuple.Pair;
-import org.kie.api.runtime.KieSession;
-import org.kie.api.runtime.rule.FactHandle;
-import org.onap.policy.common.endpoints.event.comm.Topic.CommInfrastructure;
-import org.onap.policy.common.endpoints.event.comm.TopicListener;
-import org.onap.policy.common.utils.coder.CoderException;
-import org.onap.policy.common.utils.coder.StandardCoder;
-import org.onap.policy.common.utils.coder.StandardCoderObject;
-import org.onap.policy.drools.control.api.DroolsPdpStateControlApi;
-import org.onap.policy.drools.core.DroolsRunnable;
-import org.onap.policy.drools.core.PolicyContainer;
-import org.onap.policy.drools.core.PolicySession;
-import org.onap.policy.drools.core.PolicySessionFeatureApi;
-import org.onap.policy.drools.core.lock.PolicyResourceLockManager;
-import org.onap.policy.drools.features.PolicyControllerFeatureApi;
-import org.onap.policy.drools.features.PolicyEngineFeatureApi;
-import org.onap.policy.drools.system.PolicyController;
-import org.onap.policy.drools.system.PolicyControllerConstants;
-import org.onap.policy.drools.system.PolicyEngine;
-import org.onap.policy.drools.system.PolicyEngineConstants;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * <p>This class hooks the server pool implementation into DroolsPDP.</p>
- * <dl>
- * <dt>PolicyEngineFeatureApi</dt><dd> - the <i>afterStart</i> hook is where we initialize.</dd>
- * <dt>PolicyControllerFeatureApi</dt><dd> - the <i>beforeOffer</i> hook is used to look
- * at incoming topic messages, and decide whether to process them
- * on this host, or forward to another host.</dd>
- * </dl>
- */
-public class FeatureServerPool
- implements PolicyEngineFeatureApi, PolicySessionFeatureApi,
- PolicyControllerFeatureApi, DroolsPdpStateControlApi {
- private static Logger logger =
- LoggerFactory.getLogger(FeatureServerPool.class);
-
- // used for JSON <-> String conversion
- private static StandardCoder coder = new StandardCoder();
-
- private static final String CONFIG_FILE =
- "config/feature-server-pool.properties";
-
- /*
- * Properties used when searching for keyword entries
- *
- * The following types are supported:
- *
- * 1) keyword.<topic>.path=<field-list>
- * 2) keyword.path=<field-list>
- * 3) ueb.source.topics.<topic>.keyword=<field-list>
- * 4) ueb.source.topics.keyword=<field-list>
- * 5) dmaap.source.topics.<topic>.keyword=<field-list>
- * 6) dmaap.source.topics.keyword=<field-list>
- *
- * 1, 3, and 5 are functionally equivalent
- * 2, 4, and 6 are functionally equivalent
- */
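-
-    /*
-     * Illustrative (hypothetical) entries of the forms above -- the topic
-     * name below is an example only, not taken from this feature:
-     *
-     *   keyword.MY-TOPIC.path=requestID,CommonHeader.RequestID:uuid
-     *   dmaap.source.topics.MY-TOPIC.keyword=requestID
-     *   keyword.path=requestID
-     */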
-
- static final String KEYWORD_PROPERTY_START_1 = "keyword.";
- static final String KEYWORD_PROPERTY_END_1 = ".path";
- static final String KEYWORD_PROPERTY_START_2 = "ueb.source.topics.";
- static final String KEYWORD_PROPERTY_END_2 = ".keyword";
- static final String KEYWORD_PROPERTY_START_3 = "dmaap.source.topics.";
- static final String KEYWORD_PROPERTY_END_3 = ".keyword";
-
- /*
- * maps topic names to a keyword table derived from <field-list> (above)
- *
- * Example <field-list>: requestID,CommonHeader.RequestID
- *
- * Table generated from this example has length 2:
- * table 0 is "requestID"
- * table 1 is "CommonHeader", "RequestID"
- */
- private static HashMap<String, String[][]> topicToPaths = new HashMap<>();
-
- // this table is used for any topics that aren't in 'topicToPaths'
- private static String[][] defaultPaths = new String[0][];
-
- // extracted from properties
- private static long droolsTimeoutMillis;
- private static String timeToLiveSecond;
-
- // HTTP query parameters
- private static final String QP_KEYWORD = "keyword";
- private static final String QP_SESSION = "session";
- private static final String QP_BUCKET = "bucket";
- private static final String QP_TTL = "ttl";
- private static final String QP_CONTROLLER = "controller";
- private static final String QP_PROTOCOL = "protocol";
- private static final String QP_TOPIC = "topic";
-
- /* **************************** */
- /* 'OrderedService' interface */
- /* **************************** */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public int getSequenceNumber() {
- // we need to make sure we have an early position in 'selectThreadModel'
-        // (in case there is a feature that provides a thread model)
- return -1000000;
- }
-
- /* ************************************ */
- /* 'PolicyEngineFeatureApi' interface */
- /* ************************************ */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean afterStart(PolicyEngine engine) {
- logger.info("Starting FeatureServerPool");
- Server.startup(CONFIG_FILE);
- TargetLock.startup();
- setDroolsTimeoutMillis(
- getProperty(BUCKET_DROOLS_TIMEOUT, DEFAULT_BUCKET_DROOLS_TIMEOUT));
- int intTimeToLive =
- getProperty(BUCKET_TIME_TO_LIVE, DEFAULT_BUCKET_TIME_TO_LIVE);
- setTimeToLiveSecond(String.valueOf(intTimeToLive));
- buildKeywordTable();
- Bucket.Backup.register(new DroolsSessionBackup());
- Bucket.Backup.register(new TargetLock.LockBackup());
- return false;
- }
-
- private static void setDroolsTimeoutMillis(long timeoutMs) {
- droolsTimeoutMillis = timeoutMs;
- }
-
- private static void setTimeToLiveSecond(String ttlSec) {
- timeToLiveSecond = ttlSec;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public PolicyResourceLockManager beforeCreateLockManager(
- PolicyEngine engine, Properties properties) {
-
- return TargetLock.getLockFactory();
- }
-
- /*=====================================*/
- /* 'PolicySessionFeatureApi' interface */
- /*=====================================*/
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean insertDrools(final PolicySession session, final Object object) { // NOSONAR
- // sonar complained that the method always returns the same value. However,
-        // we prefer the code be structured this way, so the sonar warning is disabled
-
- final String keyword = Keyword.lookupKeyword(object);
- if (keyword == null) {
- // no keyword was found, so we process locally
- KieSession kieSession = session.getKieSession();
- if (kieSession != null) {
- kieSession.insert(object);
- }
- return true;
- }
-
- /*
- * 'keyword' determines the destination host,
- * which may be local or remote
- */
- Bucket.forwardAndProcess(keyword, new Bucket.Message() {
- @Override
- public void process() {
- // if we reach this point, we process locally
- KieSession kieSession = session.getKieSession();
- if (kieSession != null) {
- kieSession.insert(object);
- }
- }
-
- @Override
- public void sendToServer(Server server, int bucketNumber) {
-                // this object needs to be sent to a remote host --
- // first, serialize the object
- byte[] data = null;
- try {
- data = Util.serialize(object);
- } catch (IOException e) {
- logger.error("insertDrools: can't serialize object of {}",
- object.getClass(), e);
- return;
- }
-
- // construct the message to insert remotely
- Entity<String> entity = Entity.entity(
- new String(Base64.getEncoder().encode(data), StandardCharsets.UTF_8),
- MediaType.APPLICATION_OCTET_STREAM_TYPE);
- server.post("session/insertDrools", entity,
- new Server.PostResponse() {
- @Override
- public WebTarget webTarget(WebTarget webTarget) {
- PolicyContainer pc = session.getPolicyContainer();
- String encodedSessionName =
- pc.getGroupId() + ":" + pc.getArtifactId() + ":"
- + session.getName();
-
- return webTarget
- .queryParam(QP_KEYWORD, keyword)
- .queryParam(QP_SESSION, encodedSessionName)
- .queryParam(QP_BUCKET, bucketNumber)
- .queryParam(QP_TTL, timeToLiveSecond);
- }
-
- @Override
- public void response(Response response) {
- logger.info("/session/insertDrools response code = {}",
- response.getStatus());
- }
- });
- }
- });
- return true;
- }
-
- /* **************************************** */
- /* 'PolicyControllerFeatureApi' interface */
- /* **************************************** */
-
- /**
- * This method is called from 'AggregatedPolicyController.onTopicEvent',
- * and provides a way to intercept the message before it is decoded and
- * delivered to a local Drools session.
- *
- * @param controller the PolicyController instance receiving the message
- * @param protocol communication infrastructure type
- * @param topic topic name
- * @param event event message as a string
- * @return 'false' if this event should be processed locally, or 'true'
- * if the message has been forwarded to a remote host, so local
- * processing should be bypassed
- */
- @Override
- public boolean beforeOffer(final PolicyController controller,
- final CommInfrastructure protocol,
- final String topic,
- final String event) {
- // choose the table, based upon the topic
- String[][] table = topicToPaths.getOrDefault(topic, defaultPaths);
-
- // build a JSON object from the event
- StandardCoderObject sco;
-
- try {
- sco = coder.decode(event, StandardCoderObject.class);
- } catch (CoderException e) {
- return false;
- }
- String keyword = null;
-
- for (String[] path : table) {
- /*
- * Each entry in 'table' is a String[] containing an encoding
- * of a possible keyword field. Suppose the value is 'a.b.c.d.e' --
- * 'path' would be encoded as 'String[] {"a", "b", "c", "d", "e"}'
- */
- String fieldName = path[path.length - 1];
- String conversionFunctionName = null;
- int index = fieldName.indexOf(':');
-
- if (index > 0) {
- conversionFunctionName = fieldName.substring(index + 1);
- fieldName = fieldName.substring(0, index);
- path = Arrays.copyOf(path, path.length);
- path[path.length - 1] = fieldName;
- }
- keyword = sco.getString((Object[]) path);
-
- if (keyword != null) {
- if (conversionFunctionName != null) {
- keyword = Keyword.convertKeyword(keyword, conversionFunctionName);
- }
- if (keyword != null) {
- break;
- }
- }
- }
-
- if (keyword == null) {
- // couldn't find any keywords -- just process this message locally
- logger.warn("Can't locate bucket keyword within message");
- return false;
- }
-
- /*
- * build a message object implementing the 'Bucket.Message' interface --
- * it will be processed locally, forwarded, or queued based upon the
- * current state.
- */
- TopicMessage message =
- new TopicMessage(keyword, controller, protocol, topic, event);
- int bucketNumber = Bucket.bucketNumber(keyword);
- if (Bucket.forward(bucketNumber, message)) {
- // message was queued or forwarded -- abort local processing
- return true;
- }
-
- /*
- * the bucket happens to be assigned to this server, and wasn't queued --
- * return 'false', so it will be processed locally
- */
- logger.info("Keyword={}, bucket={} -- owned by this server",
- keyword, bucketNumber);
- return false;
- }
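-
-    /*
-     * Illustrative example of the keyword extraction above (the field names
-     * are examples only): for a table entry derived from
-     * "CommonHeader.RequestID:uuid", 'path' is {"CommonHeader", "RequestID"},
-     * the conversion function name is "uuid", and the keyword is the value
-     * found at that location, truncated to at most 36 characters by the
-     * 'uuid' conversion function.
-     */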
-
- /**
- * Incoming topic message has been forwarded from a remote host.
- *
- * @param bucketNumber the bucket number calculated on the remote host
- * @param keyword the keyword associated with the message
- * @param controllerName the controller the message was directed to
- * on the remote host
- * @param protocol String value of the 'Topic.CommInfrastructure' value
- * (UEB, DMAAP, NOOP, or REST -- NOOP and REST shouldn't be used
- * here)
- * @param topic the UEB/DMAAP topic name
- * @param event this is the JSON message
- */
- static void topicMessage(
- int bucketNumber, String keyword, String controllerName,
- String protocol, String topic, String event) {
-
- // @formatter:off
- logger.info("Incoming topic message: Keyword={}, bucket={}\n"
- + " controller = {}\n"
- + " topic = {}",
- keyword, bucketNumber, controllerName, topic);
- // @formatter:on
-
- // locate the 'PolicyController'
- PolicyController controller = PolicyControllerConstants.getFactory().get(controllerName);
- if (controller == null) {
- /*
- * This controller existed on the sender's host, but doesn't exist
- * on the destination. This is a problem -- we are counting on all
- * hosts being configured with the same controllers.
- */
- logger.error("Can't locate controller '{}' for incoming topic message",
- controllerName);
- } else if (controller instanceof TopicListener) {
- /*
- * This is the destination host -- repeat the 'onTopicEvent'
- * method (the one that invoked 'beforeOffer' on the originating host).
- * Note that this message could be forwarded again if the sender's
- * bucket table was somehow different from ours -- perhaps there was
- * an update in progress.
- *
- * TBD: it would be nice to limit the number of hops, in case we
- * somehow have a loop.
- */
- ((TopicListener) controller).onTopicEvent(
- CommInfrastructure.valueOf(protocol), topic, event);
- } else {
- /*
- * This 'PolicyController' was also a 'TopicListener' on the sender's
- * host -- it isn't on this host, and we are counting on them being
-             * configured the same way.
- */
- logger.error("Controller {} is not a TopicListener", controllerName);
- }
- }
-
- /**
- * An incoming '/session/insertDrools' message was received.
- *
- * @param keyword the keyword associated with the incoming object
-     * @param sessionName encoded session name (groupId:artifactId:droolsSession)
- * @param bucket the bucket associated with keyword
- * @param ttl similar to IP time-to-live -- it controls the number of hops
- * the message may take
- * @param data base64-encoded serialized data for the object
- */
- static void incomingInsertDrools(
- String keyword, String sessionName, int bucket, int ttl, byte[] data) {
-
- logger.info("Incoming insertDrools: keyword={}, session={}, bucket={}, ttl={}",
- keyword, sessionName, bucket, ttl);
-
- if (Bucket.isKeyOnThisServer(keyword)) {
- // process locally
-
- // [0]="<groupId>" [1]="<artifactId>", [2]="<sessionName>"
- String[] nameSegments = sessionName.split(":");
-
- // locate the 'PolicyContainer' and 'PolicySession'
- PolicySession policySession = locatePolicySession(nameSegments);
-
- if (policySession == null) {
- logger.error("incomingInsertDrools: Can't find PolicySession={}",
- sessionName);
- } else {
- KieSession kieSession = policySession.getKieSession();
- if (kieSession != null) {
- try {
- // deserialization needs to use the correct class loader
- Object obj = Util.deserialize(
- Base64.getDecoder().decode(data),
- policySession.getPolicyContainer().getClassLoader());
- kieSession.insert(obj);
- } catch (IOException | ClassNotFoundException
- | IllegalArgumentException e) {
- logger.error("incomingInsertDrools: failed to read data "
- + "for session '{}'", sessionName, e);
- }
- }
- }
- } else {
- ttl -= 1;
- if (ttl > 0) {
- /*
- * This host is not the intended destination -- this could happen
- * if it was sent from another site. Forward the message in the
- * same thread.
- */
- forwardInsertDroolsMessage(bucket, keyword, sessionName, ttl, data);
- }
- }
- }
-
- /**
- * step through all 'PolicyContainer' instances looking
-     * for a matching 'artifactId' and 'groupId'.
- * @param nameSegments name portion from sessionName
- * @return policySession match artifactId and groupId
- */
- private static PolicySession locatePolicySession(String[] nameSegments) {
- PolicySession policySession = null;
- if (nameSegments.length == 3) {
- for (PolicyContainer pc : PolicyContainer.getPolicyContainers()) {
- if (nameSegments[1].equals(pc.getArtifactId())
- && nameSegments[0].equals(pc.getGroupId())) {
- policySession = pc.getPolicySession(nameSegments[2]);
- break;
- }
- }
- }
- return policySession;
- }
-
- /**
- * Forward the insertDrools message in the same thread.
- */
- private static void forwardInsertDroolsMessage(int bucket, String keyword,
- String sessionName, int ttl, byte[] data) {
- Server server = Bucket.bucketToServer(bucket);
- WebTarget webTarget = server.getWebTarget("session/insertDrools");
- if (webTarget != null) {
- logger.info("Forwarding 'session/insertDrools' "
- + "(key={},session={},bucket={},ttl={})",
- keyword, sessionName, bucket, ttl);
- Entity<String> entity =
- Entity.entity(new String(data, StandardCharsets.UTF_8),
- MediaType.APPLICATION_OCTET_STREAM_TYPE);
- webTarget
- .queryParam(QP_KEYWORD, keyword)
- .queryParam(QP_SESSION, sessionName)
- .queryParam(QP_BUCKET, bucket)
- .queryParam(QP_TTL, ttl)
- .request().post(entity);
- }
- }
-
- /**
- * This method builds the table that is used to locate the appropriate
- * keywords within incoming JSON messages (e.g. 'requestID'). The
- * associated values are then mapped into bucket numbers.
- */
- private static void buildKeywordTable() {
- Properties prop = ServerPoolProperties.getProperties();
-
- // iterate over all of the properties, picking out those we are
- // interested in
- for (String name : prop.stringPropertyNames()) {
- String topic = null;
- String begin;
- String end;
-
- if (name.startsWith(KEYWORD_PROPERTY_START_1)
- && name.endsWith(KEYWORD_PROPERTY_END_1)) {
- // 1) keyword.<topic>.path=<field-list>
- // 2) keyword.path=<field-list>
- begin = KEYWORD_PROPERTY_START_1;
- end = KEYWORD_PROPERTY_END_1;
- } else if (name.startsWith(KEYWORD_PROPERTY_START_2)
- && name.endsWith(KEYWORD_PROPERTY_END_2)) {
- // 3) ueb.source.topics.<topic>.keyword=<field-list>
- // 4) ueb.source.topics.keyword=<field-list>
- begin = KEYWORD_PROPERTY_START_2;
- end = KEYWORD_PROPERTY_END_2;
- } else if (name.startsWith(KEYWORD_PROPERTY_START_3)
- && name.endsWith(KEYWORD_PROPERTY_END_3)) {
- // 5) dmaap.source.topics.<topic>.keyword=<field-list>
- // 6) dmaap.source.topics.keyword=<field-list>
- begin = KEYWORD_PROPERTY_START_3;
- end = KEYWORD_PROPERTY_END_3;
- } else {
- // we aren't interested in this property
- continue;
- }
-
- topic = detmTopic(name, begin, end);
-
- // now, process the value
- // Example: requestID,CommonHeader.RequestID
- String[][] paths = splitPaths(prop, name);
-
- if (topic == null) {
- // these paths are used for any topics not explicitly
- // in the 'topicToPaths' table
- defaultPaths = paths;
- } else {
- // these paths are specific to 'topic'
- topicToPaths.put(topic, paths);
- }
- }
- }
-
- private static String detmTopic(String name, String begin, String end) {
- int beginIndex = begin.length();
- int endIndex = name.length() - end.length();
- if (beginIndex < endIndex) {
- // <topic> is specified, so this table is limited to this
- // specific topic
- return name.substring(beginIndex, endIndex);
- }
-
- return null;
- }
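-
-    /*
-     * Illustrative example (the topic name is hypothetical): for the
-     * property name "keyword.MY-TOPIC.path", detmTopic returns "MY-TOPIC";
-     * for "keyword.path" there is no <topic> portion, so it returns 'null'.
-     */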
-
- private static String[][] splitPaths(Properties prop, String name) {
- String[] commaSeparatedEntries = prop.getProperty(name).split(",");
- String[][] paths = new String[commaSeparatedEntries.length][];
- for (int i = 0; i < commaSeparatedEntries.length; i += 1) {
- paths[i] = commaSeparatedEntries[i].split("\\.");
- }
-
- return paths;
- }
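-
-    /*
-     * Illustrative example: for the property value
-     * "requestID,CommonHeader.RequestID", splitPaths returns
-     * { {"requestID"}, {"CommonHeader", "RequestID"} }.
-     */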
-
- /*======================================*/
- /* 'DroolsPdpStateControlApi' interface */
- /*======================================*/
-
- /*
-     * Stop the processing of messages and server pool participation.
-     * Note: This is not static because it should only be used if feature-server-pool
-     * has been enabled.
- * @see org.onap.policy.drools.control.api.DroolsPdpStateControlApi#shutdown()
- */
- @Override
- public void shutdown() {
- PolicyEngineConstants.getManager().deactivate();
- Server.shutdown();
- }
-
- /*
-     * Resume the processing of messages and server pool participation.
-     * Note: This is not static because it should only be used if feature-server-pool
-     * has been enabled.
- * @see org.onap.policy.drools.control.api.DroolsPdpStateControlApi#restart()
- */
- @Override
- public void restart() {
- MainLoop.startThread();
- Discovery.startDiscovery();
- PolicyEngineConstants.getManager().activate();
- }
-
- /* ============================================================ */
-
- /**
- * This class implements the 'Bucket.Message' interface for UEB/DMAAP
- * messages.
- */
- @AllArgsConstructor
- private static class TopicMessage implements Bucket.Message {
- /*
- * the keyword associated with this message
- * (which determines the bucket number).
- */
- private final String keyword;
-
- // the controller receiving this message
- private final PolicyController controller;
-
- // enumeration: UEB or DMAAP
- private final CommInfrastructure protocol;
-
- // UEB/DMAAP topic
- private final String topic;
-
- // JSON message as a String
- private final String event;
-
- /**
- * Process this message locally using 'TopicListener.onTopicEvent'
- * (the 'PolicyController' instance is assumed to implement
- * the 'TopicListener' interface as well).
- */
- @Override
- public void process() {
- if (controller instanceof TopicListener) {
- /*
- * This is the destination host -- repeat the 'onTopicEvent' method
- * (the one that invoked 'beforeOffer' on the originating host).
- * Note that this message could be forwarded again if the sender's
- * bucket table was somehow different from ours -- perhaps there was
- * an update in progress.
- *
- * TBD: it would be nice to limit the number of hops, in case we
- * somehow have a loop.
- */
- ((TopicListener) controller).onTopicEvent(protocol, topic, event);
- } else {
- /*
- * This 'PolicyController' was also a 'TopicListener' on the sender's
- * host -- it isn't on this host, and we are counting on them being
- * configured the same way.
- */
- logger.error("Controller {} is not a TopicListener",
- controller.getName());
- }
- }
-
- /**
- * Send this message to a remote server for processing (presumably, it
- * is the destination host).
- *
- * @param server the Server instance to send the message to
- * @param bucketNumber the bucket number to send it to
- */
- @Override
- public void sendToServer(Server server, int bucketNumber) {
- // if we reach this point, we have determined the remote server
- // that should process this message
-
- // @formatter:off
- logger.info("Outgoing topic message: Keyword={}, bucket={}\n"
-                        + "    controller = {}\n"
-                        + "    topic = {}\n"
-                        + "    sender = {}\n"
- + " receiver = {}",
- keyword, bucketNumber, controller.getName(), topic,
- Server.getThisServer().getUuid(), server.getUuid());
- // @formatter:on
-
- Entity<String> entity = Entity.entity(event, MediaType.APPLICATION_JSON);
- server.post("bucket/topic", entity, new Server.PostResponse() {
- @Override
- public WebTarget webTarget(WebTarget webTarget) {
- return webTarget
- .queryParam(QP_BUCKET, bucketNumber)
- .queryParam(QP_KEYWORD, keyword)
- .queryParam(QP_CONTROLLER, controller.getName())
- .queryParam(QP_PROTOCOL, protocol.toString())
- .queryParam(QP_TOPIC, topic);
- }
-
- @Override
- public void response(Response response) {
- // log a message indicating success/failure
- int status = response.getStatus();
- if (status >= 200 && status <= 299) {
- logger.info("/bucket/topic response code = {}", status);
- } else {
- logger.error("/bucket/topic response code = {}", status);
- }
- }
- });
- }
- }
-
- /* ============================================================ */
-
- /**
- * Backup data associated with a Drools session.
- */
- static class DroolsSessionBackup implements Bucket.Backup {
- /**
- * {@inheritDoc}
- */
- @Override
- public Bucket.Restore generate(int bucketNumber) {
- // Go through all of the Drools sessions, and generate backup data.
- // If there is no data to backup for this bucket, return 'null'
-
- DroolsSessionRestore restore = new DroolsSessionRestore();
- return restore.backup(bucketNumber) ? restore : null;
- }
- }
-
- /* ============================================================ */
-
- /**
- * This class is used to generate and restore backup Drools data.
- */
- static class DroolsSessionRestore implements Bucket.Restore, Serializable {
- private static final long serialVersionUID = 1L;
-
- // backup data for all Drools sessions on this host
- private final List<SingleSession> sessions = new LinkedList<>();
-
- /**
-         * Collect backup data for the specified bucket from all local
-         * Drools sessions.
-         *
-         * @param bucketNumber the bucket being backed up
-         * @return 'true' if any session data was collected, 'false' if not
- */
- boolean backup(int bucketNumber) {
- /*
- * There may be multiple Drools sessions being backed up at the same
- * time. There is one 'Pair' in the list for each session being
- * backed up.
- */
- LinkedList<Pair<CompletableFuture<List<Object>>, PolicySession>>
- pendingData = new LinkedList<>();
- for (PolicyContainer pc : PolicyContainer.getPolicyContainers()) {
- for (PolicySession session : pc.getPolicySessions()) {
- // Wraps list of objects, to be populated in the session
- final CompletableFuture<List<Object>> droolsObjectsWrapper =
- new CompletableFuture<>();
-
-                // the 'KieSession' object
- final KieSession kieSession = session.getKieSession();
-
- logger.info("{}: about to fetch data for session {}",
- this, session.getFullName());
- DroolsRunnable backupAndRemove = () -> {
- List<Object> droolsObjects = new ArrayList<>();
- for (FactHandle fh : kieSession.getFactHandles()) {
- Object obj = kieSession.getObject(fh);
- String keyword = Keyword.lookupKeyword(obj);
- if (keyword != null
- && Bucket.bucketNumber(keyword) == bucketNumber) {
- // bucket matches -- include this object
- droolsObjects.add(obj);
- /*
-                         * delete this factHandle from Drools memory --
-                         * these objects are being migrated to another
-                         * bucket owner, so the delete is intentional.
- */
- kieSession.delete(fh);
- }
- }
-
- // send notification that object list is complete
- droolsObjectsWrapper.complete(droolsObjects);
- };
- kieSession.insert(backupAndRemove);
-
- // add pending operation to the list
- pendingData.add(Pair.of(droolsObjectsWrapper, session));
- }
- }
-
- /*
- * data copying can start as soon as we receive results
- * from pending sessions (there may not be any)
- */
- copyDataFromSession(pendingData);
- return !sessions.isEmpty();
- }
-
- /**
- * Copy data from pending sessions.
- * @param pendingData a list of policy sessions
- */
- private void copyDataFromSession(List<Pair<CompletableFuture<List<Object>>, PolicySession>>
- pendingData) {
- long endTime = System.currentTimeMillis() + droolsTimeoutMillis;
-
- for (Pair<CompletableFuture<List<Object>>, PolicySession> pair :
- pendingData) {
- PolicySession session = pair.getRight();
- long delay = endTime - System.currentTimeMillis();
- if (delay < 0) {
- /*
- * we have already reached the time limit, so we will
- * only process data that has already been received
- */
- delay = 0;
- }
- try {
- List<Object> droolsObjects =
- pair.getLeft().get(delay, TimeUnit.MILLISECONDS);
-
- // if we reach this point, session data read has completed
- logger.info("{}: session={}, got {} object(s)",
- this, session.getFullName(),
- droolsObjects.size());
- if (!droolsObjects.isEmpty()) {
- sessions.add(new SingleSession(session, droolsObjects));
- }
- } catch (TimeoutException e) {
- logger.error("{}: Timeout waiting for data from session {}",
- this, session.getFullName());
- } catch (Exception e) {
- logger.error("{}: Exception writing output data", this, e);
- }
- }
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void restore(int bucketNumber) {
- /*
- * There may be multiple Drools sessions being restored at the same
- * time. There is one entry in 'sessionLatches' for each session
- * being restored.
- */
- LinkedList<CountDownLatch> sessionLatches = new LinkedList<>();
- for (SingleSession session : sessions) {
- try {
- CountDownLatch sessionLatch = session.restore();
- if (sessionLatch != null) {
- // there is a restore in progress -- add it to the list
- sessionLatches.add(sessionLatch);
- }
- } catch (IOException | ClassNotFoundException e) {
- logger.error("Exception in {}", this, e);
- }
- }
-
- // wait for all sessions to be updated
- try {
- for (CountDownLatch sessionLatch : sessionLatches) {
- if (!sessionLatch.await(droolsTimeoutMillis, TimeUnit.MILLISECONDS)) {
- logger.error("{}: timed out waiting for session latch", this);
- }
- }
- } catch (InterruptedException e) {
- logger.error("Exception in {}", this, e);
- Thread.currentThread().interrupt();
- }
- }
- }
-
- /* ============================================================ */
-
- /**
- * Each instance of this class corresponds to a Drools session that has
- * been backed up, or is being restored.
- */
- static class SingleSession implements Serializable {
- private static final long serialVersionUID = 1L;
-
- // the group id associated with the Drools container
- String groupId;
-
- // the artifact id associated with the Drools container
- String artifactId;
-
- // the session name within the Drools container
- String sessionName;
-
- // serialized data associated with this session (and bucket)
- byte[] data;
-
- /**
- * Constructor - initialize the 'SingleSession' instance, so it can
- * be serialized.
- *
- * @param session the Drools session being backed up
- * @param droolsObjects the Drools objects from this session associated
- * with the bucket currently being backed up
- */
- SingleSession(PolicySession session, List<Object> droolsObjects) throws IOException {
- // 'groupId' and 'artifactId' are set from the 'PolicyContainer'
- PolicyContainer pc = session.getPolicyContainer();
- groupId = pc.getGroupId();
- artifactId = pc.getArtifactId();
-
- // 'sessionName' is set from the 'PolicySession'
- sessionName = session.getName();
-
- /*
- * serialize the Drools objects -- we serialize them here, because they
- * need to be deserialized within the scope of the Drools session
- */
- data = Util.serialize(droolsObjects);
- }
-
- CountDownLatch restore() throws IOException, ClassNotFoundException {
- PolicySession session = null;
-
- // locate the 'PolicyContainer', and 'PolicySession'
- for (PolicyContainer pc : PolicyContainer.getPolicyContainers()) {
- if (artifactId.equals(pc.getArtifactId())
- && groupId.equals(pc.getGroupId())) {
- session = pc.getPolicySession(sessionName);
- return insertSessionData(session, new ByteArrayInputStream(data));
- }
- }
- logger.error("{}: unable to locate session name {}", this, sessionName);
- return null;
- }
-
- /**
- * Deserialize session data, and insert the objects into the session
- * from within the Drools session thread.
- *
- * @param session the associated PolicySession instance
- * @param bis the data to be deserialized
- * @return a CountDownLatch, which will indicate when the operation has
- * completed (null in case of failure)
- * @throws IOException IO errors while creating or reading from
- * the object stream
- * @throws ClassNotFoundException class not found during deserialization
- */
- private CountDownLatch insertSessionData(PolicySession session, ByteArrayInputStream bis)
- throws IOException, ClassNotFoundException {
- ClassLoader classLoader = session.getPolicyContainer().getClassLoader();
- ExtendedObjectInputStream ois =
- new ExtendedObjectInputStream(bis, classLoader);
-
- /*
- * associate the current thread with the session,
- * and deserialize
- */
- session.setPolicySession();
- Object obj = ois.readObject();
-
- if (obj instanceof List) {
- final List<?> droolsObjects = (List<?>) obj;
- logger.info("{}: session={}, got {} object(s)",
- this, session.getFullName(), droolsObjects.size());
-
- // signal when session update is complete
- final CountDownLatch sessionLatch = new CountDownLatch(1);
-
- // 'KieSession' object
- final KieSession kieSession = session.getKieSession();
-
- // run the following within the Drools session thread
- DroolsRunnable doRestore = () -> {
- try {
- /*
- * Insert all of the objects -- note that this is running
- * in the session thread, so no other rules can fire
- * until all of the objects are inserted.
- */
- for (Object droolsObj : droolsObjects) {
- kieSession.insert(droolsObj);
- }
- } finally {
- // send notification that the inserts have completed
- sessionLatch.countDown();
- }
- };
- kieSession.insert(doRestore);
- ois.close();
- return sessionLatch;
- } else {
- ois.close();
- logger.error("{}: Invalid session data for session={}, type={}",
- this, session.getFullName(), obj.getClass().getName());
- }
- return null;
- }
- }
-}
diff --git a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Keyword.java b/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Keyword.java
deleted file mode 100644
index 0059a4b9..00000000
--- a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Keyword.java
+++ /dev/null
@@ -1,500 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.function.UnaryOperator;
-import lombok.AllArgsConstructor;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class supports the lookup of keywords from objects within Drools
- * sessions. It maps the class of the object into an object implementing
- * the 'Keyword.Lookup' interface. At present, this requires writing
- * special code for each class that can exist in a Drools session that is
- * assignable and relocatable via a bucket. In theory, it would be possible
- * to populate this table through properties, which would use the reflective
- * interface, and indicate the methods and fields to use to do this lookup.
- */
-public class Keyword {
- private static Logger logger = LoggerFactory.getLogger(Keyword.class);
-
- // this table can be used to map an object class into the method
- // to invoke to do the lookup
- private static ConcurrentHashMap<Class<?>, Lookup> classToLookup =
- new ConcurrentHashMap<>();
-
- // this is a pre-defined 'Lookup' instance that always returns 'null'
- private static Lookup nullLookup = (Object obj) -> null;
-
- /**
- * This method takes the object's class, looks it up in the 'classToLookup'
- * table, and then performs the lookup to get the keyword. When a direct
- * lookup on a class fails, it will attempt to find a match using inheritance
- * rules -- if an appropriate match is found, the 'classToLookup' table is
- * updated, so it will be easier next time. If no match is found, the table
- * is also updated, but the 'value' will be 'nullLookup'.
- *
-     * @param obj object to do the lookup on
- * @return a String keyword, if found; 'null' if not
- */
- public static String lookupKeyword(Object obj) {
- Lookup lu = classToLookup.get(obj.getClass());
- if (lu != null) {
- return lu.getKeyword(obj);
- }
- // no entry for this class yet --
- // try to locate a matching entry using 'inheritance' rules
- Class<?> thisClass = obj.getClass();
- Class<?> matchingClass = null;
- for (Map.Entry<Class<?>, Lookup> entry : classToLookup.entrySet()) {
- if (entry.getKey().isAssignableFrom(thisClass)
- && (matchingClass == null
- || matchingClass.isAssignableFrom(entry.getKey()))) {
- // we have a first match, or a more specific match
- matchingClass = entry.getKey();
- lu = entry.getValue();
- }
- }
-
- /*
-         * If no match was found, see if the 'keyword.<CLASS-NAME>.lookup'
-         * properties can provide a solution. Either way, the table is
-         * updated below, so the result is cached for next time.
- */
- if (lu == null && (lu = buildReflectiveLookup(thisClass)) == null) {
- lu = nullLookup;
- }
-
- // update table
- classToLookup.put(thisClass, lu);
- return lu.getKeyword(obj);
- }
-
- /**
- * explicitly place an entry in the table.
- *
- * @param clazz the class to do the lookup on
- * @param handler an instance implementing the 'Lookup' interface,
- * can handle instances of type 'clazz'
- */
- public static void setLookupHandler(Class<?> clazz, Lookup handler) {
- classToLookup.put(clazz, handler);
- }
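-
-    /*
-     * A minimal usage sketch (the event class and its accessor are
-     * hypothetical):
-     *
-     *   Keyword.setLookupHandler(MyEvent.class,
-     *       obj -> ((MyEvent) obj).getRequestId());
-     *
-     * after which 'lookupKeyword' returns that request id (a String) for
-     * any 'MyEvent' instance, or for subclasses via the inheritance rules
-     * described above.
-     */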
-
- /* ============================================================ */
-
- /**
-     * This is the interface that must be implemented by objects in the
- * 'classToLookup' table.
- */
- public interface Lookup {
- /**
- * Map the object into a keyword string.
- *
- * @param obj the object to lookup, which should be an instance of the
- * associated class in the 'classToLookup' table
- * @return the keyword, if found; 'null' if not
- */
- public String getKeyword(Object obj);
- }
-
- /* ============================================================ */
-
- // this table maps class name to a sequence of method calls and field
- // references, based upon 'keyword.<CLASS-NAME>.lookup' entries found
- // in the property list
- private static Map<String, String> classNameToSequence = null;
-
- static final String KEYWORD_PROPERTY_START = "keyword.";
- static final String KEYWORD_PROPERTY_END = ".lookup";
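-
-    /*
-     * Illustrative (hypothetical) property entry of this form -- the class
-     * and accessor names are examples only:
-     *
-     *   keyword.org.example.MyEvent.lookup=getCommonHeader().requestId:uuid
-     *
-     * i.e. call 'getCommonHeader()', read the public 'requestId' field of
-     * the result, and apply the 'uuid' conversion function to that value.
-     */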
-
- /**
- * Attempt to build a 'Lookup' instance for a particular class based upon
- * properties.
- *
- * @param clazz the class to build an entry for
- * @return a 'Lookup' instance to do the lookup, or 'null' if one can't
- * be generated from the available properties
- */
- private static synchronized Lookup buildReflectiveLookup(Class<?> clazz) {
- if (classNameToSequence == null) {
- classNameToSequence = new HashMap<>();
- Properties prop = ServerPoolProperties.getProperties();
-
- /*
- * iterate over all of the properties, picking out those
- * that match the name 'keyword.<CLASS-NAME>.lookup'
- */
- for (String name : prop.stringPropertyNames()) {
- if (name.startsWith(KEYWORD_PROPERTY_START)
- && name.endsWith(KEYWORD_PROPERTY_END)) {
- // this property matches -- locate the '<CLASS-NAME>' part
- int beginIndex = KEYWORD_PROPERTY_START.length();
- int endIndex = name.length()
- - KEYWORD_PROPERTY_END.length();
- if (beginIndex < endIndex) {
- // add it to the table
- classNameToSequence.put(name.substring(beginIndex, endIndex),
- prop.getProperty(name));
- }
- }
- }
- }
-
- Class<?> keyClass = buildReflectiveLookupFindKeyClass(clazz);
-
- if (keyClass == null) {
- // no matching class name found
- return null;
- }
-
- return buildReflectiveLookupBuild(clazz, keyClass);
- }
-
- /**
- * Look for the "best match" for class 'clazz' in the hash table.
- * First, look for the name of 'clazz' itself, followed by all of
- * interfaces. If no match is found, repeat with the superclass,
- * and all the way up the superclass chain.
- */
- private static Class<?> buildReflectiveLookupFindKeyClass(Class<?> clazz) {
- for (Class<?> cl = clazz; cl != null; cl = cl.getSuperclass()) {
- if (classNameToSequence.containsKey(cl.getName())) {
- // matches the class
- return cl;
- }
- for (Class<?> intf : cl.getInterfaces()) {
- if (classNameToSequence.containsKey(intf.getName())) {
- // matches one of the interfaces
- return intf;
- }
-                // an interface can extend other interfaces -- check those too
-                for (Class<?> superIntf : intf.getInterfaces()) {
-                    if (classNameToSequence.containsKey(superIntf.getName())) {
-                        // matches a superinterface
-                        return superIntf;
-                    }
-                }
- }
- }
- return null;
- }
-
- private static Lookup buildReflectiveLookupBuild(Class<?> clazz, Class<?> keyClass) {
- // we found a matching key in the table -- now, process the values
- Class<?> currentClass = keyClass;
-
- /*
- * there may potentially be a chain of entries if multiple
- * field and/or method calls are in the sequence -- this is the first
- */
- ReflectiveLookup first = null;
-
- // this is the last entry in the list
- ReflectiveLookup last = null;
-
- /*
- * split the value into segments, where each segment has the form
- * 'FIELD-NAME' or 'METHOD-NAME()', with an optional ':CONVERSION'
- * at the end
- */
- String sequence = classNameToSequence.get(keyClass.getName());
- ConversionFunctionLookup conversionFunctionLookup = null;
- int index = sequence.indexOf(':');
- if (index >= 0) {
- // conversion function specified
- conversionFunctionLookup =
- new ConversionFunctionLookup(sequence.substring(index + 1));
- sequence = sequence.substring(0, index);
- }
- for (String segment : sequence.split("\\.")) {
- ReflectiveLookup current = null;
- ReflectiveOperationException error = null;
- try {
- if (segment.endsWith("()")) {
- // this segment is a method lookup
- current = new MethodLookup(currentClass,
- segment.substring(0, segment.length() - 2));
- } else {
- // this segment is a field lookup
- current = new FieldLookup(currentClass, segment);
- }
- } catch (ReflectiveOperationException e) {
- // presumably the field or method does not exist in this class
- error = e;
- }
- if (current == null) {
- logger.error("Keyword.buildReflectiveLookup: build error "
- + "(class={},value={},segment={})",
- clazz.getName(),
- classNameToSequence.get(keyClass.getName()),
- segment,
- error);
- return null;
- }
-
- // if we reach this point, we processed this segment successfully
- currentClass = current.nextClass();
- if (first == null) {
- // the initial segment
- first = current;
- } else {
- // link to the most recently created segment
- last.next = current;
- }
- // update most recently created segment
- last = current;
- }
-
- // add optional conversion function ('null' if it doesn't exist)
- // The preceding 'sequence.split(...)' will always return at
- // least one entry, so there will be at least one
-        // attempt to go through the loop. If the loop completes without
-        // returning early (or throwing), 'current' and 'last' will both be
- // set to non-null values.
- last.next = conversionFunctionLookup; // NOSONAR
-
- // successful - return the first 'Lookup' instance in the chain
- return first;
- }
-
- /* ============================================================ */
-
- /**
- * Abstract superclass of 'FieldLookup' and 'MethodLookup'.
- */
- private abstract static class ReflectiveLookup implements Lookup {
- // link to the next 'Lookup' instance in the chain
- Lookup next = null;
-
- /**
- * Return the next 'class' instance.
- *
- * @return the class associated with the return value of the
- * field or method lookup
- */
- abstract Class<?> nextClass();
- }
-
- /* ============================================================ */
-
- /**
- * This class is used to do a field lookup.
- */
- private static class FieldLookup extends ReflectiveLookup {
- // the reflective 'Field' instance associated with this lookup
- Field field;
-
- /**
- * Constructor.
- *
- * @param clazz the 'class' we are doing the field lookup on
- * @param segment a segment from the property value, which is just the
- * field name
- */
- FieldLookup(Class<?> clazz, String segment) throws NoSuchFieldException {
- field = clazz.getField(segment);
- }
-
- /* ****************************** */
- /* 'ReflectiveLookup' interface */
- /* ****************************** */
-
- /**
- * {@inheritDoc}
- */
- @Override
- Class<?> nextClass() {
- return field.getType();
- }
-
- /* ******************** */
- /* 'Lookup' interface */
- /* ******************** */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String getKeyword(Object obj) {
- try {
- // do the field lookup
- Object rval = field.get(obj);
- if (rval == null) {
- return null;
- }
-
- // If there is no 'next' entry specified, this value is the
- // keyword. Otherwise, move on to the next 'Lookup' entry in
- // the chain.
- return next == null ? rval.toString() : next.getKeyword(rval);
- } catch (Exception e) {
- logger.error("Keyword.FieldLookup error: field={}",
- field, e);
- return null;
- }
- }
- }
-
- /* ============================================================ */
-
- /**
- * This class is used to do a method call on the target object.
- */
- private static class MethodLookup extends ReflectiveLookup {
- // the reflective 'Method' instance associated with this lookup
- Method method;
-
- /**
- * Constructor.
- *
- * @param clazz the 'class' we are doing the method lookup on
- * @param name a method name extracted from a segment from the
- * property value, which is the
- */
- MethodLookup(Class<?> clazz, String name) throws NoSuchMethodException {
- method = clazz.getMethod(name);
- }
-
- /*==============================*/
- /* 'ReflectiveLookup' interface */
- /*==============================*/
-
- /**
- * {@inheritDoc}
- */
- @Override
- Class<?> nextClass() {
- return method.getReturnType();
- }
-
- /*====================*/
- /* 'Lookup' interface */
- /*====================*/
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String getKeyword(Object obj) {
- try {
- // do the method call
- Object rval = method.invoke(obj);
- if (rval == null) {
- return null;
- }
-
- // If there is no 'next' entry specified, this value is the
- // keyword. Otherwise, move on to the next 'Lookup' entry in
- // the chain.
- return next == null ? rval.toString() : next.getKeyword(rval);
- } catch (Exception e) {
- logger.error("Keyword.MethodLookup error: method={}",
- method, e);
- return null;
- }
- }
- }
-
- /* ============================================================ */
-
- /*
- * Support for named "conversion functions", which take an input keyword,
- * and return a possibly different keyword derived from it. The initial
- * need is to take a string which consists of a UUID and a suffix, and
- * return the base UUID.
- */
-
- // used to lookup optional conversion functions
- private static Map<String, UnaryOperator<String>> conversionFunction =
- new ConcurrentHashMap<>();
-
- // conversion function 'uuid':
-    //     truncate strings to 36 characters (UUID length)
- static final int UUID_LENGTH = 36;
-
- static {
- conversionFunction.put("uuid", value ->
- // truncate strings to 36 characters
- value != null && value.length() > UUID_LENGTH
- ? value.substring(0, UUID_LENGTH) : value
- );
- }
-
- /**
- * Add a conversion function.
- *
- * @param name the conversion function name
- * @param function the object that does the transformation
- */
- public static void addConversionFunction(String name, UnaryOperator<String> function) {
- conversionFunction.put(name, function);
- }
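-
-    /*
-     * A minimal usage sketch (the function name "prefix8" is hypothetical):
-     * register a converter that keeps only the first 8 characters of a
-     * keyword --
-     *
-     *   Keyword.addConversionFunction("prefix8", value ->
-     *       value != null && value.length() > 8 ? value.substring(0, 8) : value);
-     *
-     * after which a field-list entry ending in ":prefix8" will apply it.
-     */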
-
- /**
- * Apply a named conversion function to a keyword.
- *
- * @param inputKeyword this is the keyword extracted from a message or object
- * @param functionName this is the name of the conversion function to apply
- * (if 'null', no conversion is done)
- * @return the converted keyword
- */
- public static String convertKeyword(String inputKeyword, String functionName) {
- if (functionName == null || inputKeyword == null) {
- // don't do any conversion -- just return the input keyword
- return inputKeyword;
- }
-
- // look up the function
- UnaryOperator<String> function = conversionFunction.get(functionName);
- if (function == null) {
- logger.error("{}: conversion function not found", functionName);
- return null;
- }
-
- // call the conversion function, and return the value
- return function.apply(inputKeyword);
- }
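-
-    /*
-     * Illustrative example: convertKeyword("<uuid>-suffix", "uuid") truncates
-     * the value to its leading 36-character UUID portion, while
-     * convertKeyword(value, null) returns 'value' unchanged.
-     */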
-
- /**
- * This class is used to invoke a conversion function.
- */
- @AllArgsConstructor
- private static class ConversionFunctionLookup implements Lookup {
- // the conversion function name
- private final String functionName;
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String getKeyword(Object obj) {
- return obj == null ? null : convertKeyword(obj.toString(), functionName);
- }
- }
-}
diff --git a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Leader.java b/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Leader.java
deleted file mode 100644
index be291708..00000000
--- a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Leader.java
+++ /dev/null
@@ -1,594 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_LEADER_STABLE_IDLE_CYCLES;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_LEADER_STABLE_VOTING_CYCLES;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.LEADER_STABLE_IDLE_CYCLES;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.LEADER_STABLE_VOTING_CYCLES;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.getProperty;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.nio.charset.StandardCharsets;
-import java.util.Base64;
-import java.util.HashSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.UUID;
-import javax.ws.rs.client.Entity;
-import javax.ws.rs.core.MediaType;
-import lombok.EqualsAndHashCode;
-import lombok.Setter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class handles the election of the lead server. The lead server
- * handles bucket assignments, and also is the server running the
- * 'Discovery' procedure long-term (other servers run the procedure only
- * until a leader is elected).
- * Note that everything in this class is run under the 'MainLoop' thread,
- * with the exception of the invocation and first two statements of the
- * 'voteData' method.
- */
-class Leader {
- private static Logger logger = LoggerFactory.getLogger(Leader.class);
-
- // Listener class to handle state changes that may lead to a new election
- private static EventHandler eventHandler = new EventHandler();
-
- static {
- Events.register(eventHandler);
- }
-
-    // Server currently in the leader role
- @Setter
- private static Server leaderLocal = null;
-
- // Vote state machine -- it is null, unless a vote is in progress
- @Setter
- private static VoteCycle voteCycle = null;
-
- private static UUID emptyUUID = new UUID(0L, 0L);
-
- /*==================================================*/
- /* Some properties extracted at initialization time */
- /*==================================================*/
-
- // how many cycles of stability before voting starts
- private static int stableIdleCycles;
-
-    // how many cycles of stability before declaring a winner
- private static int stableVotingCycles;
-
- /**
- * Hide implicit public constructor.
- */
- private Leader() {
- // everything here is static -- no instances of this class are created
- }
-
- /**
- * Invoked at startup, or after some events -- immediately start a new vote.
- */
- static void startup() {
- // fetch some static properties
- stableIdleCycles = getProperty(LEADER_STABLE_IDLE_CYCLES,
- DEFAULT_LEADER_STABLE_IDLE_CYCLES);
- stableVotingCycles = getProperty(LEADER_STABLE_VOTING_CYCLES,
- DEFAULT_LEADER_STABLE_VOTING_CYCLES);
-
- startVoting();
- }
-
- /**
- * start, or restart voting.
- */
- private static void startVoting() {
- if (voteCycle == null) {
- voteCycle = new VoteCycle();
- MainLoop.addBackgroundWork(voteCycle);
- } else {
- voteCycle.serverChanged();
- }
- }
-
- /**
- * Return the current leader.
- *
- * @return the current leader ('null' if none has been selected)
- */
- public static Server getLeader() {
- return leaderLocal;
- }
-
- /**
- * Handle an incoming /vote REST message.
- *
- * @param data base64-encoded data, containing vote data
- */
- static void voteData(byte[] data) {
- // decode base64 data
- final byte[] packet = Base64.getDecoder().decode(data);
-
- MainLoop.queueWork(() -> {
- // This runs within the 'MainLoop' thread --
- // create the 'VoteCycle' state machine, if needed
- if (voteCycle == null) {
- voteCycle = new VoteCycle();
- MainLoop.addBackgroundWork(voteCycle);
- }
- try {
- // pass data to 'VoteCycle' state machine
- voteCycle.packetReceived(packet);
- } catch (IOException e) {
-                logger.error("Exception in 'Leader.voteData'", e);
- }
- });
- }
-
- /* ============================================================ */
-
- /**
- * There is a single instance of this class (Leader.eventHandler), which
- * is registered to listen for notifications of state transitions. Note
- * that all of these methods are running within the 'MainLoop' thread.
- */
- private static class EventHandler extends Events {
- /**
- * {@inheritDoc}
- */
- @Override
- public void newServer(Server server) {
- // a new server has joined -- start/restart the VoteCycle state machine
- startVoting();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void serverFailed(Server server) {
- if (server == leaderLocal) {
- // the lead server has failed --
- // start/restart the VoteCycle state machine
- setLeaderLocal(null);
- startVoting();
-
- // send out a notification that the lead server has failed
- for (Events listener : Events.getListeners()) {
- listener.leaderFailed(server);
- }
- } else if (voteCycle != null) {
- // a vote is in progress -- restart the state machine
- // (don't do anything if there is no vote in progress)
- voteCycle.serverChanged();
- }
- }
- }
-
- /* ============================================================ */
-
- /**
- * This is the 'VoteCycle' state machine -- it runs as background work
- * on the 'MainLoop' thread, and goes away when a leader is elected.
- */
- private static class VoteCycle implements Runnable {
- enum State {
- // server just started up -- 5 second grace period
- STARTUP,
-
- // voting in progress -- changes have occurred in the last cycle
- VOTING,
- }
-
- // maps UUID voted for into the associated data
- private final TreeMap<UUID, VoteData> uuidToVoteData =
- new TreeMap<>(Util.uuidComparator);
-
- // maps voter UUID into the associated data
- private final TreeMap<UUID, VoterData> uuidToVoterData =
- new TreeMap<>(Util.uuidComparator);
-
- // sorted list of 'VoteData' (most preferable to least)
- private final TreeSet<VoteData> voteData = new TreeSet<>();
-
- // data to send out next cycle
- private final HashSet<VoterData> updatedVotes = new HashSet<>();
-
- private State state = State.STARTUP;
- private int cycleCount = stableIdleCycles;
-
- /**
- * Constructor - if there is no leader, or this server is the leader,
- * start the 'Discovery' thread.
- */
- VoteCycle() {
- if (leaderLocal == null || leaderLocal == Server.getThisServer()) {
- Discovery.startDiscovery();
- }
- }
-
- /**
- * A state change has occurred that invalidates any vote in progress --
- * restart the VoteCycle state machine.
- */
- void serverChanged() {
- // clear all of the tables
- uuidToVoteData.clear();
- uuidToVoterData.clear();
- voteData.clear();
- updatedVotes.clear();
-
- // wait for things to stabilize before continuing
- state = State.STARTUP;
- cycleCount = stableIdleCycles;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void run() {
- switch (state) {
- case STARTUP:
- startupState();
- break;
-
- case VOTING:
- votingState();
- break;
-
- default:
- logger.error("Unknown state: {}", state);
- break;
- }
- }
-
- private void startupState() {
- // grace period ('stableIdleCycles' cycles) -- wait for things to
- // stabilize before starting the vote
- cycleCount -= 1;
- if (cycleCount <= 0) {
- logger.info("VoteCycle: {} seconds have passed",
- stableIdleCycles);
- updateMyVote();
- sendOutUpdates();
- state = State.VOTING;
- cycleCount = stableVotingCycles;
- }
- }
-
- private void votingState() {
- // need to remain in the VOTING state, with no vote changes, for
- // 'stableVotingCycles' cycles -- once that happens, the leader is chosen
- if (sendOutUpdates()) {
- // changes have occurred -- reset the grace period
- cycleCount = stableVotingCycles;
- return;
- }
-
- cycleCount -= 1;
- if (cycleCount > 0) {
- return;
- }
-
- // the grace period has passed -- the leader is the one with
- // the most votes, which is the first entry in 'voteData'
- Server oldLeader = leaderLocal;
- setLeaderLocal(Server.getServer(voteData.first().uuid));
- if (leaderLocal != oldLeader) {
- // the leader has changed -- send out notifications
- for (Events listener : Events.getListeners()) {
- listener.newLeader(leaderLocal);
- }
- } else {
- // the election is over, and the leader has been confirmed
- for (Events listener : Events.getListeners()) {
- listener.leaderConfirmed(leaderLocal);
- }
- }
- if (leaderLocal == Server.getThisServer()) {
- // this is the lead server --
- // make sure the 'Discovery' threads are running
- Discovery.startDiscovery();
- } else {
- // this is not the lead server -- stop 'Discovery' threads
- Discovery.stopDiscovery();
- }
-
- // we are done with voting -- clean up, and report results
- MainLoop.removeBackgroundWork(this);
- setVoteCycle(null);
-
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- PrintStream out = new PrintStream(bos);
-
- out.println("Voting results:");
-
- // x(36) xxxxx x(36)
- // UUID Votes Voter
- String format = "%-36s %5s %-36s\n";
- out.format(format, "UUID", "Votes", "Voter(s)");
- out.format(format, "----", "-----", "--------");
-
- outputVotes(out, format);
- logger.info("Output - {}", bos);
- }
-
- private void outputVotes(PrintStream out, String format) {
- for (VoteData vote : voteData) {
- if (vote.voters.isEmpty()) {
- out.format(format, vote.uuid, 0, "");
- continue;
- }
- boolean headerNeeded = true;
- for (VoterData voter : vote.voters) {
- if (headerNeeded) {
- out.format(format, vote.uuid,
- vote.voters.size(), voter.uuid);
- headerNeeded = false;
- } else {
- out.format(format, "", "", voter.uuid);
- }
- }
- }
- }
-
- /**
- * Process an incoming /vote REST message.
- *
- * @param packet vote data, containing one or more votes
- */
- private void packetReceived(byte[] packet) throws IOException {
- DataInputStream dis =
- new DataInputStream(new ByteArrayInputStream(packet));
-
- while (dis.available() != 0) {
- // message is a series of:
- // 16-bytes voter UUID
- // 16-bytes vote UUID
- // 8-bytes timestamp
- long tmp = dis.readLong(); // most significant bits
- UUID voter = new UUID(tmp, dis.readLong());
-
- tmp = dis.readLong();
- UUID vote = new UUID(tmp, dis.readLong());
-
- long timestamp = dis.readLong();
-
- // process the single vote
- processVote(voter, vote, timestamp);
- }
- }
-
- /**
- * Process a single incoming vote.
- *
- * @param voter the UUID of the Server making this vote
- * @param vote the UUID of the Server that 'voter' voted for
- * @param timestamp the time when the vote was made
- */
- private void processVote(UUID voter, UUID vote, long timestamp) {
- // fetch old data for this voter
- VoterData voterData = uuidToVoterData.computeIfAbsent(voter,
- key -> new VoterData(voter, timestamp));
- if (timestamp >= voterData.timestamp) {
- // this is a new vote for this voter -- update the timestamp
- voterData.timestamp = timestamp;
- } else {
- // this vote has already been processed, or is obsolete -- ignore it
- return;
- }
-
- // fetch the old vote, if any, for this voter
- VoteData oldVoteData = voterData.vote;
- VoteData newVoteData = null;
-
- if (vote != null) {
- newVoteData = uuidToVoteData.computeIfAbsent(vote, key -> new VoteData(vote));
- }
-
- if (oldVoteData != newVoteData) {
- // the vote has changed -- update the 'voterData' entry,
- // and include this in the next set of outgoing messages
- logger.info("{} voting for {}", voter, vote);
- voterData.vote = newVoteData;
- updatedVotes.add(voterData);
-
- if (oldVoteData != null) {
- // remove the old vote data
- voteData.remove(oldVoteData);
- oldVoteData.voters.remove(voterData);
- if (oldVoteData.voters.isEmpty()) {
- // no voters left -- remove the entry
- uuidToVoteData.remove(oldVoteData.uuid);
- } else {
- // reinsert in a new position
- voteData.add(oldVoteData);
- }
- }
-
- if (newVoteData != null) {
- // update the new vote data
- voteData.remove(newVoteData);
- newVoteData.voters.add(voterData);
- voteData.add(newVoteData);
- }
- }
- }
-
- /**
- * If any updates have occurred, send them out to all servers on
- * the "notify list".
- *
- * @return 'true' if one or more votes have changed, 'false' if not
- */
- private boolean sendOutUpdates() {
- try {
- if (updatedVotes.isEmpty()) {
- // no changes to send out
- return false;
- }
-
- // possibly change vote based on current information
- updateMyVote();
-
- // generate message to send out
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- DataOutputStream dos = new DataOutputStream(bos);
-
- // go through all of the updated votes
- for (VoterData voterData : updatedVotes) {
- // voter UUID
- dos.writeLong(voterData.uuid.getMostSignificantBits());
- dos.writeLong(voterData.uuid.getLeastSignificantBits());
-
- // vote UUID
- UUID vote =
- (voterData.vote == null ? emptyUUID : voterData.vote.uuid);
- dos.writeLong(vote.getMostSignificantBits());
- dos.writeLong(vote.getLeastSignificantBits());
-
- // timestamp
- dos.writeLong(voterData.timestamp);
- }
- updatedVotes.clear();
-
- // create an 'Entity' that can be sent out to all hosts
- Entity<String> entity = Entity.entity(
- new String(Base64.getEncoder().encode(bos.toByteArray()), StandardCharsets.UTF_8),
- MediaType.APPLICATION_OCTET_STREAM_TYPE);
-
- // send out to all servers on the notify list
- for (Server server : Server.getNotifyList()) {
- server.post("vote", entity);
- }
- return true;
- } catch (IOException e) {
- logger.error("Exception in VoteCycle.sendOutUpdates", e);
- return false;
- }
- }
-
- /**
- * (Possibly) change this server's vote, based upon the votes of other voters.
- */
- private void updateMyVote() {
- UUID myVote = null;
-
- if (uuidToVoterData.size() * 2 < Server.getServerCount()) {
- // fewer than half of the nodes have voted
- if (leaderLocal != null) {
- // choose the current leader
- myVote = leaderLocal.getUuid();
- } else {
- // choose the first entry in the servers list
- myVote = Server.getFirstServer().getUuid();
- }
- } else {
- // choose the first entry we know about
- for (VoteData vote : voteData) {
- if (Server.getServer(vote.uuid) != null) {
- myVote = vote.uuid;
- break;
- }
- }
- }
- if (myVote != null) {
- // update the vote for this host, and include it in the list
- processVote(Server.getThisServer().getUuid(), myVote,
- System.currentTimeMillis());
- }
- }
- }
-
- /* ============================================================ */
-
- /**
- * This class corresponds to a single vote recipient --
- * the Server being voted for.
- */
- @EqualsAndHashCode
- private static class VoteData implements Comparable<VoteData> {
- // uuid voted for
- private UUID uuid;
-
- // the set of all voters that voted for this server
- private HashSet<VoterData> voters = new HashSet<>();
-
- /**
- * Constructor -- set the UUID.
- */
- VoteData(UUID uuid) {
- this.uuid = uuid;
- }
-
- /*================================*/
- /* Comparable<VoteData> interface */
- /*================================*/
-
- /**
- * {@inheritDoc}
- */
- @Override
- public int compareTo(VoteData other) {
- // favor highest vote count
- // in case of a tie, compare UUIDs (favor smallest)
-
- int rval = other.voters.size() - voters.size();
- if (rval == 0) {
- // vote counts equal -- favor the smaller UUID
- rval = Util.uuidComparator.compare(uuid, other.uuid);
- }
- return rval;
- }
- }
-
- /* ============================================================ */
-
- /**
- * This class corresponds to the vote of a single server.
- */
- private static class VoterData {
- // voter UUID
- private UUID uuid;
-
- // most recently cast vote from this voter
- private VoteData vote = null;
-
- // time when the vote was cast
- private long timestamp = 0;
-
- /**
- * Constructor - store the UUID and timestamp.
- */
- private VoterData(UUID uuid, long timestamp) {
- this.uuid = uuid;
- this.timestamp = timestamp;
- }
- }
-}
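For reference, the /vote message handled by 'VoteCycle.packetReceived' above is a base64-encoded sequence of fixed-size 40-byte records: a 16-byte voter UUID, a 16-byte vote UUID, and an 8-byte timestamp. The following stand-alone sketch is illustrative only and is not part of the feature source (the class name 'VoteEncodingSketch' is invented for the example); it shows how one such record could be built with JDK classes, mirroring what 'sendOutUpdates' does before POSTing to the /vote endpoint.

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Base64;
    import java.util.UUID;

    // Illustrative sketch: build one 40-byte vote record (voter UUID, vote UUID,
    // timestamp) and base64-encode it, matching the format read by
    // VoteCycle.packetReceived and written by VoteCycle.sendOutUpdates.
    public final class VoteEncodingSketch {
        static String encodeVote(UUID voter, UUID vote, long timestamp) throws IOException {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            DataOutputStream dos = new DataOutputStream(bos);

            // 16-byte voter UUID
            dos.writeLong(voter.getMostSignificantBits());
            dos.writeLong(voter.getLeastSignificantBits());

            // 16-byte vote UUID
            dos.writeLong(vote.getMostSignificantBits());
            dos.writeLong(vote.getLeastSignificantBits());

            // 8-byte timestamp
            dos.writeLong(timestamp);

            // base64-encode, as done for the /vote message body
            return Base64.getEncoder().encodeToString(bos.toByteArray());
        }

        public static void main(String[] args) throws IOException {
            System.out.println(encodeVote(UUID.randomUUID(), UUID.randomUUID(),
                    System.currentTimeMillis()));
        }
    }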
diff --git a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/MainLoop.java b/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/MainLoop.java
deleted file mode 100644
index ca5e86ac..00000000
--- a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/MainLoop.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_MAINLOOP_CYCLE;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.MAINLOOP_CYCLE;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.getProperty;
-
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.LinkedTransferQueue;
-import java.util.concurrent.TimeUnit;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class provides a single thread that is used for 'Server' and 'Bucket'
- * updates. This simplifies things because it greatly reduces the need for
- * synchronization within these classes.
- */
-class MainLoop extends Thread {
- private static Logger logger = LoggerFactory.getLogger(MainLoop.class);
-
- // this queue is used to send work to the 'MainLoop' thread, for things
- // like processing incoming messages
- private static LinkedTransferQueue<Runnable> incomingWork =
- new LinkedTransferQueue<>();
-
- // this is used for work that should be invoked every cycle
- private static ConcurrentLinkedQueue<Runnable> backgroundWork =
- new ConcurrentLinkedQueue<>();
-
- // this is the 'MainLoop' thread
- private static volatile MainLoop mainLoop = null;
-
- // main loop cycle time
- private static long cycleTime;
-
- /**
- * If it isn't already running, start the 'MainLoop' thread.
- */
- public static synchronized void startThread() {
- cycleTime = getProperty(MAINLOOP_CYCLE, DEFAULT_MAINLOOP_CYCLE);
- if (mainLoop == null) {
- mainLoop = new MainLoop();
- mainLoop.start();
- }
- }
-
- /**
- * If it is currently running, stop the 'MainLoop' thread.
- */
- public static synchronized void stopThread() {
- // this won't be immediate, but the thread should discover it shortly
- MainLoop saveMainLoop = mainLoop;
-
- mainLoop = null;
- if (saveMainLoop != null) {
- saveMainLoop.interrupt();
- }
- }
-
- /**
- * Add some work to the 'incomingWork' queue -- this runs once, and is
- * automatically removed from the queue.
- *
- * @param work this is the Runnable to invoke
- */
- public static void queueWork(Runnable work) {
- if (!incomingWork.offer(work)) {
- logger.info("incomingWork returned false");
- }
- }
-
- /**
- * Add some work to the 'backgroundWork' queue -- this runs every cycle,
- * until it is manually removed.
- *
- * @param work this is the Runnable to invoke every cycle
- */
- public static void addBackgroundWork(Runnable work) {
- // if it is already here, remove it first
- backgroundWork.remove(work);
-
- // add to the end of the queue
- backgroundWork.add(work);
- }
-
- /**
- * Remove work from the 'backgroundWork' queue.
- *
- * @param work this is the Runnable to remove from the queue
- * @return true if the background work was found, and removed
- */
- public static boolean removeBackgroundWork(Runnable work) {
- return backgroundWork.remove(work);
- }
-
- /**
- * Constructor.
- */
- private MainLoop() {
- super("Main Administrative Loop");
- }
-
- /**
- * This is the main processing loop for "administrative" messages, which
- * manage 'Server' states.
- * 1) Process incoming messages (other threads are reading in and queueing
- * the messages), making note of information that should be forwarded to
- * other servers.
- * 2) Send out updates to all servers on the 'notify' list
- * 3) Go through list of all 'Server' entries, and see which ones have
- * taken too long to respond -- those are treated as 'failed'
- */
- @Override
- public void run() {
- while (this == mainLoop) {
- try {
- // the following reads in messages for up to one main-loop cycle
- handleIncomingWork();
-
- // send out notifications to other hosts
- Server.sendOutData();
-
- // search for hosts which have taken too long to respond
- Server.searchForFailedServers();
-
- // work that runs every cycle
- for (Runnable work : backgroundWork) {
- backgroundWorkRunnable(work);
- }
- } catch (Exception e) {
- logger.error("Exception in MainLoop", e);
- }
- }
- }
-
- /**
- * Run a single background work item, logging (rather than propagating) any exception.
- */
- static void backgroundWorkRunnable(Runnable work) {
- try {
- work.run();
- } catch (Exception e) {
- logger.error("Exception in MainLoop background work", e);
- }
- }
-
- /**
- * Poll for and process incoming messages for up to one main-loop cycle.
- */
- static void handleIncomingWork() {
- long currentTime = System.currentTimeMillis();
- long wakeUpTime = currentTime + cycleTime;
- long timeDiff;
-
- // receive incoming messages
- while ((timeDiff = wakeUpTime - currentTime) > 0) {
- try {
- Runnable work =
- incomingWork.poll(timeDiff, TimeUnit.MILLISECONDS);
- if (work == null) {
- // timeout -- we are done processing messages for now
- return;
- }
- work.run();
- } catch (InterruptedException e) {
- logger.error("Interrupted in MainLoop");
- Thread.currentThread().interrupt();
- return;
- } catch (Exception e) {
- logger.error("Exception in MainLoop incoming work", e);
- }
- currentTime = System.currentTimeMillis();
- }
- }
-}
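The class above distinguishes two kinds of work with different lifetimes: work queued with 'queueWork' runs once on the MainLoop thread and is then discarded, while work added with 'addBackgroundWork' runs every cycle until it is removed. A minimal usage sketch follows; it assumes the code lives in the same package (MainLoop is package-private), that the server-pool properties have already been loaded (for example via Server.startup), and the class name 'MainLoopUsageSketch' is invented for the example.

    package org.onap.policy.drools.serverpool;

    // Illustrative sketch only: exercise the MainLoop API described above.
    public final class MainLoopUsageSketch {
        public static void main(String[] args) throws InterruptedException {
            MainLoop.startThread();

            // one-shot work: runs once on the MainLoop thread, then is discarded
            MainLoop.queueWork(() -> System.out.println("handle one incoming message"));

            // background work: runs once per cycle until explicitly removed
            Runnable audit = () -> System.out.println("periodic audit");
            MainLoop.addBackgroundWork(audit);

            Thread.sleep(3000);                    // let a few cycles run
            MainLoop.removeBackgroundWork(audit);
            MainLoop.stopThread();
        }
    }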
diff --git a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/RestServerPool.java b/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/RestServerPool.java
deleted file mode 100644
index 2c0a2544..00000000
--- a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/RestServerPool.java
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import io.swagger.annotations.Api;
-import io.swagger.annotations.ApiOperation;
-import io.swagger.annotations.Info;
-import io.swagger.annotations.SwaggerDefinition;
-import io.swagger.annotations.Tag;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.nio.charset.StandardCharsets;
-import java.util.UUID;
-import javax.ws.rs.Consumes;
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class contains methods for processing incoming REST messages.
- */
-
-@Path("/")
-@Api
-@SwaggerDefinition(
- info = @Info(
- description = "PDP-D Server Pool Telemetry Service",
- version = "v1.0",
- title = "PDP-D Server Pool Telemetry"
- ),
- consumes = {MediaType.APPLICATION_JSON},
- produces = {MediaType.APPLICATION_JSON, MediaType.TEXT_PLAIN},
- schemes = {SwaggerDefinition.Scheme.HTTP},
- tags = {
- @Tag(name = "pdp-d-server-pool-telemetry", description = "Drools PDP Server Pool Telemetry Operations")
- }
- )
-public class RestServerPool {
- private static Logger logger = LoggerFactory.getLogger(RestServerPool.class);
-
- /**
- * Handle the '/test' REST call.
- */
- @GET
- @Path("/test")
- @ApiOperation(
- value = "Perform an incoming /test request",
- notes = "Provides an acknowledge message back to requestor",
- response = String.class
- )
- @Produces(MediaType.TEXT_PLAIN)
- public String test() {
- return "RestServerPool.test()";
- }
-
- /* ============================================================ */
-
- /**
- * Handle the '/admin' REST call.
- */
- @POST
- @Path("/admin")
- @ApiOperation(
- value = "Perform an incoming /admin request",
- notes = "This rest call decodes incoming admin message (base-64-encoded) and "
- + "send to main thread for processing"
- )
- @Consumes(MediaType.APPLICATION_OCTET_STREAM)
- public void adminRequest(byte[] data) {
- Server.adminRequest(data);
- }
-
- /**
- * Handle the '/vote' REST call.
- */
- @POST
- @Path("/vote")
- @ApiOperation(
- value = "Perform an incoming /vote request",
- notes = "The request data containing voter and vote data to be processed"
- )
- @Consumes(MediaType.APPLICATION_OCTET_STREAM)
- public void voteData(byte[] data) {
- Leader.voteData(data);
- }
-
- /**
- * Handle the '/bucket/update' REST call.
- */
- @POST
- @Path("/bucket/update")
- @ApiOperation(
- value = "Perform an incoming /bucket/update request",
- notes = "The request data include owner, state, primaryBackup and secondaryBackup"
- )
- @Consumes(MediaType.APPLICATION_OCTET_STREAM)
- public void updateBucket(byte[] data) {
- Bucket.updateBucket(data);
- }
-
- /**
- * Handle the '/bucket/topic' REST call.
- */
- @POST
- @Path("/bucket/topic")
- @ApiOperation(
- value = "Perform an incoming /bucket/topic request",
- notes = "Forward an incoming topic message from a remote host, the request data include "
- + "bucketNumber the bucket number calculated on the remote host, keyword the keyword "
- + "associated with the message, controllerName the controller the message was directed to "
- + "on the remote host, protocol String value of the topic value (UEB, DMAAP, NOOP, or REST "
- + "-- NOOP and REST shouldn't be used here), topic the UEB/DMAAP topic name, event this is "
- + "the JSON message"
- )
- @Consumes(MediaType.APPLICATION_JSON)
- public void topicMessage(@QueryParam("bucket") Integer bucket,
- @QueryParam("keyword") String keyword,
- @QueryParam("controller") String controllerName,
- @QueryParam("protocol") String protocol,
- @QueryParam("topic") String topic,
- String event) {
- FeatureServerPool.topicMessage(bucket, keyword, controllerName, protocol, topic, event);
- }
-
- /**
- * Handle the '/bucket/sessionData' REST call.
- */
- @POST
- @Path("/bucket/sessionData")
- @ApiOperation(
- value = "Perform an incoming /bucket/sessionData request",
- notes = "A message is received from the old owner of the bucket and send to new owner, "
- + "the request data include bucketNumber the bucket number, dest the UUID of the intended "
- + "destination, ttl controls the number of hops the message may take, data serialized data "
- + "associated with this bucket, encoded using base64"
- )
- @Consumes(MediaType.APPLICATION_OCTET_STREAM)
- public void sessionData(@QueryParam("bucket") Integer bucket,
- @QueryParam("dest") UUID dest,
- @QueryParam("ttl") int ttl,
- byte[] data) {
- Bucket.sessionData(bucket, dest, ttl, data);
- }
-
- /**
- * Handle the '/session/insertDrools' REST call.
- */
- @POST
- @Path("/session/insertDrools")
- @ApiOperation(
- value = "Perform an incoming /session/insertDrools request",
- notes = "An incoming /session/insertDrools message was received, the request data include "
- + "keyword the keyword associated with the incoming object, sessionName encoded session name "
- + "(groupId:artifactId:droolsSession), bucket the bucket associated with keyword, "
- + "ttl controls the number of hops the message may take, data base64-encoded serialized data "
- + "for the object"
- )
- @Consumes(MediaType.APPLICATION_OCTET_STREAM)
- public void insertDrools(@QueryParam("keyword") String keyword,
- @QueryParam("session") String sessionName,
- @QueryParam("bucket") int bucket,
- @QueryParam("ttl") int ttl,
- byte[] data) {
- FeatureServerPool.incomingInsertDrools(keyword, sessionName, bucket, ttl, data);
- }
-
- /**
- * Handle the '/lock/lock' REST call.
- */
- @GET
- @Path("/lock/lock")
- @ApiOperation(
- value = "Perform an incoming /lock/lock request",
- notes = "An incoming /lock/lock REST message is received, the request data include "
- + "key string identifying the lock, which must hash to a bucket owned by the current host, "
- + "ownerKey string key identifying the owner, uuid the UUID that uniquely identifies "
- + "the original 'TargetLock', waitForLock this controls the behavior when 'key' is already "
- + "locked - 'true' means wait for it to be freed, 'false' means fail, ttl controls the number "
- + "of hops the message may take, the response is the message should be passed back to the "
- + "requestor"
- )
- @Consumes(MediaType.APPLICATION_OCTET_STREAM)
- @Produces(MediaType.APPLICATION_OCTET_STREAM)
- public Response lock(@QueryParam("key") String key,
- @QueryParam("owner") String keyOwner,
- @QueryParam("uuid") UUID uuid,
- @QueryParam("wait") boolean waitForLock,
- @QueryParam("ttl") int ttl) {
- return TargetLock.incomingLock(key, keyOwner, uuid, waitForLock, ttl);
- }
-
- /**
- * Handle the '/lock/free' REST call.
- */
- @GET
- @Path("/lock/free")
- @ApiOperation(
- value = "Perform an incoming /lock/free request",
- notes = "An incoming /lock/free REST message is received, the request data include "
- + "key string identifying the lock, which must hash to a bucket owned by the current host, "
- + "ownerKey string key identifying the owner, uuid the UUID that uniquely identifies "
- + "the original 'TargetLock', ttl controls the number of hops the message may take, "
- + "the response is the message should be passed back to the requestor"
- )
- @Consumes(MediaType.APPLICATION_OCTET_STREAM)
- @Produces(MediaType.APPLICATION_OCTET_STREAM)
- public Response free(@QueryParam("key") String key,
- @QueryParam("owner") String keyOwner,
- @QueryParam("uuid") UUID uuid,
- @QueryParam("ttl") int ttl) {
- return TargetLock.incomingFree(key, keyOwner, uuid, ttl);
- }
-
- /**
- * Handle the '/lock/locked' REST call.
- */
- @GET
- @Path("/lock/locked")
- @ApiOperation(
- value = "Perform an incoming /lock/locked request, (this is a callback to an earlier "
- + "requestor that the lock is now available)",
- notes = "An incoming /lock/locked REST message is received, the request data include "
- + "key string key identifying the lock, ownerKey string key identifying the owner "
- + "which must hash to a bucket owned by the current host (it is typically a 'RequestID') "
- + "uuid the UUID that uniquely identifies the original 'TargetLock', ttl controls the "
- + "number of hops the message may take, the response is the message should be passed back "
- + "to the requestor"
- )
- @Consumes(MediaType.APPLICATION_OCTET_STREAM)
- @Produces(MediaType.APPLICATION_OCTET_STREAM)
- public Response locked(@QueryParam("key") String key,
- @QueryParam("owner") String keyOwner,
- @QueryParam("uuid") UUID uuid,
- @QueryParam("ttl") int ttl) {
- return TargetLock.incomingLocked(key, keyOwner, uuid, ttl);
- }
-
- /**
- * Handle the '/lock/audit' REST call.
- */
- @POST
- @Path("/lock/audit")
- @ApiOperation(
- value = "Perform an incoming /lock/audit request",
- notes = "An incoming /lock/audit REST message is received, the request data include "
- + "serverUuid the UUID of the intended destination server, ttl controls the number of hops, "
- + "encodedData base64-encoded data, containing a serialized 'AuditData' instance "
- + "the response is a serialized and base64-encoded 'AuditData'"
- )
- @Consumes(MediaType.APPLICATION_OCTET_STREAM)
- @Produces(MediaType.APPLICATION_OCTET_STREAM)
- public byte[] lockAudit(@QueryParam("server") UUID server,
- @QueryParam("ttl") int ttl,
- byte[] data) {
- return TargetLock.Audit.incomingAudit(server, ttl, data);
- }
-
- /* ============================================================ */
-
- /**
- * Handle the '/cmd/dumpHosts' REST call.
- */
- @GET
- @Path("/cmd/dumpHosts")
- @ApiOperation(
- value = "Perform an incoming /cmd/dumpHosts request",
- notes = "Dump out the current 'servers' table in a human-readable table form"
- )
- @Produces(MediaType.TEXT_PLAIN)
- public String dumpHosts() {
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- Server.dumpHosts(new PrintStream(bos, true));
- return bos.toString(StandardCharsets.UTF_8);
- }
-
- /**
- * Handle the '/cmd/dumpBuckets' REST call.
- */
- @GET
- @Path("/cmd/dumpBuckets")
- @ApiOperation(
- value = "Perform an incoming /cmd/dumpBuckets request",
- notes = "Dump out buckets information in a human-readable form"
- )
- @Produces(MediaType.TEXT_PLAIN)
- public String dumpBuckets() {
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- Bucket.dumpBuckets(new PrintStream(bos, true));
- return bos.toString(StandardCharsets.UTF_8);
- }
-
- /**
- * Handle the '/cmd/ping' REST call.
- */
- @GET
- @Path("/cmd/ping")
- @ApiOperation(
- value = "Perform an incoming /cmd/ping request",
- notes = "Send information about 'thisServer' to the list of hosts"
- )
- @Produces(MediaType.TEXT_PLAIN)
- public String ping(@QueryParam("hosts") String hosts) {
- logger.info("Running '/cmd/ping', hosts={}", hosts);
-
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- Server.pingHosts(new PrintStream(bos, true), hosts);
- return bos.toString(StandardCharsets.UTF_8);
- }
-
- /**
- * Handle the '/cmd/bucketMessage' REST call.
- */
- @GET
- @Path("/cmd/bucketMessage")
- @ApiOperation(
- value = "Perform an incoming /cmd/bucketMessage request",
- notes = "This is only used for testing the routing of messages between servers"
- )
- @Produces(MediaType.TEXT_PLAIN)
- public String bucketMessage(@QueryParam("keyword") String keyword,
- @QueryParam("message") String message) {
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- Bucket.bucketMessage(new PrintStream(bos, true), keyword, message);
- return bos.toString(StandardCharsets.UTF_8);
- }
-
- /**
- * Handle the '/bucket/bucketResponse' REST call.
- */
- @POST
- @Path("/bucket/bucketResponse")
- @ApiOperation(
- value = "Perform an incoming /cmd/bucketResponse request",
- notes = "This runs on the destination host, and is the continuation of an operation "
- + "triggered by the /cmd/bucketMessage REST message running on the originating host"
- )
- @Consumes(MediaType.TEXT_PLAIN)
- @Produces(MediaType.TEXT_PLAIN)
- public String bucketResponse(@QueryParam("bucket") Integer bucket,
- @QueryParam("keyword") String keyword,
- byte[] data) {
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- Bucket.bucketResponse(new PrintStream(bos, true), bucket, keyword, data);
- return bos.toString(StandardCharsets.UTF_8);
- }
-
- /**
- * Handle the '/cmd/moveBucket' REST call.
- */
- @GET
- @Path("/cmd/moveBucket")
- @ApiOperation(
- value = "Perform an incoming /cmd/moveBucket request",
- notes = "This is only used for testing bucket migration. It only works on the lead server"
- )
- @Produces(MediaType.TEXT_PLAIN)
- public String moveBucket(@QueryParam("bucket") Integer bucketNumber,
- @QueryParam("host") String newHost) {
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- Bucket.moveBucket(new PrintStream(bos, true), bucketNumber, newHost);
- return bos.toString(StandardCharsets.UTF_8);
- }
-
- /**
- * Handle the '/cmd/dumpBucketAdjuncts' REST call.
- */
- @GET
- @Path("/cmd/dumpBucketAdjuncts")
- @ApiOperation(
- value = "Perform an incoming /cmd/dumpBucketAdjuncts request",
- notes = "Dump out all buckets with adjuncts"
- )
- @Produces(MediaType.TEXT_PLAIN)
- public String dumpBucketAdjuncts() {
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- Bucket.dumpAdjuncts(new PrintStream(bos, true));
- return bos.toString(StandardCharsets.UTF_8);
- }
-
- /**
- * Handle the '/cmd/dumpLocks' REST call.
- */
- @GET
- @Path("/cmd/dumpLocks")
- @ApiOperation(
- value = "Perform an incoming /cmd/dumpLocks request",
- notes = "Dump out locks info, detail 'true' provides additional bucket and host information"
- )
- @Produces(MediaType.TEXT_PLAIN)
- public String dumpLocks(@QueryParam("detail") boolean detail)
- throws IOException, InterruptedException, ClassNotFoundException {
-
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- TargetLock.DumpLocks.dumpLocks(new PrintStream(bos, true), detail);
- return bos.toString(StandardCharsets.UTF_8);
- }
-
- /**
- * Handle the '/lock/dumpLocksData' REST call.
- */
- @GET
- @Path("/lock/dumpLocksData")
- @ApiOperation(
- value = "Perform an incoming /cmd/dumpLocksData request",
- notes = "Generate a byte stream containing serialized 'HostData'"
- )
- @Produces(MediaType.APPLICATION_OCTET_STREAM)
- public String dumpLocksData(@QueryParam("server") UUID server,
- @QueryParam("ttl") int ttl) throws IOException {
- return new String(TargetLock.DumpLocks.dumpLocksData(server, ttl), StandardCharsets.UTF_8);
- }
-}
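The GET telemetry endpoints above return plain text, so they can be exercised with any HTTP client. A minimal sketch using the standard JAX-RS client API follows; the host and port shown are assumptions, and the class name 'TelemetrySketch' is invented for the example.

    import javax.ws.rs.client.Client;
    import javax.ws.rs.client.ClientBuilder;
    import javax.ws.rs.core.MediaType;

    // Illustrative sketch only: fetch the /cmd/dumpHosts table from one server.
    public final class TelemetrySketch {
        public static void main(String[] args) {
            Client client = ClientBuilder.newClient();
            try {
                String table = client
                        .target("http://localhost:9696")   // assumed server address
                        .path("cmd/dumpHosts")
                        .request(MediaType.TEXT_PLAIN)
                        .get(String.class);
                System.out.println(table);
            } finally {
                client.close();
            }
        }
    }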
diff --git a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Server.java b/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Server.java
deleted file mode 100644
index 61a950ae..00000000
--- a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Server.java
+++ /dev/null
@@ -1,1361 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_HTTPS;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_SELF_SIGNED_CERTIFICATES;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_SERVER_ADAPTIVE_GAP_ADJUST;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_SERVER_CONNECT_TIMEOUT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_SERVER_INITIAL_ALLOWED_GAP;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_SERVER_IP_ADDRESS;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_SERVER_PORT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_SERVER_READ_TIMEOUT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_SERVER_THREADS_CORE_POOL_SIZE;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_SERVER_THREADS_KEEP_ALIVE_TIME;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_SERVER_THREADS_MAXIMUM_POOL_SIZE;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.HOST_LIST;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.SERVER_ADAPTIVE_GAP_ADJUST;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.SERVER_CONNECT_TIMEOUT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.SERVER_HTTPS;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.SERVER_INITIAL_ALLOWED_GAP;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.SERVER_IP_ADDRESS;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.SERVER_PORT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.SERVER_READ_TIMEOUT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.SERVER_SELF_SIGNED_CERTIFICATES;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.SERVER_THREADS_CORE_POOL_SIZE;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.SERVER_THREADS_KEEP_ALIVE_TIME;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.SERVER_THREADS_MAXIMUM_POOL_SIZE;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.SITE_IP_ADDRESS;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.SITE_PORT;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.getProperty;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.io.StringReader;
-import java.lang.reflect.Field;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
-import java.nio.charset.StandardCharsets;
-import java.text.SimpleDateFormat;
-import java.util.Base64;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.UUID;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.FutureTask;
-import java.util.concurrent.LinkedTransferQueue;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import javax.ws.rs.client.Client;
-import javax.ws.rs.client.Entity;
-import javax.ws.rs.client.WebTarget;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import lombok.Setter;
-import org.glassfish.jersey.client.ClientProperties;
-import org.onap.policy.common.endpoints.event.comm.bus.internal.BusTopicParams;
-import org.onap.policy.common.endpoints.http.client.HttpClient;
-import org.onap.policy.common.endpoints.http.client.HttpClientConfigException;
-import org.onap.policy.common.endpoints.http.client.HttpClientFactoryInstance;
-import org.onap.policy.common.endpoints.http.server.HttpServletServer;
-import org.onap.policy.common.endpoints.http.server.HttpServletServerFactoryInstance;
-import org.onap.policy.drools.utils.PropertyUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class Server implements Comparable<Server> {
- private static Logger logger = LoggerFactory.getLogger(Server.class);
-
- // maps UUID to Server object for all known servers
- private static TreeMap<UUID, Server> servers =
- new TreeMap<>(Util.uuidComparator);
-
- // maps UUID to Server object for all failed servers
- // (so they aren't accidentally restored, due to updates from other hosts)
- private static TreeMap<UUID, Server> failedServers =
- new TreeMap<>(Util.uuidComparator);
-
- // subset of servers to be notified (null means it needs to be rebuilt)
- @Setter
- private static LinkedList<Server> notifyList = null;
-
- // data to be sent out to notify list
- private static TreeSet<Server> updatedList = new TreeSet<>();
-
- // the server associated with the current host
- private static Server thisServer = null;
-
- // the current REST server
- private static HttpServletServer restServer;
-
- /*==================================================*/
- /* Some properties extracted at initialization time */
- /*==================================================*/
-
- // initial value of gap to allow between pings
- private static long initialAllowedGap;
-
- // used in adaptive calculation of allowed gap between pings
- private static long adaptiveGapAdjust;
-
- // time to allow for TCP connect (long)
- private static String connectTimeout;
-
- // time to allow before TCP read timeout (long)
- private static String readTimeout;
-
- // outgoing per-server thread pool parameters
- private static int corePoolSize;
- private static int maximumPoolSize;
- private static long keepAliveTime;
-
- // https-related parameters
- private static boolean useHttps;
- private static boolean useSelfSignedCertificates;
-
- // list of remote host names
- private static String[] hostList = new String[0];
-
- /*=========================================================*/
- /* Fields included in every 'ping' message between servers */
- /*=========================================================*/
-
- // unique id for this server
- private UUID uuid;
-
- // counter periodically incremented to indicate the server is "alive"
- private int count;
-
- // 16 byte MD5 checksum over additional data that is NOT included in
- // every 'ping' message -- used to determine whether the data is up-to-date
- private byte[] checksum;
-
- /*========================================================================*/
- /* The following data is included in the checksum, and doesn't change too */
- /* frequently (some fields may change as servers go up and down) */
- /*========================================================================*/
-
- // IP address and port of listener
- private InetSocketAddress socketAddress;
-
- // site IP address and port
- private InetSocketAddress siteSocketAddress = null;
-
- /*============================================*/
- /* Local information not included in checksum */
- /*============================================*/
-
- // destination socket information
- private InetSocketAddress destSocketAddress;
- private String destName;
-
- // REST client fields
- private HttpClient client;
- private WebTarget target;
- private ThreadPoolExecutor sendThreadPool = null;
-
- // time when the 'count' field was last updated
- private long lastUpdateTime;
-
- // calculated field indicating the maximum time between updates
- private long allowedGap = initialAllowedGap;
-
- // indicates whether the 'Server' instance is active or not (synchronized)
- private boolean active = true;
-
- /*
- * Tags for encoding of server data
- */
- static final int END_OF_PARAMETERS_TAG = 0;
- static final int SOCKET_ADDRESS_TAG = 1;
- static final int SITE_SOCKET_ADDRESS_TAG = 2;
-
- // 'pingHosts' error
- static final String PINGHOSTS_ERROR = "Server.pingHosts error";
-
- // a dashed string used when printing tables
- static final String PRINTOUT_DASHES = "-------";
-
- /*==============================*/
- /* Comparable<Server> interface */
- /*==============================*/
-
- /**
- * Compare this instance to another one by comparing the 'uuid' field.
- */
- @Override
- public int compareTo(Server other) {
- return Util.uuidComparator.compare(uuid, other.uuid);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(uuid);
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (!(obj instanceof Server)) {
- return false;
- }
- Server other = (Server) obj;
- return Objects.equals(uuid, other.uuid);
- }
-
- /**
- * This method may be invoked from any thread, and is used as the main
- * entry point when testing.
- *
- * @param args arguments containing an '=' character are interpreted as
- * a property, other arguments are presumed to be a property file.
- */
- public static void main(String[] args) throws IOException {
- Properties prop = new Properties();
-
- for (String arg : args) {
- // arguments with an equals sign in them are a property definition -
- // otherwise, they are a properties file name
-
- if (arg.contains("=")) {
- prop.load(new StringReader(arg));
- } else {
- prop.putAll(PropertyUtil.getProperties(arg));
- }
- }
-
- String rval = startup(prop);
- if (rval != null) {
- logger.error("Server.startup failed: {}", rval);
- }
- }
-
- /**
- * This method may be invoked from any thread, and performs initialization.
- *
- * @param propertiesFile the name of a property file
- */
- public static String startup(String propertiesFile) {
- Properties properties;
- try {
- properties = PropertyUtil.getProperties(propertiesFile);
- } catch (IOException e) {
- logger.error("Server.startup: exception reading properties", e);
- properties = new Properties();
- }
- return startup(properties);
- }
-
- /**
- * This method may be invoked from any thread, and performs initialization.
- *
- * @param properties contains properties used by the server
- */
- public static String startup(Properties properties) {
- ServerPoolProperties.setProperties(properties);
- logger.info("startup: properties={}", properties);
-
- // fetch some static properties
- initialAllowedGap = getProperty(SERVER_INITIAL_ALLOWED_GAP,
- DEFAULT_SERVER_INITIAL_ALLOWED_GAP);
- adaptiveGapAdjust = getProperty(SERVER_ADAPTIVE_GAP_ADJUST,
- DEFAULT_SERVER_ADAPTIVE_GAP_ADJUST);
- connectTimeout =
- String.valueOf(getProperty(SERVER_CONNECT_TIMEOUT,
- DEFAULT_SERVER_CONNECT_TIMEOUT));
- readTimeout = String.valueOf(getProperty(SERVER_READ_TIMEOUT,
- DEFAULT_SERVER_READ_TIMEOUT));
- corePoolSize = getProperty(SERVER_THREADS_CORE_POOL_SIZE,
- DEFAULT_SERVER_THREADS_CORE_POOL_SIZE);
- maximumPoolSize = getProperty(SERVER_THREADS_MAXIMUM_POOL_SIZE,
- DEFAULT_SERVER_THREADS_MAXIMUM_POOL_SIZE);
- keepAliveTime = getProperty(SERVER_THREADS_KEEP_ALIVE_TIME,
- DEFAULT_SERVER_THREADS_KEEP_ALIVE_TIME);
- useHttps = getProperty(SERVER_HTTPS, DEFAULT_HTTPS);
- useSelfSignedCertificates = getProperty(SERVER_SELF_SIGNED_CERTIFICATES,
- DEFAULT_SELF_SIGNED_CERTIFICATES);
- String hostListNames = getProperty(HOST_LIST, null);
- if (hostListNames != null) {
- hostList = hostListNames.split(",");
- }
-
- String possibleError = null;
- try {
- // fetch server information
- String ipAddressString =
- getProperty(SERVER_IP_ADDRESS, DEFAULT_SERVER_IP_ADDRESS);
- int port = getProperty(SERVER_PORT, DEFAULT_SERVER_PORT);
-
- possibleError = "Unknown Host: " + ipAddressString;
- InetAddress address = InetAddress.getByName(ipAddressString);
- InetSocketAddress socketAddress = new InetSocketAddress(address, port);
-
- restServer = HttpServletServerFactoryInstance.getServerFactory().build(
- "SERVER-POOL", // name
- useHttps, // https
- socketAddress.getAddress().getHostAddress(), // host (maybe 0.0.0.0)
- port, // port (can no longer be 0)
- null, // contextPath
- false, // swagger
- false); // managed
- restServer.addServletClass(null, RestServerPool.class.getName());
-
- // add any additional servlets
- for (ServerPoolApi feature : ServerPoolApi.impl.getList()) {
- for (Class<?> clazz : feature.servletClasses()) {
- restServer.addServletClass(null, clazz.getName());
- }
- }
-
- // we may not know the port until after the server is started
- restServer.start();
- possibleError = null;
-
- // determine the address to use
- if (DEFAULT_SERVER_IP_ADDRESS.contentEquals(address.getHostAddress())) {
- address = InetAddress.getLocalHost();
- }
-
- thisServer = new Server(new InetSocketAddress(address, port));
-
- // TBD: is this really appropriate?
- thisServer.newServer();
-
- // start background thread
- MainLoop.startThread();
- MainLoop.queueWork(() -> {
- // run this in the 'MainLoop' thread
- Leader.startup();
- Bucket.startup();
- });
- logger.info("Listening on port {}", port);
-
- return null;
- } catch (UnknownHostException e) {
- logger.error("Server.startup: exception start server", e);
- if (possibleError == null) {
- possibleError = e.toString();
- }
- return possibleError;
- }
- }
-
- /**
- * Shut down all threads associated with the server pool.
- */
- public static void shutdown() {
- Discovery.stopDiscovery();
- MainLoop.stopThread();
- TargetLock.shutdown();
- Util.shutdown();
-
- HashSet<Server> allServers = new HashSet<>();
- allServers.addAll(servers.values());
- allServers.addAll(failedServers.values());
-
- for (Server server : allServers) {
- if (server.sendThreadPool != null) {
- server.sendThreadPool.shutdown();
- }
- }
- if (restServer != null) {
- restServer.shutdown();
- }
- }
-
- /**
- * Return the Server instance associated with the current host.
- *
- * @return the Server instance associated with the current host
- */
- public static Server getThisServer() {
- return thisServer;
- }
-
- /**
- * Return the first Server instance in the 'servers' list.
- *
- * @return the first Server instance in the 'servers' list
- * (the one with the lowest UUID)
- */
- public static Server getFirstServer() {
- return servers.firstEntry().getValue();
- }
-
- /**
- * Lookup a Server instance associated with a UUID.
- *
- * @param uuid the key to the lookup
- * @return the associated 'Server' instance, or 'null' if none
- */
- public static Server getServer(UUID uuid) {
- return servers.get(uuid);
- }
-
- /**
- * Return a count of the number of servers.
- *
- * @return a count of the number of servers
- */
- public static int getServerCount() {
- return servers.size();
- }
-
- /**
- * Return the complete list of servers.
- *
- * @return the complete list of servers
- */
- public static Collection<Server> getServers() {
- return servers.values();
- }
-
- /**
- * This method is invoked from the 'startup' thread, and creates a new
- * 'Server' instance for the current server.
- *
- * @param socketAddress the IP address and port the listener is bound to
- */
- private Server(InetSocketAddress socketAddress) {
- this.uuid = UUID.randomUUID();
- this.count = 1;
- this.socketAddress = socketAddress;
- this.lastUpdateTime = System.currentTimeMillis();
-
- // site information
-
- String siteIp = getProperty(SITE_IP_ADDRESS, null);
- int sitePort = getProperty(SITE_PORT, 0);
- if (siteIp != null && sitePort != 0) {
- // we do have site information specified
- try {
- siteSocketAddress = new InetSocketAddress(siteIp, sitePort);
- if (siteSocketAddress.getAddress() == null) {
- logger.error("Couldn't resolve site address: {}", siteIp);
- siteSocketAddress = null;
- }
- } catch (IllegalArgumentException e) {
- logger.error("Illegal 'siteSocketAddress'", e);
- siteSocketAddress = null;
- }
- }
-
- // TBD: calculate checksum
- }
-
- /**
- * Initialize a 'Server' instance from a 'DataInputStream'. If it is new,
- * it may get inserted in the table. If it is an update, fields in an
- * existing 'Server' may be updated.
- *
- * @param is the 'DataInputStream'
- */
- Server(DataInputStream is) throws IOException {
- // read in 16 byte UUID
- uuid = Util.readUuid(is);
-
- // read in 4 byte counter value
- count = is.readInt();
-
- // read in 16 byte MD5 checksum
- checksum = new byte[16];
- is.readFully(checksum);
-
- // optional parameters
- int tag;
- while ((tag = is.readUnsignedByte()) != END_OF_PARAMETERS_TAG) {
- switch (tag) {
- case SOCKET_ADDRESS_TAG:
- socketAddress = readSocketAddress(is);
- break;
- case SITE_SOCKET_ADDRESS_TAG:
- siteSocketAddress = readSocketAddress(is);
- break;
- default:
- // ignore tag
- logger.error("Illegal tag: {}", tag);
- break;
- }
- }
- }
-
- /**
- * Read an 'InetSocketAddress' from a 'DataInputStream'.
- *
- * @param is the 'DataInputStream'
- * @return the 'InetSocketAddress'
- */
- private static InetSocketAddress readSocketAddress(DataInputStream is) throws IOException {
-
- byte[] ipAddress = new byte[4];
- is.readFully(ipAddress, 0, 4);
- int port = is.readUnsignedShort();
- return new InetSocketAddress(InetAddress.getByAddress(ipAddress), port);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String toString() {
- return "Server[" + uuid + "]";
- }
-
- /**
- * Return the UUID associated with this Server.
- *
- * @return the UUID associated with this Server
- */
- public UUID getUuid() {
- return uuid;
- }
-
- /**
- * Return the external InetSocketAddress of the site.
- *
- * @return the external InetSocketAddress of the site
- * ('null' if it doesn't exist)
- */
- public InetSocketAddress getSiteSocketAddress() {
- return siteSocketAddress;
- }
-
- /**
- * This method may be called from any thread.
- *
- * @return 'true' if this server is active, and 'false' if not
- */
- public synchronized boolean isActive() {
- return active;
- }
-
- /**
- * This method writes out the data associated with the current Server
- * instance.
- *
- * @param os output stream that should receive the data
- */
- void writeServerData(DataOutputStream os) throws IOException {
- // write out 16 byte UUID
- Util.writeUuid(os, uuid);
-
- // write out 4 byte counter value
- os.writeInt(count);
-
- // write out 16 byte MD5 checksum
- // TBD: should this be implemented?
- os.write(checksum == null ? new byte[16] : checksum);
-
- if (socketAddress != null) {
- // write out socket address
- os.writeByte(SOCKET_ADDRESS_TAG);
- os.write(socketAddress.getAddress().getAddress(), 0, 4);
- os.writeShort(socketAddress.getPort());
- }
-
- if (siteSocketAddress != null) {
- // write out socket address
- os.writeByte(SITE_SOCKET_ADDRESS_TAG);
- os.write(siteSocketAddress.getAddress().getAddress(), 0, 4);
- os.writeShort(siteSocketAddress.getPort());
- }
-
- os.writeByte(END_OF_PARAMETERS_TAG);
- }
-
- /**
- * Do any processing needed to create a new server. This method is invoked
- * from the 'MainLoop' thread in every case except for the current server,
- * in which case it is invoked in 'startup' prior to creating 'MainLoop'.
- */
- private void newServer() {
- Server failed = failedServers.get(uuid);
- if (failed != null) {
- // this one is on the failed list -- see if the counter has advanced
- if ((count - failed.count) <= 0) {
- // the counter has not advanced -- ignore
- return;
- }
-
- // the counter has advanced -- somehow, this server has returned
- failedServers.remove(uuid);
- synchronized (this) {
- active = true;
- }
- logger.error("Server reawakened: {} ({})", uuid, socketAddress);
- }
-
- lastUpdateTime = System.currentTimeMillis();
- servers.put(uuid, this);
- updatedList.add(this);
-
- // notify list will need to be rebuilt
- setNotifyList(null);
-
- if (socketAddress != null && this != thisServer) {
- // initialize 'client' and 'target' fields
- if (siteSocketAddress != null
- && !siteSocketAddress.equals(thisServer.siteSocketAddress)) {
- // destination is on a remote site
- destSocketAddress = siteSocketAddress;
- } else {
- // destination is on the local site -- use direct addressing
- destSocketAddress = socketAddress;
- }
- destName = socketAddressToName(destSocketAddress);
- try {
- // 'client' is used for REST messages to the destination
- client = buildClient(uuid.toString(), destSocketAddress, destName);
-
- // initialize the 'target' field
- target = getTarget(client);
- } catch (NoSuchFieldException | IllegalAccessException | HttpClientConfigException e) {
- logger.error("Server.newServer: problems creating 'client'", e);
- }
- }
- logger.info("New server: {} ({})", uuid, socketAddress);
- for (Events listener : Events.getListeners()) {
- listener.newServer(this);
- }
- }
-
- /**
- * Check the server state in response to some issue. At present, only the
- * 'destName' information is checked.
- */
- private void checkServer() {
- // recalculate 'destName' (we have seen DNS issues)
- String newDestName = socketAddressToName(destSocketAddress);
- if (newDestName.equals(destName)) {
- return;
- }
- logger.warn("Remote host name for {} has changed from {} to {}",
- destSocketAddress, destName, newDestName);
-
- // shut down old client, and rebuild
- client.shutdown();
- client = null;
- target = null;
-
- // update 'destName', and rebuild the client
- destName = newDestName;
- try {
- // 'client' is used for REST messages to the destination
- client = buildClient(uuid.toString(), destSocketAddress, destName);
-
- // initialize the 'target' field
- target = getTarget(client);
- } catch (NoSuchFieldException | IllegalAccessException | HttpClientConfigException e) {
- logger.error("Server.checkServer: problems recreating 'client'", e);
- }
- }
-
- /**
- * Update server data.
- *
- * @param serverData this is a temporary 'Server' instance created from
- * an incoming message, which is used to update fields within the
- * 'Server' instance identified by 'this'
- */
- private void updateServer(Server serverData) {
- if (serverData.count > count) {
- // an update has occurred
- count = serverData.count;
-
- // TBD: calculate and verify checksum, more fields may be updated
-
- // compute the time gap since the last update
- long currentTime = System.currentTimeMillis();
- long gap = currentTime - lastUpdateTime;
-
- // adjust 'allowedGap' accordingly
- // TBD: need properties to support overrides
- gap = gap * 3 / 2 + adaptiveGapAdjust;
- if (gap > allowedGap) {
- // update 'allowedGap' immediately
- allowedGap = gap;
- } else {
- // gradually pull the allowed gap down
- // TBD: need properties to support overrides
- allowedGap = (allowedGap * 15 + gap) / 16;
- }
- lastUpdateTime = currentTime;
-
- updatedList.add(this);
- }
- }
-
- /**
- * A server has failed -- mark it inactive and move it to the 'failedServers' table.
- */
- private void serverFailed() {
- // mark as inactive
- synchronized (this) {
- active = false;
- }
-
- // remove it from the table
- servers.remove(uuid);
-
- // add it to the failed servers table
- failedServers.put(uuid, this);
-
- // clean up client information
- if (client != null) {
- client.shutdown();
- client = null;
- target = null;
- }
-
- // log an error message
- logger.error("Server failure: {} ({})", uuid, socketAddress);
- for (Events listener : Events.getListeners()) {
- listener.serverFailed(this);
- }
- }
-
- /**
- * Fetch, and possibly calculate, the "notify list" associated with this
- * server. This is the list of servers to forward server and bucket
- * information to, and is approximately log2(n) in length, where 'n' is
- * the total number of servers.
- * It is calculated by starting with all of the servers sorted by UUID --
- * let's say the current server is at position 's'. The notify list will
- * contain the servers at positions:
- * (s + 1) % n
- * (s + 2) % n
- * (s + 4) % n
- * ...
- * that is, using all powers of 2 less than 'n' as offsets. If the total server
- * count is 50, this list has 6 entries (offsets 1, 2, 4, 8, 16, and 32).
- * @return the notify list
- */
- static Collection<Server> getNotifyList() {
- // The 'notifyList' value is initially 'null', and it is reset to 'null'
- // every time a new host joins, or one leaves. That way, it is marked for
- // recalculation, but only when needed.
- if (notifyList != null) {
- return notifyList;
- }
-
- // next index we are looking for
- int dest = 1;
-
- // our current position in the Server table -- starting at 'thisServer'
- UUID current = thisServer.uuid;
-
- // site socket address of 'current'
- InetSocketAddress thisSiteSocketAddress = thisServer.siteSocketAddress;
-
- // hash set of all site socket addresses located
- HashSet<InetSocketAddress> siteSocketAddresses = new HashSet<>();
- siteSocketAddresses.add(thisSiteSocketAddress);
-
- // the list we are building
- notifyList = new LinkedList<>();
-
- int index = 1;
- for ( ; ; ) {
- // move to the next key (UUID) -- if we hit the end of the table,
- // wrap to the beginning
- current = servers.higherKey(current);
- if (current == null) {
- current = servers.firstKey();
- }
- if (current.equals(thisServer.uuid)) {
- // we have looped through the entire list
- break;
- }
-
- // fetch associated server & site socket address
- Server server = servers.get(current);
- InetSocketAddress currentSiteSocketAddress =
- server.siteSocketAddress;
-
- if (Objects.equals(thisSiteSocketAddress,
- currentSiteSocketAddress)) {
- // same site -- see if we should add this one
- if (index == dest) {
- // this is the next index we are looking for --
- // add the server
- notifyList.add(server);
-
- // advance to the next offset (current-offset * 2)
- dest = dest << 1;
- }
- index += 1;
- } else if (!siteSocketAddresses.contains(currentSiteSocketAddress)) {
- // we need at least one member from each site
- notifyList.add(server);
- siteSocketAddresses.add(currentSiteSocketAddress);
- }
- }
-
- return notifyList;
- }
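
The offsets selected within a site are exactly the powers of two below the server count, which is what keeps the list roughly log2(n) long. A tiny illustrative sketch (not part of the original class) that just enumerates those offsets:

    import java.util.ArrayList;
    import java.util.List;

    public final class NotifyOffsetsSketch {
        /** Offsets from the current server's position: 1, 2, 4, ... < serverCount. */
        static List<Integer> offsets(int serverCount) {
            List<Integer> result = new ArrayList<>();
            for (int offset = 1; offset < serverCount; offset <<= 1) {
                result.add(offset);   // notify the server at (s + offset) % serverCount
            }
            return result;
        }

        public static void main(String[] args) {
            System.out.println(offsets(50));   // [1, 2, 4, 8, 16, 32] -- six entries
        }
    }
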
-
- /**
- * See if there is a host name associated with a destination socket address.
- *
- * @param dest the socket address of the destination
- * @return the host name associated with the IP address, or the IP address
- * if no associated host name is found.
- */
- private static String socketAddressToName(InetSocketAddress dest) {
- // destination IP address
- InetAddress inetAddress = dest.getAddress();
- String destName = null;
-
- // go through the 'hostList' to see if there is a matching name
- for (String hostName : hostList) {
- try {
- if (inetAddress.equals(InetAddress.getByName(hostName))) {
- // this one matches -- use the name instead of the IP address
- destName = hostName;
- break;
- }
- } catch (UnknownHostException e) {
- logger.debug("Server.socketAddressToName error", e);
- }
- }
-
- // default name = string value of IP address
- return destName == null ? inetAddress.getHostAddress() : destName;
- }
-
- /**
- * Create an 'HttpClient' instance for a particular host.
- *
- * @param name the name to assign to the client (currently a UUID or 'host:port' string)
- * @param dest the socket address of the destination
- * @param destName the string name to use for the destination
- * @return a new 'HttpClient' instance for the destination
- */
- static HttpClient buildClient(String name, InetSocketAddress dest, String destName)
- throws HttpClientConfigException {
-
- return HttpClientFactoryInstance.getClientFactory().build(
- BusTopicParams.builder()
- .clientName(name) // name
- .useHttps(useHttps) // https
- .allowSelfSignedCerts(useSelfSignedCertificates)// selfSignedCerts
- .hostname(destName) // host
- .port(dest.getPort()) // port
- .managed(false) // managed
- .build());
- }
-
- /**
- * Extract the 'WebTarget' information from the 'HttpClient'.
- *
- * @param client the associated HttpClient instance
- * @return a WebTarget referring to the previously-specified 'baseUrl'
- */
- static WebTarget getTarget(HttpClient client)
- throws NoSuchFieldException, IllegalAccessException {
- // need access to the internal field 'client'
- // TBD: We need a way to get this information without reflection
- Field field = client.getClass().getDeclaredField("client");
- field.setAccessible(true);
- Client rsClient = (Client) field.get(client);
- field.setAccessible(false);
-
- rsClient.property(ClientProperties.CONNECT_TIMEOUT, connectTimeout);
- rsClient.property(ClientProperties.READ_TIMEOUT, readTimeout);
-
- // For performance reasons, the root 'WebTarget' is generated only once
- // at initialization time for each remote host.
- return rsClient.target(client.getBaseUrl());
- }
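
Taken together, 'buildClient' and 'getTarget' are used in the pattern sketched below (it mirrors 'checkServer' above and 'pingHostsLoop' further down); the reflection in 'getTarget' exists only to reach the underlying JAX-RS 'Client' so the connect and read timeouts can be applied. A hedged usage sketch, assuming 'host' is an 'InetSocketAddress' and 'entity' a prepared request body:

    HttpClient httpClient = null;
    try {
        httpClient = buildClient(host.toString(), host, socketAddressToName(host));
        Response response = getTarget(httpClient).path("admin").request().post(entity);
        logger.info("post to {} returned status {}", host, response.getStatus());
    } catch (HttpClientConfigException | NoSuchFieldException | IllegalAccessException e) {
        logger.error("unable to contact {}", host, e);
    } finally {
        if (httpClient != null) {
            httpClient.shutdown();   // always release the underlying client
        }
    }
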
-
- /**
- * This method may be invoked from any thread, and is used to send a
- * message to the destination server associated with this 'Server' instance.
- *
- * @param path the path relative to the base URL
- * @param entity the "request entity" containing the body of the
- * HTTP POST request
- */
- public void post(final String path, final Entity<?> entity) {
- post(path, entity, null);
- }
-
- /**
- * This method may be invoked from any thread, and is used to send a
- * message to the destination server associated with this 'Server' instance.
- *
- * @param path the path relative to the base URL
- * @param entity the "request entity" containing the body of the
- * HTTP POST request (if 'null', an HTTP GET is used instead)
- * @param responseCallback if non-null, this callback may be used to
- * modify the WebTarget, and/or receive the POST response message
- */
- public void post(final String path, final Entity<?> entity,
- PostResponse responseCallback) {
- if (target == null) {
- return;
- }
-
- getThreadPool().execute(() -> {
- /*
- * This lambda runs within this server's send thread pool
- * (see 'getThreadPool'), not within the 'MainLoop' thread.
- */
- try {
- invokeWebTarget(path, entity, responseCallback);
-
- } catch (Exception e) {
- logger.error("Failed to send to {} ({}, {})",
- uuid, destSocketAddress, destName);
- if (responseCallback != null) {
- responseCallback.exceptionResponse(e);
- }
- // this runs in the 'MainLoop' thread
-
- // the DNS cache may have been out-of-date when this server
- // was first contacted -- fix the problem, if needed
- MainLoop.queueWork(this::checkServer);
- }
- });
- }
-
- private void invokeWebTarget(final String path, final Entity<?> entity, PostResponse responseCallback) {
- WebTarget webTarget = target.path(path);
- if (responseCallback != null) {
- // give callback a chance to modify 'WebTarget'
- webTarget = responseCallback.webTarget(webTarget);
-
- // send the response to the callback
- Response response;
- if (entity == null) {
- response = webTarget.request().get();
- } else {
- response = webTarget.request().post(entity);
- }
- responseCallback.response(response);
- } else {
- // just do the invoke, and ignore the response
- if (entity == null) {
- webTarget.request().get();
- } else {
- webTarget.request().post(entity);
- }
- }
- }
-
- /**
- * This method may be invoked from any thread.
- *
- * @return the 'ThreadPoolExecutor' associated with this server
- */
- public synchronized ThreadPoolExecutor getThreadPool() {
- if (sendThreadPool == null) {
- // build a thread pool for this Server
- sendThreadPool =
- new ThreadPoolExecutor(corePoolSize, maximumPoolSize,
- keepAliveTime, TimeUnit.MILLISECONDS,
- new LinkedTransferQueue<>());
- sendThreadPool.allowCoreThreadTimeOut(true);
- }
- return sendThreadPool;
- }
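
Because 'LinkedTransferQueue' is unbounded, this executor never grows past 'corePoolSize' (tasks queue rather than trigger extra threads), and 'allowCoreThreadTimeOut(true)' lets an idle pool shrink back to zero threads after 'keepAliveTime'. A standalone sketch with illustrative values (the real sizes come from 'ServerPoolProperties'):

    import java.util.concurrent.LinkedTransferQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public final class SendPoolSketch {
        public static void main(String[] args) {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                    5,                            // corePoolSize
                    10,                           // maximumPoolSize (unreachable: unbounded queue)
                    5000, TimeUnit.MILLISECONDS,  // keepAliveTime
                    new LinkedTransferQueue<>());
            pool.allowCoreThreadTimeOut(true);    // an idle pool releases even core threads
            pool.execute(() -> System.out.println("sent"));
            pool.shutdown();
        }
    }
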
-
- /**
- * Lower-level method supporting HTTP, which requires that the caller's
- * thread tolerate blocking. This method may be called from any thread.
- *
- * @param path the path relative to the base URL
- * @return a 'WebTarget' instance pointing to this path
- */
- public WebTarget getWebTarget(String path) {
- return target == null ? null : target.path(path);
- }
-
- /**
- * This method may be invoked from any thread, but its real intent is
- * to decode an incoming 'admin' message (which is Base-64-encoded),
- * and send it to the 'MainLoop' thread for processing.
- *
- * @param data the base-64-encoded data
- */
- static void adminRequest(byte[] data) {
- final byte[] packet = Base64.getDecoder().decode(data);
- Runnable task = () -> {
- try {
- ByteArrayInputStream bis = new ByteArrayInputStream(packet);
- DataInputStream dis = new DataInputStream(bis);
-
- while (dis.available() != 0) {
- Server serverData = new Server(dis);
-
- // TBD: Compare with current server
-
- Server server = servers.get(serverData.uuid);
- if (server == null) {
- serverData.newServer();
- } else {
- server.updateServer(serverData);
- }
- }
- } catch (Exception e) {
- logger.error("Server.adminRequest: can't decode packet", e);
- }
- };
- MainLoop.queueWork(task);
- }
-
- /**
- * Send out information about servers 'updatedList' to all servers
- * in 'notifyList' (may need to build or rebuild 'notifyList').
- */
- static void sendOutData() throws IOException {
- // include 'thisServer' in the data -- first, advance the count
- thisServer.count += 1;
- if (thisServer.count == 0) {
- /*
- * counter wrapped (0 is a special case) --
- * actually, we could probably leave this out, because it would take
- * more than a century to wrap if the increment is 1 second
- */
- thisServer.count = 1;
- }
-
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- DataOutputStream dos = new DataOutputStream(bos);
-
- thisServer.lastUpdateTime = System.currentTimeMillis();
- thisServer.writeServerData(dos);
-
- // include all hosts in the updated list
- for (Server server : updatedList) {
- server.writeServerData(dos);
- }
- updatedList.clear();
-
- // create an 'Entity' that can be sent out to all hosts in the notify list
- Entity<String> entity = Entity.entity(
- new String(Base64.getEncoder().encode(bos.toByteArray()), StandardCharsets.UTF_8),
- MediaType.APPLICATION_OCTET_STREAM_TYPE);
- for (Server server : getNotifyList()) {
- server.post("admin", entity);
- }
- }
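
The 'admin' payload is just the concatenation of the binary records written by 'writeServerData', Base64-encoded into the request body; 'adminRequest' above reverses the process. A minimal round-trip sketch of the encoding layer only, with plain strings standing in for server records:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public final class AdminPayloadSketch {
        public static void main(String[] args) throws IOException {
            // encode side (compare with sendOutData) -- strings stand in for server records
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            DataOutputStream dos = new DataOutputStream(bos);
            dos.writeUTF("record-1");
            dos.writeUTF("record-2");
            String body = new String(Base64.getEncoder().encode(bos.toByteArray()),
                                     StandardCharsets.UTF_8);

            // decode side (compare with adminRequest)
            DataInputStream dis = new DataInputStream(
                    new ByteArrayInputStream(Base64.getDecoder().decode(body)));
            while (dis.available() != 0) {
                System.out.println(dis.readUTF());   // prints record-1, record-2
            }
        }
    }
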
-
- /**
- * Search for servers which have taken too long to respond.
- */
- static void searchForFailedServers() {
- long currentTime = System.currentTimeMillis();
-
- // used to build a list of newly-failed servers
- LinkedList<Server> failed = new LinkedList<>();
- for (Server server : servers.values()) {
- if (server == thisServer) {
- continue;
- }
- long gap = currentTime - server.lastUpdateTime;
- if (gap > server.allowedGap) {
- // add it to the failed list -- we don't call 'serverFailed' yet,
- // because this updates the server list, and leads to a
- // 'ConcurrentModificationException'
- failed.add(server);
- }
- }
-
- // remove servers from our list
- if (!failed.isEmpty()) {
- for (Server server : failed) {
- server.serverFailed();
- }
- notifyList = null;
- }
- }
-
- /**
- * This method may be invoked from any thread:
- * Send information about 'thisServer' to the list of hosts.
- *
- * @param out the 'PrintStream' to use for displaying information
- * @param hosts a comma-separated list of entries containing
- * 'host:port' or just 'port' (current host is implied in this case)
- */
- static void pingHosts(PrintStream out, String hosts) {
- LinkedList<InetSocketAddress> addresses = new LinkedList<>();
- boolean error = false;
-
- for (String host : hosts.split(",")) {
- try {
- String[] segs = host.split(":");
-
- switch (segs.length) {
- case 1:
- addresses.add(new InetSocketAddress(InetAddress.getLocalHost(),
- Integer.parseInt(segs[0])));
- break;
- case 2:
- addresses.add(new InetSocketAddress(segs[0],
- Integer.parseInt(segs[1])));
- break;
- default:
- out.println(host + ": Invalid host/port value");
- error = true;
- break;
- }
- } catch (NumberFormatException e) {
- out.println(host + ": Invalid port value");
- logger.error(PINGHOSTS_ERROR, e);
- error = true;
- } catch (UnknownHostException e) {
- out.println(host + ": Unknown host");
- logger.error(PINGHOSTS_ERROR, e);
- error = true;
- }
- }
- if (!error) {
- pingHosts(out, addresses);
- }
- }
-
- /**
- * This method may be invoked from any thread:
- * Send information about 'thisServer' to the list of hosts.
- *
- * @param out the 'PrintStream' to use for displaying information
- * @param hosts a collection of 'InetSocketAddress' instances, which are
- * the hosts to send the information to
- */
- static void pingHosts(final PrintStream out,
- final Collection<InetSocketAddress> hosts) {
- FutureTask<Integer> ft = new FutureTask<>(() -> {
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- DataOutputStream dos = new DataOutputStream(bos);
-
- // add information for this server only
- try {
- thisServer.writeServerData(dos);
-
- // create an 'Entity' that can be sent out to all hosts
- Entity<String> entity = Entity.entity(
- new String(Base64.getEncoder().encode(bos.toByteArray()),
- StandardCharsets.UTF_8),
- MediaType.APPLICATION_OCTET_STREAM_TYPE);
- pingHostsLoop(entity, out, hosts);
- } catch (IOException e) {
- out.println("Unable to generate 'ping' data: " + e);
- logger.error(PINGHOSTS_ERROR, e);
- }
- return 0;
- });
-
- MainLoop.queueWork(ft);
- try {
- ft.get(60, TimeUnit.SECONDS);
- } catch (InterruptedException e) {
- logger.error("Server.pingHosts: interrupted waiting for queued work", e);
- Thread.currentThread().interrupt();
- } catch (ExecutionException | TimeoutException e) {
- logger.error("Server.pingHosts: error waiting for queued work", e);
- }
- }
-
- /**
- * Helper used by the 'pingHosts' method, split out to reduce its cognitive complexity.
- *
- * @param entity for sending out to all hosts
- * @param out the 'PrintStream' to use for displaying information
- * @param hosts a collection of 'InetSocketAddress' instances, which are
- * the hosts to send the information to
- */
- static void pingHostsLoop(final Entity<String> entity,
- final PrintStream out,
- final Collection<InetSocketAddress> hosts) {
- // loop through hosts
- for (InetSocketAddress host : hosts) {
- HttpClient httpClient = null;
-
- try {
- httpClient = buildClient(host.toString(), host,
- socketAddressToName(host));
- getTarget(httpClient).path("admin").request().post(entity);
- httpClient.shutdown();
- httpClient = null;
- } catch (NoSuchFieldException | IllegalAccessException e) {
- out.println(host + ": Unable to get link to target");
- logger.error(PINGHOSTS_ERROR, e);
- } catch (Exception e) {
- out.println(host + ": " + e);
- logger.error(PINGHOSTS_ERROR, e);
- }
- if (httpClient != null) {
- httpClient.shutdown();
- }
- }
- }
-
- /**
- * This method may be invoked from any thread:
- * Dump out the current 'servers' table in a human-readable table form.
- *
- * @param out the 'PrintStream' to dump the table to
- */
- public static void dumpHosts(final PrintStream out) {
- FutureTask<Integer> ft = new FutureTask<>(() -> {
- dumpHostsInternal(out);
- return 0;
- });
- MainLoop.queueWork(ft);
- try {
- ft.get(60, TimeUnit.SECONDS);
- } catch (InterruptedException e) {
- logger.error("Server.dumpHosts: interrupted waiting for queued work", e);
- Thread.currentThread().interrupt();
- } catch (ExecutionException | TimeoutException e) {
- logger.error("Server.dumpHosts: error waiting for queued work", e);
- }
- }
-
- /**
- * Dump out the current 'servers' table in a human-readable table form.
- *
- * @param out the 'PrintStream' to dump the table to
- */
- private static void dumpHostsInternal(PrintStream out) {
- // take a local snapshot of the notify list, to guard against concurrent
- // modifications to 'servers.values()' and 'notifyList'
- HashSet<Server> localNotifyList = new HashSet<>(getNotifyList());
-
- // see if we have any site information
- boolean siteData = false;
- for (Server server : servers.values()) {
- if (server.siteSocketAddress != null) {
- siteData = true;
- break;
- }
- }
-
- String format = "%1s %-36s %-15s %5s %5s %12s %7s %7s\n";
- SimpleDateFormat dateFormat = new SimpleDateFormat("kk:mm:ss.SSS");
-
- if (siteData) {
- format = "%1s %-36s %-15s %5s %-15s %5s %5s %12s %7s %7s\n";
- // @formatter:off
- out.printf(format, "", "UUID", "IP Address", "Port",
- "Site IP Address", "Port",
- "Count", "Update Time", "Elapsed", "Allowed");
- out.printf(format, "", "----", "----------", "----",
- "---------------", "----",
- "-----", "-----------", PRINTOUT_DASHES, PRINTOUT_DASHES);
- // @formatter:on
- } else {
- // @formatter:off
- out.printf(format, "", "UUID", "IP Address", "Port",
- "Count", "Update Time", "Elapsed", "Allowed");
- out.printf(format, "", "----", "----------", "----",
- "-----", "-----------", PRINTOUT_DASHES, PRINTOUT_DASHES);
- // @formatter:on
- }
-
- long currentTime = System.currentTimeMillis();
- for (Server server : servers.values()) {
- String thisOne = "";
-
- if (server == thisServer) {
- thisOne = "*";
- } else if (localNotifyList.contains(server)) {
- thisOne = "n";
- }
-
- if (siteData) {
- String siteIp = "";
- String sitePort = "";
- if (server.siteSocketAddress != null) {
- siteIp =
- server.siteSocketAddress.getAddress().getHostAddress();
- sitePort = String.valueOf(server.siteSocketAddress.getPort());
- }
-
- out.printf(format, thisOne, server.uuid,
- server.socketAddress.getAddress().getHostAddress(),
- server.socketAddress.getPort(),
- siteIp, sitePort, server.count,
- dateFormat.format(new Date(server.lastUpdateTime)),
- currentTime - server.lastUpdateTime,
- server.allowedGap);
- } else {
- out.printf(format, thisOne, server.uuid,
- server.socketAddress.getAddress().getHostAddress(),
- server.socketAddress.getPort(), server.count,
- dateFormat.format(new Date(server.lastUpdateTime)),
- currentTime - server.lastUpdateTime,
- server.allowedGap);
- }
- }
- out.println("Count: " + servers.size());
- }
-
- /* ============================================================ */
-
- /**
- * This interface supports the 'post' method, and provides the opportunity
- * to change the WebTarget and/or receive the POST response message.
- */
- interface PostResponse {
- /**
- * Callback that can be used to modify 'WebTarget', and do things like
- * add query parameters.
- *
- * @param webTarget the current WebTarget
- * @return the updated WebTarget
- */
- public default WebTarget webTarget(WebTarget webTarget) {
- return webTarget;
- }
-
- /**
- * Callback that passes the POST response.
- *
- * @param response the POST response
- */
- public default void response(Response response) {
- }
-
- /**
- * Callback invoked when the POST request fails with an exception.
- *
- * @param exception the exception that terminated the request
- */
- public default void exceptionResponse(Exception exception) {
- Response.ResponseBuilder response;
- response = Response.serverError();
- this.response(response.build());
- }
- }
-}
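
The 'PostResponse' interface is normally supplied as an anonymous class so a caller can append query parameters and examine the HTTP status; the 'TargetLock' code later in this change uses exactly this pattern. A hedged sketch, assuming a 'Server' instance named 'server', a prepared 'entity', and an SLF4J 'logger':

    server.post("lock/lock", entity, new Server.PostResponse() {
        @Override
        public WebTarget webTarget(WebTarget webTarget) {
            return webTarget.queryParam("key", "some-key");   // add query parameters
        }

        @Override
        public void response(Response response) {
            logger.info("lock/lock returned status {}", response.getStatus());
        }
    });
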
diff --git a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/ServerPoolApi.java b/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/ServerPoolApi.java
deleted file mode 100644
index 8ec7c100..00000000
--- a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/ServerPoolApi.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import java.util.Collection;
-import java.util.Collections;
-import org.onap.policy.common.utils.services.OrderedService;
-import org.onap.policy.common.utils.services.OrderedServiceImpl;
-
-public abstract class ServerPoolApi implements OrderedService {
- /**
- * 'ServerPoolApi.impl.getList()' returns an ordered list of objects
- * derived from the 'ServerPoolApi' class.
- */
- public static final OrderedServiceImpl<ServerPoolApi> impl =
- new OrderedServiceImpl<>(ServerPoolApi.class);
-
- /**
- * This method gives all of the listening features the ability to add
- * classes to the 'HttpServletServer'.
- *
- * @return a Collection of classes implementing REST methods
- */
- public Collection<Class<?>> servletClasses() {
- return Collections.emptyList();
- }
-
- /**
- * This is called in the case where no bucket migration data was received
- * from the old owner of the bucket (such as if the old owner failed).
- * It gives one or more features the opportunity to do the restore.
- *
- * @param bucket the bucket that needs restoring
- */
- public void restoreBucket(Bucket bucket) {
- // do nothing
- }
-
- /**
- * This is called whenever a 'GlobalLocks' object is updated. It was added
- * in order to support persistence, but may be used elsewhere as well.
- *
- * @param bucket the bucket containing the 'GlobalLocks' adjunct
- * @param globalLocks the 'GlobalLocks' adjunct
- */
- public void lockUpdate(Bucket bucket, TargetLock.GlobalLocks globalLocks) {
- // do nothing
- }
-
- /**
- * This is called when the state of a bucket has changed, but is currently
- * stable, and it gives features the ability to do an audit. The intent is
- * to make sure that the adjunct state is correct; in particular, to remove
- * adjuncts that should no longer be there based upon the current state.
- * Note that this method is called while being synchronized on the bucket.
- *
- * @param bucket the bucket to audit
- * @param isOwner 'true' if the current host owns the bucket
- * @param isBackup 'true' if the current host is a backup for the bucket
- */
- public void auditBucket(Bucket bucket, boolean isOwner, boolean isBackup) {
- // do nothing
- }
-}
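
Features plug into the pool by extending 'ServerPoolApi' and listing the implementation class in META-INF/services/org.onap.policy.drools.serverpool.ServerPoolApi (one of the service files removed by this change). A minimal hedged sketch; it assumes 'OrderedService' only obliges implementers to supply 'getSequenceNumber()':

    package org.onap.policy.drools.serverpool.example;   // hypothetical package

    import java.util.Collection;
    import java.util.Collections;
    import org.onap.policy.drools.serverpool.Bucket;
    import org.onap.policy.drools.serverpool.ServerPoolApi;

    public class ExampleServerPoolFeature extends ServerPoolApi {
        @Override
        public int getSequenceNumber() {
            return 100;   // relative ordering among features (assumed requirement)
        }

        @Override
        public void restoreBucket(Bucket bucket) {
            // rebuild this feature's per-bucket state when no migration data arrived
        }

        @Override
        public Collection<Class<?>> servletClasses() {
            return Collections.emptyList();   // no additional REST classes in this sketch
        }
    }
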
diff --git a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/ServerPoolProperties.java b/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/ServerPoolProperties.java
deleted file mode 100644
index d1c09d43..00000000
--- a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/ServerPoolProperties.java
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import java.util.Properties;
-import org.apache.commons.lang3.StringUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ServerPoolProperties {
- // 'Server' port listener
- public static final String SERVER_IP_ADDRESS = "server.pool.server.ipAddress";
- public static final String SERVER_PORT = "server.pool.server.port";
- public static final String SERVER_HTTPS = "server.pool.server.https";
- public static final String SERVER_SELF_SIGNED_CERTIFICATES =
- "server.pool.server.selfSignedCerts";
-
- // 'site' information
- public static final String SITE_IP_ADDRESS = "server.pool.server.site.ip";
- public static final String SITE_PORT = "server.pool.server.site.port";
-
- // the default is to listen to all IP addresses on the host
- public static final String DEFAULT_SERVER_IP_ADDRESS = "0.0.0.0";
-
- // the default is to dynamically select a port
- public static final int DEFAULT_SERVER_PORT = 0;
-
- // the default is to have HTTPS disabled
- public static final boolean DEFAULT_HTTPS = false;
-
- // the default is to not use self-signed certificates
- public static final boolean DEFAULT_SELF_SIGNED_CERTIFICATES = false;
-
- // list of remote server names to use in HTTP/HTTPS messages
- // (instead of host names)
- public static final String HOST_LIST = "server.pool.server.hostlist";
-
- // 'Server' timeouts
- public static final String SERVER_INITIAL_ALLOWED_GAP = "server.pool.server.allowedGap";
- public static final String SERVER_ADAPTIVE_GAP_ADJUST =
- "server.adaptiveGapAdjust";
- public static final String SERVER_CONNECT_TIMEOUT = "server.pool.server.connectTimeout";
- public static final String SERVER_READ_TIMEOUT = "server.pool.server.readTimeout";
-
- // at startup, initially allow 30 seconds between pings
- public static final long DEFAULT_SERVER_INITIAL_ALLOWED_GAP = 30000;
-
- // when doing the adaptive calculation of the allowed gap between pings,
- // adjust the time by adding 5 seconds (by default)
- public static final long DEFAULT_SERVER_ADAPTIVE_GAP_ADJUST = 5000;
-
- // the default is to allow 10 seconds for a TCP connect
- public static final long DEFAULT_SERVER_CONNECT_TIMEOUT = 10000;
-
- // the default is to allow 10 seconds for a TCP read response
- public static final long DEFAULT_SERVER_READ_TIMEOUT = 10000;
-
- // outgoing per-server thread pool parameters
- public static final String SERVER_THREADS_CORE_POOL_SIZE =
- "server.pool.server.threads.corePoolSize";
- public static final String SERVER_THREADS_MAXIMUM_POOL_SIZE =
- "server.pool.server.threads.maximumPoolSize";
- public static final String SERVER_THREADS_KEEP_ALIVE_TIME =
- "server.pool.server.threads.keepAliveTime";
-
- public static final int DEFAULT_SERVER_THREADS_CORE_POOL_SIZE = 5;
- public static final int DEFAULT_SERVER_THREADS_MAXIMUM_POOL_SIZE = 10;
- public static final long DEFAULT_SERVER_THREADS_KEEP_ALIVE_TIME = 5000;
-
- /*================*/
- /* Host Discovery */
- /*================*/
-
- public static final String DISCOVERY_SERVERS = "server.pool.discovery.servers";
- public static final String DISCOVERY_TOPIC = "server.pool.discovery.topic";
-
- // HTTP authentication
- public static final String DISCOVERY_USERNAME = "server.pool.discovery.username";
- public static final String DISCOVERY_PASSWORD = "server.pool.discovery.password";
-
- // Cambria authentication
- public static final String DISCOVERY_API_KEY = "server.pool.discovery.apiKey";
- public static final String DISCOVERY_API_SECRET = "server.pool.discovery.apiSecret";
-
- // timeouts
- public static final String DISCOVERY_FETCH_TIMEOUT =
- "server.pool.discovery.fetchTimeout";
-
- // this value is passed to the UEB/DMAAP server, and controls how long
- // a 'fetch' request will wait when there are no incoming messages
- public static final String DEFAULT_DISCOVERY_FETCH_TIMEOUT = "60000";
-
- // maximum message fetch limit
- public static final String DISCOVERY_FETCH_LIMIT = "server.pool.discovery.fetchLimit";
-
- // this value is passed to the UEB/DMAAP server, and controls how many
- // requests may be returned in a single fetch
- public static final String DEFAULT_DISCOVERY_FETCH_LIMIT = "100";
-
- // publisher thread cycle time
- public static final String DISCOVER_PUBLISHER_LOOP_CYCLE_TIME =
- "discovery.publisherLoopCycleTime";
-
- // default cycle time is 5 seconds
- public static final long DEFAULT_DISCOVER_PUBLISHER_LOOP_CYCLE_TIME = 5000;
-
- // encryption
- public static final String DISCOVERY_HTTPS = "server.pool.discovery.https";
- public static final String DISCOVERY_ALLOW_SELF_SIGNED_CERTIFICATES =
- "server.pool.discovery.selfSignedCertificates";
-
- /*============================*/
- /* Leader Election Parameters */
- /*============================*/
-
- public static final String LEADER_STABLE_IDLE_CYCLES =
- "server.pool.leader.stableIdleCycles";
- public static final String LEADER_STABLE_VOTING_CYCLES =
- "server.pool.leader.stableVotingCycles";
-
- // by default, wait for 5 cycles (seconds) of stability before voting starts
- public static final int DEFAULT_LEADER_STABLE_IDLE_CYCLES = 5;
-
- // by default, wait for 5 cycles of stability before declaring a winner
- public static final int DEFAULT_LEADER_STABLE_VOTING_CYCLES = 5;
-
- /*=====================*/
- /* MainLoop Parameters */
- /*=====================*/
-
- public static final String MAINLOOP_CYCLE = "server.pool.mainLoop.cycle";
-
- // by default, the main loop cycle is 1 second
- public static final long DEFAULT_MAINLOOP_CYCLE = 1000;
-
- /*=============================*/
- /* Bucket Migration Parameters */
- /*=============================*/
-
- // time-to-live controls how many hops a bucket migration message can take
- public static final String BUCKET_TIME_TO_LIVE = "bucket.ttl";
-
- // bucket migration timeout when a server has been notified that it
- // is the new owner of the bucket
- public static final String BUCKET_CONFIRMED_TIMEOUT =
- "bucket.confirmed.timeout";
-
- // bucket migration timeout when a server has inferred that it may be
- // the new owner, but it hasn't yet been confirmed
- public static final String BUCKET_UNCONFIRMED_TIMEOUT =
- "bucket.unconfirmed.timeout";
-
- // timeout for operation run within a Drools session
- public static final String BUCKET_DROOLS_TIMEOUT =
- "bucket.drools.timeout";
-
- // when a new owner of a bucket has completed the takeover of the
- // bucket, but it hasn't yet been confirmed, there is an additional
- // grace period before leaving the 'NewOwner' state
- public static final String BUCKET_UNCONFIRMED_GRACE_PERIOD =
- "bucket.unconfirmed.graceperiod";
-
- // time-to-live = 5 hops
- public static final int DEFAULT_BUCKET_TIME_TO_LIVE = 5;
-
- // 30 seconds timeout if it has been confirmed that we are the new owner
- public static final long DEFAULT_BUCKET_CONFIRMED_TIMEOUT = 30000;
-
- // 10 seconds timeout if it has not been confirmed that we are the new owner
- public static final long DEFAULT_BUCKET_UNCONFIRMED_TIMEOUT = 10000;
-
- // 10 seconds timeout waiting for a drools operation to complete
- public static final long DEFAULT_BUCKET_DROOLS_TIMEOUT = 10000;
-
- // 10 seconds timeout waiting to be confirmed that we are the new owner
- public static final long DEFAULT_BUCKET_UNCONFIRMED_GRACE_PERIOD = 10000;
-
- /*=======================*/
- /* TargetLock Parameters */
- /*=======================*/
-
- // time-to-live controls how many hops a 'TargetLock' message can take
- public static final String LOCK_TIME_TO_LIVE = "lock.ttl";
-
- // how frequently should the audit run?
- public static final String LOCK_AUDIT_PERIOD = "lock.audit.period";
-
- // when the audit is rescheduled (e.g. due to a new server joining), this
- // is the initial grace period, to allow time for bucket assignments, etc.
- public static final String LOCK_AUDIT_GRACE_PERIOD =
- "lock.audit.gracePeriod";
-
- // there may be audit mismatches detected that are only due to the transient
- // nature of the lock state -- we check the mismatches on both sides after
- // this delay to see if we are still out-of-sync
- public static final String LOCK_AUDIT_RETRY_DELAY = "lock.audit.retryDelay";
-
- // time-to-live = 5 hops
- public static final int DEFAULT_LOCK_TIME_TO_LIVE = 5;
-
- // run the audit every 5 minutes
- public static final long DEFAULT_LOCK_AUDIT_PERIOD = 300000;
-
- // wait at least 60 seconds after an event before running the audit
- public static final long DEFAULT_LOCK_AUDIT_GRACE_PERIOD = 60000;
-
- // wait 5 seconds to see if the mismatches still exist
- public static final long DEFAULT_LOCK_AUDIT_RETRY_DELAY = 5000;
-
- /* ============================================================ */
-
- private static Logger logger =
- LoggerFactory.getLogger(ServerPoolProperties.class);
-
- // save initial set of properties
- private static Properties properties = new Properties();
-
- /**
- * Hide implicit public constructor.
- */
- private ServerPoolProperties() {
- // everything here is static -- no instances of this class are created
- }
-
- /**
- * Store the application properties values.
- *
- * @param properties the properties to save
- */
- public static void setProperties(Properties properties) {
- ServerPoolProperties.properties = properties;
- }
-
- /**
- * Return the properties used when starting this server.
- *
- * @return the properties used when starting this server.
- */
- public static Properties getProperties() {
- return properties;
- }
-
- /**
- * Convenience method to fetch a 'long' property.
- *
- * @param name the property name
- * @param defaultValue the value to use if the property is not defined,
- * or has an illegal value
- * @return the property value
- */
- public static long getProperty(String name, long defaultValue) {
- long rval = defaultValue;
- String value = properties.getProperty(name);
- if (StringUtils.isNotBlank(value)) {
- // try to convert to a 'long' -- log a message in case of failure
- try {
- rval = Long.parseLong(value);
- } catch (NumberFormatException e) {
- logger.error("Property {}=\"{}\": illegal long -- "
- + "using default of {}", name, value, defaultValue);
- }
- }
- return rval;
- }
-
- /**
- * Convenience method to fetch an 'int' property.
- *
- * @param name the property name
- * @param defaultValue the value to use if the property is not defined,
- * or has an illegal value
- * @return the property value
- */
- public static int getProperty(String name, int defaultValue) {
- int rval = defaultValue;
- String value = properties.getProperty(name);
- if (StringUtils.isNotBlank(value)) {
- // try to convert to an 'int' -- log a message in case of failure
- try {
- rval = Integer.parseInt(value);
- } catch (NumberFormatException e) {
- logger.error("Property {}=\"{}\": illegal int -- "
- + "using default of {}", name, value, defaultValue);
- }
- }
- return rval;
- }
-
- /**
- * Convenience method to fetch a 'boolean' property.
- *
- * @param name the property name
- * @param defaultValue the value to use if the property is not defined,
- * or has an illegal value
- * @return the property value
- */
- public static boolean getProperty(String name, boolean defaultValue) {
- boolean rval = defaultValue;
- String value = properties.getProperty(name);
- if (StringUtils.isNotBlank(value)) {
- // convert to a 'boolean' -- 'Boolean.parseBoolean' never throws,
- // so any unrecognized value simply yields 'false'
- rval = Boolean.parseBoolean(value);
- }
- return rval;
- }
-
- /**
- * Convenience method to fetch a 'String' property
- * (provided for consistency with 'long' and 'int' versions).
- *
- * @param name the property name
- * @param defaultValue the value to use if the property is not defined,
- * or has an illegal value
- * @return the property value
- */
- public static String getProperty(String name, String defaultValue) {
- String value = properties.getProperty(name);
- return (StringUtils.isNotBlank(value)) ? value : defaultValue;
- }
-}
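
Typical use is to call 'setProperties' once with the feature's property file contents and then read values through the typed 'getProperty' overloads, each of which falls back to its default when a value is missing or malformed. A short standalone sketch:

    import java.util.Properties;
    import org.onap.policy.drools.serverpool.ServerPoolProperties;

    public final class PropertiesSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.setProperty("server.pool.server.port", "7000");
            ServerPoolProperties.setProperties(props);

            int port = ServerPoolProperties.getProperty("server.pool.server.port", 0);           // 7000
            boolean https = ServerPoolProperties.getProperty("server.pool.server.https", false); // default
            long cycle = ServerPoolProperties.getProperty("server.pool.mainLoop.cycle", 1000L);  // default
            System.out.println(port + " " + https + " " + cycle);
        }
    }
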
diff --git a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/TargetLock.java b/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/TargetLock.java
deleted file mode 100644
index f46013ca..00000000
--- a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/TargetLock.java
+++ /dev/null
@@ -1,2852 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_LOCK_AUDIT_GRACE_PERIOD;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_LOCK_AUDIT_PERIOD;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_LOCK_AUDIT_RETRY_DELAY;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.DEFAULT_LOCK_TIME_TO_LIVE;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.LOCK_AUDIT_GRACE_PERIOD;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.LOCK_AUDIT_PERIOD;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.LOCK_AUDIT_RETRY_DELAY;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.LOCK_TIME_TO_LIVE;
-import static org.onap.policy.drools.serverpool.ServerPoolProperties.getProperty;
-
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.io.PrintStream;
-import java.io.Serializable;
-import java.lang.ref.Reference;
-import java.lang.ref.ReferenceQueue;
-import java.lang.ref.WeakReference;
-import java.util.ArrayList;
-import java.util.Base64;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.IdentityHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Queue;
-import java.util.TimerTask;
-import java.util.TreeMap;
-import java.util.UUID;
-import java.util.concurrent.LinkedTransferQueue;
-import java.util.concurrent.TimeUnit;
-import javax.ws.rs.client.Entity;
-import javax.ws.rs.client.WebTarget;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
-import lombok.EqualsAndHashCode;
-import org.onap.policy.drools.core.DroolsRunnable;
-import org.onap.policy.drools.core.PolicyContainer;
-import org.onap.policy.drools.core.PolicySession;
-import org.onap.policy.drools.core.lock.Lock;
-import org.onap.policy.drools.core.lock.LockCallback;
-import org.onap.policy.drools.core.lock.PolicyResourceLockManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class provides a locking mechanism based upon a string key that
- * identifies the lock, and another string key that identifies the owner.
- * The existence of the 'TargetLock' instance doesn't mean that the
- * corresponding lock has been acquired -- this is only the case if the
- * instance is in the 'ACTIVE' state.
- * A lock in the ACTIVE or WAITING state exists in two sets of tables,
- * which may be on different hosts:
- * LocalLocks - these two tables are associated with the owner key of the
- * lock. They are in an adjunct to the bucket associated with this key,
- * and the bucket is owned by the host containing the entry.
- * GlobalLocks - this table is associated with the lock key. It is an
- * adjunct to the bucket associated with this key, and the bucket is
- * owned by the host containing the entry.
- */
-public class TargetLock implements Lock, Serializable {
- private static final String LOCK_MSG = "(key={},owner={},uuid={},ttl={})";
-
- private static final long serialVersionUID = 1L;
-
- private static Logger logger = LoggerFactory.getLogger(TargetLock.class);
-
- // Listener class to handle state changes that require restarting the audit
- private static EventHandler eventHandler = new EventHandler();
-
- static {
- // register Listener class
- Events.register(eventHandler);
- }
-
- // this is used to locate ACTIVE 'TargetLock' instances that have been
- // abandoned -- as the GC cleans up the 'WeakReference' instances referring
- // to these locks, we use that information to clean them up
- private static ReferenceQueue<TargetLock> abandoned = new ReferenceQueue<>();
-
- // some status codes
- static final int ACCEPTED = 202; //Response.Status.ACCEPTED.getStatusCode()
- static final int NO_CONTENT = 204; //Response.Status.NO_CONTENT.getStatusCode()
- static final int LOCKED = 423;
-
- // Values extracted from properties
-
- private static String timeToLive;
- private static long auditPeriod;
- private static long auditGracePeriod;
- private static long auditRetryDelay;
-
- // lock states:
- // WAITING - in line to acquire the lock
- // ACTIVE - currently holding the lock
- // FREE - WAITING/ACTIVE locks that were explicitly freed
- // LOST - could occur when a de-serialized ACTIVE lock can't be made
- // ACTIVE because there is already an ACTIVE holder of the lock
- public enum State {
- WAITING, ACTIVE, FREE, LOST
- }
-
- // this contains information that is placed in the 'LocalLocks' tables,
- // and has a one-to-one correspondence with the 'TargetLock' instance
- private Identity identity;
-
- // this is the only field that can change after initialization
- private State state;
-
- // this is used to notify the application when a lock is available,
- // or if it is not available
- private final LockCallback owner;
-
- // This is what is actually called by the infrastructure to do the owner
- // notification. The owner may be running in a Drools session, in which case
- // the actual notification should be done within that thread -- the 'context'
- // object ensures that it happens this way.
- private final LockCallback context;
-
- // HTTP query parameters
- private static final String QP_KEY = "key";
- private static final String QP_OWNER = "owner";
- private static final String QP_UUID = "uuid";
- private static final String QP_WAIT = "wait";
- private static final String QP_SERVER = "server";
- private static final String QP_TTL = "ttl";
-
- // define a constant for empty of byte array
- private static final byte[] EMPTY_BYTE_ARRAY = {};
-
- // below are for duplicating string in printout or logger
- private static final String PRINTOUT_DASHES = "---------";
- private static final String LOCK_AUDIT = "lock/audit";
- private static final String TARGETLOCK_AUDIT_SEND = "TargetLock.Audit.send: ";
-
- /**
- * This method triggers registration of 'eventHandler', and also extracts
- * property values.
- */
- static void startup() {
- int intTimeToLive =
- getProperty(LOCK_TIME_TO_LIVE, DEFAULT_LOCK_TIME_TO_LIVE);
- timeToLive = String.valueOf(intTimeToLive);
- auditPeriod = getProperty(LOCK_AUDIT_PERIOD, DEFAULT_LOCK_AUDIT_PERIOD);
- auditGracePeriod =
- getProperty(LOCK_AUDIT_GRACE_PERIOD, DEFAULT_LOCK_AUDIT_GRACE_PERIOD);
- auditRetryDelay =
- getProperty(LOCK_AUDIT_RETRY_DELAY, DEFAULT_LOCK_AUDIT_RETRY_DELAY);
- }
-
- /**
- * Shutdown threads.
- */
- static void shutdown() {
- AbandonedHandler ah = abandonedHandler;
-
- if (ah != null) {
- abandonedHandler = null;
- ah.interrupt();
- }
- }
-
- /**
- * Constructor - initializes the 'TargetLock' instance, and tries to go
- * ACTIVE. The lock is initially placed in the WAITING state, and the owner
- * will be notified when the success or failure of the lock
- * attempt is determined.
- *
- * @param key string key identifying the lock
- * @param ownerKey string key identifying the owner, which must hash to
- * a bucket owned by the current host (it is typically a 'RequestID')
- * @param owner owner of the lock (will be notified when going from
- * WAITING to ACTIVE)
- */
- public TargetLock(String key, String ownerKey, LockCallback owner) {
- this(key, ownerKey, owner, true);
- }
-
- /**
- * Constructor - initializes the 'TargetLock' instance, and tries to go
- * ACTIVE. The lock is initially placed in the WAITING state, and the owner
- * will be notified when the success or failure of the lock
- * attempt is determined.
- *
- * @param key string key identifying the lock
- * @param ownerKey string key identifying the owner, which must hash to
- * a bucket owned by the current host (it is typically a 'RequestID')
- * @param owner owner of the lock (will be notified when going from
- * WAITING to ACTIVE)
- * @param waitForLock this controls the behavior when 'key' is already
- * locked - 'true' means wait for it to be freed, 'false' means fail
- */
- public TargetLock(final String key, final String ownerKey,
- final LockCallback owner, final boolean waitForLock) {
- if (key == null) {
- throw new IllegalArgumentException("TargetLock: 'key' can't be null");
- }
- if (ownerKey == null) {
- throw new IllegalArgumentException("TargetLock: 'ownerKey' can't be null");
- }
- if (!Bucket.isKeyOnThisServer(ownerKey)) {
- // associated bucket is assigned to a different server
- throw new IllegalArgumentException("TargetLock: 'ownerKey=" + ownerKey
- + "' not currently assigned to this server");
- }
- if (owner == null) {
- throw new IllegalArgumentException("TargetLock: 'owner' can't be null");
- }
- identity = new Identity(key, ownerKey);
- state = State.WAITING;
- this.owner = owner;
-
- // determine the context
- PolicySession session = PolicySession.getCurrentSession();
- if (session != null) {
- // deliver through a 'PolicySessionContext' class
- Object lcontext = session.getAdjunct(PolicySessionContext.class);
- if (!(lcontext instanceof LockCallback)) {
- context = new PolicySessionContext(session);
- session.setAdjunct(PolicySessionContext.class, context);
- } else {
- context = (LockCallback) lcontext;
- }
- } else {
- // no context to deliver through -- call back directly to owner
- context = owner;
- }
-
- // update 'LocalLocks' tables
- final WeakReference<TargetLock> wr = new WeakReference<>(this, abandoned);
- final LocalLocks localLocks = LocalLocks.get(ownerKey);
-
- synchronized (localLocks) {
- localLocks.weakReferenceToIdentity.put(wr, identity);
- localLocks.uuidToWeakReference.put(identity.uuid, wr);
- }
-
- // The associated 'GlobalLocks' table may or may not be on a different
- // host. Also, the following call may queue the message for later
- // processing if the bucket is in a transient state.
- Bucket.forwardAndProcess(key, new Bucket.Message() {
- /**
- * {@inheritDoc}
- */
- @Override
- public void process() {
- // 'GlobalLocks' is on the same host
- State newState = GlobalLocks.get(key).lock(key, ownerKey, identity.uuid, waitForLock);
- logger.info("Lock lock request: key={}, owner={}, uuid={}, wait={} (resp={})",
- key, ownerKey, identity.uuid, waitForLock, state);
-
- // The lock may now be ACTIVE, FREE, or WAITING -- we can notify
- // the owner of the result now for ACTIVE or FREE. Also, the callback
- // may occur while the constructor is still on the stack, although
- // this won't happen in a Drools session.
- setState(newState);
- switch (newState) {
- case ACTIVE:
- // lock was successful - send notification
- context.lockAvailable(TargetLock.this);
- break;
- case FREE:
- // lock attempt failed -
- // clean up local tables, and send notification
- synchronized (localLocks) {
- localLocks.weakReferenceToIdentity.remove(wr);
- localLocks.uuidToWeakReference.remove(identity.uuid);
- }
- wr.clear();
- context.lockUnavailable(TargetLock.this);
- break;
-
- case WAITING:
- break;
-
- default:
- logger.error("Unknown state: {}", newState);
- break;
- }
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void sendToServer(Server server, int bucketNumber) {
- // actual lock is on a remote host -- send the request as
- // a REST message
- logger.info("Sending lock request to {}: key={}, owner={}, uuid={}, wait={}",
- server, key, ownerKey, identity.uuid, waitForLock);
- server.post("lock/lock", null, new Server.PostResponse() {
- /**
- * {@inheritDoc}
- */
- @Override
- public WebTarget webTarget(WebTarget webTarget) {
- return webTarget
- .queryParam(QP_KEY, key)
- .queryParam(QP_OWNER, ownerKey)
- .queryParam(QP_UUID, identity.uuid.toString())
- .queryParam(QP_WAIT, waitForLock)
- .queryParam(QP_TTL, timeToLive);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void response(Response response) {
- logger.info("Lock response={} (code={})",
- response, response.getStatus());
-
- /*
- * there are three possible responses:
- * 204 No Content - operation was successful
- * 202 Accepted - operation is still in progress
- * 423 (Locked) - lock in use, and 'waitForLock' is 'false'
- */
- switch (response.getStatus()) {
- case NO_CONTENT:
- // lock successful
- setState(State.ACTIVE);
- context.lockAvailable(TargetLock.this);
- break;
-
- case LOCKED:
- // failed -- lock in use, and 'waitForLock == false'
- setState(State.FREE);
- synchronized (localLocks) {
- localLocks.weakReferenceToIdentity.remove(wr);
- localLocks.uuidToWeakReference.remove(identity.uuid);
- }
- wr.clear();
- context.lockUnavailable(TargetLock.this);
- break;
-
- case ACCEPTED:
- break;
-
- default:
- logger.error("Unknown status: {}", response.getStatus());
- break;
- }
- }
- });
- }
- });
- }
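
A typical application constructs the lock with a callback and only treats the resource as held once 'lockAvailable' fires (equivalently, once 'isActive()' returns true); until then the lock sits in WAITING. A hedged usage sketch, assuming 'requestId' hashes to a bucket owned by the current host:

    import org.onap.policy.drools.core.lock.Lock;
    import org.onap.policy.drools.core.lock.LockCallback;

    LockCallback callback = new LockCallback() {
        @Override
        public void lockAvailable(Lock lock) {
            // the resource is now locked; call lock.free() when finished
        }

        @Override
        public void lockUnavailable(Lock lock) {
            // the lock could not be acquired (or was later lost)
        }
    };
    TargetLock lock = new TargetLock("some-resource", requestId, callback, true);
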
-
- /* ****************** */
- /* 'Lock' Interface */
- /* ****************** */
-
- /**
- * This method will free the current lock, or remove it from the waiting
- * list if a response is pending.
- *
- * @return 'true' if successful, 'false' if it was not locked, or there
- * appears to be corruption in 'LocalLocks' tables
- */
- @Override
- public boolean free() {
- synchronized (this) {
- if (state != State.ACTIVE && state != State.WAITING) {
- // nothing to free
- return false;
- }
- state = State.FREE;
- }
-
- return identity.free();
- }
-
- /**
- * Return 'true' if the lock is in the ACTIVE state.
- *
- * @return 'true' if the lock is in the ACTIVE state, and 'false' if not
- */
- @Override
- public synchronized boolean isActive() {
- return state == State.ACTIVE;
- }
-
- /**
- * Return 'true' if the lock is not available.
- *
- * @return 'true' if the lock is in the FREE or LOST state,
- * and 'false' if not
- */
- @Override
- public synchronized boolean isUnavailable() {
- return state == State.FREE || state == State.LOST;
- }
-
- /**
- * Return 'true' if the lock is in the WAITING state.
- *
- * @return 'true' if the lock is in the WAITING state, and 'false' if not
- */
- public synchronized boolean isWaiting() {
- return state == State.WAITING;
- }
-
- /**
- * Return the lock's key.
- *
- * @return the lock's key
- */
- @Override
- public String getResourceId() {
- return identity.key;
- }
-
- /**
- * Return the owner key field.
- *
- * @return the owner key field
- */
- @Override
- public String getOwnerKey() {
- return identity.ownerKey;
- }
-
- /**
- * Extends the lock's hold time (not implemented yet).
- */
- @Override
- public void extend(int holdSec, LockCallback callback) {
- // not implemented yet
- }
-
- /* ****************** */
-
- /**
- * Update the state.
- *
- * @param newState the new state value
- */
- private synchronized void setState(State newState) {
- state = newState;
- }
-
- /**
- * Return the current state of the lock.
- *
- * @return the current state of the lock
- */
- public synchronized State getState() {
- return state;
- }
-
- /**
- * This method is called when an incoming /lock/lock REST message is received.
- *
- * @param key string key identifying the lock, which must hash to a bucket
- * owned by the current host
- * @param ownerKey string key identifying the owner
- * @param uuid the UUID that uniquely identifies the original 'TargetLock'
- * @param waitForLock this controls the behavior when 'key' is already
- * locked - 'true' means wait for it to be freed, 'false' means fail
- * @param ttl similar to IP time-to-live -- it controls the number of hops
- * the message may take
- * @return the Response that should be passed back to the HTTP request
- */
- static Response incomingLock(String key, String ownerKey, UUID uuid, boolean waitForLock, int ttl) {
- if (!Bucket.isKeyOnThisServer(key)) {
- // this is the wrong server -- forward to the correct one
- // (we can use this thread)
- ttl -= 1;
- if (ttl > 0) {
- Server server = Bucket.bucketToServer(Bucket.bucketNumber(key));
- if (server != null) {
- WebTarget webTarget = server.getWebTarget("lock/lock");
- if (webTarget != null) {
- logger.warn("Forwarding 'lock/lock' to uuid {} "
- + "(key={},owner={},uuid={},wait={},ttl={})",
- server.getUuid(), key, ownerKey, uuid,
- waitForLock, ttl);
- return webTarget
- .queryParam(QP_KEY, key)
- .queryParam(QP_OWNER, ownerKey)
- .queryParam(QP_UUID, uuid.toString())
- .queryParam(QP_WAIT, waitForLock)
- .queryParam(QP_TTL, String.valueOf(ttl))
- .request().get();
- }
- }
- }
-
- // if we reach this point, we didn't forward for some reason --
- // return failure by indicating it is locked and unavailable
- logger.error("Couldn't forward 'lock/lock' "
- + "(key={},owner={},uuid={},wait={},ttl={})",
- key, ownerKey, uuid, waitForLock, ttl);
- return Response.noContent().status(LOCKED).build();
- }
-
- State state = GlobalLocks.get(key).lock(key, ownerKey, uuid, waitForLock);
- switch (state) {
- case ACTIVE:
- return Response.noContent().build();
- case WAITING:
- return Response.noContent().status(Response.Status.ACCEPTED).build();
- default:
- return Response.noContent().status(LOCKED).build();
- }
- }
-
- /**
- * This method is called when an incoming /lock/free REST message is received.
- *
- * @param key string key identifying the lock, which must hash to a bucket
- * owned by the current host
- * @param ownerKey string key identifying the owner
- * @param uuid the UUID that uniquely identifies the original 'TargetLock'
- * @param ttl similar to IP time-to-live -- it controls the number of hops
- * the message may take
- * @return the Response that should be passed back to the HTTP request
- */
- static Response incomingFree(String key, String ownerKey, UUID uuid, int ttl) {
- if (!Bucket.isKeyOnThisServer(key)) {
- // this is the wrong server -- forward to the correct one
- // (we can use this thread)
- ttl -= 1;
- if (ttl > 0) {
- Server server = Bucket.bucketToServer(Bucket.bucketNumber(key));
- if (server != null) {
- WebTarget webTarget = server.getWebTarget("lock/free");
- if (webTarget != null) {
- logger.warn("Forwarding 'lock/free' to uuid {} "
- + LOCK_MSG,
- server.getUuid(), key, ownerKey, uuid, ttl);
- return webTarget
- .queryParam(QP_KEY, key)
- .queryParam(QP_OWNER, ownerKey)
- .queryParam(QP_UUID, uuid.toString())
- .queryParam(QP_TTL, String.valueOf(ttl))
- .request().get();
- }
- }
- }
-
- // if we reach this point, we didn't forward for some reason --
- // just log the error; there is no meaningful status to return for 'lock/free'
- logger.error("Couldn't forward 'lock/free' "
- + LOCK_MSG,
- key, ownerKey, uuid, ttl);
- return null;
- }
-
- // TBD: should this return a more meaningful response?
- GlobalLocks.get(key).unlock(key, uuid);
- return null;
- }
-
- /**
- * This method is called when an incoming /lock/locked message is received
- * (this is a callback to an earlier requestor that the lock is now
- * available).
- *
- * @param key string key identifying the lock
- * @param ownerKey string key identifying the owner, which must hash to
- * a bucket owned by the current host (it is typically a 'RequestID')
- * @param uuid the UUID that uniquely identifies the original 'TargetLock'
- * @param ttl similar to IP time-to-live -- it controls the number of hops
- * the message may take
- * @return the Response that should be passed back to the HTTP request
- */
- static Response incomingLocked(String key, String ownerKey, UUID uuid, int ttl) {
- if (!Bucket.isKeyOnThisServer(ownerKey)) {
- // this is the wrong server -- forward to the correct one
- // (we can use this thread)
- ttl -= 1;
- if (ttl > 0) {
- // route on 'ownerKey' -- that is the key checked above
- Server server = Bucket.bucketToServer(Bucket.bucketNumber(ownerKey));
- if (server != null) {
- WebTarget webTarget = server.getWebTarget("lock/locked");
- if (webTarget != null) {
- logger.warn("Forwarding 'lock/locked' to uuid {} "
- + LOCK_MSG,
- server.getUuid(), key, ownerKey, uuid, ttl);
- return webTarget
- .queryParam(QP_KEY, key)
- .queryParam(QP_OWNER, ownerKey)
- .queryParam(QP_UUID, uuid.toString())
- .queryParam(QP_TTL, String.valueOf(ttl))
- .request().get();
- }
- }
- }
-
- // if we reach this point, we didn't forward for some reason --
- // return failure by indicating it is locked and unavailable
- logger.error("Couldn't forward 'lock/locked' "
- + LOCK_MSG,
- key, ownerKey, uuid, ttl);
- return Response.noContent().status(LOCKED).build();
- }
-
- TargetLock targetLock;
- LocalLocks localLocks = LocalLocks.get(ownerKey);
- synchronized (localLocks) {
- targetLock = grabLock(uuid, localLocks);
- }
- if (targetLock == null) {
- // We can't locate the target lock
- // TBD: This probably isn't the best error code to use
- return Response.noContent().status(LOCKED).build();
- } else {
- targetLock.context.lockAvailable(targetLock);
- return Response.noContent().build();
- }
- }
-
- private static TargetLock grabLock(UUID uuid, LocalLocks localLocks) {
- WeakReference<TargetLock> wr =
- localLocks.uuidToWeakReference.get(uuid);
-
- if (wr != null) {
- TargetLock targetLock = wr.get();
- if (targetLock == null) {
- // lock has been abandoned
- // (AbandonedHandler should usually find this first)
- localLocks.weakReferenceToIdentity.remove(wr);
- localLocks.uuidToWeakReference.remove(uuid);
- } else {
- // the lock has been made available -- update the state
- // TBD: This could be outside of 'synchronized (localLocks)'
- synchronized (targetLock) {
- if (targetLock.state == State.WAITING) {
- targetLock.state = State.ACTIVE;
- return targetLock;
- } else {
- // will return a failure -- not sure how this happened
- logger.error("incomingLocked: {} is in state {}",
- targetLock, targetLock.state);
- }
- }
- }
- } else {
- // clean up what we can
- localLocks.uuidToWeakReference.remove(uuid);
- }
-
- return null;
- }
-
- /**
- * This is called when the state of a bucket has changed, but is currently
- * stable. Note that this method is called while being synchronized on the
- * bucket.
- *
- * @param bucket the bucket to audit
-     * @param isOwner 'true' if the current host owns the bucket
- */
- static void auditBucket(Bucket bucket, boolean isOwner) {
- if (!isOwner) {
- // we should not have any 'TargetLock' adjuncts
- if (bucket.removeAdjunct(LocalLocks.class) != null) {
- logger.warn("Bucket {}: Removed superfluous "
- + "'TargetLock.LocalLocks' adjunct",
- bucket.getIndex());
- }
- if (bucket.removeAdjunct(GlobalLocks.class) != null) {
- logger.warn("Bucket {}: Removed superfluous "
- + "'TargetLock.GlobalLocks' adjunct",
- bucket.getIndex());
- }
- }
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String toString() {
- return "TargetLock(key=" + identity.key
- + ", ownerKey=" + identity.ownerKey
- + ", uuid=" + identity.uuid
- + ", state=" + state + ")";
- }
-
- /* *************** */
- /* Serialization */
- /* *************** */
-
- /**
- * This method modifies the behavior of 'TargetLock' deserialization by
- * creating the corresponding 'LocalLocks' entries.
- */
- private void readObject(java.io.ObjectInputStream in) throws IOException, ClassNotFoundException {
- in.defaultReadObject();
- if (state == State.ACTIVE || state == State.WAITING) {
- // need to build entries in 'LocalLocks'
- LocalLocks localLocks = LocalLocks.get(identity.ownerKey);
- WeakReference<TargetLock> wr = new WeakReference<>(this, abandoned);
-
- synchronized (localLocks) {
- localLocks.weakReferenceToIdentity.put(wr, identity);
- localLocks.uuidToWeakReference.put(identity.uuid, wr);
- }
- }
- }
-
- /* ============================================================ */
-
- private static class LockFactory implements PolicyResourceLockManager {
- /* *************************************** */
- /* 'PolicyResourceLockManager' interface */
- /* *************************************** */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public Lock createLock(String resourceId, String ownerKey,
- int holdSec, LockCallback callback,
- boolean waitForLock) {
- // 'holdSec' isn't implemented yet
- return new TargetLock(resourceId, ownerKey, callback, waitForLock);
- }
-
- /* *********************** */
- /* 'Startable' interface */
- /* *********************** */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean start() {
- return true;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean stop() {
- return false;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void shutdown() {
- // nothing needs to be done
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean isAlive() {
- return true;
- }
-
- /* ********************** */
- /* 'Lockable' interface */
- /* ********************** */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean lock() {
- return false;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean unlock() {
- return true;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean isLocked() {
- return false;
- }
- }
-
- private static LockFactory lockFactory = new LockFactory();
-
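-    /**
-     * Fetch the 'PolicyResourceLockManager' implementation that is backed
-     * by 'TargetLock'.
-     *
-     * @return the single 'LockFactory' instance
-     */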
- public static PolicyResourceLockManager getLockFactory() {
- return lockFactory;
- }
-
- /* ============================================================ */
-
- /**
- * There is a single instance of class 'TargetLock.EventHandler', which is
- * registered to listen for notifications of state transitions.
- */
- private static class EventHandler extends Events {
- /**
- * {@inheritDoc}
- */
- @Override
- public void newServer(Server server) {
- // with an additional server, the offset within the audit period changes
- Audit.scheduleAudit();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void serverFailed(Server server) {
-            // with one less server, the offset within the audit period changes
- Audit.scheduleAudit();
- }
- }
-
- /* ============================================================ */
-
- /**
- * This class usually has a one-to-one correspondence with a 'TargetLock'
- * instance, unless the 'TargetLock' has been abandoned.
- */
- @EqualsAndHashCode
- private static class Identity implements Serializable {
- private static final long serialVersionUID = 1L;
-
- // this is the key associated with the lock
- String key;
-
- // this is the key associated with the lock requestor
- String ownerKey;
-
- // this is a unique identifier assigned to the 'TargetLock'
- UUID uuid;
-
- /**
- * Constructor - initializes the 'Identity' instance, including the
- * generation of the unique identifier.
- *
- * @param key string key identifying the lock
- * @param ownerKey string key identifying the owner, which must hash to
- * a bucket owned by the current host (it is typically a 'RequestID')
- */
- private Identity(String key, String ownerKey) {
- this.key = key;
- this.ownerKey = ownerKey;
- this.uuid = UUID.randomUUID();
- }
-
- /**
- * Constructor - initializes the 'Identity' instance, with the 'uuid'
- * value passed at initialization time (only used for auditing).
- *
- * @param key string key identifying the lock
- * @param ownerKey string key identifying the owner, which must hash to
- * @param uuid the UUID that uniquely identifies the original 'TargetLock'
- */
- private Identity(String key, String ownerKey, UUID uuid) {
- this.key = key;
- this.ownerKey = ownerKey;
- this.uuid = uuid;
- }
-
- /**
- * Free the lock associated with this 'Identity' instance.
- *
-         * @return 'false' if the 'LocalLocks' data is not there, 'true' if it is
- */
- private boolean free() {
- // free the lock
- Bucket.forwardAndProcess(key, new Bucket.Message() {
- /**
- * {@inheritDoc}
- */
- @Override
- public void process() {
- // the global lock entry is also on this server
- GlobalLocks.get(key).unlock(key, uuid);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void sendToServer(Server server, int bucketNumber) {
- logger.info("Sending free request to {}: key={}, owner={}, uuid={}",
- server, key, ownerKey, uuid);
- server.post("lock/free", null, new Server.PostResponse() {
- @Override
- public WebTarget webTarget(WebTarget webTarget) {
- return webTarget
- .queryParam(QP_KEY, key)
- .queryParam(QP_OWNER, ownerKey)
- .queryParam(QP_UUID, uuid.toString())
- .queryParam(QP_TTL, timeToLive);
- }
-
- @Override
- public void response(Response response) {
- logger.info("Free response={} (code={})",
- response, response.getStatus());
- switch (response.getStatus()) {
- case NO_CONTENT:
- // free successful -- don't need to do anything
- break;
-
- case LOCKED:
- // free failed
- logger.error("TargetLock free failed, "
- + "key={}, owner={}, uuid={}",
- key, ownerKey, uuid);
- break;
-
- default:
- logger.error("Unknown status: {}", response.getStatus());
- break;
- }
- }
- });
- }
- });
-
-            // clean up the 'LocalLocks' entry
- LocalLocks localLocks = LocalLocks.get(ownerKey);
- synchronized (localLocks) {
- WeakReference<TargetLock> wr =
- localLocks.uuidToWeakReference.get(uuid);
- if (wr == null) {
- return false;
- }
-
- localLocks.weakReferenceToIdentity.remove(wr);
- localLocks.uuidToWeakReference.remove(uuid);
- wr.clear();
- }
- return true;
- }
- }
-
- /* ============================================================ */
-
- /**
- * An instance of this class is used for 'TargetLock.context' when the
- * lock is allocated within a Drools session. Its purpose is to ensure that
- * the callback to 'TargetLock.owner' runs within the Drools thread.
- */
- private static class PolicySessionContext implements LockCallback, Serializable {
- private static final long serialVersionUID = 1L;
-
- // the 'PolicySession' instance in question
- PolicySession policySession;
-
- /**
- * Constructor - initialize the 'policySession' field.
- *
- * @param policySession the Drools session
- */
- private PolicySessionContext(PolicySession policySession) {
- this.policySession = policySession;
- }
-
- /* ******************* */
- /* 'Owner' interface */
- /* ******************* */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void lockAvailable(final Lock lock) {
- // Run 'owner.lockAvailable' within the Drools session
- if (policySession != null) {
- DroolsRunnable callback = () ->
- ((TargetLock) lock).owner.lockAvailable(lock);
- policySession.getKieSession().insert(callback);
- }
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void lockUnavailable(Lock lock) {
-            // Run 'owner.lockUnavailable' within the Drools session
- if (policySession != null) {
- DroolsRunnable callback = () ->
- ((TargetLock) lock).owner.lockUnavailable(lock);
- policySession.getKieSession().insert(callback);
- }
- }
-
- /* *************** */
- /* Serialization */
- /* *************** */
-
- /**
- * Specializes serialization of 'PolicySessionContext'.
- */
- private void writeObject(ObjectOutputStream out) throws IOException {
- // 'PolicySession' can't be serialized directly --
- // store as 'groupId', 'artifactId', 'sessionName'
- PolicyContainer pc = policySession.getPolicyContainer();
-
- out.writeObject(pc.getGroupId());
- out.writeObject(pc.getArtifactId());
- out.writeObject(policySession.getName());
- }
-
- /**
- * Specializes deserialization of 'PolicySessionContext'.
- */
- private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
- // 'PolicySession' can't be serialized directly --
- // read in 'groupId', 'artifactId', 'sessionName'
- String groupId = String.class.cast(in.readObject());
- String artifactId = String.class.cast(in.readObject());
- String sessionName = String.class.cast(in.readObject());
-
- // locate the 'PolicySession' associated with
- // 'groupId', 'artifactId', and 'sessionName'
- for (PolicyContainer pc : PolicyContainer.getPolicyContainers()) {
- if (artifactId.equals(pc.getArtifactId())
- && groupId.equals(pc.getGroupId())) {
- // found 'PolicyContainer' -- look up the session
- policySession = pc.getPolicySession(sessionName);
- if (policySession == null) {
- logger.error("TargetLock.PolicySessionContext.readObject: "
- + "Can't find session {}:{}:{}",
- groupId, artifactId, sessionName);
- }
- }
- }
- }
- }
-
- /* ============================================================ */
-
- /**
- * This class contains two tables that have entries for any 'TargetLock'
- * in the 'ACTIVE' or 'WAITING' state. This is the "client" end of the
- * lock implementation.
- */
- static class LocalLocks {
- // this table makes it easier to clean up locks that have been
- // abandoned (see 'AbandonedHandler')
- private Map<WeakReference<TargetLock>, Identity> weakReferenceToIdentity = new IdentityHashMap<>();
-
- // this table is used to locate a 'TargetLock' instance from a UUID
- private Map<UUID, WeakReference<TargetLock>> uuidToWeakReference =
- new HashMap<>();
-
- /**
- * Fetch the 'LocalLocks' entry associated with a particular owner key
- * (it is created if necessary).
- *
- * @param ownerKey string key identifying the owner, which must hash to
- * a bucket owned by the current host (it is typically a 'RequestID')
- * @return the associated 'LocalLocks' instance (it should never be 'null')
- */
- private static LocalLocks get(String ownerKey) {
- return Bucket.getBucket(ownerKey).getAdjunct(LocalLocks.class);
- }
- }
-
- /* ============================================================ */
-
- /**
- * This class contains the actual lock table, which is the "server" end
- * of the lock implementation.
- */
- public static class GlobalLocks implements Serializable {
- private static final long serialVersionUID = 1L;
-
- // this is the lock table, mapping 'key' to 'LockEntry', which indicates
- // the current lock holder, and all those waiting
- private Map<String, LockEntry> keyToEntry = new HashMap<>();
-
- /**
- * Fetch the 'GlobalLocks' entry associated with a particular key
- * (it is created if necessary).
- *
- * @param key string key identifying the lock
- * @return the associated 'GlobalLocks' instance
- * (it should never be 'null')
- */
- private static GlobalLocks get(String key) {
- return Bucket.getBucket(key).getAdjunct(GlobalLocks.class);
- }
-
- /**
- * Do the 'lock' operation -- lock immediately, if possible. If not,
- * get on the waiting list, if requested.
- *
- * @param key string key identifying the lock, which must hash to a bucket
- * owned by the current host
- * @param ownerKey string key identifying the owner
- * @param uuid the UUID that uniquely identifies the original 'TargetLock'
- * (on the originating host)
- * @param waitForLock this controls the behavior when 'key' is already
- * locked - 'true' means wait for it to be freed, 'false' means fail
- * @return the lock State corresponding to the current request
- */
- synchronized State lock(String key, String ownerKey, UUID uuid, boolean waitForLock) {
- synchronized (keyToEntry) {
- LockEntry entry = keyToEntry.get(key);
- if (entry == null) {
- // there is no existing entry -- create one, and return ACTIVE
- entry = new LockEntry(key, ownerKey, uuid);
- keyToEntry.put(key, entry);
- sendUpdate(key);
- return State.ACTIVE;
- }
- if (waitForLock) {
- // the requestor is willing to wait -- get on the waiting list,
- // and return WAITING
- entry.waitingList.add(new Waiting(ownerKey, uuid));
- sendUpdate(key);
- return State.WAITING;
- }
-
- // the requestor is not willing to wait -- return FREE,
- // which will be interpreted as a failure
- return State.FREE;
- }
- }
-
- /**
- * Free a lock or a pending lock request.
- *
- * @param key string key identifying the lock
- * @param uuid the UUID that uniquely identifies the original 'TargetLock'
- */
- synchronized void unlock(String key, UUID uuid) {
- synchronized (keyToEntry) {
- final LockEntry entry = keyToEntry.get(key);
- if (entry == null) {
- logger.error("GlobalLocks.unlock: unknown lock, key={}, uuid={}",
- key, uuid);
- return;
- }
- if (entry.currentOwnerUuid.equals(uuid)) {
- // this is the current lock holder
- if (entry.waitingList.isEmpty()) {
- // free this lock
- keyToEntry.remove(key);
- } else {
- // pass it on to the next one in the list
- Waiting waiting = entry.waitingList.remove();
- entry.currentOwnerKey = waiting.ownerKey;
- entry.currentOwnerUuid = waiting.ownerUuid;
-
- entry.notifyNewOwner(this);
- }
- sendUpdate(key);
- } else {
- // see if one of the waiting entries is being freed
- for (Waiting waiting : entry.waitingList) {
- if (waiting.ownerUuid.equals(uuid)) {
- entry.waitingList.remove(waiting);
- sendUpdate(key);
- break;
- }
- }
- }
- }
- }
-
- /**
- * Notify all features that an update has occurred on this GlobalLock.
- *
- * @param key the key associated with the change
- * (used to locate the bucket)
- */
- private void sendUpdate(String key) {
- Bucket bucket = Bucket.getBucket(key);
- for (ServerPoolApi feature : ServerPoolApi.impl.getList()) {
- feature.lockUpdate(bucket, this);
- }
- }
-
-        /* *************** */
-        /* Serialization   */
-        /* *************** */
-
- private void writeObject(ObjectOutputStream out) throws IOException {
- synchronized (this) {
- out.defaultWriteObject();
- }
- }
- }
-
- /* ============================================================ */
-
- /**
- * Each instance of this object corresponds to a single key in the lock
- * table. It includes the current holder of the lock, as well as
- * any that are waiting.
- */
- private static class LockEntry implements Serializable {
- private static final long serialVersionUID = 1L;
-
- // string key identifying the lock
- private String key;
-
- // string key identifying the owner
- private String currentOwnerKey;
-
-        // UUID identifying the original 'TargetLock'
- private UUID currentOwnerUuid;
-
- // list of pending lock requests for this key
- private Queue<Waiting> waitingList = new LinkedList<>();
-
- /**
- * Constructor - initialize the 'LockEntry'.
- *
- * @param key string key identifying the lock, which must hash to a bucket
- * owned by the current host
- * @param ownerKey string key identifying the owner
- * @param uuid the UUID that uniquely identifies the original 'TargetLock'
- */
- private LockEntry(String key, String ownerKey, UUID uuid) {
- this.key = key;
- this.currentOwnerKey = ownerKey;
- this.currentOwnerUuid = uuid;
- }
-
- /**
- * This method is called after the 'currentOwnerKey' and
- * 'currentOwnerUuid' fields have been updated, and it notifies the new
- * owner that they now have the lock.
- *
- * @param globalLocks the 'GlobalLocks' instance containing this entry
- */
- private void notifyNewOwner(final GlobalLocks globalLocks) {
- Bucket.forwardAndProcess(currentOwnerKey, new Bucket.Message() {
- /**
- * {@inheritDoc}
- */
- @Override
- public void process() {
- // the new owner is on this host
- incomingLocked(key, currentOwnerKey, currentOwnerUuid, 1);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void sendToServer(Server server, int bucketNumber) {
- // the new owner is on a remote host
- logger.info("Sending locked notification to {}: key={}, owner={}, uuid={}",
- server, key, currentOwnerKey, currentOwnerUuid);
- server.post("lock/locked", null, new Server.PostResponse() {
- @Override
- public WebTarget webTarget(WebTarget webTarget) {
- return webTarget
- .queryParam(QP_KEY, key)
- .queryParam(QP_OWNER, currentOwnerKey)
- .queryParam(QP_UUID, currentOwnerUuid.toString())
- .queryParam(QP_TTL, timeToLive);
- }
-
- @Override
- public void response(Response response) {
- logger.info("Locked response={} (code={})",
- response, response.getStatus());
- if (response.getStatus() != NO_CONTENT) {
- // notification failed -- free this one
- globalLocks.unlock(key, currentOwnerUuid);
- }
- }
- });
- }
- });
-
- }
- }
-
- /* ============================================================ */
-
- /**
-     * This corresponds to a member of 'LockEntry.waitingList'.
- */
- private static class Waiting implements Serializable {
- private static final long serialVersionUID = 1L;
-
- // string key identifying the owner
- String ownerKey;
-
- // uniquely identifies the new owner 'TargetLock'
- UUID ownerUuid;
-
- /**
- * Constructor.
- *
- * @param ownerKey string key identifying the owner
- * @param ownerUuid uniquely identifies the new owner 'TargetLock'
- */
- private Waiting(String ownerKey, UUID ownerUuid) {
- this.ownerKey = ownerKey;
- this.ownerUuid = ownerUuid;
- }
- }
-
- /* ============================================================ */
-
- /**
- * Backup data associated with a 'GlobalLocks' instance.
- */
- static class LockBackup implements Bucket.Backup {
- /**
- * {@inheritDoc}
- */
- @Override
- public Bucket.Restore generate(int bucketNumber) {
- Bucket bucket = Bucket.getBucket(bucketNumber);
-
- // just remove 'LocalLocks' -- it will need to be rebuilt from
- // 'TargetLock' instances
- bucket.removeAdjunct(LocalLocks.class);
-
- // global locks need to be transferred
- GlobalLocks globalLocks = bucket.removeAdjunct(GlobalLocks.class);
- return globalLocks == null ? null : new LockRestore(globalLocks);
- }
- }
-
- /* ============================================================ */
-
- /**
- * This class is used to restore a 'GlobalLocks' instance from a backup.
- */
- static class LockRestore implements Bucket.Restore, Serializable {
- private static final long serialVersionUID = 1L;
-
- GlobalLocks globalLocks;
-
- /**
- * Constructor - runs as part of backup (deserialization bypasses this constructor).
- *
- * @param globalLocks GlobalLocks instance extracted as part of backup
- */
- LockRestore(GlobalLocks globalLocks) {
- this.globalLocks = globalLocks;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void restore(int bucketNumber) {
- // fetch bucket
- Bucket bucket = Bucket.getBucket(bucketNumber);
-
- // update the adjunct
- if (bucket.putAdjunct(globalLocks) != null) {
- logger.error("LockRestore({}): GlobalLocks adjunct already existed",
- bucketNumber);
- }
-
- // notify features of the 'globalLocks' update
- for (ServerPoolApi feature : ServerPoolApi.impl.getList()) {
- feature.lockUpdate(bucket, globalLocks);
- }
- }
- }
-
- /* ============================================================ */
-
- /**
-     * This class is a daemon that monitors the 'abandoned' queue. If an
- * ACTIVE 'TargetLock' is abandoned, the GC will eventually place the
- * now-empty 'WeakReference' in this queue.
- */
- private static class AbandonedHandler extends Thread {
- AbandonedHandler() {
- super("TargetLock.AbandonedHandler");
- }
-
- /**
- * This method camps on the 'abandoned' queue, processing entries as
- * they are received.
- */
- @Override
- public void run() {
- while (abandonedHandler != null) {
- try {
- Reference<? extends TargetLock> wr = abandoned.remove();
-
-                    // At this point, we know that 'wr' is a
- // 'WeakReference<TargetLock>' instance that has been abandoned,
- // but we don't know what the associated 'Identity' instance
- // is. Here, we search through every bucket looking for a
- // matching entry. The assumption is that this is rare enough,
- // and due to a bug, so it doesn't hurt to spend extra CPU time
- // here. The alternative is to add some additional information
- // to make this mapping quick, at the expense of a slight
- // slow down of normal lock operations.
- for (int i = 0; i < Bucket.BUCKETCOUNT; i += 1) {
- LocalLocks localLocks =
- Bucket.getBucket(i).getAdjunctDontCreate(LocalLocks.class);
- if (localLocks != null) {
- // the adjunct does exist -- see if the WeakReference
- // instance is known to this bucket
- synchronized (localLocks) {
- Identity identity =
- localLocks.weakReferenceToIdentity.get(wr);
- if (identity != null) {
- // found it
- logger.error("Abandoned TargetLock: bucket={}, "
- + "key={}, ownerKey={}, uuid={}",
- i, identity.key, identity.ownerKey,
- identity.uuid);
- identity.free();
- break;
- }
- }
- }
- }
- } catch (Exception e) {
- logger.error("TargetLock.AbandonedHandler exception", e);
- }
- }
- }
- }
-
- // create a single instance of 'AbandonedHandler', and start it
- private static AbandonedHandler abandonedHandler = new AbandonedHandler();
-
- static {
- abandonedHandler.start();
- }
-
- /* ============================================================ */
-
- /**
- * This class handles the '/cmd/dumpLocks' REST command.
- */
- static class DumpLocks {
- // indicates whether a more detailed dump should be done
- private boolean detail;
-
- // this table maps the 'TargetLock' UUID into an object containing
- // both client (LocalLocks) and server (GlobalLocks) information
- private Map<UUID, MergedData> mergedDataMap =
- new TreeMap<>(Util.uuidComparator);
-
- // this table maps the 'TargetLock' key into the associated 'LockEntry'
- // (server end)
- private Map<String, LockEntry> lockEntries = new TreeMap<>();
-
- // this table maps the 'TargetLock' key into entries that only exist
- // on the client end
- private Map<String, MergedData> clientOnlyEntries = new TreeMap<>();
-
- // display format (although it is now dynamically adjusted)
- private String format = "%-14s %-14s %-36s %-10s %s\n";
-
- // calculation of maximum key length for display
- private int keyLength = 10;
-
- // calculation of maximum owner key length for display
- private int ownerKeyLength = 10;
-
- // 'true' if any comments need to be displayed (affects format)
- private boolean commentsIncluded = false;
-
- /**
- * Entry point for the '/cmd/dumpLocks' REST command.
- *
- * @param out where the output should be displayed
- * @param detail 'true' provides additional bucket and host information
- * (but abbreviates all UUIDs in order to avoid excessive
- * line length)
- */
- static void dumpLocks(PrintStream out, boolean detail)
- throws InterruptedException, IOException, ClassNotFoundException {
-
- // the actual work is done in the constructor
- new DumpLocks(out, detail);
- }
-
- /**
- * Entry point for the '/lock/dumpLocksData' REST command, which generates
- * a byte stream for this particular host.
- *
- * @param serverUuid the UUID of the intended destination server
- * @param ttl similar to IP time-to-live -- it controls the number of hops
- * the message may take
- * @return a base64-encoded byte stream containing serialized 'HostData'
- */
- static byte[] dumpLocksData(UUID serverUuid, int ttl) throws IOException {
- if (!Server.getThisServer().getUuid().equals(serverUuid)) {
- ttl -= 1;
- if (ttl > 0) {
- Server server = Server.getServer(serverUuid);
- if (server != null) {
- WebTarget webTarget =
- server.getWebTarget("lock/dumpLocksData");
- if (webTarget != null) {
- logger.info("Forwarding 'lock/dumpLocksData' to uuid {}",
- serverUuid);
- return webTarget
- .queryParam(QP_SERVER, serverUuid.toString())
- .queryParam(QP_TTL, String.valueOf(ttl))
- .request().get(byte[].class);
- }
- }
- }
-
- // if we reach this point, we didn't forward for some reason
-
- logger.error("Couldn't forward 'lock/dumpLocksData to uuid {}",
- serverUuid);
- return EMPTY_BYTE_ARRAY;
- }
-
- return Base64.getEncoder().encode(Util.serialize(new HostData()));
- }
-
- /**
- * Constructor - does the '/cmd/dumpLocks' REST command.
- *
-         * @param out where the output should be displayed
-         * @param detail 'true' provides additional bucket and host information
-         */
- DumpLocks(PrintStream out, boolean detail)
- throws IOException, InterruptedException, ClassNotFoundException {
-
- this.detail = detail;
-
- // receives responses from '/lock/dumpLocksData'
- final LinkedTransferQueue<Response> responseQueue =
- new LinkedTransferQueue<>();
-
- // generate a count of the number of external servers that should respond
- int pendingResponseCount = 0;
-
- // iterate over all of the servers
- for (final Server server : Server.getServers()) {
- if (server == Server.getThisServer()) {
- // skip this server -- we will handle it differently
- continue;
- }
-
- // keep a running count
- pendingResponseCount += 1;
- server.post("lock/dumpLocksData", null, new Server.PostResponse() {
- @Override
- public WebTarget webTarget(WebTarget webTarget) {
- return webTarget
- .queryParam(QP_SERVER, server.getUuid().toString())
- .queryParam(QP_TTL, timeToLive);
- }
-
- @Override
- public void response(Response response) {
- // responses are queued, and the main thread will collect them
- responseQueue.put(response);
- }
- });
- }
-
- // this handles data associated with this server -- it also goes through
- // serialization/deserialization, which provides a deep copy of the data
- populateLockData(dumpLocksData(Server.getThisServer().getUuid(), 0));
-
-            // now, poll for responses from all of the other servers
- while (pendingResponseCount > 0) {
- pendingResponseCount -= 1;
- Response response = responseQueue.poll(60, TimeUnit.SECONDS);
- if (response == null) {
- // timeout -- we aren't expecting any more responses
- break;
- }
-
- // populate data associated with this server
- populateLockData(response.readEntity(byte[].class));
- }
-
- // we have processed all of the servers that we are going to,
- // now generate the output
- dump(out);
- }
-
- /**
- * process base64-encoded data from a server (local or remote).
- *
- * @param data base64-encoded data (class 'HostData')
- */
- void populateLockData(byte[] data) throws IOException, ClassNotFoundException {
- Object decodedData = Util.deserialize(Base64.getDecoder().decode(data));
- if (decodedData instanceof HostData) {
- // deserialized data
- HostData hostData = (HostData) decodedData;
-
- // fetch 'Server' instance associated with the responding server
- Server server = Server.getServer(hostData.hostUuid);
-
- // process the client-end data
- for (ClientData clientData : hostData.clientDataList) {
- populateLockDataClientData(clientData, server);
- }
-
- // process the server-end data
- for (ServerData serverData : hostData.serverDataList) {
- populateLockDataServerData(serverData, server);
- }
- } else {
- logger.error("TargetLock.DumpLocks.populateLockData: "
- + "received data has class {}",
- decodedData.getClass().getName());
- }
- }
-
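-        /**
-         * Merge the client-end ('LocalLocks') records from one bucket into
-         * 'mergedDataMap', flagging duplicates and bucket-ownership mismatches.
-         *
-         * @param clientData client-end records for a single bucket
-         * @param server the server that sent the records
-         */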
- private void populateLockDataClientData(ClientData clientData, Server server) {
- // 'true' if the bucket associated with this 'ClientData'
- // doesn't belong to the remote server, as far as we can tell
- boolean serverMismatch =
- Bucket.bucketToServer(clientData.bucketNumber) != server;
-
- // each 'ClientDataRecord' instance corresponds to an
- // active 'Identity' (TargetLock) instance
- for (ClientDataRecord cdr : clientData.clientDataRecords) {
- // update maximum 'key' and 'ownerKey' lengths
- updateKeyLength(cdr.identity.key);
- updateOwnerKeyLength(cdr.identity.ownerKey);
-
- // fetch UUID
- UUID uuid = cdr.identity.uuid;
-
-                // fetch/generate 'MergedData' instance for this UUID
- MergedData md = mergedDataMap.computeIfAbsent(uuid, key -> new MergedData(uuid));
-
- // update 'MergedData.clientDataRecord'
- if (md.clientDataRecord == null) {
- md.clientDataRecord = cdr;
- } else {
- md.comment("Duplicate client entry for UUID");
- }
-
- if (serverMismatch) {
- // need to generate an additional error
- md.comment(server.toString()
- + "(client) does not own bucket "
- + clientData.bucketNumber);
- }
- }
- }
-
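-        /**
-         * Merge the server-end ('GlobalLocks') records from one bucket into
-         * 'mergedDataMap' and 'lockEntries', flagging duplicates and
-         * bucket-ownership mismatches.
-         *
-         * @param serverData server-end records for a single bucket
-         * @param server the server that sent the records
-         */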
- private void populateLockDataServerData(ServerData serverData, Server server) {
- // 'true' if the bucket associated with this 'ServerData'
- // doesn't belong to the remote server, as far as we can tell
- boolean serverMismatch =
- Bucket.bucketToServer(serverData.bucketNumber) != server;
-
- // each 'LockEntry' instance corresponds to the current holder
- // of a lock, and all requestors waiting for it to be freed
- for (LockEntry le : serverData.globalLocks.keyToEntry.values()) {
- // update maximum 'key' and 'ownerKey' lengths
- updateKeyLength(le.key);
- updateOwnerKeyLength(le.currentOwnerKey);
-
- // fetch uuid
- UUID uuid = le.currentOwnerUuid;
-
-                // fetch/generate 'MergedData' instance for this UUID
- MergedData md = mergedDataMap.computeIfAbsent(uuid, key -> new MergedData(uuid));
-
- // update 'lockEntries' table entry
- if (lockEntries.get(le.key) != null) {
- md.comment("Duplicate server entry for key " + le.key);
- } else {
- lockEntries.put(le.key, le);
- }
-
- // update 'MergedData.serverLockEntry'
- // (leave 'MergedData.serverWaiting' as 'null', because
- // this field is only used for waiting entries)
- if (md.serverLockEntry == null) {
- md.serverLockEntry = le;
- } else {
- md.comment("Duplicate server entry for UUID");
- }
-
- if (serverMismatch) {
- // need to generate an additional error
- md.comment(server.toString()
- + "(server) does not own bucket "
- + serverData.bucketNumber);
- }
-
-                // we need 'MergedData' entries for all waiting requests
- for (Waiting waiting : le.waitingList) {
- populateLockDataServerDataWaiting(
- serverData, server, serverMismatch, le, waiting);
- }
- }
- }
-
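-        /**
-         * Merge a single waiting-list entry from a server-end 'LockEntry'
-         * into 'mergedDataMap'.
-         *
-         * @param serverData server-end records for a single bucket
-         * @param server the server that sent the records
-         * @param serverMismatch 'true' if the bucket doesn't appear to
-         *     belong to 'server'
-         * @param le the 'LockEntry' owning the waiting list
-         * @param waiting the waiting-list entry being merged
-         */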
- private void populateLockDataServerDataWaiting(
- ServerData serverData, Server server, boolean serverMismatch,
- LockEntry le, Waiting waiting) {
-
- // update maximum 'ownerKey' length
- updateOwnerKeyLength(waiting.ownerKey);
-
- // fetch uuid
- UUID uuid = waiting.ownerUuid;
-
-            // fetch/generate 'MergedData' instance for this UUID
- MergedData md = mergedDataMap.computeIfAbsent(uuid, key -> new MergedData(uuid));
-
- // update 'MergedData.serverLockEntry' and
- // 'MergedData.serverWaiting'
- if (md.serverLockEntry == null) {
- md.serverLockEntry = le;
- md.serverWaiting = waiting;
- } else {
- md.comment("Duplicate server entry for UUID");
- }
-
- if (serverMismatch) {
- // need to generate an additional error
- md.comment(server.toString()
- + "(server) does not own bucket "
- + serverData.bucketNumber);
- }
- }
-
- /**
- * Do some additional sanity checks on the 'MergedData', and then
- * display all of the results.
- *
- * @param out where the output should be displayed
- */
- void dump(PrintStream out) {
- // iterate over the 'MergedData' instances looking for problems
- for (MergedData md : mergedDataMap.values()) {
- if (md.clientDataRecord == null) {
- md.comment("Client data missing");
- } else if (md.serverLockEntry == null) {
- md.comment("Server data missing");
- clientOnlyEntries.put(md.clientDataRecord.identity.key, md);
- } else if (!md.clientDataRecord.identity.key.equals(md.serverLockEntry.key)) {
- md.comment("Client key(" + md.clientDataRecord.identity.key
- + ") server key(" + md.serverLockEntry.key
- + ") mismatch");
- } else {
- String serverOwnerKey = (md.serverWaiting == null
- ? md.serverLockEntry.currentOwnerKey : md.serverWaiting.ownerKey);
- if (!md.clientDataRecord.identity.ownerKey.equals(serverOwnerKey)) {
- md.comment("Client owner key("
- + md.clientDataRecord.identity.ownerKey
- + ") server owner key(" + serverOwnerKey
- + ") mismatch");
- }
- // TBD: test for state mismatch
- }
- }
-
- dumpMergeData(out);
- dumpServerTable(out);
- dumpClientOnlyEntries(out);
- }
-
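-        /**
-         * Generate the output format (based upon the maximum key and owner
-         * key lengths, and whether comments are present), and print the
-         * column headers.
-         *
-         * @param out where the output should be displayed
-         */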
- private void dumpMergeData(PrintStream out) {
- if (detail) {
- // generate format based upon the maximum key length, maximum
- // owner key length, and whether comments are included anywhere
- format = "%-" + keyLength + "s %6s %-9s %-" + ownerKeyLength
- + "s %6s %-9s %-9s %-10s" + (commentsIncluded ? " %s\n" : "\n");
-
- // dump out the header
- out.printf(format, "Key", "Bucket", "Host UUID",
- "Owner Key", "Bucket", "Host UUID",
- "Lock UUID", "State", "Comments");
- out.printf(format, "---", "------", PRINTOUT_DASHES,
- PRINTOUT_DASHES, "------", PRINTOUT_DASHES,
- PRINTOUT_DASHES, "-----", "--------");
- } else {
- // generate format based upon the maximum key length, maximum
- // owner key length, and whether comments are included anywhere
- format = "%-" + keyLength + "s %-" + ownerKeyLength
- + "s %-36s %-10s" + (commentsIncluded ? " %s\n" : "\n");
-
- // dump out the header
- out.printf(format, "Key", "Owner Key", "UUID", "State", "Comments");
- out.printf(format, "---", PRINTOUT_DASHES, "----", "-----", "--------");
- }
- }
-
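-        /**
-         * Dump out all entries in the server-end table ('lockEntries'),
-         * including the current owner and any waiting requests for each lock.
-         *
-         * @param out where the output should be displayed
-         */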
- private void dumpServerTable(PrintStream out) {
- // iterate over the server table
- for (LockEntry le : lockEntries.values()) {
- // fetch merged data
- MergedData md = mergedDataMap.get(le.currentOwnerUuid);
-
- // dump out record associated with lock owner
- if (detail) {
- out.printf(format,
- le.key, getBucket(le.key), bucketOwnerUuid(le.key),
- le.currentOwnerKey, getBucket(le.currentOwnerKey),
- bucketOwnerUuid(le.currentOwnerKey),
- abbrevUuid(le.currentOwnerUuid),
- md.getState(), md.firstComment());
- } else {
- out.printf(format,
- le.key, le.currentOwnerKey, le.currentOwnerUuid,
- md.getState(), md.firstComment());
- }
- dumpMoreComments(out, md);
-
- // iterate over all requests waiting for this lock
- for (Waiting waiting: le.waitingList) {
- // fetch merged data
- md = mergedDataMap.get(waiting.ownerUuid);
-
- // dump out record associated with waiting request
- if (detail) {
- out.printf(format,
- "", "", "",
- waiting.ownerKey, getBucket(waiting.ownerKey),
- bucketOwnerUuid(waiting.ownerKey),
- abbrevUuid(waiting.ownerUuid),
- md.getState(), md.firstComment());
- } else {
- out.printf(format, "", waiting.ownerKey, waiting.ownerUuid,
- md.getState(), md.firstComment());
- }
- dumpMoreComments(out, md);
- }
- }
- }
-
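-        /**
-         * Dump out client-end records that have no matching server entry.
-         *
-         * @param out where the output should be displayed
-         */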
- private void dumpClientOnlyEntries(PrintStream out) {
- // client records that don't have matching server entries
- for (MergedData md : clientOnlyEntries.values()) {
- ClientDataRecord cdr = md.clientDataRecord;
- if (detail) {
- out.printf(format,
- cdr.identity.key, getBucket(cdr.identity.key),
- bucketOwnerUuid(cdr.identity.key),
- cdr.identity.ownerKey,
- getBucket(cdr.identity.ownerKey),
- bucketOwnerUuid(cdr.identity.ownerKey),
- abbrevUuid(cdr.identity.uuid),
- md.getState(), md.firstComment());
- } else {
- out.printf(format, cdr.identity.key, cdr.identity.ownerKey,
- cdr.identity.uuid, md.getState(), md.firstComment());
- }
- dumpMoreComments(out, md);
- }
- }
-
- /**
- * This method converts a String keyword into the corresponding bucket
- * number.
- *
- * @param key the keyword to be converted
- * @return the bucket number
- */
- private static int getBucket(String key) {
- return Bucket.bucketNumber(key);
- }
-
- /**
- * Determine the abbreviated UUID associated with a key.
- *
- * @param key the keyword to be converted
- * @return the abbreviated UUID of the bucket owner
- */
- private static String bucketOwnerUuid(String key) {
- // fetch the bucket
- Bucket bucket = Bucket.getBucket(Bucket.bucketNumber(key));
-
- // fetch the bucket owner (may be 'null' if unassigned)
- Server owner = bucket.getOwner();
-
- return owner == null ? "NONE" : abbrevUuid(owner.getUuid());
- }
-
- /**
- * Convert a UUID to an abbreviated form, which is the
- * first 8 hex digits of the UUID, followed by the character '*'.
- *
- * @param uuid the UUID to convert
- * @return the abbreviated form
- */
- private static String abbrevUuid(UUID uuid) {
- return uuid.toString().substring(0, 8) + "*";
- }
-
- /**
- * If the 'MergedData' instance has more than one comment,
- * dump out comments 2-n.
- *
- * @param out where the output should be displayed
- * @param md the MergedData instance
- */
- void dumpMoreComments(PrintStream out, MergedData md) {
- if (md.comments.size() > 1) {
- Queue<String> comments = new LinkedList<>(md.comments);
-
- // remove the first entry, because it has already been displayed
- comments.remove();
- for (String comment : comments) {
- if (detail) {
- out.printf(format, "", "", "", "", "", "", "", "", comment);
- } else {
- out.printf(format, "", "", "", "", comment);
- }
- }
- }
- }
-
- /**
- * Check the length of the specified 'key', and update 'keyLength' if
- * it exceeds the current maximum.
- *
- * @param key the key to be tested
- */
- void updateKeyLength(String key) {
- int length = key.length();
- if (length > keyLength) {
- keyLength = length;
- }
- }
-
- /**
- * Check the length of the specified 'ownerKey', and update
- * 'ownerKeyLength' if it exceeds the current maximum.
- *
- * @param ownerKey the owner key to be tested
- */
- void updateOwnerKeyLength(String ownerKey) {
- int length = ownerKey.length();
- if (length > ownerKeyLength) {
- ownerKeyLength = length;
- }
- }
-
- /* ============================== */
-
- /**
- * Each instance of this class corresponds to client and/or server
- * data structures, and is used to check consistency between the two.
- */
- class MergedData {
- // the client/server UUID
- UUID uuid;
-
- // client-side data (from LocalLocks)
- ClientDataRecord clientDataRecord = null;
-
- // server-side data (from GlobalLocks)
- LockEntry serverLockEntry = null;
- Waiting serverWaiting = null;
-
- // detected problems, such as server/client mismatches
- Queue<String> comments = new LinkedList<>();
-
- /**
- * Constructor - initialize the 'uuid'.
- *
- * @param uuid the UUID that identifies the original 'TargetLock'
- */
- MergedData(UUID uuid) {
- this.uuid = uuid;
- }
-
- /**
-             * Add a comment to the list, and indicate that there are now
- * comments present.
- *
- * @param co the comment to add
- */
- void comment(String co) {
- comments.add(co);
- commentsIncluded = true;
- }
-
- /**
- * Return the first comment, or an empty string if there are no
- * comments.
- *
- * @return the first comment, or an empty string if there are no
- * comments (useful for formatting output).
- */
- String firstComment() {
- return comments.isEmpty() ? "" : comments.poll();
- }
-
- /**
- * Return a string description of the state.
- *
- * @return a string description of the state.
- */
- String getState() {
- return clientDataRecord == null
- ? "MISSING" : clientDataRecord.state.toString();
- }
- }
-
- /**
- * This class contains all of the data sent from each host to the
- * host that is consolidating the information for display.
- */
- static class HostData implements Serializable {
- private static final long serialVersionUID = 1L;
-
- // the UUID of the host sending the data
- private UUID hostUuid;
-
- // all of the information derived from the 'LocalLocks' data
- private List<ClientData> clientDataList;
-
- // all of the information derived from the 'GlobalLocks' data
- private List<ServerData> serverDataList;
-
- /**
- * Constructor - this goes through all of the lock tables,
- * and populates 'clientDataList' and 'serverDataList'.
- */
- HostData() {
- // fetch UUID
- hostUuid = Server.getThisServer().getUuid();
-
- // initial storage for client and server data
- clientDataList = new ArrayList<>();
- serverDataList = new ArrayList<>();
-
- // go through buckets
- for (int i = 0; i < Bucket.BUCKETCOUNT; i += 1) {
- Bucket bucket = Bucket.getBucket(i);
-
- // client data
- LocalLocks localLocks =
- bucket.getAdjunctDontCreate(LocalLocks.class);
- if (localLocks != null) {
- // we have client data for this bucket
- ClientData clientData = new ClientData(i);
- clientDataList.add(clientData);
-
- synchronized (localLocks) {
- generateClientLockData(localLocks, clientData);
- }
- }
-
- // server data
- GlobalLocks globalLocks =
- bucket.getAdjunctDontCreate(GlobalLocks.class);
- if (globalLocks != null) {
- // server data is already in serializable form
- serverDataList.add(new ServerData(i, globalLocks));
- }
- }
- }
-
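-            /**
-             * Populate 'clientData' with a 'ClientDataRecord' for each entry
-             * in the bucket's 'LocalLocks' tables (this is called while
-             * synchronized on 'localLocks').
-             *
-             * @param localLocks the client-end tables for this bucket
-             * @param clientData the per-bucket container being populated
-             */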
- private void generateClientLockData(LocalLocks localLocks, ClientData clientData) {
- for (WeakReference<TargetLock> wr :
- localLocks.weakReferenceToIdentity.keySet()) {
- // Note: 'targetLock' may be 'null' if it has
- // been abandoned, and garbage collected
- TargetLock targetLock = wr.get();
-
- // fetch associated 'identity'
- Identity identity =
- localLocks.weakReferenceToIdentity.get(wr);
- if (identity != null) {
- // add a new 'ClientDataRecord' for this bucket
- clientData.clientDataRecords.add(
- new ClientDataRecord(identity,
- (targetLock == null ? null :
- targetLock.getState())));
- }
- }
- }
- }
-
- /**
- * Information derived from the 'LocalLocks' adjunct to a single bucket.
- */
- static class ClientData implements Serializable {
- private static final long serialVersionUID = 1L;
-
- // number of the bucket
- private int bucketNumber;
-
- // all of the client locks within this bucket
- private List<ClientDataRecord> clientDataRecords;
-
- /**
- * Constructor - initially, there are no 'clientDataRecords'.
- *
- * @param bucketNumber the bucket these records are associated with
- */
- ClientData(int bucketNumber) {
- this.bucketNumber = bucketNumber;
- clientDataRecords = new ArrayList<>();
- }
- }
-
- /**
- * This corresponds to the information contained within a
- * single 'TargetLock'.
- */
- static class ClientDataRecord implements Serializable {
- private static final long serialVersionUID = 1L;
-
- // contains key, ownerKey, uuid
- private Identity identity;
-
- // state field of 'TargetLock'
- // (may be 'null' if there is no 'TargetLock')
- private State state;
-
- /**
- * Constructor - initialize the fields.
- *
- * @param identity contains key, ownerKey, uuid
- * @param state the state if the 'TargetLock' exists, and 'null' if it
- * has been garbage collected
- */
- ClientDataRecord(Identity identity, State state) {
- this.identity = identity;
- this.state = state;
- }
- }
-
- /**
- * Information derived from the 'GlobalLocks' adjunct to a single bucket.
- */
- static class ServerData implements Serializable {
- private static final long serialVersionUID = 1L;
-
- // number of the bucket
- private int bucketNumber;
-
- // server-side data associated with a single bucket
- private GlobalLocks globalLocks;
-
- /**
- * Constructor - initialize the fields.
- *
- * @param bucketNumber the bucket these records are associated with
- * @param globalLocks GlobalLocks instance associated with 'bucketNumber'
- */
- ServerData(int bucketNumber, GlobalLocks globalLocks) {
- this.bucketNumber = bucketNumber;
- this.globalLocks = globalLocks;
- }
- }
- }
-
- /* ============================================================ */
-
- /**
- * Instances of 'AuditData' are passed between servers as part of the
- * 'TargetLock' audit.
- */
- static class AuditData implements Serializable {
- private static final long serialVersionUID = 1L;
-
- // client records that currently exist, or records to be cleared
-        // (depending upon message) -- client/server is from the sender's side
- private List<Identity> clientData;
-
- // server records that currently exist, or records to be cleared
-        // (depending upon message) -- client/server is from the sender's side
- private List<Identity> serverData;
-
- /**
-         * Constructor - start with empty 'clientData' and 'serverData' lists.
- */
- AuditData() {
- clientData = new ArrayList<>();
- serverData = new ArrayList<>();
- }
-
- /**
- * This is called when we receive an incoming 'AuditData' object from
- * a remote host.
- *
- * @param includeWarnings if 'true', generate warning messages
- * for mismatches
- * @return an 'AuditData' instance that only contains records we
- * can't confirm
- */
- AuditData generateResponse(boolean includeWarnings) {
- AuditData response = new AuditData();
-
- // compare remote servers client data with our server data
- generateResponseClientEnd(response, includeWarnings);
-
- // test server data
- generateResponseServerEnd(response, includeWarnings);
-
- return response;
- }
-
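-        /**
-         * Check the sender's client-end records against our server-end
-         * ('GlobalLocks') tables; any record that can't be confirmed is
-         * added to 'response.serverData'.
-         *
-         * @param response the 'AuditData' response being built
-         * @param includeWarnings if 'true', generate warning messages
-         *     for mismatches
-         */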
- private void generateResponseClientEnd(AuditData response, boolean includeWarnings) {
- for (Identity identity : clientData) {
- // remote end is the client, and we are the server
- Bucket bucket = Bucket.getBucket(identity.key);
- GlobalLocks globalLocks =
- bucket.getAdjunctDontCreate(GlobalLocks.class);
-
- if (globalLocks != null) {
- Map<String, LockEntry> keyToEntry = globalLocks.keyToEntry;
- synchronized (keyToEntry) {
- if (matchIdentity(keyToEntry, identity)) {
- continue;
- }
- }
- }
-
- // If we reach this point, a match was not confirmed. Note that it
- // is possible that we got caught in a transient state, so we need
- // to somehow make sure that we don't "correct" a problem that
- // isn't real.
-
- if (includeWarnings) {
- logger.warn("TargetLock audit issue: server match not found "
- + "(key={},ownerKey={},uuid={})",
- identity.key, identity.ownerKey, identity.uuid);
- }
-
- // it was 'clientData' to the sender, but 'serverData' to us
- response.serverData.add(identity);
- }
- }
-
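-        /**
-         * Determine whether 'identity' matches the current owner, or any
-         * waiting entry, of the corresponding 'LockEntry'.
-         *
-         * @param keyToEntry the server-end lock table
-         * @param identity the client-end record being checked
-         * @return 'true' if a match was found, 'false' if not
-         */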
- private boolean matchIdentity(Map<String, LockEntry> keyToEntry, Identity identity) {
- LockEntry le = keyToEntry.get(identity.key);
- if (le != null) {
- if (identity.uuid.equals(le.currentOwnerUuid)
- && identity.ownerKey.equals(le.currentOwnerKey)) {
- // we found a match
- return true;
- }
-
- // check the waiting list
- for (Waiting waiting : le.waitingList) {
- if (identity.uuid.equals(waiting.ownerUuid)
- && identity.ownerKey.equals(waiting.ownerKey)) {
- // we found a match on the waiting list
- return true;
- }
- }
- }
-
- return false;
- }
-
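-        /**
-         * Check the sender's server-end records against our client-end
-         * ('LocalLocks') tables; any record that can't be confirmed is
-         * added to 'response.clientData'.
-         *
-         * @param response the 'AuditData' response being built
-         * @param includeWarnings if 'true', generate warning messages
-         *     for mismatches
-         */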
- private void generateResponseServerEnd(AuditData response, boolean includeWarnings) {
- for (Identity identity : serverData) {
- // remote end is the server, and we are the client
- Bucket bucket = Bucket.getBucket(identity.ownerKey);
- LocalLocks localLocks =
- bucket.getAdjunctDontCreate(LocalLocks.class);
- if (localLocks != null) {
- synchronized (localLocks) {
- WeakReference<TargetLock> wr =
- localLocks.uuidToWeakReference.get(identity.uuid);
- if (wr != null) {
- Identity identity2 =
- localLocks.weakReferenceToIdentity.get(wr);
- if (identity2 != null
- && identity.key.equals(identity2.key)
- && identity.ownerKey.equals(identity2.ownerKey)) {
- // we have a match
- continue;
- }
- }
- }
- }
-
- // If we reach this point, a match was not confirmed. Note that it
- // is possible that we got caught in a transient state, so we need
- // to somehow make sure that we don't "correct" a problem that
- // isn't real.
- if (includeWarnings) {
- logger.warn("TargetLock audit issue: client match not found "
- + "(key={},ownerKey={},uuid={})",
- identity.key, identity.ownerKey, identity.uuid);
- }
- response.clientData.add(identity);
- }
- }
-
- /**
- * The response messages contain 'Identity' objects that match those
- * in our outgoing '/lock/audit' message, but that the remote end could
-         * not confirm. Again, 'client' and 'server' are defined from the
-         * remote end's perspective.
- *
- * @param server the server we sent the request to
- */
- void processResponse(Server server) {
- if (clientData.isEmpty() && serverData.isEmpty()) {
- // no mismatches
- logger.info("TargetLock audit with {} completed -- no mismatches",
- server);
- return;
- }
-
- // There are still mismatches -- note that 'clientData' and
- // 'serverData' are from the remote end's perspective, which is the
- // opposite of this end
-
- for (Identity identity : clientData) {
- // these are on our server end -- we were showing a lock on this
- // end, but the other end has no such client
- logger.error("Audit mismatch (GlobalLocks): (key={},owner={},uuid={})",
- identity.key, identity.ownerKey, identity.uuid);
-
- // free the lock
- GlobalLocks.get(identity.key).unlock(identity.key, identity.uuid);
- }
- for (Identity identity : serverData) {
- // these are on our client end
- logger.error("Audit mismatch (LocalLocks): (key={},owner={},uuid={})",
- identity.key, identity.ownerKey, identity.uuid);
-
- // clean up 'LocalLocks' tables
- LocalLocks localLocks = LocalLocks.get(identity.ownerKey);
- TargetLock targetLock = null;
- synchronized (localLocks) {
- WeakReference<TargetLock> wr =
- localLocks.uuidToWeakReference.get(identity.uuid);
- if (wr != null) {
- targetLock = wr.get();
- localLocks.weakReferenceToIdentity.remove(wr);
- localLocks.uuidToWeakReference
- .remove(identity.uuid);
- wr.clear();
- }
- }
-
- if (targetLock != null) {
- // may need to update state
- synchronized (targetLock) {
- if (targetLock.state != State.FREE) {
- targetLock.state = State.LOST;
- }
- }
- }
- }
- logger.info("TargetLock audit with {} completed -- {} mismatches",
- server, clientData.size() + serverData.size());
- }
-
- /**
- * Serialize and base64-encode this 'AuditData' instance, so it can
- * be sent in a message.
- *
- * @return a byte array, which can be decoded and deserialized at
-         *     the other end (an empty byte array is returned if there were any problems)
- */
- byte[] encode() {
- try {
- return Base64.getEncoder().encode(Util.serialize(this));
- } catch (IOException e) {
- logger.error("TargetLock.AuditData.encode Exception", e);
- return EMPTY_BYTE_ARRAY;
- }
- }
-
- /**
- * Base64-decode and deserialize a byte array.
- *
- * @param encodedData a byte array encoded via 'AuditData.encode'
- * (typically on the remote end of a connection)
- * @return an 'AuditData' instance if decoding was successful,
- * and 'null' if not
- */
- static AuditData decode(byte[] encodedData) {
- try {
- Object decodedData =
- Util.deserialize(Base64.getDecoder().decode(encodedData));
- if (decodedData instanceof AuditData) {
- return (AuditData) decodedData;
- } else {
- logger.error(
- "TargetLock.AuditData.decode returned instance of class {}",
- decodedData.getClass().getName());
- return null;
- }
- } catch (IOException | ClassNotFoundException e) {
- logger.error("TargetLock.AuditData.decode Exception", e);
- return null;
- }
- }
- }
-
- /**
-     * This class contains methods that control the audit. Also, an instance of
- * 'Audit' is created for each audit that is in progress.
- */
- static class Audit {
-        // if non-null, it means that we have a timer set that periodically
- // triggers the audit
- static TimerTask timerTask = null;
-
- // maps 'Server' to audit data associated with that server
- Map<Server, AuditData> auditMap = new IdentityHashMap<>();
-
- /**
- * Run a single audit cycle.
- */
- static void runAudit() {
- logger.info("Starting TargetLock audit");
- Audit audit = new Audit();
-
- // populate 'auditMap' table
- audit.build();
-
- // send to all of the servers in 'auditMap' (may include this server)
- audit.send();
- }
-
- /**
- * Schedule the audit to run periodically based upon defined properties.
- */
- static void scheduleAudit() {
- scheduleAudit(auditPeriod, auditGracePeriod);
- }
-
- /**
- * Schedule the audit to run periodically -- all of the hosts arrange to
- * run their audit at a different time, evenly spaced across the audit
- * period.
- *
- * @param period how frequently to run the audit, in milliseconds
- * @param gracePeriod ensure that the audit doesn't run until at least
- * 'gracePeriod' milliseconds have passed from the current time
- */
- static synchronized void scheduleAudit(final long period, final long gracePeriod) {
-
- if (timerTask != null) {
- // cancel current timer
- timerTask.cancel();
- timerTask = null;
- }
-
- // this needs to run in the 'MainLoop' thread, because it is dependent
- // upon the list of servers, and our position in this list
- MainLoop.queueWork(() -> {
- // this runs in the 'MainLoop' thread
-
- // current list of servers
- Collection<Server> servers = Server.getServers();
-
- // count of the number of servers
- int count = servers.size();
-
- // will contain our position in this list
- int index = 0;
-
- // current server
- Server thisServer = Server.getThisServer();
-
- for (Server server : servers) {
- if (server == thisServer) {
- break;
- }
- index += 1;
- }
-
- // if index == count, we didn't find this server
- // (which shouldn't happen)
-
- if (index < count) {
- // The servers are ordered by UUID, and 'index' is this
- // server's position within the list. Suppose the period is
- // 60000 (60 seconds), and there are 5 servers -- the first one
- // will run the audit at 0 seconds after the minute, the next
- // at 12 seconds after the minute, then 24, 36, 48.
- long offset = (period * index) / count;
-
- // the earliest time we want the audit to run
- long time = System.currentTimeMillis() + gracePeriod;
- long startTime = time - (time % period) + offset;
- if (startTime <= time) {
- startTime += period;
- }
- synchronized (Audit.class) {
- if (timerTask != null) {
- timerTask.cancel();
- }
- timerTask = new TimerTask() {
- @Override
- public void run() {
- runAudit();
- }
- };
-
- // now, schedule the timer
- Util.timer.scheduleAtFixedRate(
- timerTask, new Date(startTime), period);
- }
- }
- });
- }
-
- /**
- * Handle an incoming '/lock/audit' message.
- *
- * @param serverUuid the UUID of the intended destination server
-         * @param ttl similar to IP time-to-live -- it controls the number of hops
-         *     the message may take
-         * @param encodedData base64-encoded data, containing a serialized 'AuditData'
- * instance
- * @return a serialized and base64-encoded 'AuditData' response
- */
- static byte[] incomingAudit(UUID serverUuid, int ttl, byte[] encodedData) {
- if (!Server.getThisServer().getUuid().equals(serverUuid)) {
- ttl -= 1;
- if (ttl > 0) {
- Server server = Server.getServer(serverUuid);
- if (server != null) {
- WebTarget webTarget = server.getWebTarget(LOCK_AUDIT);
- if (webTarget != null) {
- logger.info("Forwarding {} to uuid {}", LOCK_AUDIT,
- serverUuid);
- Entity<String> entity =
- Entity.entity(new String(encodedData),
- MediaType.APPLICATION_OCTET_STREAM_TYPE);
- return webTarget
- .queryParam(QP_SERVER, serverUuid.toString())
- .queryParam(QP_TTL, String.valueOf(ttl))
- .request().post(entity, byte[].class);
- }
- }
- }
-
- // if we reach this point, we didn't forward for some reason
-
- logger.error("Couldn't forward {} to uuid {}", LOCK_AUDIT,
- serverUuid);
- return EMPTY_BYTE_ARRAY;
- }
-
- AuditData auditData = AuditData.decode(encodedData);
- if (auditData != null) {
- AuditData auditResp = auditData.generateResponse(true);
- return auditResp.encode();
- }
- return EMPTY_BYTE_ARRAY;
- }
-
- /**
- * This method populates the 'auditMap' table by going through all of
- * the client and server lock data, and sorting it according to the
- * remote server.
- */
- void build() {
- for (int i = 0; i < Bucket.BUCKETCOUNT; i += 1) {
- Bucket bucket = Bucket.getBucket(i);
-
- // client data
- buildClientData(bucket);
-
- // server data
- buildServerData(bucket);
- }
- }
-
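-        /**
-         * Add this bucket's client-end ('LocalLocks') records to the
-         * 'AuditData' entries of the servers owning the corresponding keys.
-         *
-         * @param bucket the bucket being scanned
-         */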
- private void buildClientData(Bucket bucket) {
- // client data
- LocalLocks localLocks =
- bucket.getAdjunctDontCreate(LocalLocks.class);
- if (localLocks != null) {
- synchronized (localLocks) {
- // we have client data for this bucket
- for (Identity identity :
- localLocks.weakReferenceToIdentity.values()) {
- // find or create the 'AuditData' instance associated
- // with the server owning the 'key'
- AuditData auditData = getAuditData(identity.key);
- if (auditData != null) {
- auditData.clientData.add(identity);
- }
- }
- }
- }
- }
-
- private void buildServerData(Bucket bucket) {
- // server data
- GlobalLocks globalLocks =
- bucket.getAdjunctDontCreate(GlobalLocks.class);
- if (globalLocks != null) {
- // we have server data for this bucket
- Map<String, LockEntry> keyToEntry = globalLocks.keyToEntry;
- synchronized (keyToEntry) {
- for (LockEntry le : keyToEntry.values()) {
- // find or create the 'AuditData' instance associated
- // with the current 'ownerKey'
- AuditData auditData = getAuditData(le.currentOwnerKey);
- if (auditData != null) {
- // create an 'Identity' entry, and add it to
- // the list associated with the remote server
- auditData.serverData.add(
- new Identity(le.key, le.currentOwnerKey,
- le.currentOwnerUuid));
- }
-
- for (Waiting waiting : le.waitingList) {
- // find or create the 'AuditData' instance associated
- // with the waiting entry 'ownerKey'
- auditData = getAuditData(waiting.ownerKey);
- if (auditData != null) {
- // create an 'Identity' entry, and add it to
- // the list associated with the remote server
- auditData.serverData.add(
- new Identity(le.key, waiting.ownerKey,
- waiting.ownerUuid));
- }
- }
- }
- }
- }
- }
-
- /**
- * Find or create the 'AuditData' structure associated with a particular
- * key.
- */
- AuditData getAuditData(String key) {
- // map 'key -> bucket number', and then 'bucket number' -> 'server'
- Server server = Bucket.bucketToServer(Bucket.bucketNumber(key));
- if (server != null) {
- return auditMap.computeIfAbsent(server, sk -> new AuditData());
- }
-
- // this happens when the bucket has not been assigned to a server yet
- return null;
- }
-
- /**
- * Using the collected 'auditMap', send out the messages to all of the
- * servers.
- */
- void send() {
- if (auditMap.isEmpty()) {
- logger.info("TargetLock audit: no locks on this server");
- } else {
- logger.info("TargetLock audit: sending audit information to {}",
- auditMap.keySet());
- }
-
- for (final Server server : auditMap.keySet()) {
- sendServer(server);
- }
- }
-
- private void sendServer(final Server server) {
- // fetch audit data
- AuditData auditData = auditMap.get(server);
-
- if (server == Server.getThisServer()) {
- // process this locally
- final AuditData respData = auditData.generateResponse(true);
- if (respData.clientData.isEmpty()
- && respData.serverData.isEmpty()) {
- // no mismatches
- logger.info("{} no errors from self ({})", TARGETLOCK_AUDIT_SEND, server);
- return;
- }
-
- // do the rest in a separate thread
- server.getThreadPool().execute(() -> {
- // wait a few seconds, and see if we still know of these
- // errors
- if (AuditPostResponse.responseSupport(
- respData, "self (" + server + ")",
- "TargetLock.Audit.send")) {
- // a return value of 'true' either indicates the
- // mismatches were resolved after a retry, or we
- // received an interrupt, and need to abort
- return;
- }
-
- // any mismatches left in 'respData' are still issues
- respData.processResponse(server);
- });
- return;
- }
-
- // serialize
- byte[] encodedData = auditData.encode();
- if (encodedData.length == 0) {
- // error has already been displayed
- return;
- }
-
- // generate entity
- Entity<String> entity =
- Entity.entity(new String(encodedData),
- MediaType.APPLICATION_OCTET_STREAM_TYPE);
-
- server.post(LOCK_AUDIT, entity, new AuditPostResponse(server));
- }
- }
-
- static class AuditPostResponse implements Server.PostResponse {
- private Server server;
-
- AuditPostResponse(Server server) {
- this.server = server;
- }
-
- @Override
- public WebTarget webTarget(WebTarget webTarget) {
- // include the 'uuid' keyword
- return webTarget
- .queryParam(QP_SERVER, server.getUuid().toString())
- .queryParam(QP_TTL, timeToLive);
- }
-
- @Override
- public void response(Response response) {
- // process the response here
- AuditData respData =
- AuditData.decode(response.readEntity(byte[].class));
- if (respData == null) {
- logger.error("{} couldn't process response from {}",
- TARGETLOCK_AUDIT_SEND, server);
- return;
- }
-
- // if we reach this point, we got a response
- if (respData.clientData.isEmpty()
- && respData.serverData.isEmpty()) {
- // no mismatches
- logger.info("{} no errors from {}", TARGETLOCK_AUDIT_SEND, server);
- return;
- }
-
- // wait a few seconds, and see if we still know of these
- // errors
- if (responseSupport(respData, server, "AuditPostResponse.response")) {
- // a return value of 'true' indicates either that the mismatches
- // were resolved after a retry, or that we received an interrupt
- // and need to abort
- return;
- }
-
- // any mismatches left in 'respData' are still there --
- // hopefully, they are transient issues on the other side
- AuditData auditData = new AuditData();
- auditData.clientData = respData.serverData;
- auditData.serverData = respData.clientData;
-
- // serialize
- byte[] encodedData = auditData.encode();
- if (encodedData.length == 0) {
- // error has already been displayed
- return;
- }
-
- // generate entity
- Entity<String> entity =
- Entity.entity(new String(encodedData),
- MediaType.APPLICATION_OCTET_STREAM_TYPE);
-
- // send new list to other end
- response = server
- .getWebTarget(LOCK_AUDIT)
- .queryParam(QP_SERVER, server.getUuid().toString())
- .queryParam(QP_TTL, timeToLive)
- .request().post(entity);
-
- respData = AuditData.decode(response.readEntity(byte[].class));
- if (respData == null) {
- logger.error("TargetLock.auditDataBuilder.send: "
- + "couldn't process response from {}",
- server);
- return;
- }
-
- // if there are mismatches left, they are presumably real
- respData.processResponse(server);
- }
-
- // Handle mismatches indicated by an audit response -- a return value of
- // 'true' indicates that there were no mismatches after a retry, or
- // we received an interrupt. In either case, the caller returns.
- private static boolean responseSupport(AuditData respData, Object serverString, String caller) {
- logger.info("{}: mismatches from {}", caller, serverString);
- try {
- Thread.sleep(auditRetryDelay);
- } catch (InterruptedException e) {
- logger.error("{}: Interrupted handling audit response from {}",
- caller, serverString);
- // just abort
- Thread.currentThread().interrupt();
- return true;
- }
-
- // This will check against our own data -- any mismatches
- // mean that things have changed since we sent out the
- // first message. We will remove any mismatches from
- // 'respData', and see if there are any left.
- AuditData mismatches = respData.generateResponse(false);
-
- respData.serverData.removeAll(mismatches.clientData);
- respData.clientData.removeAll(mismatches.serverData);
-
- if (respData.clientData.isEmpty()
- && respData.serverData.isEmpty()) {
- // no mismatches --
- // there must have been transient issues on our side
- logger.info("{}: no mismatches from {} after retry",
- caller, serverString);
- return true;
- }
-
- return false;
- }
- }
-}
diff --git a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Util.java b/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Util.java
deleted file mode 100644
index 92d0994c..00000000
--- a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/Util.java
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.nio.charset.StandardCharsets;
-import java.util.Comparator;
-import java.util.Timer;
-import java.util.UUID;
-import org.apache.commons.io.IOUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class Util {
- private static Logger logger = LoggerFactory.getLogger(Util.class);
- // create a shared 'Timer' instance
- public static final Timer timer = new Timer("Server Pool Timer", true);
-
- /**
- * Hide implicit public constructor.
- */
- private Util() {
- // everything here is static -- no instances of this class are created
- }
-
- /**
- * Internally, UUID objects use two 'long' fields, and the default
- * comparison is signed, which means the ordering for the 1st and 17th
- * hex digits is '89abcdef01234567', while the ordering for the remaining
- * digits is '0123456789abcdef'.
- * The following comparator uses the ordering '0123456789abcdef' for all
- * digits.
- */
- public static final Comparator<UUID> uuidComparator = (UUID u1, UUID u2) -> {
- // compare most significant portion
- int rval = Long.compareUnsigned(u1.getMostSignificantBits(),
- u2.getMostSignificantBits());
- if (rval == 0) {
- // most significant portion matches --
- // compare least significant portion
- rval = Long.compareUnsigned(u1.getLeastSignificantBits(),
- u2.getLeastSignificantBits());
- }
- return rval;
- };
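- // Illustrative usage (a sketch, not part of the original source;
- // 'someUuidCollection' is hypothetical):
- // List<UUID> uuids = new ArrayList<>(someUuidCollection);
- // uuids.sort(Util.uuidComparator);
- // This sorts every hex digit in '0123456789abcdef' order, unlike the
- // natural (signed) ordering of 'UUID.compareTo'.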
-
- /* ============================================================ */
-
- /**
- * Write a UUID to an output stream.
- *
- * @param ds the output stream
- * @param uuid the uuid to write
- */
- public static void writeUuid(DataOutputStream ds, UUID uuid) throws IOException {
- // write out 16 byte UUID
- ds.writeLong(uuid.getMostSignificantBits());
- ds.writeLong(uuid.getLeastSignificantBits());
- }
-
- /**
- * Read a UUID from an input stream.
- *
- * @param ds the input stream
- */
- public static UUID readUuid(DataInputStream ds) throws IOException {
- long mostSigBits = ds.readLong();
- long leastSigBits = ds.readLong();
- return new UUID(mostSigBits, leastSigBits);
- }
-
- /* ============================================================ */
-
- /**
- * Read from an 'InputStream' until EOF or until it is closed. This method
- * may block, depending on the type of 'InputStream'.
- *
- * @param input This is the input stream
- * @return A 'String' containing the contents of the input stream
- */
- public static String inputStreamToString(InputStream input) {
- try {
- return IOUtils.toString(input, StandardCharsets.UTF_8);
- } catch (IOException e) {
- logger.error("Util.inputStreamToString error", e);
- return "";
- }
- }
-
- /* ============================================================ */
-
- /**
- * Serialize an object into a byte array.
- *
- * @param object the object to serialize
- * @return a byte array containing the serialized object
- * @throws IOException this may be an exception thrown by the output stream,
- * a NotSerializableException if an object can't be serialized, or an
- * InvalidClassException
- */
- public static byte[] serialize(Object object) throws IOException {
- try (ByteArrayOutputStream bos = new ByteArrayOutputStream();
- ObjectOutputStream oos = new ObjectOutputStream(bos)) {
- oos.writeObject(object);
- oos.flush();
- return bos.toByteArray();
- }
- }
-
- /**
- * Deserialize a byte array into an object.
- *
- * @param data a byte array containing the serialized object
- * @return the deserialized object
- * @throws IOException this may be an exception thrown by the input stream,
- * a StreamCorruptedException if the information in the stream is not
- * consistent, an OptionalDataException if the input data is primitive data
- * rather than an object, or an InvalidClassException
- * @throws ClassNotFoundException if the class of a serialized object can't
- * be found
- */
- public static Object deserialize(byte[] data) throws IOException, ClassNotFoundException {
- try (ByteArrayInputStream bis = new ByteArrayInputStream(data);
- ObjectInputStream ois = new ObjectInputStream(bis)) {
- return ois.readObject();
- }
- }
-
- /**
- * Deserialize a byte array into an object.
- *
- * @param data a byte array containing the serialized object
- * @param classLoader the class loader to use when locating classes
- * @return the deserialized object
- * @throws IOException this may be an exception thrown by the input stream,
- * a StreamCorruptedException if the information in the stream is not
- * consistent, an OptionalDataException if the input data is primitive data
- * rather than an object, or an InvalidClassException
- * @throws ClassNotFoundException if the class of a serialized object can't
- * be found
- */
- public static Object deserialize(byte[] data, ClassLoader classLoader)
- throws IOException, ClassNotFoundException {
-
- try (ByteArrayInputStream bis = new ByteArrayInputStream(data);
- ExtendedObjectInputStream ois =
- new ExtendedObjectInputStream(bis, classLoader)) {
- return ois.readObject();
- }
- }
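- // Illustrative round trip (a sketch, not part of the original source;
- // 'someSerializable' is hypothetical and must implement Serializable):
- // byte[] bytes = Util.serialize(someSerializable);
- // Object copy = Util.deserialize(bytes);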
-
- /**
- * Shutdown the timer thread.
- */
- public static void shutdown() {
- timer.cancel();
- }
-}
diff --git a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/persistence/Persistence.java b/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/persistence/Persistence.java
deleted file mode 100644
index 18afcd47..00000000
--- a/feature-server-pool/src/main/java/org/onap/policy/drools/serverpool/persistence/Persistence.java
+++ /dev/null
@@ -1,899 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool.persistence;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.ObjectOutputStream;
-import java.util.Base64;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.IdentityHashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Properties;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import javax.ws.rs.Consumes;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.client.Entity;
-import javax.ws.rs.client.WebTarget;
-import javax.ws.rs.core.MediaType;
-import org.kie.api.event.rule.ObjectDeletedEvent;
-import org.kie.api.event.rule.ObjectInsertedEvent;
-import org.kie.api.event.rule.ObjectUpdatedEvent;
-import org.kie.api.event.rule.RuleRuntimeEventListener;
-import org.kie.api.runtime.KieSession;
-import org.onap.policy.drools.core.DroolsRunnable;
-import org.onap.policy.drools.core.PolicyContainer;
-import org.onap.policy.drools.core.PolicySession;
-import org.onap.policy.drools.core.PolicySessionFeatureApi;
-import org.onap.policy.drools.serverpool.Bucket;
-import org.onap.policy.drools.serverpool.Keyword;
-import org.onap.policy.drools.serverpool.Server;
-import org.onap.policy.drools.serverpool.ServerPoolApi;
-import org.onap.policy.drools.serverpool.TargetLock.GlobalLocks;
-import org.onap.policy.drools.serverpool.Util;
-import org.onap.policy.drools.system.PolicyControllerConstants;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class provides a persistence implementation for 'feature-server-pool',
- * backing up the data of selected Drools sessions and server-side 'TargetLock'
- * data on separate hosts.
- */
-public class Persistence extends ServerPoolApi implements PolicySessionFeatureApi {
- private static Logger logger = LoggerFactory.getLogger(Persistence.class);
-
- // HTTP query parameters
- private static final String QP_BUCKET = "bucket";
- private static final String QP_SESSION = "session";
- private static final String QP_COUNT = "count";
- private static final String QP_DEST = "dest";
-
- /* ************************************* */
- /* 'PolicySessionFeatureApi' interface */
- /* ************************************* */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public int getSequenceNumber() {
- return 1;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void newPolicySession(PolicySession policySession) {
- // a new Drools session is being created -- look at the properties
- // 'persistence.<session-name>.type' and 'persistence.type' to determine
- // whether persistence is enabled for this session
-
- // fetch properties file
- PolicyContainer container = policySession.getPolicyContainer();
- Properties properties = PolicyControllerConstants.getFactory().get(
- container.getGroupId(), container.getArtifactId()).getProperties();
-
- // look at 'persistence.<session-name>.type', and 'persistence.type'
- String type = properties.getProperty("persistence." + policySession.getName() + ".type");
- if (type == null) {
- type = properties.getProperty("persistence.type");
- }
-
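- // e.g., in the controller's properties file (illustrative values --
- // 'mySession' is a hypothetical session name):
- // persistence.mySession.type=auto
- // persistence.type=native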
- if ("auto".equals(type) || "native".equals(type)) {
- // persistence is enabled for this session
- policySession.setAdjunct(PersistenceRunnable.class,
- new PersistenceRunnable(policySession));
- }
- }
-
- /* *************************** */
- /* 'ServerPoolApi' interface */
- /* *************************** */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public Collection<Class<?>> servletClasses() {
- // the nested class 'Rest' contains additional REST calls
- List<Class<?>> classes = new LinkedList<>();
- classes.add(Rest.class);
- return classes;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void restoreBucket(Bucket bucket) {
- // if we reach this point, no data was received from the old server, which
- // means we just initialized, or we did not have a clean bucket migration
-
- ReceiverBucketData rbd = bucket.removeAdjunct(ReceiverBucketData.class);
- if (rbd != null) {
- // there is backup data -- do a restore
- rbd.restoreBucket(bucket);
- }
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void lockUpdate(Bucket bucket, GlobalLocks globalLocks) {
- // we received a notification from 'TargetLock' that 'GlobalLocks' data
- // has changed (TBD: should any attempt be made to group updates that
- // occur in close succession?)
-
- sendLockDataToBackups(bucket, globalLocks);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void auditBucket(Bucket bucket, boolean isOwner, boolean isBackup) {
- if (isOwner) {
- // it may be that backup hosts have changed --
- // send out lock and session data
-
- // starting with lock data
- GlobalLocks globalLocks =
- bucket.getAdjunctDontCreate(GlobalLocks.class);
- if (globalLocks != null) {
- sendLockDataToBackups(bucket, globalLocks);
- }
-
- // now, session data
- SenderBucketData sbd =
- bucket.getAdjunctDontCreate(SenderBucketData.class);
- if (sbd != null) {
- synchronized (sbd) {
- // go through all of the sessions where we have persistent data
- for (PolicySession session : sbd.sessionData.keySet()) {
- Object obj = session.getAdjunct(PersistenceRunnable.class);
- if (obj instanceof PersistenceRunnable) {
- PersistenceRunnable pr = (PersistenceRunnable) obj;
- synchronized (pr.modifiedBuckets) {
- // mark bucket associated with this session
- // as modified
- pr.modifiedBuckets.add(bucket);
- }
- }
- }
- }
- }
- } else if (bucket.removeAdjunct(SenderBucketData.class) != null) {
- logger.warn("Bucket {}: Removed superfluous "
- + "'SenderBucketData' adjunct",
- bucket.getIndex());
- }
- if (!isBackup && bucket.removeAdjunct(ReceiverBucketData.class) != null) {
- logger.warn("Bucket {}: Removed superfluous "
- + "'ReceiverBucketData' adjunct",
- bucket.getIndex());
- }
- }
-
- /**
- * This method supports 'lockUpdate' -- it is a separate 'static' method
- * so that it can also be called after restoring 'GlobalLocks', allowing
- * the restored lock data to be backed up on its new servers.
- *
- * @param bucket the bucket containing the 'GlobalLocks' adjunct
- * @param globalLocks the 'GlobalLocks' adjunct
- */
- private static void sendLockDataToBackups(final Bucket bucket, final GlobalLocks globalLocks) {
- final int bucketNumber = bucket.getIndex();
- SenderBucketData sbd = bucket.getAdjunct(SenderBucketData.class);
- int lockCount = 0;
-
- // serialize the 'globalLocks' instance
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- try {
- ObjectOutputStream oos = new ObjectOutputStream(bos);
- synchronized (globalLocks) {
- // the 'GlobalLocks' instance and counter are tied together
- oos.writeObject(globalLocks);
- lockCount = sbd.getLockCountAndIncrement();
- }
- oos.close();
- } catch (IOException e) {
- logger.error("Persistence.LockUpdate({})", bucketNumber, e);
- return;
- }
-
- // convert to Base64, and generate an 'Entity' for the REST call
- byte[] serializedData = Base64.getEncoder().encode(bos.toByteArray());
- final Entity<String> entity =
- Entity.entity(new String(serializedData),
- MediaType.APPLICATION_OCTET_STREAM_TYPE);
- final int count = lockCount;
-
- sendLocksToBackupServers(bucketNumber, entity, count, bucket.getBackups());
- }
-
- private static void sendLocksToBackupServers(final int bucketNumber, final Entity<String> entity, final int count,
- Set<Server> servers) {
- for (final Server server : servers) {
- if (server != null) {
- // send out REST command
- server.getThreadPool().execute(() -> {
- WebTarget webTarget =
- server.getWebTarget("persistence/lock");
- if (webTarget != null) {
- webTarget
- .queryParam(QP_BUCKET, bucketNumber)
- .queryParam(QP_COUNT, count)
- .queryParam(QP_DEST, server.getUuid())
- .request().post(entity);
- }
- });
- }
- }
- }
-
- /* ============================================================ */
-
- /**
- * One instance of this class exists for every Drools session that is
- * being backed up. It implements the 'RuleRuntimeEventListener' interface,
- * so it receives notifications of Drools object changes, and also implements
- * the 'DroolsRunnable' interface, so it can run within the Drools session
- * thread, which should reduce the chance of catching an object in a
- * transient state.
- */
- static class PersistenceRunnable implements DroolsRunnable,
- RuleRuntimeEventListener {
- // this is the Drools session associated with this instance
- private PolicySession session;
-
- // this is the string "<groupId>:<artifactId>:<sessionName>"
- private String encodedSessionName;
-
- // the buckets in this session which have modifications that still
- // need to be backed up
- private Set<Bucket> modifiedBuckets = new HashSet<>();
-
- /**
- * Constructor - save the session information, and start listening for
- * updates.
- */
- PersistenceRunnable(PolicySession session) {
- PolicyContainer pc = session.getPolicyContainer();
-
- this.session = session;
- this.encodedSessionName =
- pc.getGroupId() + ":" + pc.getArtifactId() + ":" + session.getName();
- session.getKieSession().addEventListener(this);
- }
-
- /* **************************** */
- /* 'DroolsRunnable' interface */
- /* **************************** */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void run() {
- try {
- // save a snapshot of 'modifiedBuckets'
- Set<Bucket> saveModifiedBuckets;
- synchronized (modifiedBuckets) {
- saveModifiedBuckets = new HashSet<>(modifiedBuckets);
- modifiedBuckets.clear();
- }
-
- // iterate over all of the modified buckets, sending update data
- // to all of the backup servers
- for (Bucket bucket : saveModifiedBuckets) {
- SenderBucketData sbd =
- bucket.getAdjunctDontCreate(SenderBucketData.class);
- if (sbd != null) {
- // serialization occurs within the Drools session thread
- SenderSessionBucketData ssbd = sbd.getSessionData(session);
- byte[] serializedData =
- ssbd.getLatestEncodedSerializedData();
- final int count = ssbd.getCount();
- final Entity<String> entity =
- Entity.entity(new String(serializedData),
- MediaType.APPLICATION_OCTET_STREAM_TYPE);
-
- // build list of backup servers
- Set<Server> servers = new HashSet<>();
- synchronized (bucket) {
- servers.add(bucket.getPrimaryBackup());
- servers.add(bucket.getSecondaryBackup());
- }
- sendBucketToBackupServers(bucket, count, entity, servers);
- }
- }
- } catch (Exception e) {
- logger.error("Persistence.PersistenceRunnable.run:", e);
- }
- }
-
- private void sendBucketToBackupServers(Bucket bucket, final int count, final Entity<String> entity,
- Set<Server> servers) {
-
- for (final Server server : servers) {
- if (server != null) {
- // send out REST command
- server.getThreadPool().execute(() -> {
- WebTarget webTarget =
- server.getWebTarget("persistence/session");
- if (webTarget != null) {
- webTarget
- .queryParam(QP_BUCKET,
- bucket.getIndex())
- .queryParam(QP_SESSION,
- encodedSessionName)
- .queryParam(QP_COUNT, count)
- .queryParam(QP_DEST, server.getUuid())
- .request().post(entity);
- }
- });
- }
- }
- }
-
- /* ************************************** */
- /* 'RuleRuntimeEventListener' interface */
- /* ************************************** */
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void objectDeleted(ObjectDeletedEvent event) {
- // determine Drools object that was deleted
- Object object = event.getOldObject();
-
- // determine keyword, if any
- String keyword = Keyword.lookupKeyword(object);
- if (keyword == null) {
- // no keyword, so there is no associated bucket
- return;
- }
-
- // locate bucket and associated data
- // (don't create adjunct if it isn't there -- there's nothing to delete)
- Bucket bucket = Bucket.getBucket(keyword);
- SenderBucketData sbd =
- bucket.getAdjunctDontCreate(SenderBucketData.class);
- if (sbd != null) {
- // add bucket to 'modified' list
- synchronized (modifiedBuckets) {
- modifiedBuckets.add(bucket);
- }
-
- // update set of Drools objects in this bucket
- sbd.getSessionData(session).objectDeleted(object);
-
- // insert this 'DroolsRunnable' to do the backup (note that it
- // may already be inserted from a previous update to this
- // DroolsSession -- eventually, the rule will fire, and the 'run'
- // method will be called)
- session.getKieSession().insert(this);
- }
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void objectInserted(ObjectInsertedEvent event) {
- objectChanged(event.getObject());
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void objectUpdated(ObjectUpdatedEvent event) {
- objectChanged(event.getObject());
- }
-
- /**
- * A Drools session object was either inserted or updated
- * (both are treated the same way).
- *
- * @param object the object being inserted or updated
- */
- private void objectChanged(Object object) {
- // determine keyword, if any
- String keyword = Keyword.lookupKeyword(object);
- if (keyword == null) {
- // no keyword, so there is no associated bucket
- return;
- }
-
- // locate bucket and associated data
- Bucket bucket = Bucket.getBucket(keyword);
- SenderBucketData sbd = bucket.getAdjunct(SenderBucketData.class);
-
- // add bucket to 'modified' list
- synchronized (modifiedBuckets) {
- modifiedBuckets.add(bucket);
- }
-
- // update set of Drools objects in this bucket
- sbd.getSessionData(session).objectChanged(object);
-
- // insert this 'DroolsRunnable' to do the backup (note that it
- // may already be inserted from a previous update to this
- // DroolsSession -- eventually, the rule will fire, and the 'run'
- // method will be called)
- session.getKieSession().insert(this);
- }
- }
-
- /* ============================================================ */
-
- /**
- * Per-session data for a single bucket on the sender's side.
- */
- static class SenderSessionBucketData {
- // the set of all objects in the session associated with this bucket
- Map<Object, Object> droolsObjects = new IdentityHashMap<>();
-
- // used by the receiver to determine whether an update is really newer
- int count = 0;
-
- // serialized base64 form of 'droolsObjects'
- // (TBD: determine if we are getting any benefit from caching this)
- byte[] encodedSerializedData = null;
-
- // 'true' means that 'encodedSerializedData' is out-of-date
- boolean rebuildNeeded = true;
-
- /**
- * Notification that a Drools object associated with this bucket
- * was deleted.
- *
- * @param object the object that was deleted
- */
- synchronized void objectDeleted(Object object) {
- if (droolsObjects.remove(object) != null) {
- rebuildNeeded = true;
- }
- }
-
- /**
- * Notification that a Drools object associated with this bucket
- * was inserted or updated.
- *
- * @param object the object that was updated
- */
- synchronized void objectChanged(Object object) {
- droolsObjects.put(object, object);
- rebuildNeeded = true;
- }
-
- /**
- * Serialize and base64-encode the objects in this Drools session.
- *
- * @return a byte array containing the encoded serialized objects
- */
- synchronized byte[] getLatestEncodedSerializedData() {
- if (rebuildNeeded) {
- try {
- // this should be run in the Drools session thread in order
- // to avoid transient data
- encodedSerializedData =
- Base64.getEncoder().encode(Util.serialize(droolsObjects));
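- // bump the version counter so receivers can recognize this
- // snapshot as newer than any previously sent one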
- count += 1;
- } catch (IOException e) {
- logger.error("Persistence.SenderSessionBucketData."
- + "getLatestEncodedSerializedData: ", e);
- }
- rebuildNeeded = false;
- }
- return encodedSerializedData;
- }
-
- /**
- * Return a counter that will be used for update comparison.
- *
- * @return the value of a counter that can be used to determine whether
- * an update is really newer than the previous update
- */
- synchronized int getCount() {
- return count;
- }
- }
-
- /* ============================================================ */
-
- /**
- * Data for a single bucket on the sender's side.
- */
- public static class SenderBucketData {
- // maps each 'PolicySession' into its 'SenderSessionBucketData'
- Map<PolicySession, SenderSessionBucketData> sessionData =
- new IdentityHashMap<>();
-
- // used by the receiver to determine whether an update is really newer
- int lockCount = 0;
-
- /**
- * Create or fetch the 'SenderSessionBucketData' instance associated
- * with the specified session.
- *
- * @param session the 'PolicySession' object
- * @return the associated 'SenderSessionBucketData' instance
- */
- synchronized SenderSessionBucketData getSessionData(PolicySession session) {
- return sessionData.computeIfAbsent(session, key -> new SenderSessionBucketData());
- }
-
- /**
- * Return a counter that will be used for update comparison.
- *
- * @return the value of a counter that can be used to determine whether
- * an update is really newer than the previous update
- */
- int getLockCountAndIncrement() {
- // note that this is synchronized using the 'GlobalLocks' instance
- // within the same bucket
- return lockCount++;
- }
- }
-
- /* ============================================================ */
-
- /**
- * Data for a single bucket and session on the receiver's side.
- */
- static class ReceiverSessionBucketData {
- // used to determine whether an update is really newer
- int count = -1;
-
- // serialized base64 form of 'droolsObjects'
- byte[] encodedSerializedData = null;
- }
-
- /* ============================================================ */
-
- /**
- * Data for a single bucket on the receiver's side -- this adjunct is used
- * to store encoded data on a backup host. It will only be needed if the
- * bucket owner fails.
- */
- public static class ReceiverBucketData {
- static final String RESTORE_BUCKET_ERROR =
- "Persistence.ReceiverBucketData.restoreBucket: ";
-
- // maps session name into 'ReceiverSessionBucketData' (which holds the encoded data)
- Map<String, ReceiverSessionBucketData> sessionData = new HashMap<>();
-
- // used by the receiver to determine whether an update is really newer
- int lockCount = -1;
-
- // encoded lock data
- byte[] lockData = null;
-
- /**
- * This method is called in response to the '/persistence/session'
- * REST message. It stores the base64-encoded and serialized data
- * for a particular bucket and session.
- *
- * @param bucketNumber identifies the bucket
- * @param sessionName identifies the Drools session
- * @param count counter used to determine whether data is really newer
- * @param data base64-encoded serialized data for this bucket and session
- */
- static void receiveSession(int bucketNumber, String sessionName, int count, byte[] data) {
- // fetch the bucket
- Bucket bucket = Bucket.getBucket(bucketNumber);
-
- // create/fetch the 'ReceiverBucketData' adjunct
- ReceiverBucketData rbd = bucket.getAdjunct(ReceiverBucketData.class);
- synchronized (rbd) {
- // update the session data
- ReceiverSessionBucketData rsbd = rbd.sessionData.get(sessionName);
- if (rsbd == null) {
- rsbd = new ReceiverSessionBucketData();
- rbd.sessionData.put(sessionName, rsbd);
- }
-
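- // the subtraction-based comparison remains correct if the counter
- // ever wraps around; 'count == 0' also accepts data from a sender
- // whose counter has restarted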
- if ((count - rsbd.count) > 0 || count == 0) {
- // this is new data
- rsbd.count = count;
- rsbd.encodedSerializedData = data;
- }
- }
- }
-
- /**
- * This method is called in response to the '/persistence/lock'
- * REST message. It stores the base64-encoded and serialized
- * server-side lock data associated with this bucket.
- *
- * @param bucketNumber identifies the bucket
- * @param count counter used to determine whether data is really newer
- * @param data base64-encoded serialized lock data for this bucket
- */
- static void receiveLockData(int bucketNumber, int count, byte[] data) {
- // fetch the bucket
- Bucket bucket = Bucket.getBucket(bucketNumber);
-
- // create/fetch the 'ReceiverBucketData' adjunct
- ReceiverBucketData rbd = bucket.getAdjunct(ReceiverBucketData.class);
- synchronized (rbd) {
- // update the lock data
- if ((count - rbd.lockCount) > 0 || count == 0) {
- rbd.lockCount = count;
- rbd.lockData = data;
- }
- }
- }
-
- /**
- * This method is called when a bucket is being restored from persistent
- * data, which indicates that a clean migration didn't occur.
- * Drools session and/or lock data is restored.
- *
- * @param bucket the bucket being restored
- */
- synchronized void restoreBucket(Bucket bucket) {
- // one entry for each Drools session being restored --
- // indicates when the restore is complete (restore runs within
- // the Drools session thread)
- List<CountDownLatch> sessionLatches = restoreBucketDroolsSessions();
-
- // restore lock data
- restoreBucketLocks(bucket);
-
- // wait for all of the sessions to update
- try {
- for (CountDownLatch sessionLatch : sessionLatches) {
- if (!sessionLatch.await(10000L, TimeUnit.MILLISECONDS)) {
- logger.error("{}: timed out waiting for session latch",
- this);
- }
- }
- } catch (InterruptedException e) {
- logger.error("Exception in {}", this, e);
- Thread.currentThread().interrupt();
- }
- }
-
- private List<CountDownLatch> restoreBucketDroolsSessions() {
- List<CountDownLatch> sessionLatches = new LinkedList<>();
- for (Map.Entry<String, ReceiverSessionBucketData> entry : sessionData.entrySet()) {
- restoreBucketDroolsSession(sessionLatches, entry);
- }
- return sessionLatches;
- }
-
- private void restoreBucketDroolsSession(List<CountDownLatch> sessionLatches,
- Entry<String, ReceiverSessionBucketData> entry) {
-
- String sessionName = entry.getKey();
- ReceiverSessionBucketData rsbd = entry.getValue();
-
- PolicySession policySession = detmPolicySession(sessionName);
- if (policySession == null) {
- logger.error(RESTORE_BUCKET_ERROR
- + "Can't find PolicySession{}", sessionName);
- return;
- }
-
- final Map<?, ?> droolsObjects = deserializeMap(sessionName, rsbd, policySession);
- if (droolsObjects == null) {
- return;
- }
-
- // if we reach this point, we have decoded the persistent data
-
- // signal when restore is complete
- final CountDownLatch sessionLatch = new CountDownLatch(1);
-
- // 'KieSession' object
- final KieSession kieSession = policySession.getKieSession();
-
- // run the following within the Drools session thread
- DroolsRunnable insertDroolsObjects = () -> {
- try {
- // insert all of the Drools objects into the session
- for (Object droolsObj : droolsObjects.keySet()) {
- kieSession.insert(droolsObj);
- }
- } finally {
- // signal completion
- sessionLatch.countDown();
- }
- };
- kieSession.insert(insertDroolsObjects);
-
- // add this to the set of 'CountDownLatch's we are waiting for
- sessionLatches.add(sessionLatch);
- }
-
- private PolicySession detmPolicySession(String sessionName) {
- // [0]="<groupId>", [1]="<artifactId>", [2]="<sessionName>"
- String[] nameSegments = sessionName.split(":");
-
- // locate the 'PolicyContainer' and 'PolicySession'
- if (nameSegments.length == 3) {
- // step through all 'PolicyContainer' instances looking
- // for a matching 'artifactId' & 'groupId'
- for (PolicyContainer pc : PolicyContainer.getPolicyContainers()) {
- if (nameSegments[1].equals(pc.getArtifactId())
- && nameSegments[0].equals(pc.getGroupId())) {
- // 'PolicyContainer' matches -- try to fetch the session
- return pc.getPolicySession(nameSegments[2]);
- }
- }
- }
- return null;
- }
-
- private Map<?, ?> deserializeMap(String sessionName, ReceiverSessionBucketData rsbd,
- PolicySession policySession) {
- Object obj;
-
- try {
- // deserialization needs to use the correct 'ClassLoader'
- obj = Util.deserialize(Base64.getDecoder().decode(rsbd.encodedSerializedData),
- policySession.getPolicyContainer().getClassLoader());
- } catch (IOException | ClassNotFoundException | IllegalArgumentException e) {
- logger.error(RESTORE_BUCKET_ERROR
- + "Failed to read data for session '{}'",
- sessionName, e);
-
- // can't decode -- skip this session
- return null;
- }
-
- if (!(obj instanceof Map)) {
- logger.error(RESTORE_BUCKET_ERROR
- + "Session '{}' data has class {}, expected 'Map'",
- sessionName, obj.getClass().getName());
-
- // wrong object type decoded -- skip this session
- return null;
- }
-
- // if we reach this point, we have decoded the persistent data
-
- return (Map<?, ?>) obj;
- }
-
- private void restoreBucketLocks(Bucket bucket) {
- if (lockData != null) {
- Object obj = null;
- try {
- // decode lock data
- obj = Util.deserialize(Base64.getDecoder().decode(lockData));
- if (obj instanceof GlobalLocks) {
- bucket.putAdjunct(obj);
-
- // send out updated data
- sendLockDataToBackups(bucket, (GlobalLocks) obj);
- } else {
- logger.error(RESTORE_BUCKET_ERROR
- + "Expected 'GlobalLocks', got '{}'",
- obj.getClass().getName());
- }
- } catch (IOException | ClassNotFoundException | IllegalArgumentException e) {
- logger.error(RESTORE_BUCKET_ERROR
- + "Failed to read lock data", e);
- // skip the lock data
- }
-
- }
- }
- }
-
- /* ============================================================ */
-
- @Path("/")
- public static class Rest {
- /**
- * Handle the '/persistence/session' REST call.
- */
- @POST
- @Path("/persistence/session")
- @Consumes(MediaType.APPLICATION_OCTET_STREAM)
- public void receiveSession(@QueryParam(QP_BUCKET) int bucket,
- @QueryParam(QP_SESSION) String sessionName,
- @QueryParam(QP_COUNT) int count,
- @QueryParam(QP_DEST) UUID dest,
- byte[] data) {
- logger.debug("/persistence/session: (bucket={},session={},count={}) "
- + "got {} bytes of data",
- bucket, sessionName, count, data.length);
- if (dest == null || dest.equals(Server.getThisServer().getUuid())) {
- ReceiverBucketData.receiveSession(bucket, sessionName, count, data);
- } else {
- // This host is not the intended destination -- this could happen
- // if it was sent from another site. Leave off the 'dest' param
- // when forwarding the message, to ensure that we don't have
- // an infinite forwarding loop, if the site data happens to be bad.
- Server server;
- WebTarget webTarget;
-
- if ((server = Server.getServer(dest)) != null
- && (webTarget =
- server.getWebTarget("persistence/session")) != null) {
- Entity<String> entity =
- Entity.entity(new String(data),
- MediaType.APPLICATION_OCTET_STREAM_TYPE);
- webTarget
- .queryParam(QP_BUCKET, bucket)
- .queryParam(QP_SESSION, sessionName)
- .queryParam(QP_COUNT, count)
- .request().post(entity);
- }
- }
- }
-
- /**
- * Handle the '/persistence/lock' REST call.
- */
- @POST
- @Path("/persistence/lock")
- @Consumes(MediaType.APPLICATION_OCTET_STREAM)
- public void receiveLockData(@QueryParam(QP_BUCKET) int bucket,
- @QueryParam(QP_COUNT) int count,
- @QueryParam(QP_DEST) UUID dest,
- byte[] data) {
- logger.debug("/persistence/lock: (bucket={},count={}) "
- + "got {} bytes of data", bucket, count, data.length);
- if (dest == null || dest.equals(Server.getThisServer().getUuid())) {
- ReceiverBucketData.receiveLockData(bucket, count, data);
- } else {
- // This host is not the intended destination -- this could happen
- // if it was sent from another site. Leave off the 'dest' param
- // when forwarding the message, to ensure that we don't have
- // an infinite forwarding loop, if the site data happens to be bad.
- Server server;
- WebTarget webTarget;
-
- if ((server = Server.getServer(dest)) != null
- && (webTarget = server.getWebTarget("persistence/lock")) != null) {
- Entity<String> entity =
- Entity.entity(new String(data),
- MediaType.APPLICATION_OCTET_STREAM_TYPE);
- webTarget
- .queryParam(QP_BUCKET, bucket)
- .queryParam(QP_COUNT, count)
- .request().post(entity);
- }
- }
- }
- }
-}
diff --git a/feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.control.api.DroolsPdpStateControlApi b/feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.control.api.DroolsPdpStateControlApi
deleted file mode 100644
index 3dc6a574..00000000
--- a/feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.control.api.DroolsPdpStateControlApi
+++ /dev/null
@@ -1 +0,0 @@
-org.onap.policy.drools.serverpool.FeatureServerPool
diff --git a/feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.core.PolicySessionFeatureApi b/feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.core.PolicySessionFeatureApi
deleted file mode 100644
index 8ad5a18f..00000000
--- a/feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.core.PolicySessionFeatureApi
+++ /dev/null
@@ -1,2 +0,0 @@
-org.onap.policy.drools.serverpool.FeatureServerPool
-org.onap.policy.drools.serverpool.persistence.Persistence
diff --git a/feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.features.PolicyControllerFeatureApi b/feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.features.PolicyControllerFeatureApi
deleted file mode 100644
index 3dc6a574..00000000
--- a/feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.features.PolicyControllerFeatureApi
+++ /dev/null
@@ -1 +0,0 @@
-org.onap.policy.drools.serverpool.FeatureServerPool
diff --git a/feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.features.PolicyEngineFeatureApi b/feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.features.PolicyEngineFeatureApi
deleted file mode 100644
index 3dc6a574..00000000
--- a/feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.features.PolicyEngineFeatureApi
+++ /dev/null
@@ -1 +0,0 @@
-org.onap.policy.drools.serverpool.FeatureServerPool
diff --git a/feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.serverpool.ServerPoolApi b/feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.serverpool.ServerPoolApi
deleted file mode 100644
index a72d8cb2..00000000
--- a/feature-server-pool/src/main/resources/META-INF/services/org.onap.policy.drools.serverpool.ServerPoolApi
+++ /dev/null
@@ -1 +0,0 @@
-org.onap.policy.drools.serverpool.persistence.Persistence
diff --git a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/AdapterImpl.java b/feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/AdapterImpl.java
deleted file mode 100644
index 865f4e90..00000000
--- a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/AdapterImpl.java
+++ /dev/null
@@ -1,455 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import static org.awaitility.Awaitility.await;
-
-import java.io.PrintStream;
-import java.nio.file.Paths;
-import java.util.Properties;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-import org.kie.api.runtime.KieSession;
-import org.onap.policy.common.endpoints.event.comm.Topic.CommInfrastructure;
-import org.onap.policy.common.endpoints.event.comm.TopicListener;
-import org.onap.policy.drools.core.PolicyContainer;
-import org.onap.policy.drools.core.PolicySession;
-import org.onap.policy.drools.core.PolicySessionFeatureApiConstants;
-import org.onap.policy.drools.serverpooltest.Adapter;
-import org.onap.policy.drools.serverpooltest.BucketWrapper;
-import org.onap.policy.drools.serverpooltest.ServerWrapper;
-import org.onap.policy.drools.serverpooltest.TargetLockWrapper;
-import org.onap.policy.drools.system.PolicyController;
-import org.onap.policy.drools.system.PolicyEngineConstants;
-import org.onap.policy.drools.util.KieUtils;
-import org.onap.policy.drools.utils.PropertyUtil;
-import org.powermock.reflect.Whitebox;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class implements the 'Adapter' interface. There is one 'AdapterImpl'
- * class for each simulated host, and one instance of each 'AdapterImpl' class.
- */
-public class AdapterImpl extends Adapter {
- private static Logger logger = LoggerFactory.getLogger(AdapterImpl.class);
- /*
- * Each 'AdapterImpl' instance has its own class object, making it a
- * singleton. There is only a single 'Adapter' class object, and all
- * 'AdapterImpl' classes are derived from it.
- *
- * Sonar thinks this field isn't used. However, its value is actually
- * retrieved via Whitebox, below. Thus it is marked "protected" instead
- * of "private" to avoid the sonar complaint.
- */
- protected static AdapterImpl adapter = null;
-
- // this is the adapter index
- private int index;
-
- // this will refer to the Drools session 'PolicyController' instance
- private PolicyController policyController = null;
-
- // this will refer to the Drools session 'PolicySession' instance
- private PolicySession policySession = null;
-
- // used by Drools session to signal back to JUnit tests
- private LinkedBlockingQueue<String> inotificationQueue =
- new LinkedBlockingQueue<>();
-
- // provides indirect references to a select set of static 'Server' methods
- private static ServerWrapper.Static serverStatic =
- new ServerWrapperImpl.Static();
-
- // provides indirect references to a select set of static 'Bucket' methods
- private static BucketWrapper.Static bucketStatic =
- new BucketWrapperImpl.Static();
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void init(int index) throws Exception {
- adapter = this;
- this.index = index;
-
- PolicyEngineConstants.getManager().configure(new Properties());
- PolicyEngineConstants.getManager().start();
- /*
- * Note that this method does basically what
- * 'FeatureServerPool.afterStart(PolicyEngine)' does, but allows us to
- * specify different properties for each of the 6 simulated hosts
- */
- logger.info("{}: Running: AdapterImpl.init({}), class hash code = {}",
- this, index, AdapterImpl.class.hashCode());
- final String propertyFile = "src/test/resources/feature-server-pool-test.properties";
- Properties prop = PropertyUtil.getProperties(propertyFile);
- if (System.getProperty("os.name").toLowerCase().indexOf("mac") < 0) {
- // Windows, Unix
- String[] ipComponent = prop.getProperty("server.pool.server.ipAddress").split("[.]");
- String serverIP = ipComponent[0] + "." + ipComponent[1] + "." + ipComponent[2] + "."
- + (Integer.parseInt(ipComponent[3]) + index);
- prop.setProperty("server.pool.server.ipAddress", serverIP);
- } else {
- // Mac, use localhost and different ports
- String port = Integer.toString(Integer.parseInt(
- prop.getProperty("server.pool.server.port")) + index);
- prop.setProperty("server.pool.server.port", port);
- }
- logger.info("server={}, serverIP={}, port={}", index,
- prop.getProperty("server.pool.server.ipAddress"),
- prop.getProperty("server.pool.server.port"));
-
- TargetLock.startup();
- Server.startup(prop);
-
- // use reflection to set private static field
- // 'FeatureServerPool.droolsTimeoutMillis'
- Whitebox.setInternalState(FeatureServerPool.class, "droolsTimeoutMillis",
- ServerPoolProperties.DEFAULT_BUCKET_DROOLS_TIMEOUT);
-
- // use reflection to set private static field
- // 'FeatureServerPool.timeToLiveSecond'
- Whitebox.setInternalState(FeatureServerPool.class, "timeToLiveSecond",
- String.valueOf(ServerPoolProperties.DEFAULT_BUCKET_TIME_TO_LIVE));
-
- // use reflection to call private static method
- // 'FeatureServerPool.buildKeywordTable()'
- Whitebox.invokeMethod(FeatureServerPool.class, "buildKeywordTable");
-
- Bucket.Backup.register(new FeatureServerPool.DroolsSessionBackup());
- Bucket.Backup.register(new TargetLock.LockBackup());
-
- // dump out feature lists
- logger.info("{}: ServerPoolApi features list: {}",
- this, ServerPoolApi.impl.getList());
- logger.info("{}: PolicySessionFeatureApi features list: {}",
- this, PolicySessionFeatureApiConstants.getImpl().getList());
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void shutdown() {
- policyController.stop();
- Server.shutdown();
-
- PolicyEngineConstants.getManager().stop();
- PolicyEngineConstants.getManager().getExecutorService().shutdown();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public LinkedBlockingQueue<String> notificationQueue() {
- return inotificationQueue;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean waitForInit(long endTime) throws InterruptedException {
- try {
- // wait until a leader is elected
- await().atMost(endTime - System.currentTimeMillis(),
- TimeUnit.MILLISECONDS).until(() -> Leader.getLeader() != null);
-
- // wait for each bucket to have an owner
- for (int i = 0; i < Bucket.BUCKETCOUNT; i += 1) {
- Bucket bucket = Bucket.getBucket(i);
- // wait (at most until 'endTime') for this bucket to get an owner
- await().atMost(endTime - System.currentTimeMillis(),
- TimeUnit.MILLISECONDS).until(() -> bucket.getOwner() != null);
- }
- } catch (IllegalArgumentException e) {
- // 'await().atMost()' was passed a negative time-out value --
- // time is up
- logger.debug("AdapterImpl waitForInit error", e);
- return false;
- }
- return true;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public ServerWrapper.Static getServerStatic() {
- return serverStatic;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public ServerWrapper getLeader() {
- return ServerWrapperImpl.getWrapper(Leader.getLeader());
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public BucketWrapper.Static getBucketStatic() {
- return bucketStatic;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public TargetLockWrapper newTargetLock(
- String key, String ownerKey, TargetLockWrapper.Owner owner, boolean waitForLock) {
-
- return TargetLockWrapperImpl.newTargetLock(key, ownerKey, owner, waitForLock);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public TargetLockWrapper newTargetLock(String key, String ownerKey, TargetLockWrapper.Owner owner) {
- return TargetLockWrapperImpl.newTargetLock(key, ownerKey, owner);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void dumpLocks(PrintStream out, boolean detail) {
- try {
- TargetLock.DumpLocks.dumpLocks(out, detail);
- } catch (Exception e) {
- logger.error("{}: Exception in 'dumpLocks'", this, e);
- }
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String createController() {
- Properties properties;
- /*
- * set the thread class loader to be the same as the one associated
- * with the 'AdapterImpl' instance, so it will be inherited by any
- * new threads created (the Drools session thread, in particular)
- */
- ClassLoader saveClassLoader =
- Thread.currentThread().getContextClassLoader();
- Thread.currentThread().setContextClassLoader(AdapterImpl.class.getClassLoader());
-
- try {
- // build and install Drools artifact
- KieUtils.installArtifact(
- Paths.get("src/test/resources/drools-artifact-1.1/src/main/resources/META-INF/kmodule.xml").toFile(),
- Paths.get("src/test/resources/drools-artifact-1.1/pom.xml").toFile(),
- "src/main/resources/rules/org/onap/policy/drools/core/test/rules.drl",
- Paths.get("src/test/resources/drools-artifact-1.1/src/main/resources/rules.drl").toFile());
-
- // load properties from file
- properties = PropertyUtil.getProperties("src/test/resources/TestController-controller.properties");
- } catch (Exception e) {
- e.printStackTrace();
- Thread.currentThread().setContextClassLoader(saveClassLoader);
- return e.toString();
- }
-
- StringBuilder sb = new StringBuilder();
- try {
- // create and start 'PolicyController'
- policyController = PolicyEngineConstants.getManager()
- .createPolicyController("TestController", properties);
- policyController.start();
-
- // dump out container information (used for debugging tests)
- sb.append("PolicyContainer count: ")
- .append(PolicyContainer.getPolicyContainers().size()).append('\n');
- for (PolicyContainer policyContainer :
- PolicyContainer.getPolicyContainers()) {
- sb.append(" name = ")
- .append(policyContainer.getName())
- .append('\n')
- .append(" session count = ")
- .append(policyContainer.getPolicySessions().size())
- .append('\n');
- for (PolicySession pc : policyContainer.getPolicySessions()) {
- policySession = pc;
- }
- }
- } finally {
- Thread.currentThread().setContextClassLoader(saveClassLoader);
- }
- return sb.toString();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void sendEvent(String key) {
- /*
- * Note: the dumping out of package information was useful in tracking
- * down strange Drools behavior that was eventually tied to the
- * Drools class loader.
- */
- logger.info("{}: Calling 'sendEvent': packages = {}", this,
- policySession.getKieSession().getKieBase().getKiePackages());
- ((TopicListener) policyController).onTopicEvent(
- CommInfrastructure.UEB, "JUNIT-TEST-TOPIC",
- "{\"key\":\"" + key + "\"}");
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public KieSession getKieSession() {
- return policySession == null ? null : policySession.getKieSession();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void insertDrools(Object object) {
- if (policySession != null) {
- /*
- * this will eventually be changed to use the
- * 'PolicySession.insertObject(...)' method
- */
- new FeatureServerPool().insertDrools(policySession, object);
- }
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean isForeign(Object... objects) {
- boolean rval = false;
- ClassLoader myClassLoader = AdapterImpl.class.getClassLoader();
- for (Object o : objects) {
- Class<?> clazz = o.getClass();
- ClassLoader objClassLoader = clazz.getClassLoader();
-
- try {
- if (myClassLoader != objClassLoader
- && clazz != myClassLoader.loadClass(clazz.getName())) {
- rval = true;
- logger.info("{}: FOREIGN OBJECT ({}) - {}",
- this, getAdapter(objClassLoader), o);
- }
- } catch (ClassNotFoundException e) {
- rval = true;
- logger.error("{}: FOREIGN OBJECT -- CLASS NOT FOUND ({}) - {}",
- this, getAdapter(objClassLoader), o);
- }
- }
- return rval;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String findKey(String prefix, int startingIndex, ServerWrapper host) {
- String rval = null;
-
- // try up to 10000 numeric values to locate one on a particular host
- for (int i = 0; i < 10000; i += 1) {
- // generate key, and see if it is on the desired server
- String testString = prefix + (startingIndex + i);
- if (ServerWrapperImpl.getWrapper(
- Bucket.bucketToServer(Bucket.bucketNumber(testString))) == host) {
- // we have one that works
- rval = testString;
- break;
- }
- }
- return rval;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String findKey(String prefix, int startingIndex) {
- return findKey(prefix, startingIndex, serverStatic.getThisServer());
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String findKey(String prefix) {
- return findKey(prefix, 1, serverStatic.getThisServer());
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String toString() {
- return "AdapterImpl[" + index + "]";
- }
-
- /**
- * Return an Adapter.
- *
- * @return the 'Adapter' instance associated with the ClassLoader associated
- * with the current thread
- */
- public static Adapter getAdapter() {
- /*
- * Note that 'return(adapter)' doesn't work as expected when called from
- * within a 'Drools' session, because of the strange way that the Drools
- * 'ClassLoader' works -- it bypasses 'AdapterClassLoader' when doing
- * class lookups, even though it is the immediate parent of the Drools
- * session class loader.
- */
- return getAdapter(Thread.currentThread().getContextClassLoader());
- }
-
- /**
- * Return an Adapter.
- *
- * @param classLoader a ClassLoader instance
- * @return the 'Adapter' instance associated with the specified ClassLoader
- */
- public static Adapter getAdapter(ClassLoader classLoader) {
- try {
- // locate the 'AdapterImpl' class associated with a particular
- // 'ClassLoader' (which may be different from the current one)
- Class<?> thisAdapterClass =
- classLoader.loadClass("org.onap.policy.drools.serverpool.AdapterImpl");
-
- // return the 'adapter' field value
- return Whitebox.getInternalState(thisAdapterClass, "adapter");
- } catch (Exception e) {
- e.printStackTrace();
- return null;
- }
- }
-}
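The lookup above is what lets code running under an adapter-specific class loader find "its" adapter. A minimal usage sketch (the queue message and the caller context are invented for illustration; 'getAdapter()' and 'notificationQueue()' are the methods shown in this diff):

    // hypothetical caller running under one of the adapter class loaders
    Adapter adapter = AdapterImpl.getAdapter(
            Thread.currentThread().getContextClassLoader());
    if (adapter != null) {
        // signal the JUnit thread via the per-host notification queue
        adapter.notificationQueue().add("hello from " + adapter);
    }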
diff --git a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/BucketWrapperImpl.java b/feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/BucketWrapperImpl.java
deleted file mode 100644
index ddd4d553..00000000
--- a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/BucketWrapperImpl.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import java.io.PrintStream;
-import java.util.IdentityHashMap;
-import org.onap.policy.drools.serverpooltest.BucketWrapper;
-import org.onap.policy.drools.serverpooltest.ServerWrapper;
-
-/**
- * This class implements the 'BucketWrapper' interface. There is one
- * 'BucketWrapperImpl' class for each simulated host.
- */
-public class BucketWrapperImpl implements BucketWrapper {
- // this maps a 'Bucket' instance on this host to an associated wrapper
- private static IdentityHashMap<Bucket, BucketWrapperImpl> bucketToWrapper =
- new IdentityHashMap<>();
-
- // this is the 'Bucket' instance associated with the wrapper
- private Bucket bucket;
-
- /**
- * This method maps a 'Bucket' instance into a 'BucketWrapperImpl'
- * instance. The goal is to have only a single 'BucketWrapperImpl' instance
- * for each 'Bucket' instance, so that testing for identity will work
- * as expected.
- *
- * @param bucket the 'Bucket' instance
- * @return the associated 'BucketWrapperImpl' instance
- */
- static synchronized BucketWrapperImpl getWrapper(Bucket bucket) {
- if (bucket == null) {
- return null;
- }
- BucketWrapperImpl rval = bucketToWrapper.get(bucket);
- if (rval == null) {
- // a matching entry does not yet exist -- create one
- rval = new BucketWrapperImpl(bucket);
- bucketToWrapper.put(bucket, rval);
- }
- return rval;
- }
-
- /**
- * Constructor - initialize the 'bucket' field.
- */
- BucketWrapperImpl(Bucket bucket) {
- this.bucket = bucket;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public int getBucketNumber() {
- return bucket.getIndex();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public ServerWrapper getOwner() {
- return ServerWrapperImpl.getWrapper(bucket.getOwner());
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public ServerWrapper getPrimaryBackup() {
- return ServerWrapperImpl.getWrapper(bucket.getPrimaryBackup());
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public ServerWrapper getSecondaryBackup() {
- return ServerWrapperImpl.getWrapper(bucket.getSecondaryBackup());
- }
-
- /* ============================================================ */
-
- /**
- * This class implements the 'BucketWrapper.Static' interface. There is
- * one 'BucketWrapperImpl.Static' class, and one instance for each
- * simulated host
- */
- public static class Static implements BucketWrapper.Static {
- /**
- * {@inheritDoc}
- */
- @Override
- public int getBucketCount() {
- return Bucket.BUCKETCOUNT;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public int bucketNumber(String value) {
- return Bucket.bucketNumber(value);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public ServerWrapper bucketToServer(int bucketNumber) {
- return ServerWrapperImpl.getWrapper(Bucket.bucketToServer(bucketNumber));
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public BucketWrapper getBucket(int bucketNumber) {
- return getWrapper(Bucket.getBucket(bucketNumber));
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean isKeyOnThisServer(String key) {
- return Bucket.isKeyOnThisServer(key);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void moveBucket(PrintStream out, int bucketNumber, String newHostUuid) {
- ClassLoader save = Thread.currentThread().getContextClassLoader();
- try {
- Thread.currentThread().setContextClassLoader(
- BucketWrapperImpl.class.getClassLoader());
- Bucket.moveBucket(out, bucketNumber, newHostUuid);
- } finally {
- Thread.currentThread().setContextClassLoader(save);
- }
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void dumpAdjuncts(PrintStream out) {
- Bucket.dumpAdjuncts(out);
- }
- }
-}
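A short sketch of the identity-caching behavior implemented by 'getWrapper(Bucket)' above, assuming same-package test code and an arbitrary bucket number: repeated lookups for the same 'Bucket' yield the same wrapper, so identity ('==') comparisons in the tests behave as intended.

    Bucket bucket = Bucket.getBucket(42);                          // arbitrary bucket number
    BucketWrapperImpl first = BucketWrapperImpl.getWrapper(bucket);
    BucketWrapperImpl second = BucketWrapperImpl.getWrapper(bucket);
    assert first == second;                                        // one wrapper per Bucket instance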
diff --git a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/ServerWrapperImpl.java b/feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/ServerWrapperImpl.java
deleted file mode 100644
index 031c3323..00000000
--- a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/ServerWrapperImpl.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.IdentityHashMap;
-import java.util.UUID;
-import org.onap.policy.drools.serverpooltest.ServerWrapper;
-
-/**
- * This class implements the 'ServerWrapper' interface. There is one
- * 'ServerWrapperImpl' class for each simulated host.
- */
-public class ServerWrapperImpl implements ServerWrapper {
- // this maps a 'Server' instance on this host to an associated wrapper
- private static IdentityHashMap<Server, ServerWrapperImpl> serverToWrapper =
- new IdentityHashMap<>();
-
- // this is the 'Server' instance associated with the wrapper
- private Server server;
-
- /**
- * This method maps a 'Server' instance into a 'ServerWrapperImpl'
- * instance. The goal is to have only a single 'ServerWrapperImpl' instance
- * for each 'Server' instance, so that testing for identity will work
- * as expected.
- *
- * @param server the 'Server' instance
- * @return the associated 'ServerWrapperImpl' instance
- * ('null' if 'server' is 'null')
- */
- static synchronized ServerWrapperImpl getWrapper(Server server) {
- if (server == null) {
- return null;
- }
- ServerWrapperImpl rval = serverToWrapper.computeIfAbsent(server,
- (key) -> new ServerWrapperImpl(server));
- return rval;
- }
-
- /**
- * Constructor - initialize the 'server' field.
- */
- private ServerWrapperImpl(Server server) {
- this.server = server;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String toString() {
- return server.toString();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public UUID getUuid() {
- return server.getUuid();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean isActive() {
- return server.isActive();
- }
-
- /* ============================================================ */
-
- /**
- * This class implements the 'ServerWrapper.Static' interface. There is
- * one 'ServerWrapperImpl.Static' class, and one instance for each
- * simulated host
- */
- public static class Static implements ServerWrapper.Static {
- /**
- * {@inheritDoc}
- */
- @Override
- public ServerWrapper getThisServer() {
- return getWrapper(Server.getThisServer());
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public ServerWrapper getFirstServer() {
- return getWrapper(Server.getFirstServer());
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public ServerWrapper getServer(UUID uuid) {
- return getWrapper(Server.getServer(uuid));
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public int getServerCount() {
- return Server.getServerCount();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public Collection<ServerWrapper> getServers() {
- // build an 'ArrayList' which mirrors the set of servers
- ArrayList<ServerWrapper> rval = new ArrayList<>(Server.getServerCount());
-
- for (Server server : Server.getServers()) {
- rval.add(getWrapper(server));
- }
- return rval;
- }
- }
-}
diff --git a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/TargetLockWrapperImpl.java b/feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/TargetLockWrapperImpl.java
deleted file mode 100644
index e170975a..00000000
--- a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpool/TargetLockWrapperImpl.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpool;
-
-import java.io.Serializable;
-import org.onap.policy.drools.core.lock.Lock;
-import org.onap.policy.drools.core.lock.LockCallback;
-import org.onap.policy.drools.serverpooltest.TargetLockWrapper;
-
-/**
- * This class implements the 'TargetLockWrapper' interface. There is one
- * 'TargetLockWrapperImpl' class for each simulated host.
- */
-public class TargetLockWrapperImpl implements TargetLockWrapper {
- private static final long serialVersionUID = 1L;
-
- // this is the 'TargetLock' instance associated with the wrapper
- private TargetLock targetLock;
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean free() {
- return targetLock.free();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public boolean isActive() {
- return targetLock.isActive();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public State getState() {
- return TargetLockWrapper.State.valueOf(targetLock.getState().toString());
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String getOwnerKey() {
- return targetLock.getOwnerKey();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public String toString() {
- return "TLW-"
- + String.valueOf(AdapterImpl.getAdapter(TargetLockWrapperImpl.class.getClassLoader()))
- + "[" + targetLock.toString() + "]";
- }
-
- /**
- * This method creates a new 'TargetLock'. Internally, an 'OwnerAdapter'
- * instance is built as well, which translates the 'LockCallback'
- * callbacks to 'TargetLockWrapper.Owner' callbacks. As with the call to
- * 'new TargetLock(...)', it is possible for the callback to occur before
- * this method returns -- this can happen if the 'key' hashes to a bucket
- * owned by the current host.
- *
- * @param key string key identifying the lock
- * @param ownerKey string key identifying the owner, which must hash to
- * a bucket owned by the current host (it is typically a 'RequestID')
- * @param owner owner of the lock (will be notified when going from
- * WAITING to ACTIVE)
- * @param waitForLock this controls the behavior when 'key' is already
- * locked - 'true' means wait for it to be freed, 'false' means fail
- * @return a 'TargetLockWrapper' instance associated with the new
- * 'TargetLock'.
- */
- static TargetLockWrapper newTargetLock(
- String key, String ownerKey, TargetLockWrapper.Owner owner, boolean waitForLock) {
-
- TargetLockWrapperImpl rval = new TargetLockWrapperImpl();
- rval.targetLock =
- new TargetLock(key, ownerKey,
- TargetLockWrapperImpl.getOwnerAdapter(rval, owner),
- waitForLock);
- return rval;
- }
-
- /**
- * This method creates a new 'TargetLock'. Internally, an 'OwnerAdapter'
- * instance is built as well, which translates the 'LockCallback'
- * callbacks to 'TargetLockWrapper.Owner' callbacks. As with the call to
- * 'new TargetLock(...)', it is possible for the callback to occur before
- * this method returns -- this can happen if the 'key' hashes to a bucket
- * owned by the current host.
- *
- * @param key string key identifying the lock
- * @param ownerKey string key identifying the owner, which must hash to
- * a bucket owned by the current host (it is typically a 'RequestID')
- * @param owner owner of the lock (will be notified when going from
- * WAITING to ACTIVE)
- * @return a 'TargetLockWrapper' instance associated with the new
- * 'TargetLock'.
- */
- static TargetLockWrapper newTargetLock(
- String key, String ownerKey, TargetLockWrapper.Owner owner) {
-
- TargetLockWrapperImpl rval = new TargetLockWrapperImpl();
- rval.targetLock =
- new TargetLock(key, ownerKey,
- TargetLockWrapperImpl.getOwnerAdapter(rval, owner));
- return rval;
- }
-
- /* ============================================================ */
-
- /**
- * This method builds an adapter that implements the 'LockCallback'
- * callback interface, translating it to 'TargetLockWrapper.Owner'.
- *
- * @param targetLock the TargetLockWrapper that is using this adapter
- * @param owner the 'TargetLockWrapper.Owner' callback
- * @return an adapter implementing the 'LockCallback' interface
- * ('null' is returned if 'owner' is null -- this is an error condition,
- * but is used to verify the error handling of the 'TargetLock'
- * constructor).
- */
- public static LockCallback getOwnerAdapter(
- TargetLockWrapper targetLock, TargetLockWrapper.Owner owner) {
-
- return owner == null ? null : new OwnerAdapter(targetLock, owner);
- }
-
- /**
- * This class is an adapter that implements the 'LockCallback' callback
- * interface, translating it to a 'TargetLockWrapper.Owner' callback.
- */
- public static class OwnerAdapter implements LockCallback, Serializable {
- private static final long serialVersionUID = 1L;
-
- // the 'TargetLockWrapper' instance to pass as an argument in the callback
- TargetLockWrapper targetLock;
-
- // the 'TargetLockWrapper.Owner' callback
- TargetLockWrapper.Owner owner;
-
- /**
- * Constructor - initialize the adapter.
- *
- * @param targetLock this will be passed as an argument in the callback
- * @param owner the object implementing the 'TargetLockWrapper.Owner'
- * interface
- */
- private OwnerAdapter(TargetLockWrapper targetLock, TargetLockWrapper.Owner owner) {
- this.targetLock = targetLock;
- this.owner = owner;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void lockAvailable(Lock lock) {
- // forward 'lockAvailable' callback
- owner.lockAvailable(targetLock);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void lockUnavailable(Lock lock) {
- // forward 'lockUnavailable' callback
- owner.lockUnavailable(targetLock);
- }
- }
-}
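A hedged usage sketch of the callback translation above, written as same-package test code with placeholder keys; the 'Owner' implementation stands in for the 'LockCallback' that 'OwnerAdapter' forwards to.

    TargetLockWrapper.Owner owner = new TargetLockWrapper.Owner() {
        @Override
        public void lockAvailable(TargetLockWrapper lock) {
            System.out.println("lock ACTIVE: " + lock.getOwnerKey());
        }

        @Override
        public void lockUnavailable(TargetLockWrapper lock) {
            System.out.println("lock LOST: " + lock.getOwnerKey());
        }
    };

    // the callback may fire before this call returns if the bucket for
    // "some-key" is owned by the current (simulated) host
    TargetLockWrapper lock =
            TargetLockWrapperImpl.newTargetLock("some-key", "owner-key", owner, true);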
diff --git a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/Adapter.java b/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/Adapter.java
deleted file mode 100644
index 9fa54608..00000000
--- a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/Adapter.java
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpooltest;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.PrintStream;
-import java.net.URL;
-import java.net.URLClassLoader;
-import java.util.ArrayList;
-import java.util.Properties;
-import java.util.concurrent.LinkedBlockingQueue;
-import org.kie.api.runtime.KieSession;
-import org.onap.policy.common.utils.network.NetworkUtil;
-import org.onap.policy.drools.serverpool.Util;
-import org.onap.policy.drools.utils.PropertyUtil;
-
-/**
- * This is a common base class for 6 'AdapterImpl' instances, all running
- * with their own copies of the server pool classes, and a number of the ONAP
- * classes. The purpose is to simulate 6 separate hosts in a server pool.
- * Note that there is potentially a 7th copy of any of these classes, which is
- * the one loaded with the system class loader. Ideally, those classes
- * shouldn't be referred to, but there were some problems during testing,
- * where they unexpectedly were (prompting a change in
- * 'ExtendedObjectInputStream'). This is referred to as the 'null' host,
- * where the classes may exist, but have not gone through initialization.
- */
-public abstract class Adapter {
- // 'true' indicates that initialization is still needed
- private static boolean initNeeded = true;
- /*
- * Each 'Adapter' instance is implemented by 'AdapterImpl', but loaded
- * with a different class loader that provides each with a different copy
- * of the set of classes with packages in the list below (see references to
- * 'BlockingClassLoader').
- */
- public static Adapter[] adapters = new Adapter[6];
-
- /**
- * Ensure that all adapters have been initialized.
- */
- public static void ensureInit() throws Exception {
- synchronized (Adapter.class) {
- if (initNeeded) {
- initNeeded = false;
-
- // start DMAAP Simulator
- new Thread(new Runnable() {
- @Override
- public void run() {
- SimDmaap.start();
- }
- }, "DMAAP Simulator").start();
-
- // wait for the simulated DMAAP server's listener port to open (up to 250 retries, 200 ms apart)
- final String propertyFile = "src/test/resources/feature-server-pool-test.properties";
- Properties prop = PropertyUtil.getProperties(propertyFile);
- assertTrue(NetworkUtil.isTcpPortOpen(prop.getProperty("server.pool.discovery.servers"),
- Integer.parseInt(prop.getProperty("server.pool.discovery.port")), 250, 200));
-
- // build 'BlockingClassLoader'
- BlockingClassLoader bcl = new BlockingClassLoader(
- Adapter.class.getClassLoader(),
- // All 'org.onap.policy.*' classes are adapter-specific, except
- // for the exclusions listed below.
- "org.onap.policy.*"
- );
- bcl.addExclude("org.onap.policy.drools.core.DroolsRunnable");
- bcl.addExclude("org.onap.policy.drools.serverpooltest.*");
-
- // build URL list for class loader
- URL[] urls = {};
-
- // iterate through 'adapter' entries
- ClassLoader saveClassLoader =
- Thread.currentThread().getContextClassLoader();
- if (saveClassLoader instanceof URLClassLoader) {
- urls = ((URLClassLoader) saveClassLoader).getURLs();
- } else {
- // the parent is not a 'URLClassLoader' --
- // try to get this information from 'java.class.path'
- ArrayList<URL> tmpUrls = new ArrayList<>();
- for (String entry : System.getProperty("java.class.path").split(
- File.pathSeparator)) {
- if (new File(entry).isDirectory()) {
- tmpUrls.add(new URL("file:" + entry + "/"));
- } else {
- tmpUrls.add(new URL("file:" + entry));
- }
- }
- urls = tmpUrls.toArray(new URL[0]);
- }
- try {
- for (int i = 0; i < adapters.length; i += 1) {
- /*
- * Build a new 'ClassLoader' for this adapter. The
- * 'ClassLoader' hierarchy is:
- *
- * AdapterClassLoader(one copy per Adapter) ->
- * BlockingClassLoader ->
- * base ClassLoader (with the complete URL list)
- */
- ClassLoader classLoader =
- new AdapterClassLoader(i, urls, bcl);
- /*
- * set the current thread class loader, which should be
- * inherited by any child threads created during
- * the initialization of the adapter
- */
- Thread.currentThread().setContextClassLoader(classLoader);
-
- // now, build the adapter -- it is not just a new instance,
- // but a new copy of class 'AdapterImpl'
- Adapter adapter = (Adapter) classLoader.loadClass(
- "org.onap.policy.drools.serverpool.AdapterImpl")
- .getDeclaredConstructor()
- .newInstance();
-
- // initialize the adapter
- adapter.init(i);
- adapters[i] = adapter;
- }
- } finally {
- // restore the class loader to that used during the Junit tests
- Thread.currentThread().setContextClassLoader(saveClassLoader);
- }
- }
- }
- }
-
- /**
- * Shut everything down.
- */
- public static void ensureShutdown() {
- for (Adapter adapter : adapters) {
- adapter.shutdown();
- }
- SimDmaap.stop();
- // not sure why the following is needed, but shut down 'Util' as well
- Util.shutdown();
- }
-
- /**
- * Runs server pool initialization for a particular host.
- *
- * @param index the index of the adapter (0-5)
- */
- public abstract void init(int index) throws Exception;
-
- /**
- * Shuts down the server pool for this host.
- */
- public abstract void shutdown();
-
- /**
- * Return a 'LinkedBlockingQueue' instance, which is used as a way for
- * Drools code to signal back to running Junit tests.
- *
- * @return a 'LinkedBlockingQueue' instance, which is used as a way for
- * Drools code to signal back to running Junit tests
- */
- public abstract LinkedBlockingQueue<String> notificationQueue();
-
- /**
- * This method blocks and waits for all buckets to have owners, or for
- * a timeout, whichever occurs first.
- *
- * @param endTime the point at which timeout occurs
- * @return 'true' if all buckets have owners, 'false' if a timeout occurred
- */
- public abstract boolean waitForInit(long endTime) throws InterruptedException;
-
- /**
- * Return an object providing indirect references to a select set of
- * static 'Server' methods.
- *
- * @return an object providing indirect references to a select set of
- * static 'Server' methods
- */
- public abstract ServerWrapper.Static getServerStatic();
-
- /**
- * Return an object providing an indirect reference to the lead 'Server'
- * object.
- *
- * @return an object providing an indirect reference to the lead 'Server'
- * object
- */
- public abstract ServerWrapper getLeader();
-
- /**
- * Return an object providing indirect references to a select set of
- * static 'Bucket' methods.
- *
- * @return an object providing indirect references to a select set of
- * static 'Bucket' methods
- */
- public abstract BucketWrapper.Static getBucketStatic();
-
- /**
- * Create a new 'TargetLock' instance, returning an indirect reference.
- *
- * @param key string key identifying the lock
- * @param ownerKey string key identifying the owner, which must hash to
- * a bucket owned by the current host (it is typically a 'RequestID')
- * @param owner owner of the lock (will be notified when going from
- * WAITING to ACTIVE)
- * @param waitForLock this controls the behavior when 'key' is already
- * locked - 'true' means wait for it to be freed, 'false' means fail
- */
- public abstract TargetLockWrapper newTargetLock(
- String key, String ownerKey, TargetLockWrapper.Owner owner,
- boolean waitForLock);
-
- /**
- * Create a new 'TargetLock' instance, returning an indirect reference.
- *
- * @param key string key identifying the lock
- * @param ownerKey string key identifying the owner, which must hash to
- * a bucket owned by the current host (it is typically a 'RequestID')
- * @param owner owner of the lock (will be notified when going from
- * WAITING to ACTIVE)
- */
- public abstract TargetLockWrapper newTargetLock(
- String key, String ownerKey, TargetLockWrapper.Owner owner);
-
- /**
- * Call 'TargetLock.DumpLocks.dumpLocks'
- *
- * @param out where the output should be displayed
- * @param detail 'true' provides additional bucket and host information
- * (but abbreviates all UUIDs in order to avoid excessive
- * line length)
- */
- public abstract void dumpLocks(PrintStream out, boolean detail);
-
- /**
- * Create and initialize PolicyController 'TestController', and start
- * the associated Drools container and session.
- *
- * @return a string containing controller session information
- */
- public abstract String createController();
-
- /**
- * Send an event in the form of a JSON message string. The message is
- * sent to JUNIT-TEST-TOPIC, and the JSON object is converted to a
- * 'TestDroolsObject' (all compatible with the Drools session created by
- * 'createController').
- *
- * @param key determines the bucket number, which affects which host the
- * message is eventually routed to
- */
- public abstract void sendEvent(String key);
-
- /**
- * Return the one-and-only 'KieSession' on this host.
- *
- * @return the one-and-only 'KieSession' on this host
- */
- public abstract KieSession getKieSession();
-
- /**
- * Insert an object into the one-and-only Drools session.
- *
- * @param object the object to insert
- */
- public abstract void insertDrools(Object object);
-
- // some test utilities
-
- /**
- * Determine whether any of the objects passed as parameters are of a class
- * that belongs to a different adapter. Messages are logged
- * for any that are found.
- *
- * @param objects one or more objects to be tested
- * @return 'true' if one or more are foreign
- */
- public abstract boolean isForeign(Object... objects);
-
- /**
- * This method is used to generate keys that hash to a bucket associated
- * with a particular server. The algorithm generates a key using 'prefix'
- * concatenated with a numeric value, and searches for the first one on
- * the desired host. It will try up to 10000 indices before giving up --
- * each host owns 1/6 of the buckets, so 10000 attempts should be more
- * than enough. The tests assume that a valid key will be returned; if it
- * is not, the resulting 'NullPointerException' is treated as an acceptable
- * failure mode.
- *
- * @param prefix the first portion of the key
- * @param startingIndex the first index to try
- * @param host this indicates the 'Server' instance to locate, which must
- * not be foreign to this adapter
- * @return a key associated with 'host' ('null' if not found)
- */
- public abstract String findKey(String prefix, int startingIndex, ServerWrapper host);
-
- /**
- * Equivalent to 'findKey(prefix, startingIndex, THIS-SERVER)'.
- *
- * @param prefix the first portion of the key
- * @param startingIndex the first index to try
- * @return a key associated with 'host' ('null' if not found)
- */
- public abstract String findKey(String prefix, int startingIndex);
-
- /**
- * Equivalent to 'findKey(prefix, 1, THIS-SERVER)'.
- *
- * @param prefix the first portion of the key
- * @return a key associated with 'host' ('null' if not found)
- */
- public abstract String findKey(String prefix);
-
- /* ============================================================ */
-
- /**
- * This class is basically a 'URLClassLoader', but with a 'toString()'
- * method that indicates the host and adapter number.
- */
- public static class AdapterClassLoader extends URLClassLoader {
- private int index;
-
- public AdapterClassLoader(int index, URL[] urls, ClassLoader parent) {
- super(urls, parent);
- this.index = index;
- }
-
- @Override
- public String toString() {
- return "AdapterClassLoader(" + index + ")";
- }
- }
-}
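An illustrative end-to-end flow built only from the methods declared above (the timeout and key prefix are invented, and the fragment is assumed to live in a test method declared 'throws Exception'):

    Adapter.ensureInit();                              // bring up the six class-loader-isolated hosts
    Adapter host0 = Adapter.adapters[0];

    // wait up to 60 seconds (arbitrary) for every bucket to have an owner
    assertTrue(host0.waitForInit(System.currentTimeMillis() + 60000L));

    host0.createController();                          // start the Drools container and session
    String key = host0.findKey("test-key-");           // a key that hashes to one of host 0's buckets
    host0.sendEvent(key);                              // the event is routed back to host 0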
diff --git a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/BlockingClassLoader.java b/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/BlockingClassLoader.java
deleted file mode 100644
index a21f254d..00000000
--- a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/BlockingClassLoader.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpooltest;
-
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Enumeration;
-import java.util.HashSet;
-import java.util.NoSuchElementException;
-
-/**
- * Ordinarily, a 'ClassLoader' first attempts to load a class via the
- * parent 'ClassLoader'. If that fails, it attempts to load it "locally"
- * by whatever mechanism the class loader supports.
- * This 'ClassLoader' instance blocks attempts to load specific classes,
- * throwing a 'ClassNotFoundException'. This doesn't seem useful on the
- * surface, but it forces all child 'ClassLoader' instances to do the lookup
- * themselves. In addition, each child 'ClassLoader' will have its own
- * copy of the classes it loads, providing a way to have multiple copies of
- * the same class running within the same JVM. Each child 'ClassLoader' can
- * be viewed as having a separate name space.
- */
-public class BlockingClassLoader extends ClassLoader {
- // these are the set of packages to block
- private HashSet<String> packages;
-
- // these are the prefixes of class names to block
- private ArrayList<String> prefixes;
-
- // these specific classes will not be blocked, even if they are in one
- // of the packages indicated by 'packages'
- private HashSet<String> excludes = new HashSet<String>();
-
- // these are the prefixes of class names to exclude
- private ArrayList<String> excludePrefixes = new ArrayList<>();
-
- /**
- * Constructor -- initialize the 'ClassLoader' and 'packages' variable.
- *
- * @param parent the parent ClassLoader
- * @param packages variable number of packages to block
- */
- public BlockingClassLoader(ClassLoader parent, String... packages) {
- super(parent);
- this.packages = new HashSet<>();
- this.prefixes = new ArrayList<>();
- for (String pkg : packages) {
- if (pkg.endsWith("*")) {
- prefixes.add(pkg.substring(0, pkg.length() - 1));
- } else {
- this.packages.add(pkg);
- }
- }
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- protected Class<?> findClass(String name) throws ClassNotFoundException {
- // throws a 'ClassNotFoundException' if we are blocking this one
- testClass(name);
-
- // not blocking this one -- pass it on to the superclass
- return super.findClass(name);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public Enumeration<URL> getResources(String name) {
- // in order to avoid replicated resources, we return an empty set
- return new Enumeration<URL>() {
- public boolean hasMoreElements() {
- return false;
- }
-
- public URL nextElement() {
- throw new NoSuchElementException("'BlockingClassLoader' blocks duplicate resources");
- }
- };
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public Class<?> loadClass(String name) throws ClassNotFoundException {
- // throws a 'ClassNotFoundException' if we are blocking this one
- testClass(name);
-
- // not blocking this one -- pass it on to the superclass
- return super.loadClass(name);
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
- // throws a 'ClassNotFoundException' if we are blocking this one
- testClass(name);
-
- // not blocking this one -- pass it on to the superclass
- return super.loadClass(name, resolve);
- }
-
- /**
- * Add an entry to the list of classes that should NOT be blocked.
- *
- * @param name the full name of a class that shouldn't be blocked
- */
- public void addExclude(String name) {
- if (name.endsWith("*")) {
- excludePrefixes.add(name.substring(0, name.length() - 1));
- } else {
- excludes.add(name);
- }
- }
-
- /**
- * This method looks at a class name -- if it should be blocked, a
- * 'ClassNotFoundException' is thrown. Otherwise, it does nothing.
- *
- * @param name the name of the class to be tested
- * @throws ClassNotFoundException if this class should be blocked
- */
- private void testClass(String name) throws ClassNotFoundException {
- if (excludes.contains(name)) {
- // allow this one
- return;
- }
-
- for (String prefix : excludePrefixes) {
- if (name.startsWith(prefix)) {
- // allow this one
- return;
- }
- }
-
- // extract the package from the class name -- throw a
- // 'ClassNotFoundException' if the package is in the list
- // being blocked
- int index = name.lastIndexOf('.');
- if (index >= 0) {
- if (packages.contains(name.substring(0, index))) {
- throw new ClassNotFoundException(name);
- }
-
- for (String prefix : prefixes) {
- if (name.startsWith(prefix)) {
- throw new ClassNotFoundException(name);
- }
- }
- }
- }
-}
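A minimal sketch of the name-space isolation described in the class comment, assuming 'com.example.Widget' is a placeholder class present on the listed (also placeholder) URL: because the shared parent refuses to load it, each child 'URLClassLoader' loads its own copy, and the two 'Class' objects are distinct.

    URL[] urls = { new URL("file:target/test-classes/") };        // placeholder classpath entry
    ClassLoader parent = new BlockingClassLoader(
            BlockingClassLoader.class.getClassLoader(), "com.example.*");

    ClassLoader childA = new URLClassLoader(urls, parent);
    ClassLoader childB = new URLClassLoader(urls, parent);

    Class<?> copyA = childA.loadClass("com.example.Widget");      // loaded by childA
    Class<?> copyB = childB.loadClass("com.example.Widget");      // loaded again by childB
    assert copyA != copyB;                                        // two copies, two name spaces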
diff --git a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/BucketWrapper.java b/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/BucketWrapper.java
deleted file mode 100644
index 2628513c..00000000
--- a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/BucketWrapper.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpooltest;
-
-import java.io.PrintStream;
-
-/**
- * This interface provides a common wrapper for accessing the various 'Bucket'
- * classes. There is a separate copy of the 'Bucket' class for each
- * adapter, and this wrapper was created to give them a common interface.
- */
-public interface BucketWrapper {
- /**
- * This calls the 'Bucket.getBucketNumber()' method
- *
- * @return the bucket number
- */
- public int getBucketNumber();
-
- /**
- * This calls the 'Bucket.getOwner()' method
- *
- * @return a 'ServerWrapper' instance that corresponds to the owner
- * of the bucket ('null' if unassigned)
- */
- public ServerWrapper getOwner();
-
- /**
- * This calls the 'Bucket.getPrimaryBackup()' method
- *
- * @return a 'ServerWrapper' instance that corresponds to the primary backup
- * host for the bucket ('null' if unassigned)
- */
- public ServerWrapper getPrimaryBackup();
-
- /**
- * This calls the 'Bucket.getSecondaryBackup()' method
- *
- * @return a 'ServerWrapper' instance that corresponds to the secondary
- * backup host for the bucket ('null' if unassigned)
- */
- public ServerWrapper getSecondaryBackup();
-
- /* ============================================================ */
-
- /**
- * This class provides access to the static 'Bucket' methods. There are
- * multiple 'Bucket' classes (one for each 'Adapter'), and each has
- * a corresponding 'BucketWrapper.Static' instance. In other words, there
- * is one 'Bucket.Static' instance for each simulated host.
- */
- public interface Static {
- /**
- * This returns the value of 'Bucket.BUCKETCOUNT'
- *
- * @return the number of Bucket instances in the bucket table
- */
- public int getBucketCount();
-
- /**
- * This calls the static 'Bucket.bucketNumber(String)' method
- *
- * @param value the keyword to be converted
- * @return the bucket number
- */
- public int bucketNumber(String value);
-
- /**
- * This calls the static 'Bucket.bucketToServer(int)' method
- *
- * @param bucketNumber a bucket number in the range 0-1023
- * @return a 'ServerWrapper' for the server that currently handles the
- * bucket, or 'null' if none is currently assigned
- */
- public ServerWrapper bucketToServer(int bucketNumber);
-
- /**
- * This calls the static 'Bucket.getBucket(int)' method
- *
- * @param bucketNumber a bucket number in the range 0-1023
- * @return A 'BucketWrapper' for the Bucket associated with
- * this bucket number
- */
- public BucketWrapper getBucket(int bucketNumber);
-
- /**
- * This calls the static 'Bucket.isKeyOnThisServer(String)' method
- *
- * @param key the keyword to be hashed
- * @return 'true' if the associated bucket is assigned to this server,
- * 'false' if not
- */
- public boolean isKeyOnThisServer(String key);
-
- /**
- * This calls the static 'Bucket.moveBucket(PrintStream, int, String)'
- * method (the one associated with the '/cmd/moveBucket' REST call).
- *
- * @param out the 'PrintStream' to use for displaying information
- * @param bucketNumber the bucket number to be moved
- * @param newHostUuid the UUID of the destination host (if 'null', a
- * destination host will be chosen at random)
- */
- public void moveBucket(PrintStream out, int bucketNumber, String newHostUuid);
-
- /**
- * This calls the static 'Bucket.dumpAdjuncts(PrintStream)' method
- * (the one associated with the '/cmd/dumpBucketAdjuncts' REST call).
- *
- * @param out the 'PrintStream' to use for displaying information
- */
- public void dumpAdjuncts(PrintStream out);
- }
-}
diff --git a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/ServerWrapper.java b/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/ServerWrapper.java
deleted file mode 100644
index e31a6817..00000000
--- a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/ServerWrapper.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpooltest;
-
-import java.util.Collection;
-import java.util.UUID;
-
-/**
- * This interface provides a common wrapper for accessing the various 'Server'
- * classes. There is a separate copy of the 'Server' class for each
- * adapter, and this wrapper was created to give them a common interface.
- */
-public interface ServerWrapper {
- /**
- * This calls the 'Server.toString()' method
- *
- * @return a string of the form 'Server[UUID]'
- */
- public String toString();
-
- /**
- * This calls the 'Server.getUuid()' method
- *
- * @return the UUID associated with this Server
- */
- public UUID getUuid();
-
- /**
- * This calls the 'Server.isActive()' method
- *
- * @return 'true' if this server is active, and 'false' if not
- */
- public boolean isActive();
-
- /* ============================================================ */
-
- /**
- * This class provides access to the static 'Server' methods. There are
- * multiple 'Server' classes (one for each 'Adapter'), and each has
- * a corresponding 'ServerWrapper.Static' instance. In other words, there
- * is one 'Server.Static' instance for each simulated host.
- */
- public interface Static {
- /**
- * This calls the static 'Server.getThisServer()' method
- *
- * @return a 'ServerWrapper' instance that corresponds to the Server
- * instance associated with this simulated host
- */
- public ServerWrapper getThisServer();
-
- /**
- * This calls the static 'Server.getFirstServer()' method
- *
- * @return a 'ServerWrapper' instance that corresponds to the first
- * 'Server' instance in the 'servers' list (the one with the
- * lowest UUID)
- */
- public ServerWrapper getFirstServer();
-
- /**
- * This calls the static 'Server.getServer(UUID)' method
- *
- * @param uuid the key to the lookup
- * @return a 'ServerWrapper' instance that corresponds to the associated
- * 'Server' instance ('null' if none)
- */
- public ServerWrapper getServer(UUID uuid);
-
- /**
- * This calls the static 'Server.getServerCount()' method
- *
- * @return a count of the number of servers
- */
- public int getServerCount();
-
- /**
- * This calls the static 'Server.getServers()' method
- *
- * @return the complete list of servers, each with a 'ServerWrapper'
- * referring to the 'Server'
- */
- public Collection<ServerWrapper> getServers();
- }
-}
diff --git a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/SimDmaap.java b/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/SimDmaap.java
deleted file mode 100644
index b1b86370..00000000
--- a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/SimDmaap.java
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpooltest;
-
-import java.util.Map;
-import java.util.Properties;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-import javax.ws.rs.Consumes;
-import javax.ws.rs.GET;
-import javax.ws.rs.POST;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.QueryParam;
-import javax.ws.rs.core.MediaType;
-import org.eclipse.jetty.server.Server;
-import org.eclipse.jetty.server.ServerConnector;
-import org.eclipse.jetty.servlet.ServletContextHandler;
-import org.eclipse.jetty.servlet.ServletHolder;
-import org.onap.policy.drools.utils.PropertyUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class simulates a UEB/DMAAP server.
- */
-
-@Path("/")
-public class SimDmaap {
- private static Logger logger = LoggerFactory.getLogger(SimDmaap.class);
-
- // miscellaneous Jetty/Servlet parameters
- private static ServletContextHandler context;
- private static Server jettyServer;
- private static ServerConnector connector;
- private static ServletHolder holder;
-
- /**
- * Start the simulated server: build the Jetty server, servlet context and
- * connector from the test properties, wire them together, and start
- * listening (this call blocks in 'jettyServer.join()').
- */
- public static void start() {
- try {
- context = new ServletContextHandler(ServletContextHandler.SESSIONS);
- context.setContextPath("/");
-
- jettyServer = new Server();
-
- connector = new ServerConnector(jettyServer);
- connector.setName("simdmaap");
- connector.setReuseAddress(true);
- final String propertyFile = "src/test/resources/feature-server-pool-test.properties";
- Properties prop = PropertyUtil.getProperties(propertyFile);
- connector.setPort(Integer.parseInt(prop.getProperty("server.pool.discovery.port")));
- connector.setHost(prop.getProperty("server.pool.discovery.servers"));
-
- jettyServer.addConnector(connector);
- jettyServer.setHandler(context);
-
- holder = context.addServlet(org.glassfish.jersey.servlet.ServletContainer.class.getName(), "/*");
- holder.setInitParameter(
- "jersey.config.server.provider.classnames", SimDmaap.class.getName());
-
- jettyServer.start();
- jettyServer.join();
- } catch (Exception e) {
- e.printStackTrace();
- }
- }
-
- /**
- * Cleanly shut down the server.
- */
- public static void stop() {
- try {
- if (jettyServer != null) {
- jettyServer.stop();
- jettyServer = null;
- }
- } catch (Exception e) {
- e.printStackTrace();
- }
- }
-
- /* ============================================================ */
-
- // maps topic name to 'Topic' instance
- static Map<String, Topic> topicTable = new ConcurrentHashMap<>();
-
- /**
- * Each instance of this class corresponds to a DMAAP or UEB topic.
- */
- static class Topic {
- // topic name
- String topic;
-
- // maps group name into group instance
- Map<String, Group> groupTable = new ConcurrentHashMap<>();
-
- /**
- * Create or get a Topic.
- *
- * @param name the topic name
- * @return the associated Topic instance
- */
- static Topic createOrGet(String name) {
- // look up the topic name
- Topic topicObj = topicTable.get(name);
- if (topicObj == null) {
- // no entry found -- the following will create one, without
- // the need for explicit synchronization
- topicTable.putIfAbsent(name, new Topic(name));
- topicObj = topicTable.get(name);
- }
- return topicObj;
- }
-
- /**
- * Constructor - initialize the 'topic' field.
- *
- * @param topic the topic name
- */
- private Topic(String topic) {
- this.topic = topic;
- }
-
- /**
- * Handle an incoming '/events/{topic}' POST REST message.
- *
- * @param data the body of the REST message
- * @return the appropriate JSON response
- */
- String post(String data) {
- // start of message processing
- long startTime = System.currentTimeMillis();
-
- // current and ending indices into the 'data' string
- int cur = 0;
- int end = data.length();
-
- // the number of messages retrieved so far
- int messageCount = 0;
-
- while (cur < end) {
- /*
- * The body of the message may consist of multiple JSON messages,
- * each preceded by 3 integers separated by '.'. The second one
- * is the length, in bytes (the third seems to be some kind of
- * channel identifier).
- */
- int leftBrace = data.indexOf('{', cur);
- if (leftBrace < 0) {
- // no more messages
- break;
- }
- String[] prefix = data.substring(cur, leftBrace).split("\\.");
- if (prefix.length == 3) {
- try {
- // determine length of message, and advance current position
- int length = Integer.valueOf(prefix[1]);
- cur = leftBrace + length;
- /*
- * extract message, and update count -- each double quote
- * has a '\' character placed before it, so the overall
- * message can be placed in double quotes, and parsed as
- * a literal string
- */
- String message = data.substring(leftBrace, cur)
- .replace("\\", "\\\\").replace("\"", "\\\"")
- .replace("\n", "\\n");
- messageCount += 1;
-
- // send to all listening groups
- for (Group group : groupTable.values()) {
- group.messages.add(message);
- }
- } catch (Exception e) {
- logger.error("{}: {}", prefix[1], e);
- break;
- }
- } else if (cur == 0) {
- // there is only a single message -- extract it, and update count
- String message = data.substring(leftBrace, end)
- .replace("\\", "\\\\").replace("\"", "\\\"")
- .replace("\n", "\\n");
- messageCount += 1;
-
- // send to all listening groups
- for (Group group : groupTable.values()) {
- group.messages.add(message);
- }
- break;
- } else {
- // don't know what this is -- toss it
- break;
- }
- }
-
- // generate response message
- long elapsedTime = System.currentTimeMillis() - startTime;
- return "{\n"
- + " \"count\": " + messageCount + ",\n"
- + " \"serverTimeMs\": " + elapsedTime + "\n"
- + "}";
- }
-
- /**
- * Read one or more incoming messages.
- *
- * @param group the 'consumerGroup' value
- * @param timeout how long to wait for a message, in milliseconds
- * @param limit the maximum number of messages to receive
- * @return a JSON array, containing somewhere between 0 and 'limit' messages
- */
- String get(String group, long timeout, int limit) throws InterruptedException {
- // look up the group -- create one if it doesn't exist
- Group groupObj = groupTable.get(group);
- if (groupObj == null) {
- // no entry found -- the following will create one, without
- // the need for explicit synchronization
- groupTable.putIfAbsent(group, new Group());
- groupObj = groupTable.get(group);
- }
-
- // pass it on to the 'Group' instance
- return groupObj.get(timeout, limit);
- }
- }
-
- /* ============================================================ */
-
- /**
- * Each instance of this class corresponds to a Consumer Group.
- */
- static class Group {
- // messages queued for this group
- private BlockingQueue<String> messages = new LinkedBlockingQueue<>();
-
- /**
- * Retrieve messages sent to this group.
- *
- * @param timeout how long to wait for a message, in milliseconds
- * @param limit the maximum number of messages to receive
- * @return a JSON array, containing somewhere between 0 and 'limit' messages
- */
- String get(long timeout, int limit) throws InterruptedException {
- String message = messages.poll(timeout, TimeUnit.MILLISECONDS);
- if (message == null) {
- // timed out without messages
- return "[]";
- }
-
- // use 'StringBuilder' to assemble the response -- add the first message
- StringBuilder builder = new StringBuilder();
- builder.append("[\"").append(message);
-
- // add up to '<limit>-1' more messages
- for (int i = 1; i < limit; i += 1) {
- // fetch the next message -- don't wait if it isn't currently there
- message = messages.poll();
- if (message == null) {
- // no more currently available
- break;
- }
- builder.append("\",\"").append(message);
- }
- builder.append("\"]");
- return builder.toString();
- }
- }
-
- /* ============================================================ */
-
- /**
- * Incoming messages from the caller to the simulator.
- */
- @POST
- @Path("/events/{topic}")
- @Consumes("application/cambria")
- @Produces(MediaType.APPLICATION_JSON)
- public String send(@PathParam("topic") String topic,
- String data) {
- logger.info("Send: topic={}", topic);
- return Topic.createOrGet(topic).post(data);
- }
-
- /**
- * Send messages from the simulator to the caller.
- */
- @GET
- @Path("/events/{topic}/{group}/{id}")
- @Consumes(MediaType.TEXT_PLAIN)
- @Produces(MediaType.APPLICATION_JSON)
- public String receive(@PathParam("topic") String topic,
- @PathParam("group") String group,
- @PathParam("id") String id,
- @QueryParam("timeout") long timeout,
- @QueryParam("limit") int limit)
- throws InterruptedException {
-
- logger.info("Receive: topic={}, group={}, id={}, timeout={}, limit={}",
- topic, group, id, timeout, limit);
- return Topic.createOrGet(topic).get(group, timeout, limit);
- }
-}
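For clarity, an illustrative payload in the framing that 'Topic.post()' parses above: each JSON message is preceded by three dot-separated integers, the second of which is the message length (the first and third values here are made up).

    String msg1 = "{\"key\":\"abc\"}";
    String msg2 = "{\"key\":\"xyz\"}";

    // produces: 0.13.1{"key":"abc"}0.13.1{"key":"xyz"}
    String body = "0." + msg1.length() + ".1" + msg1
                + "0." + msg2.length() + ".1" + msg2;

    // POSTed to /events/{topic} with Content-Type "application/cambria",
    // this should yield a response of the form {"count": 2, "serverTimeMs": ...}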
diff --git a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/TargetLockWrapper.java b/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/TargetLockWrapper.java
deleted file mode 100644
index ce9f39e0..00000000
--- a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/TargetLockWrapper.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpooltest;
-
-import java.io.Serializable;
-
-/**
- * This interface provides a common way of accessing the various 'TargetLock'
- * classes. There is a separate copy of the 'TargetLock' class for each
- * adapter, and this wrapper gives them a common interface.
- */
-public interface TargetLockWrapper extends Serializable {
- /**
- * There is a separate copy of 'TargetLock.State' for each adapter --
- * the 'TargetLockWrapper.getState()' method maps these into a common
- * 'TargetLockWrapper.State' enumeration.
- */
- public enum State {
- WAITING, ACTIVE, FREE, LOST
- }
-
- /**
- * This calls the 'TargetLock.free()' method.
- *
- * @return 'true' if successful, 'false' if it was not locked, or there
- * appears to be corruption in 'LocalLocks' tables
- */
- public boolean free();
-
- /**
- * This calls the 'TargetLock.isActive()' method.
- *
- * @return 'true' if the lock is in the ACTIVE state, and 'false' if not
- */
- public boolean isActive();
-
- /**
- * This calls the 'TargetLock.getState()' method.
- *
- * @return the current state of the lock, as a 'TargetLockWrapper.State'
- */
- public State getState();
-
- /**
- * This calls the 'TargetLock.getOwnerKey()' method.
- *
- * @return the owner key field
- */
- public String getOwnerKey();
-
- /**
- * Return the value returned by 'TargetLock.toString()'.
- *
- * @return the value returned by 'TargetLock.toString()'
- */
- public String toString();
-
- /* ============================================================ */
-
- /**
- * This interface mimics the 'LockCallback' interface, with the
- * exception that 'TargetLockWrapper' is used as the arguments to the
- * callback methods.
- */
- public static interface Owner {
- /**
- * Callback indicates the lock was successful.
- *
- * @param lock the 'TargetLockWrapper' instance
- */
- public void lockAvailable(TargetLockWrapper lock);
-
- /**
- * Callback indicates the lock request has failed.
- *
- * @param lock the 'TargetLockWrapper' instance
- */
- public void lockUnavailable(TargetLockWrapper lock);
- }
-}
diff --git a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/Test1.java b/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/Test1.java
deleted file mode 100644
index fe525788..00000000
--- a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/Test1.java
+++ /dev/null
@@ -1,916 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpooltest;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.Assertions.catchThrowable;
-import static org.awaitility.Awaitility.await;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayOutputStream;
-import java.io.PrintStream;
-import java.io.Serializable;
-import java.text.SimpleDateFormat;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.UUID;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-import org.awaitility.Durations;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.kie.api.runtime.KieSession;
-import org.onap.policy.drools.core.DroolsRunnable;
-import org.onap.policy.drools.serverpool.BucketWrapperImpl;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class Test1 {
- private static Logger logger = LoggerFactory.getLogger(Test1.class);
-
- // indicates that Drools containers need to be initialized
- private static boolean needControllerInit = true;
-
- private static int initialCount = 0;
-
- private static int threadList(String header, boolean stackTrace) {
- logger.info("***** threadList: {} *****", header);
- Thread[] thr = new Thread[1000];
- int count = Thread.enumerate(thr);
-
- if (count > thr.length) {
- count = thr.length;
- }
- for (int i = 0; i < count; i += 1) {
- StringBuilder sb = new StringBuilder();
- sb.append(" ").append(thr[i]);
- if (stackTrace) {
- for (StackTraceElement ste : thr[i].getStackTrace()) {
- sb.append("\n ").append(ste);
- }
- }
- logger.info(sb.toString());
- }
- logger.info("***** end threadList: {}, count = {} *****", header, count);
- return count;
- }
-
- /**
- * Set up environment prior to running tests.
- */
- @BeforeClass
- public static void init() throws Exception {
- initialCount = threadList("BeforeClass", false);
-
- // create 6 adapters, corresponding to 6 'Server' instances
- Adapter.ensureInit();
-
- // make sure initialization has completed
- long endTime = System.currentTimeMillis() + 60000L;
- for (Adapter adapter : Adapter.adapters) {
- assertTrue(adapter.toString() + ": Bucket assignments incomplete",
- adapter.waitForInit(endTime));
- }
- }
-
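- /**
- * Check whether the number of live threads has dropped back to the
- * expected count.
- *
- * @return 'true' if the current thread count matches 'initialCount'
- */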
- public static boolean verifyComplete() {
- return Thread.enumerate(new Thread[initialCount + 1]) == initialCount;
- }
-
- /**
- * Clean up after tests have finished.
- */
- @AfterClass
- public static void finish() throws InterruptedException {
- threadList("AfterClass", false);
- if (needControllerInit) {
- return;
- }
- // shut down Server Pools and DMAAP Simulator
- Adapter.ensureShutdown();
-
- // updates for persistence may still be in progress -- wait (up to
- // one minute) for the remaining threads to wind down
- threadList("AfterEnsureShutdown", false);
-
- try {
- initialCount = initialCount + 1; // one for await thread
- await().atMost(Durations.ONE_MINUTE)
- .with().pollInterval(Durations.ONE_SECOND)
- .until(() -> verifyComplete());
- } finally {
- threadList("AfterSleep", true);
- }
-
- // look at KieSession objects
- for (Adapter adapter : Adapter.adapters) {
- StringBuilder sb = new StringBuilder();
- sb.append(adapter.toString())
- .append(": ")
- .append(adapter.getKieSession().getObjects().size())
- .append(" objects");
- for (Object o : adapter.getKieSession().getObjects()) {
- sb.append("\n ").append(o);
- }
- LinkedBlockingQueue<String> lbq = adapter.notificationQueue();
- if (!lbq.isEmpty()) {
- sb.append("\n")
- .append(adapter.toString())
- .append(": ")
- .append(lbq.size())
- .append(" queued entries");
- for (String string : lbq) {
- sb.append("\n ").append(string);
- }
- }
- logger.info(sb.toString());
- }
-
- // this was used during test debugging to verify that no adjuncts
- // were created on the 'null' host -- there shouldn't be any
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- PrintStream out = new PrintStream(bos, true);
- new BucketWrapperImpl.Static().dumpAdjuncts(out);
- logger.info(out.toString());
- }
-
- /**
- * Initialize all Drools controllers, if needed.
- */
- static void ensureControllersInitialized() {
- if (needControllerInit) {
- needControllerInit = false;
- for (Adapter adapter : Adapter.adapters) {
- String rval = adapter.createController();
- logger.info("{}: Got the following from PolicyController:\n{}",
- adapter, rval);
- }
- }
- }
-
- /**
- * Make sure all servers have agreed on a lead server.
- */
- @Test
- public void checkLeadServer() {
- Adapter firstAdapter = Adapter.adapters[0];
- UUID leaderUuid = firstAdapter.getLeader().getUuid();
- for (Adapter adapter : Adapter.adapters) {
- UUID uuid = adapter.getLeader().getUuid();
- assertEquals(adapter.toString(), leaderUuid, uuid);
- }
- }
-
- /**
- * Make sure all servers agree on bucket distribution.
- */
- @Test
- public void startup() throws Exception {
- Adapter firstAdapter = Adapter.adapters[0];
- BucketWrapper.Static firstBucketStatic = firstAdapter.getBucketStatic();
-
- for (Adapter adapter : Adapter.adapters) {
- BucketWrapper.Static bucketStatic = adapter.getBucketStatic();
- if (adapter == firstAdapter) {
- // make sure an owner and primary backup have been chosen
- // for each bucket (secondary backups aren't implemented yet)
- for (int i = 0; i < bucketStatic.getBucketCount(); i += 1) {
- BucketWrapper bucket = bucketStatic.getBucket(i);
- assertNotNull(bucket.getOwner());
- assertNotNull(bucket.getPrimaryBackup());
- }
- } else {
- // make sure the bucket assignments are consistent with
- // the primary backup
- for (int i = 0; i < bucketStatic.getBucketCount(); i += 1) {
- BucketWrapper firstBucket = firstBucketStatic.getBucket(i);
- BucketWrapper bucket = bucketStatic.getBucket(i);
- assertEquals(firstBucket.getOwner().getUuid(),
- bucket.getOwner().getUuid());
- assertEquals(firstBucket.getPrimaryBackup().getUuid(),
- bucket.getPrimaryBackup().getUuid());
- }
- }
- }
- }
-
- /**
- * Test 'TargetLock' operations.
- */
- @Test
- public void testTargetLock() throws InterruptedException {
- // test locks on different hosts
- lockTests(Adapter.adapters[5], Adapter.adapters[0]);
-
- // test locks on the same host
- lockTests(Adapter.adapters[2], Adapter.adapters[2]);
-
- Adapter adapter0 = Adapter.adapters[0];
- Adapter adapter5 = Adapter.adapters[5];
- String ownerKey = adapter0.findKey("owner");
- String key = adapter5.findKey("key");
- LockOwner owner = new LockOwner();
-
- // some exceptions
- Throwable thrown = catchThrowable(() -> {
- adapter0.newTargetLock(null, ownerKey, owner);
- });
- assertThat(thrown).isInstanceOf(IllegalArgumentException.class)
- .hasNoCause()
- .hasMessageContaining("TargetLock: 'key' can't be null");
-
- thrown = catchThrowable(() -> {
- adapter0.newTargetLock(key, null, owner);
- });
- assertThat(thrown).isInstanceOf(IllegalArgumentException.class)
- .hasNoCause()
- .hasMessageContaining("TargetLock: 'ownerKey' can't be null");
-
- thrown = catchThrowable(() -> {
- adapter5.newTargetLock(key, ownerKey, owner);
- });
- assertThat(thrown).isInstanceOf(IllegalArgumentException.class)
- .hasNoCause()
- .hasMessageContaining("not currently assigned to this server");
-
- thrown = catchThrowable(() -> {
- adapter0.newTargetLock(key, ownerKey, null);
- });
- assertThat(thrown).isInstanceOf(IllegalArgumentException.class)
- .hasNoCause()
- .hasMessageContaining("TargetLock: 'owner' can't be null");
- }
-
- /**
- * Run some 'TargetLock' tests.
- *
- * @param keyAdapter this is the adapter for the key, which determines
- * where the server-side data will reside
- * @param ownerAdapter this is the adapter associated with the requestor
- */
- void lockTests(Adapter keyAdapter, Adapter ownerAdapter) throws InterruptedException {
- // choose 'key' and 'ownerKey' values that map to buckets owned
- // by their respective adapters
- String key = keyAdapter.findKey("key");
- String ownerKey = ownerAdapter.findKey("owner");
-
- // this receives and queues callback notifications
- LockOwner owner = new LockOwner();
-
- // first lock -- should succeed
- TargetLockWrapper tl1 = ownerAdapter.newTargetLock(key, ownerKey, owner);
- assertLockAvailable(owner, tl1);
- //assertArrayEquals(new Object[] {"lockAvailable", tl1},
- // owner.poll(5, TimeUnit.SECONDS));
- assertNull(owner.poll());
- assertTrue(tl1.isActive());
- assertEquals(TargetLockWrapper.State.ACTIVE, tl1.getState());
- assertEquals(ownerKey, tl1.getOwnerKey());
-
- // second lock -- should fail (lock in use)
- TargetLockWrapper tl2 =
- ownerAdapter.newTargetLock(key, ownerKey, owner, false);
- assertLockUnavailable(owner, tl2);
-
- assertNull(owner.poll());
- assertFalse(tl2.isActive());
- assertEquals(TargetLockWrapper.State.FREE, tl2.getState());
- assertEquals(ownerKey, tl2.getOwnerKey());
-
- // third and fourth locks -- should wait
- TargetLockWrapper tl3 = ownerAdapter.newTargetLock(key, ownerKey, owner);
- TargetLockWrapper tl4 = ownerAdapter.newTargetLock(key, ownerKey, owner);
- assertNull(owner.poll(5, TimeUnit.SECONDS)); // nothing immediately
- assertFalse(tl3.isActive());
- assertFalse(tl4.isActive());
- assertEquals(TargetLockWrapper.State.WAITING, tl3.getState());
- assertEquals(TargetLockWrapper.State.WAITING, tl4.getState());
-
- // free third lock before ever getting a callback
- assertTrue(tl3.free());
- assertFalse(tl3.isActive());
- assertEquals(TargetLockWrapper.State.FREE, tl3.getState());
- assertFalse(tl3.free());
-
- // free first lock
- assertTrue(tl1.free());
- assertFalse(tl1.isActive());
- assertEquals(TargetLockWrapper.State.FREE, tl1.getState());
- assertFalse(tl1.free()); // already free
-
- // fourth lock should be active now (or soon)
- assertLockAvailable(owner, tl4);
- assertNull(owner.poll());
- assertTrue(tl4.isActive());
- assertEquals(TargetLockWrapper.State.ACTIVE, tl4.getState());
-
- // free fourth lock
- assertTrue(tl4.free());
- assertFalse(tl4.isActive());
- assertEquals(TargetLockWrapper.State.FREE, tl4.getState());
- }
-
- /**
- * Test sending of intra-server and inter-server messages.
- */
- @Test
- public void topicSendTests() throws InterruptedException {
- ensureControllersInitialized();
-
- // sender and receiver are the same
- topicSendTest(Adapter.adapters[2], Adapter.adapters[2], false);
-
- // sender and receiver are different
- topicSendTest(Adapter.adapters[5], Adapter.adapters[4], false);
- }
-
- /**
- * Send a message from 'sender' to 'receiver' -- the message is delivered
- * as an incoming 'TopicListener' event, is processed by
- * 'FeatureServerPool.beforeOffer' (PolicyControllerFeatureApi), which
- * will route it based upon keyword. At the destination end, it should
- * be converted to an instance of 'TestDroolsObject', and inserted into
- * the Drools session. The associated Drools rule should fire, and the
- * message is placed in the notification queue. The message is then
- * retracted, unless the string 'SAVE' appears within the message, which
- * is the case if the 'save' parameter is set.
- *
- * @param sender the adapter associated with the sending end
- * @param receiver the adapter associated with the receiving end
- * @param save if 'true' the message is not retracted, if 'false' it is
- * retracted
- * @return the message that was sent
- */
- String topicSendTest(Adapter sender, Adapter receiver, boolean save) throws InterruptedException {
- // generate base message -- include 'SAVE' in the message if 'save' is set
- String message = "From " + sender.toString() + " to "
- + receiver.toString();
- if (save) {
- message += " (SAVE)";
- }
- message += ": " + UUID.randomUUID().toString() + ".";
-
- // add a numeric suffix to the message, such that it will be routed
- // to 'receiver'
- message = receiver.findKey(message);
-
- // send the message
- sender.sendEvent(message);
-
- // verify that it has been received
- assertEquals(message,
- receiver.notificationQueue().poll(60, TimeUnit.SECONDS));
- return message;
- }
-
- /**
- * Return the Adapter associated with the current lead server.
- *
- * @return the Adapter associated with the current lead server
- * (an 'AssertionError' is thrown if there is no leader)
- */
- private Adapter getLeader() {
- for (Adapter adapter : Adapter.adapters) {
- if (adapter.getLeader() == adapter.getServerStatic().getThisServer()) {
- // we have located the leader
- return adapter;
- }
- }
- throw new AssertionError();
- }
-
- /**
- * Test migration of sessions from one server to another.
- */
- @Test
- public void sessionMigrationTest() throws InterruptedException {
- ensureControllersInitialized();
-
- // select adapters for roles
- Adapter sender = Adapter.adapters[1];
- Adapter receiver = Adapter.adapters[3];
- Adapter newReceiver = Adapter.adapters[5];
-
- // determine current leader
- Adapter leader = getLeader();
-
- // send message from 'sender' to 'receiver', and save it
- String message = topicSendTest(sender, receiver, true);
-
- // verify where the bucket is and is not
- assertTrue(receiver.getBucketStatic().isKeyOnThisServer(message));
- assertFalse(newReceiver.getBucketStatic().isKeyOnThisServer(message));
-
- // move to the new host
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- PrintStream out = new PrintStream(bos, true);
- leader.getBucketStatic().moveBucket(
- out, receiver.getBucketStatic().bucketNumber(message),
- newReceiver.getServerStatic().getThisServer().getUuid().toString());
- logger.info(bos.toString());
-
- // poll up to 5 minutes for the bucket to be updated
- TestDroolsObject matchingObject = new TestDroolsObject(message);
- await().atMost(5, TimeUnit.MINUTES)
- .with().pollInterval(Durations.ONE_SECOND)
- .until(() -> (new ArrayList<Object>(newReceiver.getKieSession().getObjects())
- .contains(matchingObject)));
-
- // verify where the bucket is and is not
- assertFalse(receiver.getBucketStatic().isKeyOnThisServer(message));
- assertTrue(newReceiver.getBucketStatic().isKeyOnThisServer(message));
- }
-
- /**
- * Test migration of locks from one server to another.
- */
- @Test
- public void lockMigrationTest() throws InterruptedException {
- ensureControllersInitialized();
-
- // select adapters for roles -- '*Server' refers to the 'key' end,
- // and '*Client' refers to the 'ownerKey' end
- final Adapter oldServer = Adapter.adapters[0];
- final Adapter newServer = Adapter.adapters[1];
- final Adapter oldClient = Adapter.adapters[2];
- final Adapter newClient = Adapter.adapters[3];
-
- // determine the current leader
- final Adapter leader = getLeader();
-
- // choose 'key' and 'ownerKey' values associated with
- // 'oldServer' and 'oldClient', respectively
- String key = oldServer.findKey("key");
- String ownerKey = oldClient.findKey("owner");
- LockOwner owner = new LockOwner();
-
- // allocate lock 1
- TargetLockWrapper tl1 = oldClient.newTargetLock(key, ownerKey, owner);
- assertLockAvailable(owner, tl1);
-
- // allocate a lock 2, which should be in the 'WAITING' state
- TargetLockWrapper tl2 = oldClient.newTargetLock(key, ownerKey, owner);
- assertNull(owner.poll(5, TimeUnit.SECONDS)); // nothing immediately
-
- // verify lock states
- assertEquals(TargetLockWrapper.State.ACTIVE, tl1.getState());
- assertEquals(TargetLockWrapper.State.WAITING, tl2.getState());
-
- // verify key buckets (before)
- assertTrue(oldServer.getBucketStatic().isKeyOnThisServer(key));
- assertFalse(newServer.getBucketStatic().isKeyOnThisServer(key));
-
- // move key buckets to new host (server side)
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- PrintStream out = new PrintStream(bos, true);
- leader.getBucketStatic().moveBucket(
- out, oldServer.getBucketStatic().bucketNumber(key),
- newServer.getServerStatic().getThisServer().getUuid().toString());
- logger.info(bos.toString());
-
- logger.debug("lock migration test - before: new isKeyOnThisServer: {}, "
- + "old isKeyOnThisServer: {}, time: {}",
- newServer.getBucketStatic().isKeyOnThisServer(key),
- oldServer.getBucketStatic().isKeyOnThisServer(key),
- new SimpleDateFormat("yyyy-MM-dd kk:mm:ss").format(new Date()));
-
- await().atMost(10000L, TimeUnit.MILLISECONDS).until(() ->
- newServer.getBucketStatic().isKeyOnThisServer(key)
- && !oldServer.getBucketStatic().isKeyOnThisServer(key));
-
- logger.debug("lock migration test - after : new isKeyOnThisServer: {}, "
- + "old isKeyOnThisServer: {}, time: {}",
- newServer.getBucketStatic().isKeyOnThisServer(key),
- oldServer.getBucketStatic().isKeyOnThisServer(key),
- new SimpleDateFormat("yyyy-MM-dd kk:mm:ss").format(new Date()));
-
- // verify key buckets (after)
- assertFalse(oldServer.getBucketStatic().isKeyOnThisServer(key));
- assertTrue(newServer.getBucketStatic().isKeyOnThisServer(key));
-
- // we should be able to free lock1 now, and lock2 should go active,
- // indicating that the server side is still working
- assertTrue(tl1.free());
-
- assertLockAvailable(owner, tl2);
- assertEquals(TargetLockWrapper.State.ACTIVE, tl2.getState());
-
- // create a third lock
- TargetLockWrapper tl3 = oldClient.newTargetLock(key, ownerKey, owner);
- assertNull(owner.poll(5, TimeUnit.SECONDS)); // nothing immediately
- assertEquals(TargetLockWrapper.State.WAITING, tl3.getState());
-
- // insert active objects in Drools session, which is about to be moved
- // (if we don't do this, the client objects won't be relocated)
- oldClient.getKieSession().insert(new KeywordWrapper(ownerKey, "lmt.owner", owner));
- oldClient.getKieSession().insert(new KeywordWrapper(ownerKey, "lmt.tl2", tl2));
- oldClient.getKieSession().insert(new KeywordWrapper(ownerKey, "lmt.tl3", tl3));
-
- // dumping out some state information as part of debugging --
- // I see no reason to remove it now
- {
- bos = new ByteArrayOutputStream();
- out = new PrintStream(bos, true);
- out.println("BEFORE: tl2=" + tl2 + "\ntl3=" + tl3);
- oldClient.dumpLocks(out, true);
- oldClient.getBucketStatic().dumpAdjuncts(out);
- logger.debug(bos.toString());
- }
-
- // don't need these any more -- we will get them back on the new host
- tl1 = tl2 = tl3 = null;
- owner = null;
-
- // verify ownerKey buckets (before)
- assertTrue(oldClient.getBucketStatic().isKeyOnThisServer(ownerKey));
- assertFalse(newClient.getBucketStatic().isKeyOnThisServer(ownerKey));
-
- // move ownerKey buckets to new host (client side)
- bos = new ByteArrayOutputStream();
- out = new PrintStream(bos, true);
- leader.getBucketStatic().moveBucket(
- out, oldClient.getBucketStatic().bucketNumber(ownerKey),
- newClient.getServerStatic().getThisServer().getUuid().toString());
- logger.info(bos.toString());
-
- logger.debug("lock migration test2 - before: new isKeyOnThisServer: {}, "
- + "old isKeyOnThisServer: {}, time: {}",
- newClient.getBucketStatic().isKeyOnThisServer(ownerKey),
- oldClient.getBucketStatic().isKeyOnThisServer(ownerKey),
- new SimpleDateFormat("yyyy-MM-dd kk:mm:ss").format(new Date()));
-
- await().atMost(Durations.FIVE_SECONDS)
- .with().pollInterval(Durations.ONE_HUNDRED_MILLISECONDS)
- .until(() -> newClient.getBucketStatic().isKeyOnThisServer(ownerKey)
- && !oldClient.getBucketStatic().isKeyOnThisServer(ownerKey));
-
- logger.debug("lock migration test2 - before: new isKeyOnThisServer: {}, "
- + "old isKeyOnThisServer: {}, time: {}",
- newClient.getBucketStatic().isKeyOnThisServer(ownerKey),
- oldClient.getBucketStatic().isKeyOnThisServer(ownerKey),
- new SimpleDateFormat("yyyy-MM-dd kk:mm:ss").format(new Date()));
-
- // verify ownerKey buckets (after)
- assertFalse(oldClient.getBucketStatic().isKeyOnThisServer(ownerKey));
- assertTrue(newClient.getBucketStatic().isKeyOnThisServer(ownerKey));
-
- // now, we need to locate 'tl2', 'tl3', and 'owner' in Drools memory
- await().atMost(Durations.FIVE_SECONDS)
- .with().pollInterval(Durations.ONE_SECOND)
- .until(() -> newClient.getKieSession() != null);
- KieSession kieSession = newClient.getKieSession();
- for (Object obj : new ArrayList<Object>(kieSession.getObjects())) {
- if (obj instanceof KeywordWrapper) {
- KeywordWrapper kw = (KeywordWrapper) obj;
-
- if ("lmt.owner".equals(kw.id)) {
- owner = kw.getObject(LockOwner.class);
- } else if ("lmt.tl2".equals(kw.id)) {
- tl2 = kw.getObject(TargetLockWrapper.class);
- } else if ("lmt.tl3".equals(kw.id)) {
- tl3 = kw.getObject(TargetLockWrapper.class);
- }
- kieSession.delete(kieSession.getFactHandle(obj));
- }
- }
-
- // make sure we found everything
- assertNotNull(tl2);
- assertNotNull(tl3);
- assertNotNull(owner);
- assertFalse(newClient.isForeign(tl2, tl3, owner));
-
- // verify the states of 'tl2' and 'tl3'
- assertEquals(TargetLockWrapper.State.ACTIVE, tl2.getState());
- assertEquals(TargetLockWrapper.State.WAITING, tl3.getState());
-
- // dumping out some state information as part of debugging --
- // I see no reason to remove it now
- {
- bos = new ByteArrayOutputStream();
- out = new PrintStream(bos, true);
- out.println("AFTER: tl2=" + tl2 + "\ntl3=" + tl3);
- newClient.dumpLocks(out, true);
- newClient.getBucketStatic().dumpAdjuncts(out);
- logger.debug(bos.toString());
- }
-
- // now, we should be able to free 'tl2', and 'tl3' should go active
- assertNull(owner.poll(5, TimeUnit.SECONDS)); // nothing immediately
- assertTrue(tl2.free());
-
- assertLockAvailable(owner, tl3);
- assertEquals(TargetLockWrapper.State.ACTIVE, tl3.getState());
- assertTrue(tl3.free());
- }
-
- private void assertLockAvailable(LockOwner owner, TargetLockWrapper tl) {
- AtomicReference<Object[]> objArray = new AtomicReference<>();
- await().atMost(300000, TimeUnit.MILLISECONDS).until(() -> {
- objArray.set(owner.poll(5, TimeUnit.SECONDS));
- return objArray.get() != null;
- });
- assertArrayEquals(new Object[] {"lockAvailable", tl}, objArray.get());
- }
-
- private void assertLockUnavailable(LockOwner owner, TargetLockWrapper tl) {
- AtomicReference<Object[]> objArray = new AtomicReference<>();
- await().atMost(300000, TimeUnit.MILLISECONDS).until(() -> {
- objArray.set(owner.poll(5, TimeUnit.SECONDS));
- return objArray.get() != null;
- });
- assertArrayEquals(new Object[] {"lockUnavailable", tl}, objArray.get());
- }
-
- /**
- * Test cleanup of locks that have been abandoned.
- */
- @Test
- public void abandonedLocks() throws InterruptedException {
- // choose adapters
- Adapter keyAdapter = Adapter.adapters[3];
- Adapter ownerAdapter = Adapter.adapters[4];
-
- // generate compatible keys
- String key = keyAdapter.findKey("abandonedLocks.key");
- String ownerKey = ownerAdapter.findKey("abandonedLocks.owner");
-
- // receiver of callback notifications
- LockOwner owner = new LockOwner();
-
- // first lock -- should succeed
- TargetLockWrapper tl1 = ownerAdapter.newTargetLock(key, ownerKey, owner);
- //assertLockAvailable(owner, tl1);
- assertArrayEquals(new Object[] {"lockAvailable", tl1},
- owner.poll(5, TimeUnit.SECONDS));
-
- // second lock -- should wait
- final TargetLockWrapper tl2 = ownerAdapter.newTargetLock(key, ownerKey, owner);
- assertNull(owner.poll(5, TimeUnit.SECONDS)); // nothing immediately
-
- // abandon first lock, and do a GC cycle -- tl2 should go active
- tl1 = null;
- System.gc();
- //assertLockAvailable(owner, tl2);
- assertArrayEquals(new Object[] {"lockAvailable", tl2},
- owner.poll(5, TimeUnit.SECONDS));
- assertTrue(tl2.isActive());
- assertEquals(TargetLockWrapper.State.ACTIVE, tl2.getState());
-
- // free tl2
- assertTrue(tl2.free());
- assertFalse(tl2.isActive());
- assertEquals(TargetLockWrapper.State.FREE, tl2.getState());
- }
-
- /**
- * Test locks within Drools sessions.
- */
- @Test
- public void locksWithinDrools() throws InterruptedException {
- ensureControllersInitialized();
-
- // choose adapters
- Adapter keyAdapter = Adapter.adapters[3];
- Adapter ownerAdapter = Adapter.adapters[4];
-
- // generate compatible keys
- final String key = keyAdapter.findKey("locksWithinDrools.key");
- final String ownerKey = ownerAdapter.findKey("locksWithinDrools.owner");
-
- // need a 'LockOwner' variant that also records the thread running the callback
- final LockOwner owner = new LockOwner() {
- private static final long serialVersionUID = 1L;
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void lockAvailable(TargetLockWrapper lock) {
- // insert notification in 'LinkedBlockingQueue'
- add(new Object[] {"lockAvailable", lock, Thread.currentThread()});
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void lockUnavailable(TargetLockWrapper lock) {
- // insert notification in 'LinkedBlockingQueue'
- add(new Object[] {"lockUnavailable", lock, Thread.currentThread()});
- }
- };
-
- // generate first lock outside of Drools
- final TargetLockWrapper tl1 = ownerAdapter.newTargetLock(key, ownerKey, owner);
- Object[] response = owner.poll(5, TimeUnit.SECONDS);
- assertNotNull(response);
- assertEquals(3, response.length);
- assertEquals("lockAvailable", response[0]);
- assertEquals(tl1, response[1]);
-
- // now, generate one from within Drools
- ownerAdapter.getKieSession().insert(new DroolsRunnable() {
- @Override
- public void run() {
- // create a second lock -- the request should end up waiting
- TargetLockWrapper tl2 =
- ownerAdapter.newTargetLock(key, ownerKey, owner);
- owner.add(new Object[] {"tl2Data", tl2, Thread.currentThread()});
- }
- });
-
- // fetch data from Drools thread
- response = owner.poll(5, TimeUnit.SECONDS);
- assertNotNull(response);
- assertEquals(3, response.length);
- assertEquals("tl2Data", response[0]);
-
- TargetLockWrapper tl2 = null;
- Thread droolsThread = null;
-
- if (response[1] instanceof TargetLockWrapper) {
- tl2 = (TargetLockWrapper) response[1];
- }
- if (response[2] instanceof Thread) {
- droolsThread = (Thread) response[2];
- }
-
- assertNotNull(tl2);
- assertNotNull(droolsThread);
-
- // tl2 should still be waiting
- assertNull(owner.poll(5, TimeUnit.SECONDS));
- assertFalse(tl2.isActive());
- assertEquals(TargetLockWrapper.State.WAITING, tl2.getState());
-
- // free tl1
- assertTrue(tl1.free());
-
- // verify that 'tl2' is now available,
- // and the callback ran in the Drools thread
- assertArrayEquals(new Object[] {"lockAvailable", tl2, droolsThread},
- owner.poll(5, TimeUnit.SECONDS));
- assertTrue(tl2.isActive());
- assertEquals(TargetLockWrapper.State.ACTIVE, tl2.getState());
-
- // free tl2
- assertTrue(tl2.free());
- }
-
- /**
- * Test insertion of objects into Drools memory.
- */
- @Test
- public void insertDrools() throws InterruptedException {
- Adapter adapter1 = Adapter.adapters[1];
- final Adapter adapter2 = Adapter.adapters[2];
-
- // check whether we can insert objects locally (adapter1 -> adapter1)
- String key1 = adapter1.findKey("insertDrools1-");
- adapter1.insertDrools(new KeywordWrapper(key1, "insertDroolsLocal", null));
-
- await().atMost(Durations.TEN_SECONDS)
- .with().pollInterval(Durations.ONE_SECOND)
- .until(() -> adapter1.getKieSession() != null);
-
- KieSession kieSession;
- boolean found = false;
- kieSession = adapter1.getKieSession();
- for (Object obj : new ArrayList<Object>(kieSession.getObjects())) {
- if (obj instanceof KeywordWrapper
- && "insertDroolsLocal".equals(((KeywordWrapper) obj).id)) {
- found = true;
- kieSession.delete(kieSession.getFactHandle(obj));
- break;
- }
- }
- assertTrue(found);
-
- // check whether we can insert objects remotely (adapter1 -> adapter2)
- String key2 = adapter2.findKey("insertDrools2-");
- adapter1.insertDrools(new KeywordWrapper(key2, "insertDroolsRemote", null));
-
- // wait for the remote Drools session to become available (ideally,
- // we would wait for the inserted object itself to arrive)
- await().atMost(Durations.FIVE_SECONDS)
- .with().pollInterval(Durations.ONE_HUNDRED_MILLISECONDS)
- .until(() -> adapter2.getKieSession() != null);
-
- found = false;
- kieSession = adapter2.getKieSession();
- for (Object obj : new ArrayList<Object>(kieSession.getObjects())) {
- if (obj instanceof KeywordWrapper
- && "insertDroolsRemote".equals(((KeywordWrapper) obj).id)) {
- found = true;
- kieSession.delete(kieSession.getFactHandle(obj));
- break;
- }
- }
- assertTrue(found);
- }
-
- /* ============================================================ */
-
- /**
- * This class implements the 'TargetLockWrapper.Owner' callback interface
- * (the test analog of 'LockCallback'), and makes callback notifications
- * available via a 'LinkedBlockingQueue'.
- */
- public static class LockOwner extends LinkedBlockingQueue<Object[]>
- implements TargetLockWrapper.Owner, Serializable {
-
- private static final long serialVersionUID = 1L;
-
- /**
- * Constructor -- initialize the 'LinkedBlockingQueue'.
- */
- public LockOwner() {
- super();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void lockAvailable(TargetLockWrapper lock) {
- // insert notification in 'LinkedBlockingQueue'
- add(new Object[] {"lockAvailable", lock});
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void lockUnavailable(TargetLockWrapper lock) {
- // insert notification in 'LinkedBlockingQueue'
- add(new Object[] {"lockUnavailable", lock});
- }
- }
-
- /* ============================================================ */
-
- /**
- * This class is used to insert objects in Drools memory to support
- * testing.
- */
- public static class KeywordWrapper implements Serializable {
- private static final long serialVersionUID = 1L;
-
- // this is the keyword, which determines the associated bucket,
- // which then determines when this object is migrated
- public String key;
-
- // this is an identifier, which can be used to select objects
- // on the receiving end
- public String id;
-
- // this is the object being wrapped
- public Serializable obj;
-
- /**
- * Constructor -- initialize fields.
- *
- * @param key keyword, which determines the associated bucket
- * @param id string identifier, used to match objects from the sending
- * to the receiving end
- * @param obj the object being wrapped
- */
- public KeywordWrapper(String key, String id, Serializable obj) {
- this.key = key;
- this.id = id;
- this.obj = obj;
- }
-
- /**
- * This is used to extract objects on the receiving end. If the class
- * matches, we get the expected object. If the class does not match,
- * we get 'null', and the test should fail.
- *
- * @param clazz the expected class of the 'obj' field
- * @return the object (if 'clazz' matches), 'null' if it does not
- */
- public <T> T getObject(Class<T> clazz) {
- return clazz.isInstance(obj) ? clazz.cast(obj) : null;
- }
- }
-}
diff --git a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/TestDroolsObject.java b/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/TestDroolsObject.java
deleted file mode 100644
index a937680f..00000000
--- a/feature-server-pool/src/test/java/org/onap/policy/drools/serverpooltest/TestDroolsObject.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-
-package org.onap.policy.drools.serverpooltest;
-
-import java.io.Serializable;
-import lombok.EqualsAndHashCode;
-import lombok.Getter;
-import lombok.Setter;
-import lombok.ToString;
-
-/**
- * Instances of this class can be inserted into a Drools session, and used
- * to test things like message routing and bucket migration.
- */
-@Getter
-@Setter
-@ToString
-@EqualsAndHashCode
-public class TestDroolsObject implements Serializable {
- private static final long serialVersionUID = 1L;
-
- // determines the bucket number
- private String key;
-
- /**
- * Constructor - no key specified.
- */
- public TestDroolsObject() {
- this.key = null;
- }
-
- /**
- * Constructor - initialize the key.
- *
- * @param key key that is hashed to determine the bucket number
- */
- public TestDroolsObject(String key) {
- this.key = key;
- }
-}
diff --git a/feature-server-pool/src/test/resources/TestController-controller.properties b/feature-server-pool/src/test/resources/TestController-controller.properties
deleted file mode 100644
index 7d645588..00000000
--- a/feature-server-pool/src/test/resources/TestController-controller.properties
+++ /dev/null
@@ -1,13 +0,0 @@
-controller.name=TestController
-
-persistence.type=auto
-
-rules.artifactId=drools-artifact1
-rules.groupId=org.onap.policy.drools-pdp
-rules.version=1.0.0
-
-ueb.source.topics=JUNIT-TEST-TOPIC
-ueb.source.topics.JUNIT-TEST-TOPIC.servers=127.0.0.1
-ueb.source.topics.JUNIT-TEST-TOPIC.events=org.onap.policy.drools.serverpooltest.TestDroolsObject
-ueb.source.topics.JUNIT-TEST-TOPIC.events.org.onap.policy.drools.serverpooltest.TestDroolsObject.filter=
-[?($.key =~ /.*/)]
\ No newline at end of file
diff --git a/feature-server-pool/src/test/resources/drools-artifact-1.1/pom.xml b/feature-server-pool/src/test/resources/drools-artifact-1.1/pom.xml
deleted file mode 100644
index e747e42b..00000000
--- a/feature-server-pool/src/test/resources/drools-artifact-1.1/pom.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- ============LICENSE_START=======================================================
- feature-server-pool
- ================================================================================
- Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- ================================================================================
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ============LICENSE_END=========================================================
- -->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
- <modelVersion>4.0.0</modelVersion>
-
- <groupId>org.onap.policy.drools-pdp</groupId>
- <artifactId>drools-artifact1</artifactId>
- <version>1.0.0</version>
- <description>supports Junit tests in feature-server-pool</description>
-
-</project>
diff --git a/feature-server-pool/src/test/resources/drools-artifact-1.1/src/main/resources/META-INF/kmodule.xml b/feature-server-pool/src/test/resources/drools-artifact-1.1/src/main/resources/META-INF/kmodule.xml
deleted file mode 100644
index 1e447eb3..00000000
--- a/feature-server-pool/src/test/resources/drools-artifact-1.1/src/main/resources/META-INF/kmodule.xml
+++ /dev/null
@@ -1,25 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- ============LICENSE_START=======================================================
- feature-server-pool
- ================================================================================
- Copyright (C) 2019 AT&T Intellectual Property. All rights reserved.
- ================================================================================
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ============LICENSE_END=========================================================
- -->
-<kmodule xmlns="http://jboss.org/kie/6.0.0/kmodule">
- <kbase name="rules">
- <ksession name="session1"/>
- </kbase>
-</kmodule>
diff --git a/feature-server-pool/src/test/resources/drools-artifact-1.1/src/main/resources/rules.drl b/feature-server-pool/src/test/resources/drools-artifact-1.1/src/main/resources/rules.drl
deleted file mode 100644
index 9591dee0..00000000
--- a/feature-server-pool/src/test/resources/drools-artifact-1.1/src/main/resources/rules.drl
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * ============LICENSE_START=======================================================
- * feature-server-pool
- * ================================================================================
- * Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- * ================================================================================
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * ============LICENSE_END=========================================================
- */
-package org.onap.policy.drools.serverpool.test;
-
-import org.onap.policy.drools.serverpool.AdapterImpl;
-import org.onap.policy.drools.serverpooltest.Adapter;
-import org.onap.policy.drools.serverpooltest.TestDroolsObject;
-
-rule "Initialization"
- when
- then
- {
- System.out.println("Initialization rule running");
- }
-end
-
-rule "Receive 'TestDroolsObject'"
- when
- $object : TestDroolsObject()
- then
- {
- Adapter adapter = AdapterImpl.getAdapter();
- String key = $object.getKey();
- System.out.println(adapter.toString()
- + ": Received TestDroolsObject - key=" + key);
- adapter.notificationQueue().add(key);
- if (!key.contains("SAVE"))
- {
- // 'SAVE' keyword identifies messages that shouldn't be retracted
- retract($object);
- }
- }
-end
-
diff --git a/feature-server-pool/src/test/resources/feature-server-pool-test.properties b/feature-server-pool/src/test/resources/feature-server-pool-test.properties
deleted file mode 100644
index 70fed3ff..00000000
--- a/feature-server-pool/src/test/resources/feature-server-pool-test.properties
+++ /dev/null
@@ -1,142 +0,0 @@
-###
-# ============LICENSE_START=======================================================
-# feature-server-pool
-# ================================================================================
-# Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END=========================================================
-###
-
-# The following properties control the IP address and port that a given
-# server binds to. The default will bind to all interfaces on the host,
-# and choose a port number at random.
-
-server.pool.server.ipAddress=127.0.0.1
-server.pool.server.port=20000
-
-# The following properties determine whether HTTPS is used -- note that HTTPS
-# also requires that the 'javax.net.ssl.*' parameters in 'system.properties' be
-# specified, and the key store and trust store be configured, as appropriate.
-server.pool.server.https=
-server.pool.server.selfSignedCerts=false
-
-# The IP address and port that servers on the geo-redundant site
-# should use to connect to servers on this site.
-server.pool.server.site.ip=
-server.pool.server.site.port=
-
-# A comma-separated list of host names -- if an entry is found that refers
-# to an HTTP/HTTPS destination IP address, the host name will be used as the
-# destination, instead of the IP address.
-server.pool.server.hostlist=
-
-# The servers send 'pings' to each other once per main loop cycle. They
-# also measure the gap between 'pings' from each server, and calculate
-# an allowed time gap based upon this. 'server.pool.server.allowedGap' is the initial
-# allowed gap prior to receiving any pings (default=30 seconds), and
-# 'server.pool.server.adaptiveGapAdjust' is a value that is added to the calculated
-# gap "just in case" (default=5 seconds)
-
-server.pool.server.allowedGap=30000
-server.pool.server.adaptiveGapAdjust=5000
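-
-# (Illustrative arithmetic only: if the gap calculated from observed pings
-# works out to roughly 1000ms, adding the default 'adaptiveGapAdjust' above
-# yields an allowed gap of roughly 1000 + 5000 = 6000ms.)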
-
-# 'connectTimeout' and 'readTimeout' affect the client end of a REST
-# connection (default=10 seconds each)
-
-server.pool.server.connectTimeout=10000
-server.pool.server.readTimeout=10000
-
-# Each server has a thread pool per remote server, which is used when
-# sending HTTP REST messages -- the following parameters determine the
-# configuration.
-
-server.pool.server.threads.corePoolSize=5
-server.pool.server.threads.maximumPoolSize=10
-server.pool.server.threads.keepAliveTime=5000
-
-# The server pool members use a UEB/DMAAP topic to connect with other
-# servers in the pool. The following set of parameters is passed to
-# the CambriaClient library, and are used in setting up the consumer and
-# publisher ends of the connection. 'discovery.servers' and 'discovery.topic'
-# are the minimum values that need to be specified. The last parameter in
-# this set, 'discovery.publisherLoopCycleTime', isn't passed to the
-# CambriaClient library; instead, it controls how frequently the 'ping'
-# messages are sent out on this channel. Note that only the lead server
-# keeps this channel open long-term.
-
-server.pool.discovery.servers=127.0.0.1
-server.pool.discovery.port=3904
-server.pool.discovery.topic=DISCOVERY-TOPIC
-server.pool.discovery.username=
-server.pool.discovery.password=
-server.pool.discovery.https=
-server.pool.discovery.apiKey=
-server.pool.discovery.apiSecret=
-#server.pool.discovery.publisherSocketTimeout=5000
-#server.pool.discovery.consumerSocketTimeout=70000
-server.pool.discovery.fetchTimeout=60000
-server.pool.discovery.fetchLimit=100
-server.pool.discovery.selfSignedCertificates=false
-server.pool.discovery.publisherLoopCycleTime=5000
-
-# The 'leader.*' parameters affect behavior during an election. The value of
-# 'mainLoop.cycle' determines the actual time delay. 'leader.stableIdleCycles'
-# is the required minimum number of "stable" cycles before voting can start
-# (in this case, "stable" means no servers added or failing). Once voting has
-# started, "leader.stableVotingCycles' is the minimum number of "stable"
-# cycles needed before declaring a winner (in this case, "stable" means no
-# votes changing).
-
-server.pool.leader.stableIdleCycles=5
-server.pool.leader.stableVotingCycles=5
-
-# The value of 'mainLoop.cycle' (default = 1 second) determines how frequently
-# various events occur, such as the sending of 'ping' messages, and the
-# duration of a "cycle" while voting for a lead server.
-
-server.pool.mainLoop.cycle=1000
-
-# 'keyword.path' is used when looking for "keywords" within JSON messages.
-# The first keyword located is hashed to determine which bucket to route
-# through.
-
-keyword.path=requestID,CommonHeader.RequestID,body.output.common-header.request-id,result-info.request-id:uuid
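-
-# (Illustrative example with a hypothetical event: for a JSON message such as
-#   {"requestID": "abc-123", ...}
-# the first matching path above is 'requestID', so the value "abc-123" is the
-# keyword that gets hashed to select the bucket used for routing.)
-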
-# 'keyword.<CLASS-NAME>.lookup' is used to locate "keywords" within objects.
-# The 'value' field contains a list of method calls or field names separated
-# by '.' that are used to locate the keyword
-# (e.g. 'method1().field2.method3()')
-
-keyword.java.lang.String.lookup=toString()
-keyword.org.onap.policy.m2.base.Transaction.lookup=getRequestID()
-keyword.org.onap.policy.controlloop.ControlLoopEvent.lookup=requestID
-keyword.org.onap.policy.appclcm.LcmRequestWrapper.lookup=getBody().getCommonHeader().getRequestId()
-keyword.org.onap.policy.appclcm.LcmResponseWrapper.lookup=getBody().getCommonHeader().getRequestId()
-keyword.org.onap.policy.drools.serverpool.TargetLock.lookup=getOwnerKey()
-keyword.org.onap.policy.drools.serverpooltest.TestDroolsObject.lookup=getKey()
-keyword.org.onap.policy.drools.serverpooltest.Test1$KeywordWrapper.lookup=key
-
-# The following properties affect distributed 'TargetLock' behavior.
-#
-# server.pool.lock.ttl - controls how many hops a 'TargetLock' message can take
-# server.pool.lock.audit.period - how frequently should the audit run?
-# server.pool.lock.audit.gracePeriod - how long to wait after bucket reassignments
-# before running the audit again
-# server.pool.lock.audit.retryDelay - mismatches can occur due to the transient nature
-# of the lock state: this property controls how long to wait before
-# trying again
-
-server.pool.lock.ttl=3
-server.pool.lock.audit.period=300000
-server.pool.lock.audit.gracePeriod=60000
-server.pool.lock.audit.retryDelay=5000
\ No newline at end of file
diff --git a/feature-server-pool/src/test/resources/logback-test.xml b/feature-server-pool/src/test/resources/logback-test.xml
deleted file mode 100644
index 7df374fb..00000000
--- a/feature-server-pool/src/test/resources/logback-test.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-<!--
- ============LICENSE_START=======================================================
- ONAP
- ================================================================================
- Copyright (C) 2020 AT&T Intellectual Property. All rights reserved.
- ================================================================================
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- ============LICENSE_END=========================================================
- -->
-
-<!-- Controls the output of logs for JUnit tests -->
-
-<configuration>
- <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
- <encoder>
- <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{50}:%M:%line - %msg%n</pattern>
- </encoder>
- </appender>
- <root level="WARN">
- <appender-ref ref="STDOUT" />
- </root>
-</configuration>