author     adheli.tavares <adheli.tavares@est.tech>    2024-08-15 11:08:57 +0100
committer  adheli.tavares <adheli.tavares@est.tech>    2024-08-15 11:10:39 +0100
commit     88744c04a7cfed3a4227bc2137102ff5fe69895f (patch)
tree       5865844183729aed07f06dd2f94e4940e7ff80f1
parent     f6d8e60eb75733cf9996bffb3c6ecb586f377da6 (diff)
PostgreSQL support for Drools
- moved all sql related management to db-migrator
- any hardcoded variable related to database is configurable

Issue-ID: POLICY-5107
Change-Id: I789895773ba8737651f68a0b494f72f947a147d1
Signed-off-by: adheli.tavares <adheli.tavares@est.tech>
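For illustration only (the values below are assumptions, not part of this change): since the JDBC driver, URL and credentials are now read from the environment rather than hardcoded to MariaDB, a deployment could point the pooling datasource at PostgreSQL by exporting variables such as the following before starting the PDP-D. The variable names come from the properties file changed in this patch; the host, database and credential values are examples.

    # hypothetical PostgreSQL settings; host, port, user and password are examples only
    export JDBC_DRIVER=org.postgresql.Driver
    export JDBC_URL=jdbc:postgresql://policy-postgres:5432/
    export JDBC_OPTS=
    export SQL_USER=policy_user
    export SQL_PASSWORD=policy_user

With these set, the persistence URL resolves to jdbc:postgresql://policy-postgres:5432/pooling, mirroring how the previous MariaDB URL was assembled.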
-rw-r--r--   feature-distributed-locking/src/assembly/assemble_zip.xml   7
-rw-r--r--   feature-distributed-locking/src/main/feature/config/feature-distributed-locking.properties   4
-rw-r--r--   feature-distributed-locking/src/main/feature/db/pooling/sql/1804-distributedlocking.downgrade.sql   20
-rw-r--r--   feature-distributed-locking/src/main/feature/db/pooling/sql/1804-distributedlocking.upgrade.sql   30
-rw-r--r--   feature-distributed-locking/src/main/feature/db/pooling/sql/1811-distributedlocking.downgrade.sql   19
-rw-r--r--   feature-distributed-locking/src/main/feature/db/pooling/sql/1811-distributedlocking.upgrade.sql   23
-rw-r--r--   feature-distributed-locking/src/main/feature/install/disable   26
-rw-r--r--   feature-distributed-locking/src/main/feature/install/enable   26
-rw-r--r--   feature-distributed-locking/src/main/java/org/onap/policy/distributed/locking/DistributedLockManager.java   78
-rw-r--r--   feature-distributed-locking/src/test/java/org/onap/policy/distributed/locking/DistributedLockManagerTest.java   189
-rw-r--r--   feature-healthcheck/src/assembly/assemble_zip.xml   7
-rw-r--r--   feature-no-locking/src/assembly/assemble_zip.xml   7
-rw-r--r--   feature-no-locking/src/main/java/org/onap/policy/no/locking/NoLockManager.java   9
-rw-r--r--   feature-no-locking/src/test/java/org/onap/policy/no/locking/NoLockManagerTest.java   2
-rw-r--r--   feature-pooling-messages/src/assembly/assemble_zip.xml   7
-rw-r--r--   feature-test-transaction/src/assembly/assemble_zip.xml   6
-rw-r--r--   packages/base/src/files/bin/policy   8
-rw-r--r--   packages/docker/src/main/docker/Dockerfile   6
-rw-r--r--   packages/docker/src/main/docker/pdpd-entrypoint.sh   22
-rw-r--r--   packages/docker/src/main/docker/suse.Dockerfile   3
-rw-r--r--   policy-management/src/main/java/org/onap/policy/drools/features/PolicyEngineFeatureApi.java   4
-rw-r--r--   policy-management/src/main/java/org/onap/policy/drools/system/PolicyEngineManager.java   14
-rw-r--r--   policy-management/src/main/server-gen/bin/db-migrator   635
-rw-r--r--   policy-management/src/main/server-gen/bin/features   315
-rw-r--r--   policy-management/src/test/java/org/onap/policy/drools/system/PolicyEngineManagerTest.java   16
25 files changed, 173 insertions, 1310 deletions
diff --git a/feature-distributed-locking/src/assembly/assemble_zip.xml b/feature-distributed-locking/src/assembly/assemble_zip.xml
index 2112fbcd..3def9898 100644
--- a/feature-distributed-locking/src/assembly/assemble_zip.xml
+++ b/feature-distributed-locking/src/assembly/assemble_zip.xml
@@ -3,6 +3,7 @@
feature-distributed-locking
================================================================================
Copyright (C) 2018 AT&T Intellectual Property. All rights reserved.
+ Modifications Copyright (C) 2024 Nordix Foundation.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -59,12 +60,6 @@
<excludes/>
</fileSet>
<fileSet>
- <directory>src/main/feature/db</directory>
- <outputDirectory>db</outputDirectory>
- <fileMode>0744</fileMode>
- <excludes/>
- </fileSet>
- <fileSet>
<directory>src/main/feature/install</directory>
<outputDirectory>install</outputDirectory>
<fileMode>0744</fileMode>
diff --git a/feature-distributed-locking/src/main/feature/config/feature-distributed-locking.properties b/feature-distributed-locking/src/main/feature/config/feature-distributed-locking.properties
index 69629e15..fa311260 100644
--- a/feature-distributed-locking/src/main/feature/config/feature-distributed-locking.properties
+++ b/feature-distributed-locking/src/main/feature/config/feature-distributed-locking.properties
@@ -3,7 +3,7 @@
# ONAP
# ================================================================================
# Copyright (C) 2018-2019, 2021-2022 AT&T Intellectual Property. All rights reserved.
-# Modifications Copyright (C) 2023 Nordix Foundation.
+# Modifications Copyright (C) 2023-2024 Nordix Foundation.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@
###
#Database properties
-jakarta.persistence.jdbc.driver=org.mariadb.jdbc.Driver
+jakarta.persistence.jdbc.driver=${envd:JDBC_DRIVER}
jakarta.persistence.jdbc.url=${envd:JDBC_URL}pooling${envd:JDBC_OPTS}
jakarta.persistence.jdbc.user=${envd:SQL_USER}
jakarta.persistence.jdbc.password=${envd:SQL_PASSWORD}
diff --git a/feature-distributed-locking/src/main/feature/db/pooling/sql/1804-distributedlocking.downgrade.sql b/feature-distributed-locking/src/main/feature/db/pooling/sql/1804-distributedlocking.downgrade.sql
deleted file mode 100644
index cd1b815d..00000000
--- a/feature-distributed-locking/src/main/feature/db/pooling/sql/1804-distributedlocking.downgrade.sql
+++ /dev/null
@@ -1,20 +0,0 @@
-# ============LICENSE_START=======================================================
-# feature-distributed-locking
-# ================================================================================
-# Copyright (C) 2018 AT&T Intellectual Property. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END=========================================================
-
-use pooling;
-drop table if exists locks;
\ No newline at end of file
diff --git a/feature-distributed-locking/src/main/feature/db/pooling/sql/1804-distributedlocking.upgrade.sql b/feature-distributed-locking/src/main/feature/db/pooling/sql/1804-distributedlocking.upgrade.sql
deleted file mode 100644
index 07b30738..00000000
--- a/feature-distributed-locking/src/main/feature/db/pooling/sql/1804-distributedlocking.upgrade.sql
+++ /dev/null
@@ -1,30 +0,0 @@
-# ============LICENSE_START=======================================================
-# feature-distributed-locking
-# ================================================================================
-# Copyright (C) 2018, 2022 AT&T Intellectual Property. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END=========================================================
-
-SET foreign_key_checks=0;
-
-CREATE TABLE if NOT EXISTS pooling.locks(
- resourceId VARCHAR(128),
- host VARCHAR(128),
- owner VARCHAR(128),
- expirationTime BIGINT,
- PRIMARY KEY (resourceId),
- INDEX idx_expirationTime(expirationTime),
- INDEX idx_host(host));
-
-SET foreign_key_checks=1;
\ No newline at end of file
diff --git a/feature-distributed-locking/src/main/feature/db/pooling/sql/1811-distributedlocking.downgrade.sql b/feature-distributed-locking/src/main/feature/db/pooling/sql/1811-distributedlocking.downgrade.sql
deleted file mode 100644
index 55a883f2..00000000
--- a/feature-distributed-locking/src/main/feature/db/pooling/sql/1811-distributedlocking.downgrade.sql
+++ /dev/null
@@ -1,19 +0,0 @@
-# ============LICENSE_START=======================================================
-# feature-distributed-locking
-# ================================================================================
-# Copyright (C) 2018, 2021 AT&T Intellectual Property. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END=========================================================
-
-ALTER TABLE pooling.locks modify if exists expirationTime bigint(20) default 0;
diff --git a/feature-distributed-locking/src/main/feature/db/pooling/sql/1811-distributedlocking.upgrade.sql b/feature-distributed-locking/src/main/feature/db/pooling/sql/1811-distributedlocking.upgrade.sql
deleted file mode 100644
index 0cdad5b9..00000000
--- a/feature-distributed-locking/src/main/feature/db/pooling/sql/1811-distributedlocking.upgrade.sql
+++ /dev/null
@@ -1,23 +0,0 @@
-# ============LICENSE_START=======================================================
-# feature-distributed-locking
-# ================================================================================
-# Copyright (C) 2018, 2021-2022 AT&T Intellectual Property. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END=========================================================
-
-SET foreign_key_checks=0;
-
-ALTER TABLE pooling.locks MODIFY expirationTime TIMESTAMP DEFAULT '1971-01-01 00:00:00.000000';
-
-SET foreign_key_checks=1;
diff --git a/feature-distributed-locking/src/main/feature/install/disable b/feature-distributed-locking/src/main/feature/install/disable
deleted file mode 100644
index be455734..00000000
--- a/feature-distributed-locking/src/main/feature/install/disable
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env sh
-
-# ============LICENSE_START=======================================================
-# ONAP
-# ================================================================================
-# Copyright (C) 2018-2021 AT&T Intellectual Property. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END=========================================================
-
-if [ "${DEBUG}" = "y" ]; then
- set -x
-fi
-
-${POLICY_HOME}/bin/db-migrator -s pooling -o downgrade
-
diff --git a/feature-distributed-locking/src/main/feature/install/enable b/feature-distributed-locking/src/main/feature/install/enable
deleted file mode 100644
index af202f56..00000000
--- a/feature-distributed-locking/src/main/feature/install/enable
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env sh
-
-# ============LICENSE_START=======================================================
-# ONAP
-# ================================================================================
-# Copyright (C) 2018-2021 AT&T Intellectual Property. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END=========================================================
-
-if [ "${DEBUG}" = "y" ]; then
- set -x
-fi
-
-${POLICY_HOME}/bin/db-migrator -s pooling -o upgrade
-
diff --git a/feature-distributed-locking/src/main/java/org/onap/policy/distributed/locking/DistributedLockManager.java b/feature-distributed-locking/src/main/java/org/onap/policy/distributed/locking/DistributedLockManager.java
index d7f857eb..e9f1453a 100644
--- a/feature-distributed-locking/src/main/java/org/onap/policy/distributed/locking/DistributedLockManager.java
+++ b/feature-distributed-locking/src/main/java/org/onap/policy/distributed/locking/DistributedLockManager.java
@@ -3,6 +3,7 @@
* ONAP
* ================================================================================
* Copyright (C) 2019-2022 AT&T Intellectual Property. All rights reserved.
+ * Modifications Copyright (C) 2024 Nordix Foundation.
* ================================================================================
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,6 +25,9 @@ import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.SQLTransientException;
+import java.sql.Timestamp;
+import java.time.Instant;
+import java.time.temporal.ChronoUnit;
import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
@@ -57,11 +61,8 @@ import org.slf4j.LoggerFactory;
* Distributed implementation of the Lock Feature. Maintains locks across servers using a
* shared DB.
*
- * <p/>
- * Note: this implementation does <i>not</i> honor the waitForLocks={@code true}
- * parameter.
- *
- * <p/>
+ * <p>Note: this implementation does <i>not</i> honor the waitForLocks={@code true}
+ * parameter.<p/>
* Additional Notes:
* <dl>
* <li>The <i>owner</i> field in the DB is not derived from the lock's owner info, but is
@@ -77,7 +78,7 @@ import org.slf4j.LoggerFactory;
* </dl>
*/
public class DistributedLockManager extends LockManager<DistributedLockManager.DistributedLock>
- implements PolicyEngineFeatureApi {
+ implements PolicyEngineFeatureApi {
private static final Logger logger = LoggerFactory.getLogger(DistributedLockManager.class);
@@ -141,11 +142,11 @@ public class DistributedLockManager extends LockManager<DistributedLockManager.D
}
@Override
- public PolicyResourceLockManager beforeCreateLockManager(PolicyEngine engine, Properties properties) {
+ public PolicyResourceLockManager beforeCreateLockManager() {
try {
this.pdpName = PolicyEngineConstants.getManager().getPdpName();
- this.featProps = new DistributedLockProperties(getProperties(CONFIGURATION_PROPERTIES_NAME));
+ this.featProps = new DistributedLockProperties(getProperties());
this.dataSource = makeDataSource();
return this;
@@ -176,9 +177,9 @@ public class DistributedLockManager extends LockManager<DistributedLockManager.D
* Make data source.
*
* @return a new, pooled data source
- * @throws Exception exception
+ * @throws SQLException exception
*/
- protected BasicDataSource makeDataSource() throws Exception {
+ protected BasicDataSource makeDataSource() throws SQLException {
var props = new Properties();
props.put("driverClassName", featProps.getDbDriver());
props.put("url", featProps.getDbUrl());
@@ -199,7 +200,7 @@ public class DistributedLockManager extends LockManager<DistributedLockManager.D
logger.info("deleting all expired locks from the DB");
try (var conn = dataSource.getConnection();
- var stmt = conn.prepareStatement("DELETE FROM pooling.locks WHERE expirationTime <= now()")) {
+ var stmt = conn.prepareStatement("DELETE FROM pooling.locks WHERE expirationTime <= now()")) {
int ndel = stmt.executeUpdate();
logger.info("deleted {} expired locks from the DB", ndel);
@@ -398,17 +399,17 @@ public class DistributedLockManager extends LockManager<DistributedLockManager.D
/**
* Constructs the object.
*
- * @param state initial state of the lock
+ * @param state initial state of the lock
* @param resourceId identifier of the resource to be locked
- * @param ownerKey information identifying the owner requesting the lock
- * @param holdSec amount of time, in seconds, for which the lock should be held,
- * after which it will automatically be released
- * @param callback callback to be invoked once the lock is granted, or
- * subsequently lost; must not be {@code null}
- * @param feature feature containing this lock
+ * @param ownerKey information identifying the owner requesting the lock
+ * @param holdSec amount of time, in seconds, for which the lock should be held,
+ * after which it will automatically be released
+ * @param callback callback to be invoked once the lock is granted, or
+ * subsequently lost; must not be {@code null}
+ * @param feature feature containing this lock
*/
public DistributedLock(LockState state, String resourceId, String ownerKey, int holdSec, LockCallback callback,
- DistributedLockManager feature) {
+ DistributedLockManager feature) {
super(state, resourceId, ownerKey, holdSec, callback);
this.feature = feature;
@@ -531,7 +532,6 @@ public class DistributedLockManager extends LockManager<DistributedLockManager.D
* there are no more requests in the queue.
*
* @param prevReq the previous request that was just run
- *
* @return the next request, or {@code null} if the queue is empty
*/
private synchronized RunnableWithEx getNextRequest(RunnableWithEx prevReq) {
@@ -699,19 +699,19 @@ public class DistributedLockManager extends LockManager<DistributedLockManager.D
* Inserts the lock into the DB.
*
* @param conn DB connection
- * @return {@code true} if a record was successfully inserted, {@code false}
- * otherwise
+ * @return {@code true} if a record was successfully inserted, {@code false}otherwise
* @throws SQLException if a DB error occurs
*/
protected boolean doDbInsert(Connection conn) throws SQLException {
logger.info("insert lock record {}", this);
- try (var stmt = conn.prepareStatement("INSERT INTO pooling.locks (resourceId, host, owner, expirationTime) "
- + "values (?, ?, ?, timestampadd(second, ?, now()))")) {
+ var time = Instant.now().plus(getHoldSec(), ChronoUnit.SECONDS);
+ String sql = "INSERT INTO pooling.locks (resourceId, host, owner, expirationTime) values (?, ?, ?, ?)";
+ try (var stmt = conn.prepareStatement(sql)) {
stmt.setString(1, getResourceId());
stmt.setString(2, feature.pdpName);
stmt.setString(3, feature.uuidString);
- stmt.setInt(4, getHoldSec());
+ stmt.setTimestamp(4, new Timestamp(time.toEpochMilli()));
stmt.executeUpdate();
@@ -726,20 +726,21 @@ public class DistributedLockManager extends LockManager<DistributedLockManager.D
* Updates the lock in the DB.
*
* @param conn DB connection
- * @return {@code true} if a record was successfully updated, {@code false}
- * otherwise
+ * @return {@code true} if a record was successfully updated, {@code false} otherwise
* @throws SQLException if a DB error occurs
*/
protected boolean doDbUpdate(Connection conn) throws SQLException {
logger.info("update lock record {}", this);
- try (var stmt = conn.prepareStatement("UPDATE pooling.locks SET resourceId=?, host=?, owner=?,"
- + " expirationTime=timestampadd(second, ?, now()) WHERE resourceId=?"
- + " AND ((host=? AND owner=?) OR expirationTime < now())")) {
+ var time = Instant.now().plus(getHoldSec(), ChronoUnit.SECONDS);
+ var query = "UPDATE pooling.locks SET resourceId=?, host=?, owner=?,"
+ + " expirationTime=? WHERE resourceId=?"
+ + " AND ((host=? AND owner=?) OR expirationTime < now())";
+ try (var stmt = conn.prepareStatement(query)) {
stmt.setString(1, getResourceId());
stmt.setString(2, feature.pdpName);
stmt.setString(3, feature.uuidString);
- stmt.setInt(4, getHoldSec());
+ stmt.setTimestamp(4, new Timestamp(time.toEpochMilli()));
stmt.setString(5, getResourceId());
stmt.setString(6, this.hostName);
@@ -764,8 +765,8 @@ public class DistributedLockManager extends LockManager<DistributedLockManager.D
*/
protected void doDbDelete(Connection conn) throws SQLException {
logger.info("delete lock record {}", this);
- try (var stmt = conn
- .prepareStatement("DELETE FROM pooling.locks WHERE resourceId=? AND host=? AND owner=?")) {
+ var query = "DELETE FROM pooling.locks WHERE resourceId=? AND host=? AND owner=?";
+ try (var stmt = conn.prepareStatement(query)) {
stmt.setString(1, getResourceId());
stmt.setString(2, this.hostName);
@@ -793,8 +794,8 @@ public class DistributedLockManager extends LockManager<DistributedLockManager.D
@Override
public String toString() {
return "DistributedLock [state=" + getState() + ", resourceId=" + getResourceId() + ", ownerKey="
- + getOwnerKey() + ", holdSec=" + getHoldSec() + ", hostName=" + hostName + ", uuidString="
- + uuidString + "]";
+ + getOwnerKey() + ", holdSec=" + getHoldSec() + ", hostName=" + hostName + ", uuidString="
+ + uuidString + "]";
}
}
@@ -811,12 +812,13 @@ public class DistributedLockManager extends LockManager<DistributedLockManager.D
// these may be overridden by junit tests
- protected Properties getProperties(String fileName) {
- return SystemPersistenceConstants.getManager().getProperties(fileName);
+ protected Properties getProperties() {
+ return SystemPersistenceConstants.getManager().getProperties(
+ DistributedLockManager.CONFIGURATION_PROPERTIES_NAME);
}
protected DistributedLock makeLock(LockState state, String resourceId, String ownerKey, int holdSec,
- LockCallback callback) {
+ LockCallback callback) {
return new DistributedLock(state, resourceId, ownerKey, holdSec, callback, this);
}
}
diff --git a/feature-distributed-locking/src/test/java/org/onap/policy/distributed/locking/DistributedLockManagerTest.java b/feature-distributed-locking/src/test/java/org/onap/policy/distributed/locking/DistributedLockManagerTest.java
index 2e173cf0..579d53cc 100644
--- a/feature-distributed-locking/src/test/java/org/onap/policy/distributed/locking/DistributedLockManagerTest.java
+++ b/feature-distributed-locking/src/test/java/org/onap/policy/distributed/locking/DistributedLockManagerTest.java
@@ -141,12 +141,12 @@ class DistributedLockManagerTest {
private LockCallback callback;
@Mock
- private BasicDataSource datasrc;
+ private BasicDataSource dataSource;
private DistributedLock lock;
- private AtomicInteger nactive;
- private AtomicInteger nsuccesses;
+ private AtomicInteger numActive;
+ private AtomicInteger numSuccess;
private DistributedLockManager feature;
AutoCloseable closeable;
@@ -204,8 +204,8 @@ class DistributedLockManagerTest {
session.setPolicySession();
- nactive = new AtomicInteger(0);
- nsuccesses = new AtomicInteger(0);
+ numActive = new AtomicInteger(0);
+ numSuccess = new AtomicInteger(0);
cleanDb();
@@ -248,7 +248,7 @@ class DistributedLockManagerTest {
@Test
void testBeforeCreateLockManager() {
- assertSame(feature, feature.beforeCreateLockManager(engine, new Properties()));
+ assertSame(feature, feature.beforeCreateLockManager());
}
/**
@@ -260,13 +260,12 @@ class DistributedLockManagerTest {
feature = new MyLockingFeature(false) {
@Override
- protected Properties getProperties(String fileName) {
+ protected Properties getProperties() {
throw new IllegalArgumentException(EXPECTED_EXCEPTION);
}
};
- Properties props = new Properties();
- assertThatThrownBy(() -> feature.beforeCreateLockManager(engine, props))
+ assertThatThrownBy(() -> feature.beforeCreateLockManager())
.isInstanceOf(DistributedLockManagerException.class);
}
@@ -317,7 +316,6 @@ class DistributedLockManagerTest {
/**
* Tests deleteExpiredDbLocks(), when getConnection() throws an exception.
- *
*/
@Test
void testDeleteExpiredDbLocksEx() {
@@ -347,7 +345,6 @@ class DistributedLockManagerTest {
/**
* Tests afterStop(), when the data source throws an exception when close() is called.
- *
*/
@Test
void testAfterStopEx() {
@@ -416,10 +413,10 @@ class DistributedLockManagerTest {
void testCreateLockNotLatestInstance() {
DistributedLockManager.setLatestInstance(null);
- Lock lock = feature.createLock(RESOURCE, OWNER_KEY, HOLD_SEC, callback, false);
- assertTrue(lock.isUnavailable());
+ Lock newLock = feature.createLock(RESOURCE, OWNER_KEY, HOLD_SEC, callback, false);
+ assertTrue(newLock.isUnavailable());
verify(callback, never()).lockAvailable(any());
- verify(callback).lockUnavailable(lock);
+ verify(callback).lockUnavailable(newLock);
}
@Test
@@ -532,11 +529,11 @@ class DistributedLockManagerTest {
feature = new MyLockingFeature(true) {
@Override
- protected BasicDataSource makeDataSource() throws Exception {
+ protected BasicDataSource makeDataSource() throws SQLException {
// get the real data source
BasicDataSource src2 = super.makeDataSource();
- when(datasrc.getConnection()).thenAnswer(answer -> {
+ when(dataSource.getConnection()).thenAnswer(answer -> {
DistributedLock lck = freeLock.getAndSet(null);
if (lck != null) {
// free it
@@ -549,7 +546,7 @@ class DistributedLockManagerTest {
return src2.getConnection();
});
- return datasrc;
+ return dataSource;
}
};
@@ -596,11 +593,11 @@ class DistributedLockManagerTest {
@Test
void testDistributedLockNoArgs() {
- DistributedLock lock = new DistributedLock();
- assertNull(lock.getResourceId());
- assertNull(lock.getOwnerKey());
- assertNull(lock.getCallback());
- assertEquals(0, lock.getHoldSec());
+ DistributedLock newLock = new DistributedLock();
+ assertNull(newLock.getResourceId());
+ assertNull(newLock.getOwnerKey());
+ assertNull(newLock.getCallback());
+ assertEquals(0, newLock.getHoldSec());
}
@Test
@@ -615,15 +612,15 @@ class DistributedLockManagerTest {
@Test
void testDistributedLockSerializable() throws Exception {
- DistributedLock lock = getLock(RESOURCE, callback);
- lock = roundTrip(lock);
+ DistributedLock newLock = getLock(RESOURCE, callback);
+ newLock = roundTrip(newLock);
- assertTrue(lock.isWaiting());
+ assertTrue(newLock.isWaiting());
- assertEquals(RESOURCE, lock.getResourceId());
- assertEquals(OWNER_KEY, lock.getOwnerKey());
- assertNull(lock.getCallback());
- assertEquals(HOLD_SEC, lock.getHoldSec());
+ assertEquals(RESOURCE, newLock.getResourceId());
+ assertEquals(OWNER_KEY, newLock.getOwnerKey());
+ assertNull(newLock.getCallback());
+ assertEquals(HOLD_SEC, newLock.getHoldSec());
}
@Test
@@ -690,13 +687,13 @@ class DistributedLockManagerTest {
*/
@Test
void testDistributedLockFreeSerialized() throws Exception {
- DistributedLock lock = getLock(RESOURCE, callback);
+ DistributedLock newLock = getLock(RESOURCE, callback);
feature = new MyLockingFeature(true);
- lock = roundTrip(lock);
- assertTrue(lock.free());
- assertTrue(lock.isUnavailable());
+ newLock = roundTrip(newLock);
+ assertTrue(newLock.free());
+ assertTrue(newLock.isUnavailable());
}
/**
@@ -706,13 +703,13 @@ class DistributedLockManagerTest {
*/
@Test
void testDistributedLockFreeNoFeature() throws Exception {
- DistributedLock lock = getLock(RESOURCE, callback);
+ DistributedLock newLock = getLock(RESOURCE, callback);
DistributedLockManager.setLatestInstance(null);
- lock = roundTrip(lock);
- assertFalse(lock.free());
- assertTrue(lock.isUnavailable());
+ newLock = roundTrip(newLock);
+ assertFalse(newLock.free());
+ assertTrue(newLock.isUnavailable());
}
/**
@@ -788,28 +785,28 @@ class DistributedLockManagerTest {
*/
@Test
void testDistributedLockExtendSerialized() throws Exception {
- DistributedLock lock = getLock(RESOURCE, callback);
+ DistributedLock newLock = getLock(RESOURCE, callback);
// run doLock
runLock(0, 0);
- assertTrue(lock.isActive());
+ assertTrue(newLock.isActive());
feature = new MyLockingFeature(true);
- lock = roundTrip(lock);
- assertTrue(lock.isActive());
+ newLock = roundTrip(newLock);
+ assertTrue(newLock.isActive());
- LockCallback scallback = mock(LockCallback.class);
+ LockCallback mockCallback = mock(LockCallback.class);
- lock.extend(HOLD_SEC, scallback);
- assertTrue(lock.isWaiting());
+ newLock.extend(HOLD_SEC, mockCallback);
+ assertTrue(newLock.isWaiting());
// run doExtend (in new feature)
runLock(0, 0);
- assertTrue(lock.isActive());
+ assertTrue(newLock.isActive());
- verify(scallback).lockAvailable(lock);
- verify(scallback, never()).lockUnavailable(lock);
+ verify(mockCallback).lockAvailable(newLock);
+ verify(mockCallback, never()).lockUnavailable(newLock);
}
/**
@@ -819,24 +816,24 @@ class DistributedLockManagerTest {
*/
@Test
void testDistributedLockExtendNoFeature() throws Exception {
- DistributedLock lock = getLock(RESOURCE, callback);
+ DistributedLock newLock = getLock(RESOURCE, callback);
// run doLock
runLock(0, 0);
- assertTrue(lock.isActive());
+ assertTrue(newLock.isActive());
DistributedLockManager.setLatestInstance(null);
- lock = roundTrip(lock);
- assertTrue(lock.isActive());
+ newLock = roundTrip(newLock);
+ assertTrue(newLock.isActive());
LockCallback scallback = mock(LockCallback.class);
- lock.extend(HOLD_SEC, scallback);
- assertTrue(lock.isUnavailable());
+ newLock.extend(HOLD_SEC, scallback);
+ assertTrue(newLock.isUnavailable());
- verify(scallback, never()).lockAvailable(lock);
- verify(scallback).lockUnavailable(lock);
+ verify(scallback, never()).lockAvailable(newLock);
+ verify(scallback).lockUnavailable(newLock);
}
/**
@@ -964,7 +961,7 @@ class DistributedLockManagerTest {
feature = new MyLockingFeature(true) {
@Override
protected DistributedLock makeLock(LockState state, String resourceId, String ownerKey, int holdSec,
- LockCallback callback) {
+ LockCallback callback) {
return new DistributedLock(state, resourceId, ownerKey, holdSec, callback, feature) {
private static final long serialVersionUID = 1L;
@@ -1015,13 +1012,13 @@ class DistributedLockManagerTest {
@Test
void testDistributedLockDoRequestRunExWaiting() throws SQLException {
// throw run-time exception
- when(datasrc.getConnection()).thenThrow(new IllegalStateException(EXPECTED_EXCEPTION));
+ when(dataSource.getConnection()).thenThrow(new IllegalStateException(EXPECTED_EXCEPTION));
// use a data source that throws an exception when getConnection() is called
feature = new MyLockingFeature(true) {
@Override
protected BasicDataSource makeDataSource() {
- return datasrc;
+ return dataSource;
}
};
@@ -1045,7 +1042,7 @@ class DistributedLockManagerTest {
@Test
void testDistributedLockDoRequestRunExUnavailable() throws SQLException {
// throw run-time exception
- when(datasrc.getConnection()).thenAnswer(answer -> {
+ when(dataSource.getConnection()).thenAnswer(answer -> {
lock.free();
throw new IllegalStateException(EXPECTED_EXCEPTION);
});
@@ -1054,7 +1051,7 @@ class DistributedLockManagerTest {
feature = new MyLockingFeature(true) {
@Override
protected BasicDataSource makeDataSource() {
- return datasrc;
+ return dataSource;
}
};
@@ -1232,7 +1229,6 @@ class DistributedLockManagerTest {
/**
* Tests doUnlock() when a DB exception is thrown.
- *
*/
@Test
void testDistributedLockDoUnlockEx() {
@@ -1337,7 +1333,6 @@ class DistributedLockManagerTest {
/**
* Tests doExtend() when both update and insert fail.
- *
*/
@Test
void testDistributedLockDoExtendNeitherSucceeds() {
@@ -1348,7 +1343,7 @@ class DistributedLockManagerTest {
feature = new MyLockingFeature(true) {
@Override
protected DistributedLock makeLock(LockState state, String resourceId, String ownerKey, int holdSec,
- LockCallback callback) {
+ LockCallback callback) {
return new DistributedLock(state, resourceId, ownerKey, holdSec, callback, feature) {
private static final long serialVersionUID = 1L;
private int ntimes = 0;
@@ -1437,7 +1432,7 @@ class DistributedLockManagerTest {
feature = new DistributedLockManager();
// this should create a thread pool
- feature.beforeCreateLockManager(engine, new Properties());
+ feature.beforeCreateLockManager();
feature.afterStart(engine);
assertThatCode(this::shutdownFeature).doesNotThrowAnyException();
@@ -1447,14 +1442,14 @@ class DistributedLockManagerTest {
* Performs a multithreaded test of the locking facility.
*
* @throws InterruptedException if the current thread is interrupted while waiting for
- * the background threads to complete
+ * the background threads to complete
*/
@Test
void testMultiThreaded() throws InterruptedException {
ReflectionTestUtils.setField(PolicyEngineConstants.getManager(), POLICY_ENGINE_EXECUTOR_FIELD, realExec);
feature = new DistributedLockManager();
- feature.beforeCreateLockManager(PolicyEngineConstants.getManager(), new Properties());
+ feature.beforeCreateLockManager();
feature.afterStart(PolicyEngineConstants.getManager());
List<MyThread> threads = new ArrayList<>(MAX_THREADS);
@@ -1475,7 +1470,7 @@ class DistributedLockManagerTest {
}
}
- assertTrue(nsuccesses.get() > 0);
+ assertTrue(numSuccess.get() > 0);
}
private DistributedLock getLock(String resource, LockCallback callback) {
@@ -1511,9 +1506,9 @@ class DistributedLockManagerTest {
/**
* Runs a lock action (e.g., doLock, doUnlock).
*
- * @param nskip number of actions in the work queue to skip
+ * @param nskip number of actions in the work queue to skip
* @param nadditional number of additional actions that appear in the work queue
- * <i>after</i> the desired action
+ * <i>after</i> the desired action
*/
void runLock(int nskip, int nadditional) {
ArgumentCaptor<Runnable> captor = ArgumentCaptor.forClass(Runnable.class);
@@ -1544,7 +1539,7 @@ class DistributedLockManagerTest {
*/
private int getRecordCount() throws SQLException {
try (PreparedStatement stmt = conn.prepareStatement("SELECT count(*) FROM pooling.locks");
- ResultSet result = stmt.executeQuery()) {
+ ResultSet result = stmt.executeQuery()) {
if (result.next()) {
return result.getInt(1);
@@ -1561,16 +1556,16 @@ class DistributedLockManagerTest {
*
* @param resourceId ID of the resource of interest
* @param uuidString UUID string of the owner
- * @param holdSec seconds for which the lock was to be held
- * @param tbegin earliest time, in milliseconds, at which the record could have been
- * inserted into the DB
+ * @param holdSec seconds for which the lock was to be held
+ * @param tbegin earliest time, in milliseconds, at which the record could have been
+ * inserted into the DB
* @return {@code true} if a record is found, {@code false} otherwise
* @throws SQLException if an error occurs accessing the DB
*/
private boolean recordInRange(String resourceId, String uuidString, int holdSec, long tbegin) throws SQLException {
try (PreparedStatement stmt =
- conn.prepareStatement("SELECT timestampdiff(second, now(), expirationTime) FROM pooling.locks"
- + " WHERE resourceId=? AND host=? AND owner=?")) {
+ conn.prepareStatement("SELECT timestampdiff(second, now(), expirationTime) FROM pooling.locks"
+ + " WHERE resourceId=? AND host=? AND owner=?")) {
stmt.setString(1, resourceId);
stmt.setString(2, feature.getPdpName());
@@ -1592,8 +1587,8 @@ class DistributedLockManagerTest {
/**
* Inserts a record into the DB.
*
- * @param resourceId ID of the resource of interest
- * @param uuidString UUID string of the owner
+ * @param resourceId ID of the resource of interest
+ * @param uuidString UUID string of the owner
* @param expireOffset offset, in seconds, from "now", at which the lock should expire
* @throws SQLException if an error occurs accessing the DB
*/
@@ -1604,8 +1599,8 @@ class DistributedLockManagerTest {
private void insertRecord(String resourceId, String hostName, String uuidString, int expireOffset)
throws SQLException {
try (PreparedStatement stmt =
- conn.prepareStatement("INSERT INTO pooling.locks (resourceId, host, owner, expirationTime) "
- + "values (?, ?, ?, timestampadd(second, ?, now()))")) {
+ conn.prepareStatement("INSERT INTO pooling.locks (resourceId, host, owner, expirationTime) "
+ + "values (?, ?, ?, timestampadd(second, ?, now()))")) {
stmt.setString(1, resourceId);
stmt.setString(2, hostName);
@@ -1619,8 +1614,8 @@ class DistributedLockManagerTest {
/**
* Updates a record in the DB.
*
- * @param resourceId ID of the resource of interest
- * @param newUuid UUID string of the <i>new</i> owner
+ * @param resourceId ID of the resource of interest
+ * @param newUuid UUID string of the <i>new</i> owner
* @param expireOffset offset, in seconds, from "now", at which the lock should expire
* @throws SQLException if an error occurs accessing the DB
*/
@@ -1650,7 +1645,7 @@ class DistributedLockManagerTest {
ReflectionTestUtils.setField(PolicyEngineConstants.getManager(), POLICY_ENGINE_EXECUTOR_FIELD, exsvc);
if (init) {
- beforeCreateLockManager(engine, new Properties());
+ beforeCreateLockManager();
start();
afterStart(engine);
}
@@ -1671,14 +1666,14 @@ class DistributedLockManagerTest {
this.isTransient = isTransient;
- this.beforeCreateLockManager(engine, new Properties());
+ this.beforeCreateLockManager();
this.start();
this.afterStart(engine);
}
@Override
- protected BasicDataSource makeDataSource() throws Exception {
- lenient().when(datasrc.getConnection()).thenAnswer(answer -> {
+ protected BasicDataSource makeDataSource() throws SQLException {
+ lenient().when(dataSource.getConnection()).thenAnswer(answer -> {
if (freeLock) {
freeLock = false;
lock.free();
@@ -1687,9 +1682,9 @@ class DistributedLockManagerTest {
throw makeEx();
});
- doThrow(makeEx()).when(datasrc).close();
+ doThrow(makeEx()).when(dataSource).close();
- return datasrc;
+ return dataSource;
}
protected SQLException makeEx() {
@@ -1715,7 +1710,7 @@ class DistributedLockManagerTest {
@Override
protected DistributedLock makeLock(LockState state, String resourceId, String ownerKey, int holdSec,
- LockCallback callback) {
+ LockCallback callback) {
return new DistributedLock(state, resourceId, ownerKey, holdSec, callback, feature) {
private static final long serialVersionUID = 1L;
@@ -1785,27 +1780,27 @@ class DistributedLockManagerTest {
}
};
- Lock lock = feature.createLock(RESOURCE, getName(), HOLD_SEC, cb, false);
+ Lock newLock = feature.createLock(RESOURCE, getName(), HOLD_SEC, cb, false);
// wait for callback, whether available or unavailable
assertTrue(sem.tryAcquire(5, TimeUnit.SECONDS));
- if (!lock.isActive()) {
+ if (!newLock.isActive()) {
return;
}
- nsuccesses.incrementAndGet();
+ numSuccess.incrementAndGet();
- assertEquals(1, nactive.incrementAndGet());
+ assertEquals(1, numActive.incrementAndGet());
- lock.extend(HOLD_SEC2, cb);
+ newLock.extend(HOLD_SEC2, cb);
assertTrue(sem.tryAcquire(5, TimeUnit.SECONDS));
- assertTrue(lock.isActive());
+ assertTrue(newLock.isActive());
// decrement BEFORE free()
- nactive.decrementAndGet();
+ numActive.decrementAndGet();
- assertTrue(lock.free());
- assertTrue(lock.isUnavailable());
+ assertTrue(newLock.free());
+ assertTrue(newLock.isUnavailable());
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
diff --git a/feature-healthcheck/src/assembly/assemble_zip.xml b/feature-healthcheck/src/assembly/assemble_zip.xml
index 0e2eaa8b..3f60900e 100644
--- a/feature-healthcheck/src/assembly/assemble_zip.xml
+++ b/feature-healthcheck/src/assembly/assemble_zip.xml
@@ -3,6 +3,7 @@
feature-healthcheck
================================================================================
Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
+ Modifications Copyright (C) 2024 Nordix Foundation.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -59,12 +60,6 @@
<excludes/>
</fileSet>
<fileSet>
- <directory>src/main/feature/db</directory>
- <outputDirectory>db</outputDirectory>
- <fileMode>0744</fileMode>
- <excludes/>
- </fileSet>
- <fileSet>
<directory>src/main/feature/install</directory>
<outputDirectory>install</outputDirectory>
<fileMode>0744</fileMode>
diff --git a/feature-no-locking/src/assembly/assemble_zip.xml b/feature-no-locking/src/assembly/assemble_zip.xml
index 2c74fc00..ded6dff7 100644
--- a/feature-no-locking/src/assembly/assemble_zip.xml
+++ b/feature-no-locking/src/assembly/assemble_zip.xml
@@ -3,6 +3,7 @@
ONAP
================================================================================
Copyright (C) 2021 AT&T Intellectual Property. All rights reserved.
+ Modifications Copyright (C) 2024 Nordix Foundation.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -60,12 +61,6 @@
<excludes/>
</fileSet>
<fileSet>
- <directory>src/main/feature/db</directory>
- <outputDirectory>db</outputDirectory>
- <fileMode>0744</fileMode>
- <excludes/>
- </fileSet>
- <fileSet>
<directory>src/main/feature/install</directory>
<outputDirectory>install</outputDirectory>
<fileMode>0744</fileMode>
diff --git a/feature-no-locking/src/main/java/org/onap/policy/no/locking/NoLockManager.java b/feature-no-locking/src/main/java/org/onap/policy/no/locking/NoLockManager.java
index 49ea0af9..449f7480 100644
--- a/feature-no-locking/src/main/java/org/onap/policy/no/locking/NoLockManager.java
+++ b/feature-no-locking/src/main/java/org/onap/policy/no/locking/NoLockManager.java
@@ -3,6 +3,7 @@
* ONAP
* ================================================================================
* Copyright (C) 2021 AT&T Intellectual Property. All rights reserved.
+ * Modifications Copyright (C) 2024 Nordix Foundation.
* ================================================================================
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,7 +21,6 @@
package org.onap.policy.no.locking;
-import java.util.Properties;
import lombok.NoArgsConstructor;
import lombok.ToString;
import org.onap.policy.drools.core.lock.AlwaysSuccessLock;
@@ -28,7 +28,6 @@ import org.onap.policy.drools.core.lock.Lock;
import org.onap.policy.drools.core.lock.LockCallback;
import org.onap.policy.drools.core.lock.PolicyResourceLockManager;
import org.onap.policy.drools.features.PolicyEngineFeatureApi;
-import org.onap.policy.drools.system.PolicyEngine;
/**
* In contrast with other implementations the no-lock manager provides non-synchronized access
@@ -43,8 +42,8 @@ public class NoLockManager implements PolicyResourceLockManager, PolicyEngineFea
@Override
public Lock createLock(String resourceId, String ownerKey, int holdSec,
- LockCallback callback, boolean waitForLock) {
- var successLock = new AlwaysSuccessLock(resourceId, ownerKey, holdSec, callback);
+ LockCallback callback, boolean waitForLock) {
+ var successLock = new AlwaysSuccessLock(resourceId, ownerKey, holdSec, callback);
successLock.notifyAvailable();
return successLock;
}
@@ -90,7 +89,7 @@ public class NoLockManager implements PolicyResourceLockManager, PolicyEngineFea
}
@Override
- public PolicyResourceLockManager beforeCreateLockManager(PolicyEngine engine, Properties properties) {
+ public PolicyResourceLockManager beforeCreateLockManager() {
return this;
}
}
diff --git a/feature-no-locking/src/test/java/org/onap/policy/no/locking/NoLockManagerTest.java b/feature-no-locking/src/test/java/org/onap/policy/no/locking/NoLockManagerTest.java
index 5b5e0964..22f3f5d1 100644
--- a/feature-no-locking/src/test/java/org/onap/policy/no/locking/NoLockManagerTest.java
+++ b/feature-no-locking/src/test/java/org/onap/policy/no/locking/NoLockManagerTest.java
@@ -93,7 +93,7 @@ public class NoLockManagerTest {
@Test
void testBeforeCreateLockManager() {
- assertEquals(nlm, nlm.beforeCreateLockManager(null, null));
+ assertEquals(nlm, nlm.beforeCreateLockManager());
}
@Test
diff --git a/feature-pooling-messages/src/assembly/assemble_zip.xml b/feature-pooling-messages/src/assembly/assemble_zip.xml
index 67424116..a4c7b0fa 100644
--- a/feature-pooling-messages/src/assembly/assemble_zip.xml
+++ b/feature-pooling-messages/src/assembly/assemble_zip.xml
@@ -3,6 +3,7 @@
feature-pooling-messages
================================================================================
Copyright (C) 2018 AT&T Intellectual Property. All rights reserved.
+ Modifications Copyright (C) 2024 Nordix Foundation.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -61,12 +62,6 @@
<excludes/>
</fileSet>
<fileSet>
- <directory>src/main/feature/db</directory>
- <outputDirectory>db</outputDirectory>
- <fileMode>0744</fileMode>
- <excludes/>
- </fileSet>
- <fileSet>
<directory>src/main/feature/install</directory>
<outputDirectory>install</outputDirectory>
<fileMode>0744</fileMode>
diff --git a/feature-test-transaction/src/assembly/assemble_zip.xml b/feature-test-transaction/src/assembly/assemble_zip.xml
index 9945a1c9..0b5763c4 100644
--- a/feature-test-transaction/src/assembly/assemble_zip.xml
+++ b/feature-test-transaction/src/assembly/assemble_zip.xml
@@ -3,6 +3,7 @@
feature-test-transaction
================================================================================
Copyright (C) 2017 AT&T Intellectual Property. All rights reserved.
+ Modifications Copyright (C) 2024 Nordix Foundation.
================================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -59,11 +60,6 @@
<fileMode>0744</fileMode>
</fileSet>
<fileSet>
- <directory>src/main/feature/db</directory>
- <outputDirectory>db</outputDirectory>
- <fileMode>0744</fileMode>
- </fileSet>
- <fileSet>
<directory>src/main/feature/install</directory>
<outputDirectory>install</outputDirectory>
<fileMode>0744</fileMode>
diff --git a/packages/base/src/files/bin/policy b/packages/base/src/files/bin/policy
index 690fe1e5..c5ec37fa 100644
--- a/packages/base/src/files/bin/policy
+++ b/packages/base/src/files/bin/policy
@@ -4,6 +4,7 @@
# ONAP
# ================================================================================
# Copyright (C) 2017-2021 AT&T Intellectual Property. All rights reserved.
+# Modifications Copyright (C) 2024 Nordix Foundation.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -72,13 +73,6 @@ function policy_status() {
echo
echo "[features]"
features status
-
- local databases=$(ls -d "${POLICY_HOME}"/etc/db/migration/*/ 2>/dev/null)
- if [ -n "${databases}" ]; then
- echo "[migration]"
- db-migrator -s ALL -o ok
- fi
-
}
function policy_start() {
diff --git a/packages/docker/src/main/docker/Dockerfile b/packages/docker/src/main/docker/Dockerfile
index a3d013d4..d263ba71 100644
--- a/packages/docker/src/main/docker/Dockerfile
+++ b/packages/docker/src/main/docker/Dockerfile
@@ -2,7 +2,7 @@
# Dockerfile
# ============LICENSE_START=======================================================
# Copyright (C) 2020-2021 AT&T Intellectual Property. All rights reserved.
-# Modifications Copyright (C) 2022-2023 Nordix Foundation.
+# Modifications Copyright (C) 2022-2024 Nordix Foundation.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -52,9 +52,7 @@ ENV http_proxy $http_proxy
USER root
RUN apk update && \
- apk add --no-cache mariadb-client \
- file \
- maven \
+ apk add --no-cache file maven \
net-tools netcat-openbsd sudo less vim openssl \
&& python3 -m pip install --no-cache-dir --upgrade setuptools http-prompt \
&& python3 -m pip install --no-cache-dir httpie
diff --git a/packages/docker/src/main/docker/pdpd-entrypoint.sh b/packages/docker/src/main/docker/pdpd-entrypoint.sh
index 2caca628..c2e3f020 100644
--- a/packages/docker/src/main/docker/pdpd-entrypoint.sh
+++ b/packages/docker/src/main/docker/pdpd-entrypoint.sh
@@ -138,26 +138,6 @@ function serverConfig {
done
}
-function db {
- if [ "${DEBUG}" = "y" ]; then
- echo "-- db --"
- set -x
- fi
-
- if [ -z "${SQL_HOST}" ]; then
- return 0
- fi
-
- if [ -z "${SQL_PORT}" ]; then
- export SQL_PORT=3306
- fi
-
- echo "Waiting for ${SQL_HOST}:${SQL_PORT} ..."
- timeout 120 sh -c 'until nc -vz -w 20 "${SQL_HOST}" "${SQL_PORT}"; do echo -n "."; sleep 1; done'
-
- "${POLICY_HOME}"/bin/db-migrator -s ALL -o upgrade
-}
-
function inspect {
if [ "${DEBUG}" = "y" ]; then
echo "-- inspect --"
@@ -209,7 +189,6 @@ function configure {
fi
reload
- db
}
function vmBoot {
@@ -219,7 +198,6 @@ function vmBoot {
fi
reload
- db
start
scripts "post.sh"
}
diff --git a/packages/docker/src/main/docker/suse.Dockerfile b/packages/docker/src/main/docker/suse.Dockerfile
index e6bf9dbd..c83a5cc6 100644
--- a/packages/docker/src/main/docker/suse.Dockerfile
+++ b/packages/docker/src/main/docker/suse.Dockerfile
@@ -1,7 +1,7 @@
#-------------------------------------------------------------------------------
# Dockerfile
# ============LICENSE_START=======================================================
-# Copyright (C) 2022 Nordix Foundation.
+# Copyright (C) 2022, 2024 Nordix Foundation.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -56,7 +56,6 @@ RUN zypper -n -q install --no-recommends \
gzip \
java-17-openjdk-devel \
maven \
- mariadb-client \
netcat-openbsd \
python3 \
python3-pip \
diff --git a/policy-management/src/main/java/org/onap/policy/drools/features/PolicyEngineFeatureApi.java b/policy-management/src/main/java/org/onap/policy/drools/features/PolicyEngineFeatureApi.java
index 8edf394e..27b96999 100644
--- a/policy-management/src/main/java/org/onap/policy/drools/features/PolicyEngineFeatureApi.java
+++ b/policy-management/src/main/java/org/onap/policy/drools/features/PolicyEngineFeatureApi.java
@@ -182,7 +182,7 @@ public interface PolicyEngineFeatureApi extends OrderedService {
*
* @return true if this feature intercepts and takes ownership
* of the operation preventing the invocation of
- * lower priority features. False, otherwise..
+ * lower priority features. False, otherwise.
*/
default boolean afterLock(PolicyEngine engine) {
return false;
@@ -281,7 +281,7 @@ public interface PolicyEngineFeatureApi extends OrderedService {
* operation preventing the invocation of lower priority features. Null,
* otherwise
*/
- default PolicyResourceLockManager beforeCreateLockManager(PolicyEngine engine, Properties properties) {
+ default PolicyResourceLockManager beforeCreateLockManager() {
return null;
}
diff --git a/policy-management/src/main/java/org/onap/policy/drools/system/PolicyEngineManager.java b/policy-management/src/main/java/org/onap/policy/drools/system/PolicyEngineManager.java
index 0bc2318b..203d6bf2 100644
--- a/policy-management/src/main/java/org/onap/policy/drools/system/PolicyEngineManager.java
+++ b/policy-management/src/main/java/org/onap/policy/drools/system/PolicyEngineManager.java
@@ -342,7 +342,7 @@ class PolicyEngineManager implements PolicyEngine {
private void createLockManager(Properties properties) {
for (PolicyEngineFeatureApi feature : getEngineProviders()) {
try {
- this.lockManager = feature.beforeCreateLockManager(this, properties);
+ this.lockManager = feature.beforeCreateLockManager();
if (this.lockManager != null) {
logger.info("overridden lock manager is {}", this.lockManager);
return;
@@ -941,7 +941,7 @@ class PolicyEngineManager implements PolicyEngine {
@Override
public void run() {
try {
- doSleep(SHUTDOWN_MAX_GRACE_TIME);
+ doSleep();
logger.warn("{}: abnormal termination - shutdown graceful time period expiration",
PolicyEngineManager.this);
} catch (final InterruptedException e) {
@@ -960,18 +960,18 @@ class PolicyEngineManager implements PolicyEngine {
ex.getMessage(), ex));
logger.info("{}: exit", PolicyEngineManager.this);
- doExit(0);
+ doExit();
}
}
// these may be overridden by junit tests
- protected void doSleep(long sleepMs) throws InterruptedException {
- Thread.sleep(sleepMs);
+ protected void doSleep() throws InterruptedException {
+ Thread.sleep(ShutdownThread.SHUTDOWN_MAX_GRACE_TIME);
}
- protected void doExit(int code) {
- System.exit(code);
+ protected void doExit() {
+ System.exit(0);
}
}
diff --git a/policy-management/src/main/server-gen/bin/db-migrator b/policy-management/src/main/server-gen/bin/db-migrator
deleted file mode 100644
index 64d0fcf1..00000000
--- a/policy-management/src/main/server-gen/bin/db-migrator
+++ /dev/null
@@ -1,635 +0,0 @@
-#!/usr/bin/env sh
-
-# ============LICENSE_START=======================================================
-# ONAP
-# ================================================================================
-# Copyright (C) 2017-2022 AT&T Intellectual Property. All rights reserved.
-# ================================================================================
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============LICENSE_END=========================================================
-
-# #####################################################################
-#
-# Upgrade/Downgrade SQL File Name Format:
-#
-# <VERSION>-<pdp|feature-name>[-description](.upgrade|.downgrade).sql
-#
-# This tool operates on a migration working directory at
-#
-# $POLICY_HOME/etc/db/migration
-#
-# Upgrade/Downgrade files for each schema (aka database) names to be maintained
-# by this tool are located at
-#
-# $POLICY_HOME/etc/db/migration/<schema-name>/sql
-#
-# The nature of the migration directories is dynamic.
-# Other tooling aware of when migrations are needed are in charge to populate
-# the migrations directory accordingly.
-#
-# One of these tools is the 'features' when a feature with DB requirements
-# is 'enabled', the upgrade scripts will be made present in the migration directory.
-# When a features is 'disabled' downgrade scripts will be made available in the
-# migration directory.
-#
-# The 'policy' tool via its operations 'status' or 'start' will signal the
-# need to perform upgrade or downgrade for a given schema.
-#
-# At any given time the following invariant must be preserved in any given
-# $POLICY_HOME/etc/db/migration/<schema-name>/sql directory
-#
-# There is only upgrade scripts, or only downgrade scripts, or none.
-#
-# #####################################################################
-
-source ${POLICY_HOME}/etc/profile.d/env.sh
-
-METADATA_DB=migration
-METADATA_TABLE=${METADATA_DB}.metadata_versions
-MIGRATION_DIR=${POLICY_HOME}/etc/db/migration
-ZERO_VERSION="0"
-UPGRADE_SQL_SUFFIX=".upgrade.sql"
-DOWNGRADE_SQL_SUFFIX=".downgrade.sql"
-
-SQL_QUOTES="SET SESSION SQL_MODE=ANSI_QUOTES;"
-
-#####################################################
-# usage
-#####################################################
-
-function usage() {
- echo
- echo -e "syntax: $(basename "$0") "
- echo -e "\t -s <schema-name> "
- echo -e "\t [-b <migration-dir>] "
- echo -e "\t [-f <from-version>]"
- echo -e "\t [-t <target-version>]"
- echo -e "\t -o <operations> "
- echo
- echo -e "\t where <operations>=upgrade|downgrade|auto|version|erase|report"
- echo
- echo
- echo -e "Configuration Options:"
- echo -e "\t -s|--schema|--database: schema to operate on ('ALL' to apply on all)"
- echo -e "\t -b|--basedir: overrides base DB migration directory"
- echo -e "\t -f|--from: overrides current release version for operations"
- echo -e "\t -t|--target: overrides target release to upgrade/downgrade"
- echo
- echo -e "Operations:"
- echo -e "\t upgrade: upgrade operation"
- echo -e "\t downgrade: performs a downgrade operation"
- echo -e "\t auto: autonomous operation, determines upgrade or downgrade"
- echo -e "\t version: returns current version, and in conjunction if '-f' sets the current version"
- echo -e "\t erase: erase all data related <schema> (use with care)"
- echo -e "\t report: migration detailed report on an schema"
- echo -e "\t ok: is the migration status valid"
- echo
- echo
-}
-
-#####################################################
-# ensure global metadata
-#####################################################
-
-function ensure_metadata
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- ensure_metadata --"
- set -x
- fi
-
- local sql rc
-
- sql="CREATE DATABASE IF NOT EXISTS ${METADATA_DB};"
- ${MYSQL} --execute "${sql}"
- rc=$?
- if [ ${rc} -ne 0 ]; then
- return ${rc}
- fi
-
- sql="CREATE TABLE IF NOT EXISTS ${METADATA_TABLE} "
- sql=${sql}"(name VARCHAR(60) NOT NULL, version VARCHAR(20), PRIMARY KEY(name));"
- ${MYSQL} --execute "${sql}"
- return $?
-}
-
-
-#####################################################
-# ensure metadata on a per schema basis
-#####################################################
-
-function ensure_metadata_schema
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- ensure_metadata_schema --"
- set -x
- fi
-
- local sql rc
-
- sql="CREATE TABLE IF NOT EXISTS ${METADATA_HISTORY} "
- sql=${sql}"(script VARCHAR(80) NOT NULL, operation VARCHAR(10), success VARCHAR(1), "
- sql=${sql}"atTime TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, "
- sql=${sql}"PRIMARY KEY(script));"
- ${MYSQL} --execute "${sql}"
- rc=$?
- if [ ${rc} -ne 0 ]; then
- return ${rc}
- fi
-
- sql="CREATE DATABASE IF NOT EXISTS ${SCHEMA_DB};"
- ${MYSQL} --execute "${sql}"
- return $?
-}
-
-
-#####################################################
-# target_release
-#####################################################
-
-function target_release
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- target_release --"
- set -x
- fi
-
- local sql sqlName upgradeSqls downgradeSqls
-
- TARGET_UPGRADE_RELEASE=${ZERO_VERSION}
- TARGET_DOWNGRADE_RELEASE=${ZERO_VERSION}
-
- upgradeSqls=$(ls -v -r "${UPGRADE_DIR}"/*"${UPGRADE_SQL_SUFFIX}" 2> /dev/null)
- for sql in ${upgradeSqls}; do
- sqlName=$(basename "${sql}")
- TARGET_UPGRADE_RELEASE="${sqlName%-*}"
- break
- done
-
- # default unless overriden
- TARGET_DOWNGRADE_RELEASE="${ZERO_VERSION}"
-}
-
-#####################################################
-# is_upgrade
-#####################################################
-
-function is_upgrade
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- is_upgrade --"
- set -x
- fi
-
- local upgradeSqls
-
- upgradeSqls=$(ls "${UPGRADE_DIR}"/*"${UPGRADE_SQL_SUFFIX}" 2> /dev/null)
- if [ -z "${upgradeSqls}" ]; then
- return 1
- else
- return 0
- fi
-}
-
-
-#####################################################
-# is_downgrade
-#####################################################
-
-function is_downgrade
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- is_downgrade --"
- set -x
- fi
-
- local downgradeSqls
-
- downgradeSqls=$(ls "${DOWNGRADE_DIR}"/*"${DOWNGRADE_SQL_SUFFIX}" 2> /dev/null)
- if [ -z "${downgradeSqls}" ]; then
- return 1
- else
- return 0
- fi
-}
-
-
-#####################################################
-# set_current_release
-#####################################################
-
-function set_current_release
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- set_current_release --"
- set -x
- fi
-
- CURRENT_RELEASE="${1}"
-
- local sql
- sql="INSERT INTO ${METADATA_TABLE} (name, version) "
- sql=${sql}"VALUES('${SCHEMA}', '${CURRENT_RELEASE}') "
- sql=${sql}"ON DUPLICATE KEY UPDATE version='${CURRENT_RELEASE}';"
-
- ${MYSQL} --execute "${sql}"
- return $?
-}
-
-#####################################################
-# current_release
-#####################################################
-
-function current_release
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- current_release --"
- set -x
- fi
-
- local rc
- local query="SELECT version FROM ${METADATA_TABLE} WHERE name='${SCHEMA}'"
-
- CURRENT_RELEASE=$(${MYSQL} --skip-column-names --silent --execute "${query}")
- if [ -z "${CURRENT_RELEASE}" ]; then
- set_current_release "${ZERO_VERSION}"
- return $?
- fi
-
- return 0
-}
-
-#####################################################
-# execute sql script history
-#####################################################
-
-function track_script
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- track_script $* --"
- set -x
- fi
-
- local script="${1}" operation="${2}" success="${3}"
- local sql="INSERT INTO ${METADATA_HISTORY}(script,operation,success,atTime) "
- sql=${sql}"VALUES ('${script}','${operation}','${success}',now()) "
- sql=${sql}"ON DUPLICATE KEY UPDATE operation=values(operation), success=values(success), atTime=values(atTime);"
-
- ${MYSQL} --execute "${sql}"
- return $?
-}
-
-
-#####################################################
-# execute sql script
-#####################################################
-
-function run_script
-{
- if [ "${DEBUG}" == "y" ]; then
- echo "-- run_script $* --"
- set -x
- fi
-
- local operation="${1}" script="${2}" scriptPath="${3}"
-
- echo
- echo "> ${operation} ${script}"
-
- ${MYSQL} --verbose < "${scriptPath}"
- local rc=$?
- if [ ${rc} -ne 0 ]; then
- success="0"
- else
- success="1"
- fi
-
- track_script "${script}" "${operation}" "${success}"
-
- return ${rc}
-}
-
-#####################################################
-# upgrade
-#####################################################
-
-function upgrade
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- upgrade --"
- set -x
- fi
-
- local sqlName sqlFile schemaVersion upgradeSqls rc
-
- ${MYSQL} --execute "USE ${SCHEMA_DB}"
-
- echo "upgrade: ${CURRENT_RELEASE} -> ${TARGET_UPGRADE_RELEASE}"
-
- if [ ${CURRENT_RELEASE} \< ${TARGET_UPGRADE_RELEASE} ]; then
- upgradeSqls=$(ls -v "${UPGRADE_DIR}"/*"${UPGRADE_SQL_SUFFIX}" 2> /dev/null)
- for sqlFile in ${upgradeSqls}; do
- sqlName=$(basename "${sqlFile}")
- schemaVersion="${sqlName%-*}"
- if [ "${schemaVersion}" -gt "${CURRENT_RELEASE}" ] && \
- [ "${schemaVersion}" -le "${TARGET_UPGRADE_RELEASE}" ]; then
- run_script "upgrade" "${sqlName}" "${sqlFile}"
- rc=$?
- if [ ${rc} -ne 0 ]; then
- echo "${SCHEMA}: upgrade aborted at ${schemaVersion} by script ${sqlName}"
- set_current_release "${schemaVersion}"
- return ${rc}
- fi
- fi
- done
-
- set_current_release "${TARGET_UPGRADE_RELEASE}"
- fi
-
- return 0
-}
-
-#####################################################
-# downgrade
-#####################################################
-
-function downgrade
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- downgrade --"
- set -x
- fi
-
- local sqlName sqlFile schemaVersion downgradeSqls rc
-
- ${MYSQL} --execute "USE ${SCHEMA_DB}"
-
- echo "downgrade: ${CURRENT_RELEASE} -> ${TARGET_DOWNGRADE_RELEASE}"
-
- if [ ${CURRENT_RELEASE} \> ${TARGET_DOWNGRADE_RELEASE} ]; then
- downgradeSqls=$(ls -v -r "${DOWNGRADE_DIR}"/*"${DOWNGRADE_SQL_SUFFIX}" 2> /dev/null)
- for sqlFile in ${downgradeSqls}; do
- sqlName=$(basename "${sqlFile}")
- schemaVersion="${sqlName%-*}"
- if [ "${schemaVersion}" -le "${CURRENT_RELEASE}" ] && \
- [ "${schemaVersion}" -gt "${TARGET_DOWNGRADE_RELEASE}" ]; then
- run_script "downgrade" "${sqlName}" "${sqlFile}"
- rc=$?
- if [ ${rc} -ne 0 ]; then
- echo "${SCHEMA}: downgrade aborted at ${schemaVersion} by script ${sqlName}"
- set_current_release "${schemaVersion}"
- return ${rc}
- fi
- fi
- done
-
- set_current_release "${TARGET_DOWNGRADE_RELEASE}"
- fi
-
- return 0
-}
-
-#####################################################
-# erase
-#####################################################
-
-function erase
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- erase --"
- set -x
- fi
-
- local updateMetadata="UPDATE ${METADATA_TABLE} SET version='${ZERO_VERSION}';"
- ${MYSQL} --execute "${updateMetadata}"
-
- local deleteHistory="DELETE FROM ${METADATA_HISTORY};"
- ${MYSQL} --execute "${deleteHistory}"
-
- local dropDB="DROP DATABASE IF EXISTS ${SCHEMA_DB}";
- ${MYSQL} --execute "${dropDB}"
-}
-
-#####################################################
-# report
-#####################################################
-
-function report
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- report --"
- set -x
- fi
-
- local versionSql="SELECT * FROM ${METADATA_TABLE} WHERE name='${SCHEMA}';"
- ${MYSQL} --execute "${versionSql}"
-
- local historySql="SELECT * FROM ${METADATA_HISTORY} ORDER BY atTime ASC;"
- ${MYSQL} --execute "${historySql}"
-
- okay
-}
-
-function okay
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- okay --"
- set -x
- fi
-
- local rc=0
- if is_upgrade; then
- if [ "${CURRENT_RELEASE}" = "${TARGET_UPGRADE_RELEASE}" ]; then
- echo "${SCHEMA}: OK @ ${CURRENT_RELEASE}"
- else
- echo "${SCHEMA}: upgrade available: ${CURRENT_RELEASE} -> ${TARGET_UPGRADE_RELEASE}"
- rc=1
- fi
- else
- if [ "${CURRENT_RELEASE}" = "${TARGET_DOWNGRADE_RELEASE}" ]; then
- echo "${SCHEMA}: OK @ ${CURRENT_RELEASE}"
- else
- echo "${SCHEMA}: downgrade available: ${CURRENT_RELEASE} -> ${TARGET_DOWNGRADE_RELEASE}"
- rc=1
- fi
- fi
-
- return ${rc}
-}
-
-#####################################################
-# MAIN
-#####################################################
-
-if [ "${DEBUG}" = "y" ]; then
- echo "-- $0 $* --"
- set -x
-fi
-until [ -z "$1" ]; do
- case $1 in
- -s|--schema|--database) shift
- SCHEMA=$1
- ;;
- -b|--basedir) shift
- MIGRATION_DIR=$1
- ;;
- -t|--target) shift
- INPUT_TARGET_RELEASE=$1
- ;;
- -f|--from) shift
- INPUT_CURRENT_RELEASE=$1
- ;;
- -o|--operation) shift
- OPERATION=$1
- ;;
- *) usage
- exit 1
- ;;
- esac
- shift
-done
-
-case ${OPERATION} in
- upgrade) ;;
- downgrade) ;;
- auto) ;;
- version) ;;
- erase) ;;
- report) ;;
- ok) ;;
- *) echo "error: invalid operation provided"
- usage
- exit 1
- ;;
-esac
-
-if [ -z "${SCHEMA}" ]; then
- echo "error: a database name must be provided"
- usage
- exit 2
-fi
-
-source "${POLICY_HOME}"/etc/profile.d/env.sh
-
-if [ -z "${SQL_HOST}" ] || [ -z "${SQL_USER}" ] || [ -z "${SQL_PASSWORD}" ]; then
- echo "error: no database has been set up"
- exit 4
-fi
-
-if [ -z "${SQL_PORT}" ]; then
- export SQL_PORT=3306
-fi
-
-if [ -z "$MYSQL_CMD" ]; then
- MYSQL_CMD="mysql"
-fi
-
-MYSQL="${MYSQL_CMD} -u${SQL_USER} -p${SQL_PASSWORD} -h ${SQL_HOST} -P ${SQL_PORT}"
-
-if ! ${MYSQL} --execute "show databases;" > /dev/null 2>&1; then
- echo "error: No DB connectivity to ${SQL_HOST} for ${SQL_USER}"
- exit 5
-fi
-
-if [ "${SCHEMA}" = "ALL" ]; then
- SCHEMA="*"
-fi
-
-SCHEMA_S=$(ls -d "${MIGRATION_DIR}"/${SCHEMA}/ 2> /dev/null)
-if [ -z "${SCHEMA_S}" ]; then
- echo "error: no databases available"
- exit 0
-fi
-
-if ! ensure_metadata; then
- echo "error: migration metadata not accessible"
- exit 7
-fi
-
-rc=0
-for dbPath in ${SCHEMA_S}; do
- SCHEMA=$(basename "${dbPath}")
- SCHEMA_DB="\`${SCHEMA}\`"
- UPGRADE_DIR="${MIGRATION_DIR}"/"${SCHEMA}"/sql
- DOWNGRADE_DIR=${UPGRADE_DIR}
- METADATA_HISTORY="${METADATA_DB}.\`${SCHEMA}_history\`"
- TARGET_RELEASE=${INPUT_TARGET_RELEASE}
- CURRENT_RELEASE=${INPUT_CURRENT_RELEASE}
-
- if is_upgrade && is_downgrade; then
- echo "${SCHEMA}: failure: invalid configuration: ${UPGRADE_SQL_SUFFIX} and "\
- "${DOWNGRADE_SQL_SUFFIX} exist under ${DOWNGRADE_DIR}"
- rc=1
- continue
- fi
-
- if [ "${operation}" = "auto" ]; then
- if is_upgrade; then
- operation=upgrade
- else
- operation=downgrade
- fi
- fi
-
- if ! ensure_metadata_schema; then
- echo "${SCHEMA}: failure: metadata not accessible for this schema"
- continue
- fi
-
- if [ -z "${TARGET_RELEASE}" ]; then
- target_release
- else
- # user asked to override
- TARGET_UPGRADE_RELEASE="${TARGET_RELEASE}"
- TARGET_DOWNGRADE_RELEASE="${TARGET_RELEASE}"
- fi
-
- if [ -z "${CURRENT_RELEASE}" ]; then
- if ! current_release; then
- echo "${SCHEMA}: failure: cannot obtain current release"
- continue
- fi
- else
- if ! set_current_release "${CURRENT_RELEASE}"; then
- echo "${SCHEMA}: failure: cannot set current release"
- continue
- fi
- fi
-
- case ${OPERATION} in
- upgrade) if upgrade; then
- echo "${SCHEMA}: OK: upgrade (${CURRENT_RELEASE})"
- else
- rc=1
- echo "${SCHEMA}: failure: upgrade to release ${TARGET_UPGRADE_RELEASE} (${CURRENT_RELEASE})"
- fi
- ;;
- downgrade) if downgrade; then
- echo "${SCHEMA}: OK: downgrade (${CURRENT_RELEASE})"
- else
- rc=1
- echo "${SCHEMA}: failure: downgrade to release ${TARGET_DOWNGRADE_RELEASE} (${CURRENT_RELEASE})"
- fi
- ;;
- version) echo "${SCHEMA}: ${CURRENT_RELEASE}"
- ;;
- erase) erase
- ;;
- report) report
- ;;
- ok) okay
- ;;
- esac
-
-done
-exit $rc
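
For reference, the removed db-migrator walked each schema's migration directory and applied upgrade scripts in ascending version order, limited to the window between the current and the target release, recording every run in the metadata history table. A minimal sketch of that selection loop, using the variable and function names of the deleted script (error handling condensed; the example file name is illustrative):

for sqlFile in $(ls -v "${UPGRADE_DIR}"/*.upgrade.sql 2> /dev/null); do
    sqlName=$(basename "${sqlFile}")
    schemaVersion="${sqlName%-*}"      # version prefix, e.g. "1811" from "1811-mydb.upgrade.sql"
    # apply only scripts newer than the current release, up to and including the target
    if [ "${schemaVersion}" -gt "${CURRENT_RELEASE}" ] && \
       [ "${schemaVersion}" -le "${TARGET_UPGRADE_RELEASE}" ]; then
        run_script "upgrade" "${sqlName}" "${sqlFile}" || exit 1
    fi
done
set_current_release "${TARGET_UPGRADE_RELEASE}"

Downgrades mirrored this with a reverse-ordered listing (ls -v -r) of the *.downgrade.sql files and the complementary version window.
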
diff --git a/policy-management/src/main/server-gen/bin/features b/policy-management/src/main/server-gen/bin/features
index 3343ffc4..01b77b1c 100644
--- a/policy-management/src/main/server-gen/bin/features
+++ b/policy-management/src/main/server-gen/bin/features
@@ -5,6 +5,7 @@
# ONAP POLICY
# ================================================================================
# Copyright (C) 2017-2021 AT&T Intellectual Property. All rights reserved.
+# Modifications Copyright (C) 2024 Nordix Foundation.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -35,10 +36,6 @@
#     │  │ └─ <dependent-jar>+
#     │  └─ feature/
#     │  └─ <feature-jar>
-#     └─ [db]/
-#     │   └─ <db-name>/+
-#     │  └─ sql/
-#     │ └─ <sql-scripts>*
#     └─ [artifacts]/
#      └─ <artifact>+
#     └─ [install]
@@ -60,16 +57,6 @@
# of pdp-d that are necessary for <feature-name> to operate
# correctly.
# lib/feature the single feature jar that implements the feature.
-# [db] database directory, if the feature contains sql.
-# [db]/<db-name> database to which underlying sql scripts should be applied against.
-# ideally, <db-name> = <feature-name> so it is easily to associate
-# the db data with a feature itself. Ideally, since a feature is
-# a somewhat independent isolated unit of functionality,the <db-name>
-# database ideally isolates all its data.
-# [db]/<db-name>/sql directory with all the sql scripts.
-# [db]/<db-name>/sql/<sql-scripts> for this feature sql scripts
-# upgrade scripts should be suffixed with ".upgrade.sql"
-# downgrade scripts should be suffixed with ".downgrade.sql"
# [artifacts] maven artifacts to be deployed in a maven repository.
# [artifacts]/<artifact> maven artifact with identifiable maven coordinates embedded
# in the artifact.
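
Putting those pieces together, an enabled feature now unpacks to a layout like the following (names are illustrative; note there is no longer a db/ subtree):

features/feature-example/
    config/feature-example.properties
    lib/dependencies/<dependent-jar>
    lib/feature/feature-example-<version>.jar
    artifacts/<artifact>
    install/enable
    install/disable
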
@@ -87,10 +74,10 @@
# Operations:
# install: installs a feature
# uninstall: uninstalls a feature
-# enable : enables 1) dependencies, 2) configuration, 3) binaries 4) database, 5) artifacts,
-# 6) feature, 7) customization.
-# disable: disables 1) dependencies, 2) configuration, 3) binaries, 4) database, 5) feature,
-# 6) customization
+# enable : enables 1) dependencies, 2) configuration, 3) binaries, 4) artifacts,
+# 5) feature, 6) customization.
+# disable: disables 1) dependencies, 2) configuration, 3) binaries, 4) feature,
+# 5) customization
# status : status of a feature
#
# 'enable' operation details:
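
In day-to-day use the tool is driven per feature with one of the operations above; a typical sequence, assuming the usual 'features <operation> <feature-name>' form (the invocation form and the feature name are illustrative, not taken from this script's usage text):

features enable example-feature      # link deps, config and bin; deploy artifacts; run install/enable
features status example-feature      # report the feature's enabled/disabled state
features disable example-feature     # remove the links; run install/disable
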
@@ -99,10 +86,8 @@
# 2. sets symbolic links to feature dependencies in pdp-d classpath ($POLICY_HOME/lib)
# 3. sets symbolic links to feature configuration in pdp-d configuration directory ($POLICY_HOME/config)
# 4. sets symbolic links to feature executables in pdp-d bin directory ($POLICY_HOME/bin)
-# 5. sets symbolic links to feature upgrade scripts and removes links to downgrade scripts (if any)
-# in the pdp-d migration directory ($POLICY_HOME/etc/db/migration).
-# 6. deploys any maven artifacts in the maven repositories in use (if any)
-# 7. cd to the feature 'install' directory an executes (if exists) the 'enable' script to allow for specific
+# 5. deploys any maven artifacts in the maven repositories in use (if any)
+# 6. cd to the feature 'install' directory and executes (if it exists) the 'enable' script to allow for specific
# customizations for this feature.
#
# 'disable' operation details:
@@ -111,16 +96,9 @@
# 2. removes symbolic links to feature dependencies in pdp-d classpath ($POLICY_HOME/lib)
# 3. removes symbolic links to feature configuration in pdp-d configuration directory ($POLICY_HOME/config)
# 4. removes symbolic links to feature executables in pdp-d bin directory ($POLICY_HOME/bin)
-# 5. removes symbolic links to feature upgrade scripts and sets links to downgrade scripts (if any)
-# in the pdp-d migration directory ($POLICY_HOME/etc/db/migration).
-# 6. cd to the feature 'install' directory an executes (if exists) the 'disable' script to allow for specific
+# 5. cd to the feature 'install' directory and executes (if it exists) the 'disable' script to allow for specific
# customizations for this feature.
#
-# Notes for DB enabled features:
-# A. Upgrade/Downgrade SQL File Name Format:
-# <VERSION>-<pdp|feature-name>[-description](.upgrade|.downgrade).sql
-# B. See related tooling: db-migrator, deploy-artifact, and policy
-#
# Example:
#
# POLICY_HOME/
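
With the database step gone, the enable path reduces to filesystem links plus the optional per-feature hook. A condensed sketch of those steps (FEATURE_HOME is shorthand introduced here, not a variable this script defines; LIB, CONFIG, BIN and POLICY_HOME are the directories set below):

FEATURE_HOME="${POLICY_HOME}/features/${featureName}"

# steps 2-4: link the feature's dependencies, configuration and executables
# into the pdp-d directories (the same pattern applies for ${LIB} and ${BIN})
for cfg in "${FEATURE_HOME}"/config/*; do
    [ -e "${cfg}" ] && ln -s -f "${cfg}" "${CONFIG}/"
done

# step 5: deploy any maven artifacts (see deploy-artifact)
# step 6: run the feature's optional 'enable' customization, if present
if [ -x "${FEATURE_HOME}/install/enable" ]; then
    ( cd "${FEATURE_HOME}/install" && ./enable )
fi

Disabling mirrors this by removing the links and running install/disable instead.
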
@@ -155,7 +133,6 @@ fi
LIB=${POLICY_HOME}/lib
CONFIG=${POLICY_HOME}/config
BIN=${POLICY_HOME}/bin
-DB=${POLICY_HOME}/etc/db/migration
FEATURES=${POLICY_HOME}/features
if [ ! -d "${LIB}" ]; then
@@ -171,10 +148,6 @@ fi
# ensure that the directory exists
mkdir -p "${FEATURES}" 2> /dev/null
-if [ ! -d "${DB}" ]; then
- mkdir -p "${DB}"
-fi
-
# relative per Feature Directory Paths
FEATURE_DEPS="lib/dependencies"
@@ -183,11 +156,6 @@ FEATURE_CONFIG="config"
FEATURE_BIN="bin"
FEATURE_INSTALL="install"
FEATURE_ARTIFACTS="artifacts"
-FEATURE_DB="db"
-FEATURE_SQL="sql"
-
-UPGRADE_SQL_SUFFIX=".upgrade.sql"
-DOWNGRADE_SQL_SUFFIX=".downgrade.sql"
featureJars=$(find "${FEATURES}" -name "feature-*.jar" -type f -exec basename {} \; 2> /dev/null)
@@ -386,39 +354,6 @@ function enableBinAnalysis ()
}
# ##########################################################
-# enableDbAnalysis (featureName):
-# reports on potential db access problems
-# featureName: name of the feature
-# ##########################################################
-function enableDbAnalysis()
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- enableDbAnalysis $* --"
- set -x
- fi
-
- local featureName="$1"
- local featureSqls
-
- if [ -z "${featureName}" ]; then
- echo "warning: no feature name"
- return 1
- fi
-
- featureSqls=$(ls "${FEATURES}"/"${featureName}"/"${FEATURE_DB}"/*/${FEATURE_SQL}/*${UPGRADE_SQL_SUFFIX} 2> /dev/null)
- if [ -z "${featureSqls}" ]; then
- return 0
- fi
-
- source "${POLICY_HOME}"/etc/profile.d/env.sh
- if [ -z "${SQL_HOST}" ] || [ -z "${SQL_USER}" ] || [ -z "${SQL_PASSWORD}" ]; then
- echo "warning: DB server is not configured"
- fi
-
- return 0
-}
-
-# ##########################################################
# enableFeatureDeps(featureName):
# enables feature dependencies
# featureName: name of the feature
@@ -500,95 +435,6 @@ function enableFeatureBin()
}
# ##########################################################
-# enableFeatureDbSchema(featureName):
-# enables feature DB Schema configuration
-# featureName: name of the feature
-# ##########################################################
-function enableFeatureDbSchema()
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- enableFeatureDbSchema $* --"
- set -x
- fi
-
- local featureName="$1"
- local featureDbPath="$2"
- local schemaName="$3"
-
- if [ -z "${featureName}" ]; then
- echo "warning: no feature name"
- return 1
- fi
-
- if [ -z "${featureDbPath}" ]; then
- echo "warning: ${featureName} contains no DB path"
- return 2
- fi
-
- if [ -z "${schemaName}" ]; then
- echo "warning: feature ${featureName} contains no schema name"
- return 3
- fi
-
- rc=0
- sqlUpgradeScripts=$(ls "${featureDbPath%/}"/${FEATURE_SQL}/*${UPGRADE_SQL_SUFFIX} 2> /dev/null)
- for sqlUpgradeScript in ${sqlUpgradeScripts}; do
- if [ ! -d "${DB}"/"${schemaName}"/${FEATURE_SQL} ]; then
- mkdir -p "${DB}"/"${schemaName}"/${FEATURE_SQL} 2> /dev/null
- fi
- ln -s -f "${sqlUpgradeScript}" "${DB}"/"${schemaName}"/${FEATURE_SQL}/
- done
-
- sqlDowngradeScripts=$(ls "${featureDbPath%/}"/${FEATURE_SQL}/*${DOWNGRADE_SQL_SUFFIX} 2> /dev/null)
- for sqlDowngradeScript in ${sqlDowngradeScripts}; do
- if [ -d "${DB}"/"${schemaName}"/${FEATURE_SQL} ]; then
- sqlName=$(basename "${sqlDowngradeScript}")
- rm -f "${DB}"/"${schemaName}"/"${FEATURE_SQL}"/"${sqlName}" 2> /dev/null
- else
- echo "warning: feature ${featureName} only contains downgrade scripts"
- rc=4
- break
- fi
- done
-
- if [ -n "${sqlUpgradeScripts}" ] || [ -n "${sqlDowngradeScripts}" ]; then
- DEBUG=${DEBUG} db-migrator -s "${schemaName}" -o ok
- fi
-
- return ${rc}
-}
-
-# ##########################################################
-# enableFeatureDb(featureName):
-# enables DB feature configuration
-# featureName: name of the feature
-# ##########################################################
-function enableFeatureDb()
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- enableFeatureDb $* --"
- set -x
- fi
-
- local featureName="$1"
- local featureDbs featureDbPath schemaName sqls
- if [ -z "${featureName}" ]; then
- echo "warning: no feature name"
- return 1
- fi
-
- featureDbs=$(ls -d "${FEATURES}"/"${featureName}"/"${FEATURE_DB}"/*/ 2> /dev/null)
- for featureDbPath in ${featureDbs}; do
- sqls=$(ls "${featureDbPath%/}"/"${FEATURE_SQL}"/*.sql 2> /dev/null)
- if [ -z "${sqls}" ]; then
- continue
- fi
- schemaName=$(basename "${featureDbPath%/}")
- enableFeatureDbSchema "${featureName}" "${featureDbPath%/}" "${schemaName}"
- done
-}
-
-# ##########################################################
# enableFeatureArtifacts(featureName):
# deploys maven artifacts
# featureName: name of the feature
@@ -686,10 +532,6 @@ function enableFeature()
if ! enableBinAnalysis "${featureName}"; then
return "$?"
fi
-
- if ! enableDbAnalysis "${featureName}"; then
- return "$?"
- fi
# enable feature itself
@@ -707,10 +549,6 @@ function enableFeature()
enableFeatureBin "${featureName}"
- # enable db
-
- enableFeatureDb "${featureName}"
-
# enable feature artifacts
enableFeatureArtifacts "${featureName}"
@@ -839,97 +677,6 @@ function disableFeatureBin()
}
# ##########################################################
-# disableFeatureDbSchema(featureName, featureDbPath, schemaName):
-# disables feature db configuration for a schema
-# featureName: name of the feature
-# ##########################################################
-function disableFeatureDbSchema()
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- disableFeatureDbSchema $* --"
- set -x
- fi
-
- local featureName="$1" featureDbPath="$2" schemaName="$3"
- local upgradeFeatureSqls downgradeFeatureSqls featureSql sqlDir sqlName schemaDir schemaName
-
- if [ -z "${featureName}" ]; then
- echo "warning: no feature name"
- return 1
- fi
-
- if [ -z "${featureDbPath}" ]; then
- echo "warning: ${featureName} contains no DB path"
- return 2
- fi
-
- if [ -z "${schemaName}" ]; then
- echo "warning: feature ${featureName} contains no schema name"
- return 3
- fi
-
- if [ -z "${featureName}" ]; then
- echo "warning: no feature name"
- return 1
- fi
-
- upgradeFeatureSqls=$(find "${FEATURES}"/"${featureName}"/"${FEATURE_DB}"/"${schemaName}"/"${FEATURE_SQL}"/*"${UPGRADE_SQL_SUFFIX}" -type f -maxdepth 1 2> /dev/null)
- for featureSql in ${upgradeFeatureSqls}; do
- sqlName=$(basename "${featureSql}")
- sqlDir=$(dirname "${featureSql}")
- schemaDir=$(dirname "${sqlDir}")
- schemaName=$(basename "${schemaDir}")
- rm -f "${DB}"/"${schemaName}"/"${FEATURE_SQL}"/"${sqlName}" 2> /dev/null
- done
-
- downgradeFeatureSqls=$(find "${FEATURES}"/"${featureName}"/"${FEATURE_DB}"/"${schemaName}"/"${FEATURE_SQL}"/*"${DOWNGRADE_SQL_SUFFIX}" -type f -maxdepth 1 2> /dev/null)
- for featureSql in ${downgradeFeatureSqls}; do
- sqlName=$(basename "${featureSql}")
- sqlDir=$(dirname "${featureSql}")
- schemaDir=$(dirname "${sqlDir}")
- schemaName=$(basename "${schemaDir}")
- if [ ! -d "${DB}"/"${schemaName}"/${FEATURE_SQL} ]; then
- mkdir -p "${DB}"/"${schemaName}"/${FEATURE_SQL} 2> /dev/null
- fi
- ln -s -f "${featureSql}" "${DB}"/"${schemaName}"/${FEATURE_SQL}/
- done
-
- if [ -n "${sqlUpgradeScripts}" ] || [ -n "${sqlDowngradeScripts}" ]; then
- DEBUG=${DEBUG} db-migrator -s "${schemaName}" -o ok
- fi
-}
-
-# ##########################################################
-# disableFeatureDb(featureName):
-# disables feature db configuration
-# featureName: name of the feature
-# ##########################################################
-function disableFeatureDb()
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- disableFeatureDb $* --"
- set -x
- fi
-
- local featureName="$1"
- local featureDbPath featureDbs schemaName
-
- if [ -z "${featureName}" ]; then
- echo "warning: no feature name"
- return 1
- fi
-
- featureDbs=$(ls -d "${FEATURES}"/"${featureName}"/"${FEATURE_DB}"/*/ 2> /dev/null)
- for featureDbPath in ${featureDbs}; do
- if [ -z "$(ls "${featureDbPath%/}"/"${FEATURE_SQL}"/*${UPGRADE_SQL_SUFFIX} 2> /dev/null)" ]; then
- continue
- fi
- schemaName=$(basename "${featureDbPath%/}")
- disableFeatureDbSchema "${featureName}" "${featureDbPath%/}" "${schemaName}"
- done
-}
-
-# ##########################################################
# disableFeature(featureName): disables a feature
# featureName: name of the feature
# ##########################################################
@@ -966,10 +713,6 @@ function disableFeature()
disableFeatureBin "${featureName}"
- # disable DB SQL scripts if any
-
- disableFeatureDb "${featureName}"
-
# run custom disable if any
customOpScript "${featureName}" "disable"
@@ -1080,47 +823,6 @@ function installFeatures
fi
}
-# ##########################################################
-# uninstallFeatureDb(featureName):
-# uninstalls the feature db configuration
-# featureName: name of the feature
-# ##########################################################
-function uninstallFeatureDb()
-{
- if [ "${DEBUG}" = "y" ]; then
- echo "-- uninstallFeatureDb $* --"
- set -x
- fi
-
- local featureName="$1"
- local featureSqls sqlDir sqlName schemaDir schemaName schemaNames leftSqls
-
- if [ -z "${featureName}" ]; then
- echo "warning: no feature name"
- return 1
- fi
-
- featureSqls=$(find "${FEATURES}"/"${featureName}"/"${FEATURE_DB}"/*/${FEATURE_SQL}/*.sql -type f -maxdepth 1 2> /dev/null)
- for featureSql in ${featureSqls}; do
- sqlName=$(basename "${featureSql}")
- sqlDir=$(dirname "${featureSql}")
- schemaDir=$(dirname "${sqlDir}")
- schemaName=$(basename "${schemaDir}")
- schemaNames="${schemaNames} ${schemaName}"
- rm -f "${DB}"/"${schemaName}"/"${FEATURE_SQL}"/"${sqlName}" 2> /dev/null
- done
- for schemaName in ${schemaNames};
- do
- leftSqls=$(ls "${DB}"/"${schemaName}"/"${FEATURE_SQL}"/*.sql 2> /dev/null)
- if [ -n "${leftSqls}" ]; then
- if ! DEBUG=${DEBUG} db-migrator -s "${schemaName}" -o ok; then
- echo -n "warning: ${featureName}: ${schemaName}: database data is leftover. "
- echo -n "Consider cleaning left over data with 'db-migrator'."
- fi
- fi
- done
-}
-
############################################################
# uninstallFeature <feature-name> ...
############################################################
@@ -1138,7 +840,6 @@ function uninstallFeature
return
fi
disableFeature "${featureName}"
- uninstallFeatureDb "${featureName}"
customOpScript "${featureName}" "uninstall"
if [ -n "${FEATURES}" ] && [ -n "${featureName}" ]; then
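
For reference, both removed code paths (enableDbAnalysis here and the deleted db-migrator) gated any database work on the same environment contract; condensed into a sketch, with the default port and client invocation as those scripts used them:

. "${POLICY_HOME}/etc/profile.d/env.sh"

: "${SQL_PORT:=3306}"    # default the removed db-migrator assumed
if [ -z "${SQL_HOST}" ] || [ -z "${SQL_USER}" ] || [ -z "${SQL_PASSWORD}" ]; then
    echo "warning: DB server is not configured"
elif ! mysql -u"${SQL_USER}" -p"${SQL_PASSWORD}" -h "${SQL_HOST}" -P "${SQL_PORT}" \
        --execute "show databases;" > /dev/null 2>&1; then
    echo "error: No DB connectivity to ${SQL_HOST} for ${SQL_USER}"
fi
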
diff --git a/policy-management/src/test/java/org/onap/policy/drools/system/PolicyEngineManagerTest.java b/policy-management/src/test/java/org/onap/policy/drools/system/PolicyEngineManagerTest.java
index 4006c9ed..8dae42e2 100644
--- a/policy-management/src/test/java/org/onap/policy/drools/system/PolicyEngineManagerTest.java
+++ b/policy-management/src/test/java/org/onap/policy/drools/system/PolicyEngineManagerTest.java
@@ -217,7 +217,7 @@ class PolicyEngineManagerTest {
when(lockmgr.lock()).thenReturn(true);
when(lockmgr.unlock()).thenReturn(true);
- when(prov2.beforeCreateLockManager(any(), any())).thenReturn(lockmgr);
+ when(prov2.beforeCreateLockManager()).thenReturn(lockmgr);
when(prov1.getName()).thenReturn(FEATURE1);
when(prov2.getName()).thenReturn(FEATURE2);
@@ -524,7 +524,7 @@ class PolicyEngineManagerTest {
@Test
void testCreateLockManagerHaveProvider() {
// first provider throws an exception
- when(prov1.beforeCreateLockManager(any(), any())).thenThrow(new RuntimeException(EXPECTED));
+ when(prov1.beforeCreateLockManager()).thenThrow(new RuntimeException(EXPECTED));
mgr.configure(properties);
assertSame(lockmgr, mgr.getLockManager());
@@ -535,7 +535,7 @@ class PolicyEngineManagerTest {
*/
@Test
void testCreateLockManagerSimpleEx() {
- when(prov2.beforeCreateLockManager(any(), any())).thenReturn(null);
+ when(prov2.beforeCreateLockManager()).thenReturn(null);
// invalid property for SimpleLockManager
properties.setProperty(SimpleLockProperties.EXPIRE_CHECK_SEC, "abc");
@@ -550,7 +550,7 @@ class PolicyEngineManagerTest {
*/
@Test
void testCreateLockManagerSimple() {
- when(prov2.beforeCreateLockManager(any(), any())).thenReturn(null);
+ when(prov2.beforeCreateLockManager()).thenReturn(null);
mgr.configure(properties);
assertInstanceOf(SimpleLockManager.class, mgr.getLockManager());
@@ -2086,8 +2086,8 @@ class PolicyEngineManagerTest {
private class MyShutdown extends ShutdownThread {
@Override
- protected void doSleep(long sleepMs) throws InterruptedException {
- threadSleepMs = sleepMs;
+ protected void doSleep() throws InterruptedException {
+ threadSleepMs = 300L;
if (shouldInterrupt) {
throw new InterruptedException(EXPECTED);
@@ -2095,8 +2095,8 @@ class PolicyEngineManagerTest {
}
@Override
- protected void doExit(int code) {
- threadExitCode = code;
+ protected void doExit() {
+ threadExitCode = 0;
}
@Override