Diffstat (limited to 'catalog-be')
-rw-r--r--  catalog-be/README.md                                                                      26
-rw-r--r--  catalog-be/README.txt                                                                     41
-rw-r--r--  catalog-be/pom.xml                                                                         2
-rw-r--r--  catalog-be/src/main/java/org/openecomp/sdc/be/components/impl/CassandraHealthCheck.java   24
4 files changed, 41 insertions, 52 deletions
diff --git a/catalog-be/README.md b/catalog-be/README.md
new file mode 100644
index 0000000000..de3bcdf69a
--- /dev/null
+++ b/catalog-be/README.md
@@ -0,0 +1,26 @@
+# sdc-be
+
+This Maven module is named `catalog-be`, but its deployed service is called [sdc-be](https://git.onap.org/oom/tree/kubernetes/sdc/components/sdc-be).
+
+## Build images
+
+You can run `mvn clean install -P docker -Dcheckstyle.skip -DskipTests` to build both the `sdc-backend-init` and `sdc-backend` images.
+
+```sh
+$ mvn clean install -P docker -Dcheckstyle.skip -DskipTests
+...
+[INFO] DOCKER> Tagging image onap/sdc-backend:latest successful!
+[INFO] DOCKER> Tagging image onap/sdc-backend:1.14-STAGING-latest successful!
+[INFO] DOCKER> Tagging image onap/sdc-backend:1.14-20250304T083746Z successful!
+[INFO] Building tar: /home/ubuntu/development/onap/sdc/sdc/catalog-be/target/docker/onap/sdc-backend-init/tmp/docker-build.tar
+...
+[INFO] DOCKER> Tagging image onap/sdc-backend-init:latest successful!
+[INFO] DOCKER> Tagging image onap/sdc-backend-init:1.14-STAGING-latest successful!
+[INFO] DOCKER> Tagging image onap/sdc-backend-init:1.14-20250304T083746Z successful!
+[INFO] ------------------------------------------------------------------------
+[INFO] BUILD SUCCESS
+[INFO] ------------------------------------------------------------------------
+[INFO] Total time: 01:54 min
+[INFO] Finished at: 2025-03-04T09:39:41+01:00
+[INFO] ------------------------------------------------------------------------
+```
diff --git a/catalog-be/README.txt b/catalog-be/README.txt
deleted file mode 100644
index 0fa113be2a..0000000000
--- a/catalog-be/README.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-#open rpm
-#install jetty
-#run installJettyBase.sh
-#copy jvm.properties to base
-#export variables
-#run startJetty.sh
-
-#Properties:
-
- STOP.PORT=[number]
- The port to use to stop the running Jetty server.
- Required along with STOP.KEY if you want to use the --stop option above.
-
- STOP.KEY=[alphanumeric]
- The passphrase defined to stop the server.
- Required along with STOP.PORT if you want to use the --stop option above.
-
- STOP.WAIT=[number]
- The time (in seconds) to wait for confirmation that the running
- Jetty server has stopped. If not specified, the stopper will wait
- indefinitely. Use in conjunction with the --stop option.
-
-
-#Upload Normative types:
-# 1. create a zip file containing the yaml
-# 2. create a json string (payloadName should be the yml file name): {
-# "payloadName":"normative-types-new-root.yml",
-# "userId":"adminid",
-# "resourceName":"tosca.nodes.Root",
-# "description":"Represents a generic software component that can be managed and run by a Compute Node Type.",
-# "resourceIconPath":"defaulticon",
-# "category":"Abstract",
-# "tags":["Root"]
-# }
-#
-#
-# 3. run curl command: curl -v -F resourceMetadata=<json string> -F resourceZip=@<zip file location> <BE host:port>/sdc2/rest/v1/catalog/upload/multipart
-# e.g.:
-# curl -v -F resourceMetadata='{"payloadName":"normative-types-new-root.yml","userId":"adminid","resourceName":"tosca.nodes.Root","description":"Represents a generic software component that can be managed and run by a Compute Node Type.","resourceIconPath":"defaulticon","category":"Abstract","tags":["Root"]}' -F resourceZip=@/var/tmp/normative-types-new-root.zip localhost:8080/sdc2/rest/v1/catalog/upload/multipart
-
-#
diff --git a/catalog-be/pom.xml b/catalog-be/pom.xml
index 812efc4328..f52ebf3202 100644
--- a/catalog-be/pom.xml
+++ b/catalog-be/pom.xml
@@ -9,7 +9,7 @@
<parent>
<groupId>org.openecomp.sdc</groupId>
<artifactId>sdc-main</artifactId>
- <version>1.13.9-SNAPSHOT</version>
+ <version>1.14.1-SNAPSHOT</version>
</parent>
<properties>
diff --git a/catalog-be/src/main/java/org/openecomp/sdc/be/components/impl/CassandraHealthCheck.java b/catalog-be/src/main/java/org/openecomp/sdc/be/components/impl/CassandraHealthCheck.java
index 6ebdb9bfab..789be722c6 100644
--- a/catalog-be/src/main/java/org/openecomp/sdc/be/components/impl/CassandraHealthCheck.java
+++ b/catalog-be/src/main/java/org/openecomp/sdc/be/components/impl/CassandraHealthCheck.java
@@ -80,20 +80,20 @@ public class CassandraHealthCheck {
sdcSchemaUtils = new SdcSchemaUtils();
//Calculate the Formula of Health Check
try {
- log.info("creating cluster for Cassandra Health Check.");
+ log.debug("creating cluster for Cassandra Health Check.");
//Create cluster from nodes in cassandra configuration
Metadata metadata = sdcSchemaUtils.getMetadata();
if (metadata == null) {
log.error("Failure get cassandra metadata.");
return;
}
- log.info("Cluster Metadata: {}", metadata);
+ log.debug("Cluster Metadata: {}", metadata);
List<KeyspaceMetadata> keyspaces = metadata.getKeyspaces();
List<Integer> replactionFactorList = new ArrayList<>();
//Collect the keyspaces Replication Factor of current localDataCenter
for (KeyspaceMetadata keyspace : keyspaces) {
if (sdcKeyspaces.contains(keyspace.getName())) {
- log.info("keyspace : {} , replication: {}", keyspace.getName(), keyspace.getReplication());
+ log.debug("keyspace : {} , replication: {}", keyspace.getName(), keyspace.getReplication());
Map<String, String> replicationOptions = keyspace.getReplication();
//In 1 site with one data center
if (replicationOptions.containsKey("replication_factor")) {
@@ -110,11 +110,11 @@ public class CassandraHealthCheck {
return;
}
int maxReplicationFactor = Collections.max(replactionFactorList);
- log.info("maxReplication Factor is: {}", maxReplicationFactor);
+ log.debug("maxReplication Factor is: {}", maxReplicationFactor);
int localQuorum = maxReplicationFactor / 2 + 1;
- log.info("localQuorum is: {}", localQuorum);
+ log.debug("localQuorum is: {}", localQuorum);
HC_FormulaNumber = maxReplicationFactor - localQuorum;
- log.info("Health Check formula : Replication Factor – Local_Quorum = {}", HC_FormulaNumber);
+ log.debug("Health Check formula : Replication Factor – Local_Quorum = {}", HC_FormulaNumber);
} catch (Exception e) {
log.error("create cassandra cluster failed with exception.", e);
}
@@ -126,8 +126,8 @@ public class CassandraHealthCheck {
return false;
}
try (final Session session = sdcSchemaUtils.connect()) {
- log.info("creating cluster for Cassandra for monitoring.");
- log.info("The cassandra session is {}", session);
+ log.debug("creating cluster for Cassandra for monitoring.");
+ log.debug("The cassandra session is {}", session);
if (session == null) {
log.error("Failed to connect to cassandra ");
return false;
@@ -137,10 +137,14 @@ public class CassandraHealthCheck {
log.error("Failure get cassandra metadata.");
return false;
}
- log.info("The number of cassandra nodes is:{}", metadata.getAllHosts().size());
+ log.debug("The number of cassandra nodes is:{}", metadata.getAllHosts().size());
//Count the number of data center nodes that are down
Long downHostsNumber = metadata.getAllHosts().stream().filter(x -> x.getDatacenter().equals(localDataCenterName) && !x.isUp()).count();
- log.info("The cassandra down nodes number is {}", downHostsNumber);
+            if (downHostsNumber > 0) {
+ log.warn("{} cassandra nodes are down", downHostsNumber);
+ } else {
+ log.debug("The cassandra down nodes number is {}", downHostsNumber);
+ }
return HC_FormulaNumber >= downHostsNumber;
} catch (Exception e) {
log.error("create cassandra cluster failed with exception.", e);