 cps-ri/src/main/java/org/onap/cps/spi/impl/CpsDataPersistenceServiceImpl.java                   | 35
 docs/ncmp-data-operation.rst                                                                     | 10
 docs/release-notes.rst                                                                           |  3
 integration-test/src/test/groovy/org/onap/cps/integration/performance/cps/UpdatePerfTest.groovy  | 10
 integration-test/src/test/groovy/org/onap/cps/integration/performance/cps/WritePerfTest.groovy   | 89
 5 files changed, 113 insertions(+), 34 deletions(-)
diff --git a/cps-ri/src/main/java/org/onap/cps/spi/impl/CpsDataPersistenceServiceImpl.java b/cps-ri/src/main/java/org/onap/cps/spi/impl/CpsDataPersistenceServiceImpl.java
index 50e671d247..19547bbccf 100644
--- a/cps-ri/src/main/java/org/onap/cps/spi/impl/CpsDataPersistenceServiceImpl.java
+++ b/cps-ri/src/main/java/org/onap/cps/spi/impl/CpsDataPersistenceServiceImpl.java
@@ -1,6 +1,6 @@
/*
* ============LICENSE_START=======================================================
- * Copyright (C) 2021-2023 Nordix Foundation
+ * Copyright (C) 2021-2024 Nordix Foundation
* Modifications Copyright (C) 2021 Pantheon.tech
* Modifications Copyright (C) 2020-2022 Bell Canada.
* Modifications Copyright (C) 2022-2023 TechMahindra Ltd.
@@ -38,6 +38,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
@@ -526,7 +527,7 @@ public class CpsDataPersistenceServiceImpl implements CpsDataPersistenceService
private void updateFragmentEntityAndDescendantsWithDataNode(final FragmentEntity existingFragmentEntity,
final DataNode newDataNode) {
- existingFragmentEntity.setAttributes(jsonObjectMapper.asJsonString(newDataNode.getLeaves()));
+ copyAttributesFromNewDataNode(existingFragmentEntity, newDataNode);
final Map<String, FragmentEntity> existingChildrenByXpath = existingFragmentEntity.getChildFragments().stream()
.collect(Collectors.toMap(FragmentEntity::getXpath, childFragmentEntity -> childFragmentEntity));
@@ -668,7 +669,7 @@ public class CpsDataPersistenceServiceImpl implements CpsDataPersistenceService
return convertToFragmentWithAllDescendants(parentEntity.getAnchor(), newListElement);
}
if (newListElement.getChildDataNodes().isEmpty()) {
- copyAttributesFromNewListElement(existingListElementEntity, newListElement);
+ copyAttributesFromNewDataNode(existingListElementEntity, newListElement);
existingListElementEntity.getChildFragments().clear();
} else {
updateFragmentEntityAndDescendantsWithDataNode(existingListElementEntity, newListElement);
@@ -681,12 +682,28 @@ public class CpsDataPersistenceServiceImpl implements CpsDataPersistenceService
return !existingListElementsByXpath.containsKey(replacementDataNode.getXpath());
}
- private void copyAttributesFromNewListElement(final FragmentEntity existingListElementEntity,
- final DataNode newListElement) {
- final FragmentEntity replacementFragmentEntity =
- FragmentEntity.builder().attributes(jsonObjectMapper.asJsonString(
- newListElement.getLeaves())).build();
- existingListElementEntity.setAttributes(replacementFragmentEntity.getAttributes());
+ private void copyAttributesFromNewDataNode(final FragmentEntity existingFragmentEntity,
+ final DataNode newDataNode) {
+ final String oldOrderedLeavesAsJson = getOrderedLeavesAsJson(existingFragmentEntity.getAttributes());
+ final String newOrderedLeavesAsJson = getOrderedLeavesAsJson(newDataNode.getLeaves());
+ if (!oldOrderedLeavesAsJson.equals(newOrderedLeavesAsJson)) {
+ existingFragmentEntity.setAttributes(jsonObjectMapper.asJsonString(newDataNode.getLeaves()));
+ }
+ }
+
+ private String getOrderedLeavesAsJson(final Map<String, Serializable> currentLeaves) {
+ final Map<String, Serializable> sortedLeaves = new TreeMap<>(String::compareTo);
+ sortedLeaves.putAll(currentLeaves);
+ return jsonObjectMapper.asJsonString(sortedLeaves);
+ }
+
+ private String getOrderedLeavesAsJson(final String currentLeavesAsString) {
+ if (currentLeavesAsString == null) {
+ return "{}";
+ }
+ final Map<String, Serializable> sortedLeaves = jsonObjectMapper.convertJsonString(currentLeavesAsString,
+ TreeMap.class);
+ return jsonObjectMapper.asJsonString(sortedLeaves);
}
private static Map<String, FragmentEntity> extractListElementFragmentEntitiesByXPath(
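
The Java change above replaces the list-element-only copyAttributesFromNewListElement with a general copyAttributesFromNewDataNode that only rewrites a fragment's attributes when the leaves actually differ: both the stored attributes JSON and the incoming leaves are serialised with their keys ordered (via TreeMap), so semantically identical data produces identical strings and the redundant update is skipped. Below is a minimal standalone sketch of that comparison, assuming Jackson's ObjectMapper is on the classpath; it stands in for the project's JsonObjectMapper and FragmentEntity and illustrates the technique only, it is not the production code.

    import com.fasterxml.jackson.databind.ObjectMapper;

    import java.io.Serializable;
    import java.util.Map;
    import java.util.TreeMap;

    // Standalone sketch of the "skip unchanged attributes" comparison used in the change above.
    public class OrderedLeavesComparisonSketch {

        private static final ObjectMapper objectMapper = new ObjectMapper();

        // Serialise a leaves map with its keys ordered, so key order cannot cause false differences.
        static String getOrderedLeavesAsJson(final Map<String, Serializable> currentLeaves) throws Exception {
            return objectMapper.writeValueAsString(new TreeMap<>(currentLeaves));
        }

        // Same, but starting from the JSON string already stored on the fragment entity.
        static String getOrderedLeavesAsJson(final String currentLeavesAsString) throws Exception {
            if (currentLeavesAsString == null) {
                return "{}";
            }
            return objectMapper.writeValueAsString(
                objectMapper.readValue(currentLeavesAsString, TreeMap.class));
        }

        public static void main(final String[] args) throws Exception {
            final String storedAttributes = "{\"status\":\"up\",\"name\":\"node-1\"}";
            final Map<String, Serializable> newLeaves = Map.of("name", "node-1", "status", "up");

            // Identical data in a different key order: the ordered JSON strings match,
            // so the entity update (and the resulting database write) can be skipped.
            final boolean unchanged = getOrderedLeavesAsJson(storedAttributes)
                .equals(getOrderedLeavesAsJson(newLeaves));
            System.out.println(unchanged ? "leaves unchanged - skip update" : "leaves changed - rewrite attributes");
        }
    }

Ordering the keys before serialising is what makes the comparison stable: JSON objects are unordered, so the stored string and a freshly serialised map could otherwise differ in key order even when the data is the same.
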
diff --git a/docs/ncmp-data-operation.rst b/docs/ncmp-data-operation.rst
index 617b3ed309..94d5ee9c0a 100644
--- a/docs/ncmp-data-operation.rst
+++ b/docs/ncmp-data-operation.rst
@@ -6,8 +6,8 @@
.. _cmHandleDataOperation:
-CM Handles Data Operation Endpoints
-###################################
+Data Operations Endpoint
+########################
.. toctree::
:maxdepth: 1
@@ -15,11 +15,11 @@ CM Handles Data Operation Endpoints
Introduction
============
-For data operation CM Handles we have a Post endpoints:
+For all data operations on cm handle(s), we have a post endpoint:
- /ncmp/v1/data?topic={client-topic-name} forward request to it's dmi plugin service.
-- Returns request id (UUID) with http status 202.
+- When asynchronous (with topic) operations are executed, a request id (UUID) will be returned.
Request Body
============
@@ -143,6 +143,6 @@ DMI Service 2 (POST) : `http://{dmi-host-name}:{dmi-port}/dmi/v1/data?topic=my-t
}
]
-Above examples are for illustration purpose only please refer link below for latest schema.
+Above examples are for illustration purposes only. Please refer to link below for latest schema.
:download:`Data operation event schema <schemas/data-operation-event-schema-1.0.0.json>`
\ No newline at end of file
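
The documentation change above describes the data-operations endpoint: a POST to /ncmp/v1/data?topic={client-topic-name} that is forwarded to the DMI plugin service and, for asynchronous (topic-based) requests, answered with HTTP 202 and a request id (UUID). The sketch below shows such a call with Java's built-in HttpClient; the host, port and the empty operations payload are placeholders (assumptions), since the actual request-body structure is defined in the Request Body section and the linked schema, which are not reproduced here.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class DataOperationRequestSketch {

        public static void main(final String[] args) throws Exception {
            // Host and port are assumptions; the path and topic parameter come from the documentation above.
            final String url = "http://localhost:8080/ncmp/v1/data?topic=my-topic-name";

            // Placeholder body: populate the operations array as described in the Request Body
            // section and the data operation event schema (not reproduced in this sketch).
            final String requestBody = "{\"operations\": []}";

            final HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(url))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(requestBody))
                .build();

            final HttpResponse<String> response =
                HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());

            // For asynchronous (topic-based) operations the service returns HTTP 202
            // and a request id (UUID) that correlates the events published later.
            System.out.println("Status: " + response.statusCode());
            System.out.println("Response body (contains the request id): " + response.body());
        }
    }
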
diff --git a/docs/release-notes.rst b/docs/release-notes.rst
index 7fabfc3bb3..b7f4c4fbee 100644
--- a/docs/release-notes.rst
+++ b/docs/release-notes.rst
@@ -1,6 +1,6 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. Copyright (C) 2021-2023 Nordix Foundation
+.. Copyright (C) 2021-2024 Nordix Foundation
.. DO NOT CHANGE THIS LABEL FOR RELEASE NOTES - EVEN THOUGH IT GIVES A WARNING
.. _release_notes:
@@ -43,6 +43,7 @@ Bug Fixes
Features
--------
+ - `CPS-2018 <https://jira.onap.org/browse/CPS-2018>`_ Improve performance of CPS update operations.
Version: 3.4.1
diff --git a/integration-test/src/test/groovy/org/onap/cps/integration/performance/cps/UpdatePerfTest.groovy b/integration-test/src/test/groovy/org/onap/cps/integration/performance/cps/UpdatePerfTest.groovy
index 35f65551f8..b3030b1c6b 100644
--- a/integration-test/src/test/groovy/org/onap/cps/integration/performance/cps/UpdatePerfTest.groovy
+++ b/integration-test/src/test/groovy/org/onap/cps/integration/performance/cps/UpdatePerfTest.groovy
@@ -1,6 +1,6 @@
/*
* ============LICENSE_START=======================================================
- * Copyright (C) 2023 Nordix Foundation
+ * Copyright (C) 2023-2024 Nordix Foundation
* ================================================================================
* Licensed under the Apache License, Version 2.0 (the 'License');
* you may not use this file except in compliance with the License.
@@ -80,8 +80,8 @@ class UpdatePerfTest extends CpsPerfTestBase {
where:
scenario | totalNodes | startId | changeLeaves || timeLimit | memoryLimit
'Replace 0 nodes with 100' | 100 | 1 | false || 7 | 250
- 'Replace 100 using same data' | 100 | 1 | false || 5 | 250
- 'Replace 100 with new leaf values' | 100 | 1 | true || 5 | 250
+ 'Replace 100 using same data' | 100 | 1 | false || 3 | 250
+ 'Replace 100 with new leaf values' | 100 | 1 | true || 3 | 250
'Replace 100 with 100 new nodes' | 100 | 101 | false || 12 | 300
'Replace 50 existing and 50 new' | 100 | 151 | true || 8 | 250
'Replace 100 nodes with 0' | 0 | 1 | false || 5 | 250
@@ -106,8 +106,8 @@ class UpdatePerfTest extends CpsPerfTestBase {
where:
scenario | totalNodes | startId | changeLeaves || timeLimit | memoryLimit
'Replace list of 0 with 100' | 100 | 1 | false || 7 | 250
- 'Replace list of 100 using same data' | 100 | 1 | false || 5 | 250
- 'Replace list of 100 with new leaf values' | 100 | 1 | true || 5 | 250
+ 'Replace list of 100 using same data' | 100 | 1 | false || 3 | 250
+ 'Replace list of 100 with new leaf values' | 100 | 1 | true || 3 | 250
'Replace list with 100 new nodes' | 100 | 101 | false || 12 | 300
'Replace list with 50 existing and 50 new' | 100 | 151 | true || 8 | 250
'Replace list of 100 nodes with 1' | 1 | 1 | false || 5 | 250
diff --git a/integration-test/src/test/groovy/org/onap/cps/integration/performance/cps/WritePerfTest.groovy b/integration-test/src/test/groovy/org/onap/cps/integration/performance/cps/WritePerfTest.groovy
index 2d38a0dfb6..a5669baf6a 100644
--- a/integration-test/src/test/groovy/org/onap/cps/integration/performance/cps/WritePerfTest.groovy
+++ b/integration-test/src/test/groovy/org/onap/cps/integration/performance/cps/WritePerfTest.groovy
@@ -1,6 +1,6 @@
/*
* ============LICENSE_START=======================================================
- * Copyright (C) 2023 Nordix Foundation
+ * Copyright (C) 2023-2024 Nordix Foundation
* ================================================================================
* Licensed under the Apache License, Version 2.0 (the 'License');
* you may not use this file except in compliance with the License.
@@ -25,21 +25,24 @@ import org.onap.cps.integration.performance.base.CpsPerfTestBase
class WritePerfTest extends CpsPerfTestBase {
+ static final def WRITE_TEST_ANCHOR = 'writeTestAnchor'
+
def 'Writing openroadm data has linear time.'() {
given: 'an empty anchor exists for openroadm'
- cpsAnchorService.createAnchor(CPS_PERFORMANCE_TEST_DATASPACE, LARGE_SCHEMA_SET, 'writeAnchor')
+ cpsAnchorService.createAnchor(CPS_PERFORMANCE_TEST_DATASPACE, LARGE_SCHEMA_SET, WRITE_TEST_ANCHOR)
and: 'a list of device nodes to add'
def jsonData = generateOpenRoadData(totalNodes)
when: 'device nodes are added'
resourceMeter.start()
- cpsDataService.saveData(CPS_PERFORMANCE_TEST_DATASPACE, 'writeAnchor', jsonData, OffsetDateTime.now())
+ cpsDataService.saveData(CPS_PERFORMANCE_TEST_DATASPACE, WRITE_TEST_ANCHOR, jsonData, OffsetDateTime.now())
resourceMeter.stop()
- def durationInSeconds = resourceMeter.getTotalTimeInSeconds()
then: 'the operation takes less than #expectedDuration and memory used is within limit'
- recordAndAssertResourceUsage("Writing ${totalNodes} devices", expectedDuration, durationInSeconds, memoryLimit, resourceMeter.getTotalMemoryUsageInMB())
+ recordAndAssertResourceUsage("Writing ${totalNodes} devices",
+ expectedDuration, resourceMeter.getTotalTimeInSeconds(),
+ memoryLimit, resourceMeter.getTotalMemoryUsageInMB())
cleanup:
- cpsDataService.deleteDataNodes(CPS_PERFORMANCE_TEST_DATASPACE, 'writeAnchor', OffsetDateTime.now())
- cpsAnchorService.deleteAnchor(CPS_PERFORMANCE_TEST_DATASPACE, 'writeAnchor')
+ cpsDataService.deleteDataNodes(CPS_PERFORMANCE_TEST_DATASPACE, WRITE_TEST_ANCHOR, OffsetDateTime.now())
+ cpsAnchorService.deleteAnchor(CPS_PERFORMANCE_TEST_DATASPACE, WRITE_TEST_ANCHOR)
where:
totalNodes || expectedDuration | memoryLimit
50 || 4 | 100
@@ -50,21 +53,22 @@ class WritePerfTest extends CpsPerfTestBase {
def 'Writing bookstore data has exponential time.'() {
given: 'an anchor containing a bookstore with a single category'
- cpsAnchorService.createAnchor(CPS_PERFORMANCE_TEST_DATASPACE, BOOKSTORE_SCHEMA_SET, 'writeAnchor')
+ cpsAnchorService.createAnchor(CPS_PERFORMANCE_TEST_DATASPACE, BOOKSTORE_SCHEMA_SET, WRITE_TEST_ANCHOR)
def parentNodeData = '{"bookstore": { "categories": [{ "code": 1, "name": "Test", "books" : [] }] }}'
- cpsDataService.saveData(CPS_PERFORMANCE_TEST_DATASPACE, 'writeAnchor', parentNodeData, OffsetDateTime.now())
+ cpsDataService.saveData(CPS_PERFORMANCE_TEST_DATASPACE, WRITE_TEST_ANCHOR, parentNodeData, OffsetDateTime.now())
and: 'a list of books to add'
def booksData = '{"books":[' + (1..totalBooks).collect {'{ "title": "' + it + '" }' }.join(',') + ']}'
when: 'books are added'
resourceMeter.start()
- cpsDataService.saveData(CPS_PERFORMANCE_TEST_DATASPACE, 'writeAnchor', '/bookstore/categories[@code=1]', booksData, OffsetDateTime.now())
+ cpsDataService.saveData(CPS_PERFORMANCE_TEST_DATASPACE, WRITE_TEST_ANCHOR, '/bookstore/categories[@code=1]', booksData, OffsetDateTime.now())
resourceMeter.stop()
- def durationInSeconds = resourceMeter.getTotalTimeInSeconds()
then: 'the operation takes less than #expectedDuration and memory used is within limit'
- recordAndAssertResourceUsage("Writing ${totalBooks} books", expectedDuration, durationInSeconds, memoryLimit, resourceMeter.getTotalMemoryUsageInMB())
+ recordAndAssertResourceUsage("Writing ${totalBooks} books",
+ expectedDuration, resourceMeter.getTotalTimeInSeconds(),
+ memoryLimit, resourceMeter.getTotalMemoryUsageInMB())
cleanup:
- cpsDataService.deleteDataNodes(CPS_PERFORMANCE_TEST_DATASPACE, 'writeAnchor', OffsetDateTime.now())
- cpsAnchorService.deleteAnchor(CPS_PERFORMANCE_TEST_DATASPACE, 'writeAnchor')
+ cpsDataService.deleteDataNodes(CPS_PERFORMANCE_TEST_DATASPACE, WRITE_TEST_ANCHOR, OffsetDateTime.now())
+ cpsAnchorService.deleteAnchor(CPS_PERFORMANCE_TEST_DATASPACE, WRITE_TEST_ANCHOR)
where:
totalBooks || expectedDuration | memoryLimit
800 || 1 | 50
@@ -73,4 +77,61 @@ class WritePerfTest extends CpsPerfTestBase {
6400 || 18 | 200
}
+ def 'Writing openroadm list data using saveListElements.'() {
+ given: 'an anchor and empty container node for openroadm'
+ cpsAnchorService.createAnchor(CPS_PERFORMANCE_TEST_DATASPACE, LARGE_SCHEMA_SET, WRITE_TEST_ANCHOR)
+ cpsDataService.saveData(CPS_PERFORMANCE_TEST_DATASPACE, WRITE_TEST_ANCHOR,
+ '{ "openroadm-devices": { "openroadm-device": []}}', now)
+ and: 'a list of device nodes to add'
+ def innerNode = readResourceDataFile('openroadm/innerNode.json')
+ def jsonListData = '{ "openroadm-device": [' +
+ (1..totalNodes).collect { innerNode.replace('NODE_ID_HERE', it.toString()) }.join(',') +
+ ']}'
+ when: 'device nodes are added'
+ resourceMeter.start()
+ cpsDataService.saveListElements(CPS_PERFORMANCE_TEST_DATASPACE, WRITE_TEST_ANCHOR, '/openroadm-devices', jsonListData, OffsetDateTime.now())
+ resourceMeter.stop()
+ then: 'the operation takes less than #expectedDuration and memory used is within limit'
+ recordAndAssertResourceUsage("Saving list of ${totalNodes} devices",
+ expectedDuration, resourceMeter.getTotalTimeInSeconds(),
+ memoryLimit, resourceMeter.getTotalMemoryUsageInMB())
+ cleanup:
+ cpsDataService.deleteDataNodes(CPS_PERFORMANCE_TEST_DATASPACE, WRITE_TEST_ANCHOR, OffsetDateTime.now())
+ cpsAnchorService.deleteAnchor(CPS_PERFORMANCE_TEST_DATASPACE, WRITE_TEST_ANCHOR)
+ where:
+ totalNodes || expectedDuration | memoryLimit
+ 50 || 4 | 200
+ 100 || 7 | 200
+ 200 || 14 | 250
+ 400 || 28 | 250
+ }
+
+ def 'Writing openroadm list data using saveListElementsBatch.'() {
+ given: 'an anchor and empty container node for openroadm'
+ cpsAnchorService.createAnchor(CPS_PERFORMANCE_TEST_DATASPACE, LARGE_SCHEMA_SET, WRITE_TEST_ANCHOR)
+ cpsDataService.saveData(CPS_PERFORMANCE_TEST_DATASPACE, WRITE_TEST_ANCHOR,
+ '{ "openroadm-devices": { "openroadm-device": []}}', now)
+ and: 'a list of device nodes to add'
+ def innerNode = readResourceDataFile('openroadm/innerNode.json')
+ def multipleJsonData = (1..totalNodes).collect {
+ '{ "openroadm-device": [' + innerNode.replace('NODE_ID_HERE', it.toString()) + ']}' }
+ when: 'device nodes are added'
+ resourceMeter.start()
+ cpsDataService.saveListElementsBatch(CPS_PERFORMANCE_TEST_DATASPACE, WRITE_TEST_ANCHOR, '/openroadm-devices', multipleJsonData, OffsetDateTime.now())
+ resourceMeter.stop()
+ then: 'the operation takes less than #expectedDuration and memory used is within limit'
+ recordAndAssertResourceUsage("Saving batch of ${totalNodes} lists",
+ expectedDuration, resourceMeter.getTotalTimeInSeconds(),
+ memoryLimit, resourceMeter.getTotalMemoryUsageInMB())
+ cleanup:
+ cpsDataService.deleteDataNodes(CPS_PERFORMANCE_TEST_DATASPACE, WRITE_TEST_ANCHOR, OffsetDateTime.now())
+ cpsAnchorService.deleteAnchor(CPS_PERFORMANCE_TEST_DATASPACE, WRITE_TEST_ANCHOR)
+ where:
+ totalNodes || expectedDuration | memoryLimit
+ 50 || 16 | 500
+ 100 || 32 | 500
+ 200 || 64 | 1000
+ 400 || 128 | 1250
+ }
+
}