4 files changed, 120 insertions, 49 deletions
diff --git a/docker-compose/config/endurance.env b/docker-compose/config/endurance.env
new file mode 100644
index 0000000000..0ca1a1149a
--- /dev/null
+++ b/docker-compose/config/endurance.env
@@ -0,0 +1,35 @@
+DB_CONTAINER_NAME=endurance-dbpostgresql
+DB_PORT=5433
+
+NGINX_CONTAINER_NAME=endurance-nginx-loadbalancer
+CPS_CORE_PORT=8884
+
+ZOOKEEPER_CONTAINER_NAME=endurance-zookeeper
+ZOOKEEPER_PORT=2182
+
+KAFKA_CONTAINER_NAME=endurance-kafka
+KAFKA_PORT=9093
+
+NCMP_DMI_PLUGIN_CONTAINER_NAME=endurance-ncmp-dmi-plugin
+DMI_PORT=8786
+
+NCMP_DMI_PLUGIN_DEMO_AND_CSIT_STUB_CONTAINER_NAME=endurance-ncmp-dmi-plugin-demo-and-csit-stub
+DMI_DEMO_STUB_PORT=8787
+
+POLICY_EXECUTOR_STUB_CONTAINER_NAME=endurance-policy-executor-stub
+POLICY_EXECUTOR_STUB_PORT=8788
+
+PROMETHEUS_CONTAINER_NAME=endurance-prometheus
+PROMETHEUS_PORT=9091
+
+GRAFANA_CONTAINER_NAME=endurance-grafana
+GRAFANA_PORT=3001
+
+KAFKA_UI_CONTAINER_NAME=endurance-kafka-ui
+KAFKA_UI_PORT=8090
+
+JAEGER_SERVICE_CONTAINER_NAME=endurance-jaeger-service
+JAEGER_SERVICE_PORT=16687
+
+CPS_NCMP_CACHES_CLUSTER_NAME=endurance-cps-and-ncmp-common-cache-cluster
+CPS_NCMP_INSTANCE_CONFIG_NAME=endurance-cps-and-ncmp-hazelcast-instance-config
\ No newline at end of file
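Note: every endurance value above is deliberately offset from the default used in docker-compose.yml (DB_PORT 5433 vs 5432, CPS_CORE_PORT 8884 vs 8883, and so on), so an endurance stack can run next to the default stack on one host. Below is a minimal sketch of the ${VAR:-default} resolution rule these files rely on; it is not Compose's actual implementation, and the class and method names are made up for illustration.

import java.util.Map;

public class EnvDefaultSketch {

    // Compose-style ${NAME:-default}: the env-file value wins when it is set and
    // non-empty, otherwise the default after ':-' applies.
    static String resolve(final Map<String, String> envFile, final String name, final String defaultValue) {
        final String value = envFile.get(name);
        return (value == null || value.isEmpty()) ? defaultValue : value;
    }

    public static void main(final String[] args) {
        // With endurance.env loaded: ${DB_PORT:-5432} resolves to 5433
        System.out.println(resolve(Map.of("DB_PORT", "5433"), "DB_PORT", "5432"));
        // Without an env file: ${DB_PORT:-5432} falls back to 5432
        System.out.println(resolve(Map.of(), "DB_PORT", "5432"));
    }
}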
diff --git a/docker-compose/docker-compose.yml b/docker-compose/docker-compose.yml
index b854064ca5..feb58d849d 100644
--- a/docker-compose/docker-compose.yml
+++ b/docker-compose/docker-compose.yml
@@ -24,9 +24,11 @@ services:
   ### docker-compose --profile dmi-stub --profile policy-executor-stub up -d -> run CPS with stubbed dmi-plugin and policy executor stub (for policy executor service testing make POLICY_SERVICE_ENABLED "true")
   ### to disable notifications make notification.enabled to false & comment out kafka/zookeeper services ###
   ### DEBUG: Look for '### DEBUG' comments to enable CPS-NCMP debugging
+  ### docker-compose --profile dmi-stub --project-name endurance --env-file config/endurance.env up -d -> run CPS with stubbed dmi-plugin for endurance testing
+  ### docker-compose --profile dmi-stub --project-name endurance down --volumes

   dbpostgresql:
-    container_name: dbpostgresql
+    container_name: ${DB_CONTAINER_NAME:-dbpostgresql}
     image: postgres:14.1-alpine
     ports:
       - ${DB_PORT:-5432}:5432
@@ -80,7 +82,7 @@ services:
       ### DEBUG
       - ${CPS_CORE_DEBUG_PORT:-5005}:5005

   nginx:
-    container_name: nginx-loadbalancer
+    container_name: ${NGINX_CONTAINER_NAME:-nginx-loadbalancer}
     image: nginx:latest
     ports:
       - ${CPS_CORE_PORT:-8883}:80
@@ -93,17 +95,17 @@ services:
   ### if kafka is not required comment out zookeeper and kafka ###
   zookeeper:
     image: confluentinc/cp-zookeeper:6.2.1
-    container_name: zookeeper
+    container_name: ${ZOOKEEPER_CONTAINER_NAME:-zookeeper}
     ports:
-      - '2181:2181'
+      - ${ZOOKEEPER_PORT:-2181}:2181
     environment:
       ZOOKEEPER_CLIENT_PORT: 2181

   kafka:
     image: confluentinc/cp-kafka:6.2.1
-    container_name: kafka
+    container_name: ${KAFKA_CONTAINER_NAME:-kafka}
     ports:
-      - '9092:9092'
+      - ${KAFKA_PORT:-9092}:9092
     depends_on:
       - zookeeper
     environment:
@@ -114,7 +116,7 @@ services:
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1

   ncmp-dmi-plugin:
-    container_name: ncmp-dmi-plugin
+    container_name: ${NCMP_DMI_PLUGIN_CONTAINER_NAME:-ncmp-dmi-plugin}
     image: ${DOCKER_REPO:-nexus3.onap.org:10003}/onap/ncmp-dmi-plugin:${DMI_VERSION:-1.6.0-SNAPSHOT-latest}
     ports:
       - ${DMI_PORT:-8783}:8080
@@ -139,7 +141,7 @@ services:
       - dmi-service

   ncmp-dmi-plugin-demo-and-csit-stub:
-    container_name: ncmp-dmi-plugin-demo-and-csit-stub
+    container_name: ${NCMP_DMI_PLUGIN_DEMO_AND_CSIT_STUB_CONTAINER_NAME:-ncmp-dmi-plugin-demo-and-csit-stub}
     image: ${DOCKER_REPO:-nexus3.onap.org:10003}/onap/dmi-plugin-demo-and-csit-stub:${DMI_DEMO_STUB_VERSION:-latest}
     ports:
       - ${DMI_DEMO_STUB_PORT:-8784}:8092
@@ -158,19 +160,19 @@ services:
       - dmi-service

   policy-executor-stub:
-    container_name: policy-executor-stub
+    container_name: ${POLICY_EXECUTOR_STUB_CONTAINER_NAME:-policy-executor-stub}
     image: ${DOCKER_REPO:-nexus3.onap.org:10003}/onap/policy-executor-stub:latest
     ports:
-      - 8785:8093
+      - ${POLICY_EXECUTOR_STUB_PORT:-8785}:8093
     restart: unless-stopped
     profiles:
       - policy-executor-stub

   prometheus:
-    container_name: prometheus
+    container_name: ${PROMETHEUS_CONTAINER_NAME:-prometheus}
     image: prom/prometheus:latest
     ports:
-      - 9090:9090
+      - ${PROMETHEUS_PORT:-9090}:9090
     restart: always
     volumes:
       - ./config/prometheus.yml:/etc/prometheus/prometheus.yml
@@ -180,12 +182,12 @@ services:
   grafana:
     image: grafana/grafana-oss:latest
     user: ""
-    container_name: grafana
+    container_name: ${GRAFANA_CONTAINER_NAME:-grafana}
     depends_on:
       prometheus:
         condition: service_started
     ports:
-      - 3000:3000
+      - ${GRAFANA_PORT:-3000}:3000
     volumes:
       - ./config/grafana/provisioning/:/etc/grafana/provisioning/
       - ./config/grafana/jvm-micrometer-dashboard.json:/var/lib/grafana/dashboards/jvm-micrometer-dashboard.json
@@ -197,10 +199,10 @@ services:
       - monitoring

   kafka-ui:
-    container_name: kafka-ui
+    container_name: ${KAFKA_UI_CONTAINER_NAME:-kafka-ui}
     image: provectuslabs/kafka-ui:latest
     ports:
-      - 8089:8080
+      - ${KAFKA_UI_PORT:-8089}:8080
     environment:
       DYNAMIC_CONFIG_ENABLED: 'true'
       KAFKA_CLUSTERS_0_NAME: 'cps-kafka-local'
@@ -209,10 +211,10 @@ services:
      - monitoring

   jaeger-service:
-    container_name: jaeger-service
+    container_name: ${JAEGER_SERVICE_CONTAINER_NAME:-jaeger-service}
     image: jaegertracing/all-in-one:latest
     ports:
-      - 16686:16686
+      - ${JAEGER_SERVICE_PORT:-16686}:16686
     restart: unless-stopped
     profiles:
       - tracing
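To try the endurance profile locally, the two command lines documented in the compose header comments above can be used directly. The wrapper below is only a hypothetical convenience runner around those exact commands; it is not part of this change.

import java.io.IOException;
import java.util.List;

public class EnduranceStackRunnerSketch {

    public static void main(final String[] args) throws IOException, InterruptedException {
        // Bring the endurance stack up (command taken verbatim from the compose header comment).
        run(List.of("docker-compose", "--profile", "dmi-stub", "--project-name", "endurance",
                "--env-file", "config/endurance.env", "up", "-d"));
        // ... run endurance tests against the remapped ports ...
        // Tear it down again, removing volumes.
        run(List.of("docker-compose", "--profile", "dmi-stub", "--project-name", "endurance",
                "down", "--volumes"));
    }

    private static void run(final List<String> command) throws IOException, InterruptedException {
        new ProcessBuilder(command).inheritIO().start().waitFor();
    }
}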
diff --git a/integration-test/src/test/groovy/org/onap/cps/integration/functional/ncmp/CmHandleCreateSpec.groovy b/integration-test/src/test/groovy/org/onap/cps/integration/functional/ncmp/CmHandleCreateSpec.groovy
index 19b10a3c79..00ce38fa2d 100644
--- a/integration-test/src/test/groovy/org/onap/cps/integration/functional/ncmp/CmHandleCreateSpec.groovy
+++ b/integration-test/src/test/groovy/org/onap/cps/integration/functional/ncmp/CmHandleCreateSpec.groovy
@@ -20,7 +20,7 @@
 package org.onap.cps.integration.functional.ncmp

-import org.apache.kafka.common.TopicPartition
+import org.apache.kafka.clients.consumer.KafkaConsumer
 import org.apache.kafka.common.serialization.StringDeserializer
 import org.onap.cps.integration.KafkaTestContainer
 import org.onap.cps.integration.base.CpsIntegrationSpecBase
@@ -32,7 +32,6 @@
 import org.onap.cps.ncmp.api.inventory.models.NcmpServiceCmHandle
 import org.onap.cps.ncmp.events.lcm.v1.LcmEvent
 import org.onap.cps.ncmp.impl.inventory.models.CmHandleState
 import org.onap.cps.ncmp.impl.inventory.models.LockReasonCategory
-import spock.lang.Ignore
 import spock.util.concurrent.PollingConditions

 import java.time.Duration
@@ -42,21 +41,23 @@ class CmHandleCreateSpec extends CpsIntegrationSpecBase {

     NetworkCmProxyInventoryFacade objectUnderTest

     def uniqueId = 'ch-unique-id-for-create-test'
-    def kafkaConsumer = KafkaTestContainer.getConsumer('test-group', StringDeserializer.class)
+    static KafkaConsumer kafkaConsumer

     def setup() {
         objectUnderTest = networkCmProxyInventoryFacade
+        subscribeAndClearPreviousMessages()
     }

-    @Ignore
-    def 'CM Handle registration is successful.'() {
+    def cleanupSpec() {
+        kafkaConsumer.unsubscribe()
+        kafkaConsumer.close()
+    }
+
+    def 'CM Handle registration.'() {
         given: 'DMI will return modules when requested'
             dmiDispatcher1.moduleNamesPerCmHandleId['ch-1'] = ['M1', 'M2']
             dmiDispatcher1.moduleNamesPerCmHandleId[uniqueId] = ['M1', 'M2']

-        and: 'consumer subscribed to topic'
-            kafkaConsumer.subscribe(['ncmp-events'])
-
         when: 'a CM-handle is registered for creation'
             def cmHandleToCreate = new NcmpServiceCmHandle(cmHandleId: uniqueId)
             def dmiPluginRegistration = new DmiPluginRegistration(dmiPlugin: DMI1_URL, createdCmHandles: [cmHandleToCreate])
@@ -68,32 +69,33 @@ class CmHandleCreateSpec extends CpsIntegrationSpecBase {
         and: 'CM-handle is initially in ADVISED state'
             assert CmHandleState.ADVISED == objectUnderTest.getCmHandleCompositeState(uniqueId).cmHandleState

-        and: 'the module sync watchdog is triggered'
+        then: 'the module sync watchdog is triggered'
             moduleSyncWatchdog.moduleSyncAdvisedCmHandles()

-        and: 'CM-handle goes to READY state after module sync'
+        then: 'CM-handle goes to READY state after module sync'
             new PollingConditions().within(MODULE_SYNC_WAIT_TIME_IN_SECONDS, () -> {
                 assert CmHandleState.READY == objectUnderTest.getCmHandleCompositeState(uniqueId).cmHandleState
             })

-        and: 'the messages is polled'
-            def message = kafkaConsumer.poll(Duration.ofMillis(10000))
-            def records = message.records(new TopicPartition('ncmp-events', 0))
-
-        and: 'the newest lcm event notification is received with READY state'
-            def notificationMessage = jsonObjectMapper.convertJsonString(records.last().value().toString(), LcmEvent)
-            /*TODO (Toine) This test was failing intermittently (when running as part of suite).
-              I suspect that it often gave false positives as the message being assert here was any random message created by previous tests
-              By checking the cm-handle and using an unique cm-handle in this test this flaw became obvious.
-              I have now ignored this test as it is out of scope of this commit to fix it.
-              Created: https://lf-onap.atlassian.net/browse/CPS-2468 to fix this instead
-            */
-            assert notificationMessage.event.cmHandleId == uniqueId
-            assert notificationMessage.event.newValues.cmHandleState.value() == 'READY'
-
         and: 'the CM-handle has expected modules'
             assert ['M1', 'M2'] == objectUnderTest.getYangResourcesModuleReferences(uniqueId).moduleName.sort()

+        then: 'get the latest messages'
+            def consumerRecords = getLatestConsumerRecords()
+
+        and: 'both converted messages are for the correct cm handle'
+            def notificationMessages = []
+            for (def consumerRecord : consumerRecords) {
+                notificationMessages.add(jsonObjectMapper.convertJsonString(consumerRecord.value().toString(), LcmEvent))
+            }
+            assert notificationMessages.event.cmHandleId == [ uniqueId, uniqueId ]
+
+        and: 'the oldest event is about the update to ADVISED state'
+            notificationMessages[0].event.newValues.cmHandleState.value() == 'ADVISED'
+
+        and: 'the next event is about update to READY state'
+            notificationMessages[1].event.newValues.cmHandleState.value() == 'READY'
+
         cleanup: 'deregister CM handle'
             deregisterCmHandle(DMI1_URL, uniqueId)
     }
@@ -224,4 +226,23 @@ class CmHandleCreateSpec extends CpsIntegrationSpecBase {
         cleanup: 'deregister CM handles'
             deregisterCmHandles(DMI1_URL, ['ch-1', 'ch-2'])
     }
+
+    def subscribeAndClearPreviousMessages() {
+        kafkaConsumer = KafkaTestContainer.getConsumer('test-group', StringDeserializer.class)
+        kafkaConsumer.subscribe(['ncmp-events'])
+        kafkaConsumer.poll(Duration.ofMillis(500))
+    }
+
+    def getLatestConsumerRecords() {
+        def consumerRecords = []
+        def retryAttempts = 10
+        while (consumerRecords.size() < 2) {
+            retryAttempts--
+            consumerRecords.addAll(kafkaConsumer.poll(Duration.ofMillis(100)))
+            if (retryAttempts == 0)
+                break
+        }
+        consumerRecords
+    }
+
 }
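The reworked test depends on two consumer-side conventions: setup() drains whatever is already on the topic, and the assertion phase polls in short bursts until the expected two LCM events arrive. Below is a self-contained sketch of that drain-then-retry pattern; the broker address, topic, and group id are placeholders for illustration.

import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class DrainThenRetryPollSketch {

    public static void main(final String[] args) {
        final Map<String, Object> config = new HashMap<>();
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        config.put(ConsumerConfig.GROUP_ID_CONFIG, "test-group");
        config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // With 'latest', the consumer only sees records produced after it gets its
        // partition assignment, so earlier tests' events are never replayed.
        config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(config)) {
            consumer.subscribe(List.of("ncmp-events"));
            // First poll triggers the partition assignment and drains anything buffered.
            consumer.poll(Duration.ofMillis(500));

            // ... the code under test produces its events here ...

            // Poll in short bursts until the expected record count arrives or retries run out.
            final List<ConsumerRecord<String, String>> records = new ArrayList<>();
            int retryAttempts = 10;
            while (records.size() < 2 && retryAttempts-- > 0) {
                consumer.poll(Duration.ofMillis(100)).forEach(records::add);
            }
            records.forEach(record -> System.out.println(record.value()));
        }
    }
}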
diff --git a/integration-test/src/test/java/org/onap/cps/integration/KafkaTestContainer.java b/integration-test/src/test/java/org/onap/cps/integration/KafkaTestContainer.java
index d41f752912..ff4aec4175 100644
--- a/integration-test/src/test/java/org/onap/cps/integration/KafkaTestContainer.java
+++ b/integration-test/src/test/java/org/onap/cps/integration/KafkaTestContainer.java
@@ -21,6 +21,7 @@ package org.onap.cps.integration;

 import java.util.HashMap;
 import java.util.Map;
+import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.common.serialization.StringDeserializer;
@@ -33,11 +34,12 @@ import org.testcontainers.utility.DockerImageName;
  * This ensures only one instance of Kafka container across the integration tests.
  * Avoid unnecessary resource and time consumption.
  */
+@Slf4j
 public class KafkaTestContainer extends KafkaContainer {

     private static final String IMAGE_NAME_AND_VERSION = "registry.nordix.org/onaptest/confluentinc/cp-kafka:6.2.1";

-    private static KafkaTestContainer kafkaTestContainer;
+    private static volatile KafkaTestContainer kafkaTestContainer;

     private KafkaTestContainer() {
         super(DockerImageName.parse(IMAGE_NAME_AND_VERSION).asCompatibleSubstituteFor("confluentinc/cp-kafka"));
@@ -51,8 +53,15 @@ public class KafkaTestContainer extends KafkaContainer {
      */
     public static KafkaTestContainer getInstance() {
         if (kafkaTestContainer == null) {
-            kafkaTestContainer = new KafkaTestContainer();
-            Runtime.getRuntime().addShutdownHook(new Thread(kafkaTestContainer::close));
+            synchronized (KafkaTestContainer.class) {
+                if (kafkaTestContainer == null) {
+                    kafkaTestContainer = new KafkaTestContainer();
+                    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
+                        log.info("Shutting down KafkaTestContainer...");
+                        kafkaTestContainer.stop();
+                    }));
+                }
+            }
         }
         return kafkaTestContainer;
     }
@@ -63,8 +72,11 @@ public class KafkaTestContainer extends KafkaContainer {

     @Override
     public void start() {
-        super.start();
-        System.setProperty("spring.kafka.properties.bootstrap.servers", kafkaTestContainer.getBootstrapServers());
+        if (!isRunning()) {
+            super.start();
+            System.setProperty("spring.kafka.properties.bootstrap.servers", getBootstrapServers());
+            log.info("KafkaTestContainer started at {}", getBootstrapServers());
+        }
     }

     @Override
@@ -78,8 +90,9 @@ public class KafkaTestContainer extends KafkaContainer {
         configProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaTestContainer.getBootstrapServers());
         configProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
         configProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
-        configProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+        configProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
         configProps.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
+        configProps.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, Integer.MAX_VALUE);
         return configProps;
     }
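The getInstance() change above is the classic double-checked locking idiom, and it is only safe because the kafkaTestContainer field was also made volatile: volatile guarantees other threads never observe a non-null reference to a partially constructed instance. A stripped-down sketch of the idiom (class name and log message are illustrative):

public final class LazySingletonSketch {

    // 'volatile' publishes the fully constructed instance to all threads; without it,
    // a second thread could see a non-null reference to a half-initialised object.
    private static volatile LazySingletonSketch instance;

    private LazySingletonSketch() {
    }

    public static LazySingletonSketch getInstance() {
        if (instance == null) {                          // fast path: no lock once initialised
            synchronized (LazySingletonSketch.class) {
                if (instance == null) {                  // re-check under the lock
                    instance = new LazySingletonSketch();
                    Runtime.getRuntime().addShutdownHook(
                            new Thread(() -> System.out.println("Shutting down...")));
                }
            }
        }
        return instance;
    }
}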